From aa9e79e56b3a6083f9e254a6f39c917bf8394738 Mon Sep 17 00:00:00 2001
From: Leon Weninger <18707626+weningerleon@users.noreply.github.com>
Date: Thu, 26 Jul 2018 11:39:23 +0200
Subject: [PATCH 01/21] Change Epi_Reg

Add segmentation output
---
 nipype/interfaces/fsl/epi.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py
index e45a94ed2c..f6e9c9c3fa 100644
--- a/nipype/interfaces/fsl/epi.py
+++ b/nipype/interfaces/fsl/epi.py
@@ -1021,6 +1021,8 @@ def _list_outputs(self):
             os.getcwd(), self.inputs.out_base + '_fast_wmedge.nii.gz')
         outputs['wmseg'] = os.path.join(
             os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz')
+        outputs['seg'] = os.path.join(
+            os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz')
         return outputs

From ec956b7da9f8df92caca96a0ec47af142b26dfeb Mon Sep 17 00:00:00 2001
From: Leon Weninger <18707626+weningerleon@users.noreply.github.com>
Date: Thu, 26 Jul 2018 11:42:15 +0200
Subject: [PATCH 02/21] Change topup default

Set defaults to False, in order not to override the config file settings
---
 nipype/interfaces/fsl/epi.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py
index f6e9c9c3fa..0d1b36e8f9 100644
--- a/nipype/interfaces/fsl/epi.py
+++ b/nipype/interfaces/fsl/epi.py
@@ -199,16 +199,16 @@ class TOPUPInputSpec(FSLCommandInputSpec):
     # TODO: the following traits admit values separated by commas, one value
     # per registration level inside topup.
     warp_res = traits.Float(
-        10.0, usedefault=True,
+        10.0, usedefault=False,
         argstr='--warpres=%f',
         desc=('(approximate) resolution (in mm) of warp '
               'basis for the different sub-sampling levels'
               '.'))
-    subsamp = traits.Int(1, usedefault=True,
+    subsamp = traits.Int(1, usedefault=False,
                          argstr='--subsamp=%d', desc='sub-sampling scheme')
     fwhm = traits.Float(
         8.0,
-        usedefault=True,
+        usedefault=False,
         argstr='--fwhm=%f',
         desc='FWHM (in mm) of gaussian smoothing kernel')
     config = traits.String(
         'b02b0.cnf',
         argstr='--config=%s',
         usedefault=True,
@@ -218,10 +218,10 @@ class TOPUPInputSpec(FSLCommandInputSpec):
         desc=('Name of config file specifying command line '
               'arguments'))
     max_iter = traits.Int(
-        5, usedefault=True,
+        5, usedefault=False,
         argstr='--miter=%d', desc='max # of non-linear iterations')
     reg_lambda = traits.Float(
-        1.0, usedefault=True,
+        1.0, usedefault=False,
         argstr='--miter=%0.f',
         desc=('lambda weighting value of the '
               'regularisation term'))
@@ -259,7 +259,7 @@ class TOPUPInputSpec(FSLCommandInputSpec):
         desc=('Minimisation method 0=Levenberg-Marquardt, '
               '1=Scaled Conjugate Gradient'))
     splineorder = traits.Int(
-        3, usedefault=True,
+        3, usedefault=False,
         argstr='--splineorder=%d',
         desc=('order of spline, 2->Qadratic spline, '
               '3->Cubic spline'))

From 849b6f1cf6b544472bae409ce90c49af21014005 Mon Sep 17 00:00:00 2001
From: Leon Weninger
Date: Thu, 26 Jul 2018 13:59:43 +0200
Subject: [PATCH 03/21] Change Epi_Reg

---
 [diffstat truncated: the stat section lists the entire nipype source tree
 (nipype/algorithms, nipype/caching, nipype/external, the interfaces packages
 for AFNI, ANTs, Camino, FreeSurfer, FSL, MINC, MIPAV, MRtrix, niftyfit,
 niftyreg, niftyseg, SEM tools, Slicer, SPM and others, the pipeline engine
 and plugins, scripts, sphinxext, and nipype/testing/data, including the
 auto-generated test_auto_*.py files); the listing breaks off mid-entry at
 "nipype/testing/data/ds003_sub-01_mc.nii.gz | Bin 0"]
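
Usage sketch for [PATCH 01/21] (illustrative only, not part of the patch
series): how the new 'seg' entry would surface to users, assuming the
matching File trait is added to EpiRegOutputSpec elsewhere in the series;
the input file names are hypothetical placeholders.

    from nipype.interfaces.fsl import EpiReg

    epireg = EpiReg()
    epireg.inputs.epi = 'epi.nii.gz'            # distorted EPI volume (hypothetical path)
    epireg.inputs.t1_head = 'T1.nii.gz'         # whole-head structural image
    epireg.inputs.t1_brain = 'T1_brain.nii.gz'  # brain-extracted structural image
    epireg.inputs.out_base = 'epi2struct'       # basename for all epi_reg outputs

    res = epireg.run()
    # The patch exposes the full FAST segmentation alongside the existing
    # white-matter outputs, following the same <out_base> naming scheme:
    print(res.outputs.seg)    # <cwd>/epi2struct_fast_seg.nii.gz
    print(res.outputs.wmseg)  # <cwd>/epi2struct_fast_wmseg.nii.gz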
nipype/algorithms/tests/test_TSNR.py | 130 + nipype/algorithms/tests/test_auto_ACompCor.py | 46 + .../tests/test_auto_ActivationCount.py | 26 + .../tests/test_auto_AddCSVColumn.py | 24 + .../algorithms/tests/test_auto_AddCSVRow.py | 22 + nipype/algorithms/tests/test_auto_AddNoise.py | 32 + .../tests/test_auto_ArtifactDetect.py | 61 + .../tests/test_auto_CalculateMedian.py | 23 + .../test_auto_CalculateNormalizedMoments.py | 22 + .../tests/test_auto_ComputeDVARS.py | 44 + .../tests/test_auto_ComputeMeshWarp.py | 30 + .../algorithms/tests/test_auto_CreateNifti.py | 23 + nipype/algorithms/tests/test_auto_Distance.py | 29 + .../tests/test_auto_FramewiseDisplacement.py | 34 + .../tests/test_auto_FuzzyOverlap.py | 30 + nipype/algorithms/tests/test_auto_Gunzip.py | 19 + nipype/algorithms/tests/test_auto_ICC.py | 26 + .../algorithms/tests/test_auto_Matlab2CSV.py | 22 + .../tests/test_auto_MergeCSVFiles.py | 27 + .../algorithms/tests/test_auto_MergeROIs.py | 23 + .../tests/test_auto_MeshWarpMaths.py | 32 + .../tests/test_auto_ModifyAffine.py | 22 + .../tests/test_auto_NonSteadyStateDetector.py | 19 + .../test_auto_NormalizeProbabilityMapSet.py | 22 + .../algorithms/tests/test_auto_P2PDistance.py | 30 + .../algorithms/tests/test_auto_PickAtlas.py | 25 + .../algorithms/tests/test_auto_Similarity.py | 25 + .../tests/test_auto_SimpleThreshold.py | 22 + .../tests/test_auto_SpecifyModel.py | 38 + .../tests/test_auto_SpecifySPMModel.py | 40 + .../tests/test_auto_SpecifySparseModel.py | 50 + .../algorithms/tests/test_auto_SplitROIs.py | 27 + .../tests/test_auto_StimulusCorrelation.py | 24 + nipype/algorithms/tests/test_auto_TCompCor.py | 48 + .../tests/test_auto_TVTKBaseInterface.py | 12 + .../algorithms/tests/test_auto_WarpPoints.py | 32 + nipype/algorithms/tests/test_confounds.py | 77 + nipype/algorithms/tests/test_icc_anova.py | 19 + nipype/algorithms/tests/test_mesh_ops.py | 85 + nipype/algorithms/tests/test_metrics.py | 58 + nipype/algorithms/tests/test_misc.py | 48 + nipype/algorithms/tests/test_modelgen.py | 263 + nipype/algorithms/tests/test_moments.py | 137 + .../algorithms/tests/test_normalize_tpms.py | 53 + nipype/algorithms/tests/test_rapidart.py | 95 + nipype/algorithms/tests/test_splitmerge.py | 32 + nipype/algorithms/tests/test_stats.py | 45 + nipype/caching/__init__.py | 2 + nipype/caching/memory.py | 300 + nipype/caching/tests/__init__.py | 1 + nipype/caching/tests/test_memory.py | 41 + nipype/conftest.py | 21 + nipype/external/__init__.py | 1 + nipype/external/cloghandler.py | 342 + nipype/external/d3.js | 9255 ++ nipype/external/due.py | 72 + nipype/external/fsl_imglob.py | 149 + nipype/external/portalocker.py | 145 + nipype/info.py | 183 + nipype/interfaces/__init__.py | 14 + nipype/interfaces/afni/__init__.py | 24 + nipype/interfaces/afni/base.py | 303 + nipype/interfaces/afni/model.py | 666 + nipype/interfaces/afni/preprocess.py | 3724 + nipype/interfaces/afni/svm.py | 168 + nipype/interfaces/afni/tests/__init__.py | 3 + .../afni/tests/test_auto_ABoverlap.py | 49 + .../afni/tests/test_auto_AFNICommand.py | 28 + .../afni/tests/test_auto_AFNICommandBase.py | 18 + .../afni/tests/test_auto_AFNIPythonCommand.py | 28 + .../afni/tests/test_auto_AFNItoNIFTI.py | 52 + .../afni/tests/test_auto_AlignEpiAnatPy.py | 68 + .../afni/tests/test_auto_Allineate.py | 122 + .../afni/tests/test_auto_AutoTLRC.py | 36 + .../afni/tests/test_auto_AutoTcorrelate.py | 52 + .../afni/tests/test_auto_Autobox.py | 50 + .../afni/tests/test_auto_Automask.py | 52 + .../afni/tests/test_auto_Axialize.py | 55 + 
.../afni/tests/test_auto_Bandpass.py | 68 + .../afni/tests/test_auto_BlurInMask.py | 55 + .../afni/tests/test_auto_BlurToFWHM.py | 44 + .../afni/tests/test_auto_BrickStat.py | 44 + .../interfaces/afni/tests/test_auto_Bucket.py | 39 + .../interfaces/afni/tests/test_auto_Calc.py | 58 + nipype/interfaces/afni/tests/test_auto_Cat.py | 75 + .../afni/tests/test_auto_CatMatvec.py | 55 + .../afni/tests/test_auto_CenterMass.py | 49 + .../afni/tests/test_auto_ClipLevel.py | 44 + .../afni/tests/test_auto_ConvertDset.py | 45 + .../interfaces/afni/tests/test_auto_Copy.py | 43 + .../afni/tests/test_auto_Deconvolve.py | 118 + .../afni/tests/test_auto_DegreeCentrality.py | 51 + .../afni/tests/test_auto_Despike.py | 41 + .../afni/tests/test_auto_Detrend.py | 41 + nipype/interfaces/afni/tests/test_auto_Dot.py | 50 + nipype/interfaces/afni/tests/test_auto_ECM.py | 54 + .../interfaces/afni/tests/test_auto_Edge3.py | 58 + .../interfaces/afni/tests/test_auto_Eval.py | 58 + .../interfaces/afni/tests/test_auto_FWHMx.py | 84 + nipype/interfaces/afni/tests/test_auto_Fim.py | 54 + .../afni/tests/test_auto_Fourier.py | 50 + .../interfaces/afni/tests/test_auto_GCOR.py | 37 + .../interfaces/afni/tests/test_auto_Hist.py | 56 + .../interfaces/afni/tests/test_auto_LFCD.py | 46 + .../afni/tests/test_auto_LocalBistat.py | 64 + .../afni/tests/test_auto_MaskTool.py | 57 + .../afni/tests/test_auto_Maskave.py | 51 + .../interfaces/afni/tests/test_auto_Means.py | 53 + .../interfaces/afni/tests/test_auto_Merge.py | 46 + .../interfaces/afni/tests/test_auto_Notes.py | 48 + .../afni/tests/test_auto_NwarpAdjust.py | 42 + .../afni/tests/test_auto_NwarpApply.py | 54 + .../afni/tests/test_auto_NwarpCat.py | 48 + .../afni/tests/test_auto_OneDToolPy.py | 49 + .../afni/tests/test_auto_OutlierCount.py | 77 + .../afni/tests/test_auto_QualityIndex.py | 64 + .../interfaces/afni/tests/test_auto_Qwarp.py | 168 + .../afni/tests/test_auto_QwarpPlusMinus.py | 45 + .../afni/tests/test_auto_ROIStats.py | 42 + .../interfaces/afni/tests/test_auto_Refit.py | 47 + .../afni/tests/test_auto_Remlfit.py | 109 + .../afni/tests/test_auto_Resample.py | 45 + .../afni/tests/test_auto_Retroicor.py | 68 + .../afni/tests/test_auto_SVMTest.py | 48 + .../afni/tests/test_auto_SVMTrain.py | 73 + nipype/interfaces/afni/tests/test_auto_Seg.py | 45 + .../afni/tests/test_auto_SkullStrip.py | 41 + .../afni/tests/test_auto_Synthesize.py | 51 + .../interfaces/afni/tests/test_auto_TCat.py | 46 + .../afni/tests/test_auto_TCatSubBrick.py | 44 + .../afni/tests/test_auto_TCorr1D.py | 67 + .../afni/tests/test_auto_TCorrMap.py | 143 + .../afni/tests/test_auto_TCorrelate.py | 49 + .../interfaces/afni/tests/test_auto_TNorm.py | 47 + .../afni/tests/test_auto_TProject.py | 60 + .../interfaces/afni/tests/test_auto_TShift.py | 65 + .../interfaces/afni/tests/test_auto_TStat.py | 43 + .../interfaces/afni/tests/test_auto_To3D.py | 45 + .../interfaces/afni/tests/test_auto_Undump.py | 48 + .../afni/tests/test_auto_Unifize.py | 58 + .../interfaces/afni/tests/test_auto_Volreg.py | 78 + .../interfaces/afni/tests/test_auto_Warp.py | 56 + .../interfaces/afni/tests/test_auto_ZCutUp.py | 42 + .../interfaces/afni/tests/test_auto_Zcat.py | 51 + .../afni/tests/test_auto_Zeropad.py | 88 + .../afni/tests/test_extra_Deconvolve.py | 11 + nipype/interfaces/afni/utils.py | 2985 + nipype/interfaces/ants/__init__.py | 26 + nipype/interfaces/ants/base.py | 125 + nipype/interfaces/ants/legacy.py | 325 + nipype/interfaces/ants/registration.py | 1598 + nipype/interfaces/ants/resampling.py | 571 + 
nipype/interfaces/ants/segmentation.py | 1642 + nipype/interfaces/ants/tests/__init__.py | 3 + .../interfaces/ants/tests/test_auto_ANTS.py | 97 + .../ants/tests/test_auto_ANTSCommand.py | 22 + .../ants/tests/test_auto_AffineInitializer.py | 69 + .../ants/tests/test_auto_AntsJointFusion.py | 98 + .../ants/tests/test_auto_ApplyTransforms.py | 64 + .../test_auto_ApplyTransformsToPoints.py | 45 + .../ants/tests/test_auto_Atropos.py | 75 + .../tests/test_auto_AverageAffineTransform.py | 44 + .../ants/tests/test_auto_AverageImages.py | 50 + .../ants/tests/test_auto_BrainExtraction.py | 78 + .../tests/test_auto_ComposeMultiTransform.py | 50 + .../test_auto_ConvertScalarImageToRGB.py | 81 + .../ants/tests/test_auto_CorticalThickness.py | 88 + ...est_auto_CreateJacobianDeterminantImage.py | 52 + .../ants/tests/test_auto_CreateTiledMosaic.py | 49 + .../ants/tests/test_auto_DenoiseImage.py | 64 + .../ants/tests/test_auto_GenWarpFields.py | 67 + .../ants/tests/test_auto_JointFusion.py | 84 + .../ants/tests/test_auto_KellyKapowski.py | 90 + .../ants/tests/test_auto_LabelGeometry.py | 51 + .../tests/test_auto_LaplacianThickness.py | 67 + .../tests/test_auto_MeasureImageSimilarity.py | 57 + .../ants/tests/test_auto_MultiplyImages.py | 49 + .../tests/test_auto_N4BiasFieldCorrection.py | 62 + .../ants/tests/test_auto_Registration.py | 160 + .../tests/test_auto_RegistrationSynQuick.py | 68 + .../test_auto_WarpImageMultiTransform.py | 68 + ..._auto_WarpTimeSeriesImageMultiTransform.py | 60 + .../ants/tests/test_auto_antsIntroduction.py | 67 + .../tests/test_auto_buildtemplateparallel.py | 72 + .../ants/tests/test_extra_Registration.py | 22 + .../interfaces/ants/tests/test_resampling.py | 90 + .../ants/tests/test_spec_JointFusion.py | 87 + nipype/interfaces/ants/utils.py | 416 + nipype/interfaces/ants/visualization.py | 193 + nipype/interfaces/base/__init__.py | 26 + nipype/interfaces/base/core.py | 1357 + nipype/interfaces/base/specs.py | 374 + nipype/interfaces/base/support.py | 303 + nipype/interfaces/base/tests/__init__.py | 1 + .../base/tests/test_auto_BaseInterface.py | 12 + .../base/tests/test_auto_CommandLine.py | 18 + .../tests/test_auto_LibraryBaseInterface.py | 12 + .../base/tests/test_auto_MpiCommandLine.py | 20 + .../tests/test_auto_SEMLikeCommandLine.py | 18 + .../base/tests/test_auto_SimpleInterface.py | 12 + .../base/tests/test_auto_StdOutCommandLine.py | 23 + nipype/interfaces/base/tests/test_core.py | 519 + .../base/tests/test_resource_monitor.py | 92 + nipype/interfaces/base/tests/test_specs.py | 387 + nipype/interfaces/base/tests/test_support.py | 61 + nipype/interfaces/base/traits_extension.py | 429 + nipype/interfaces/brainsuite/__init__.py | 4 + nipype/interfaces/brainsuite/brainsuite.py | 1763 + .../interfaces/brainsuite/tests/__init__.py | 1 + .../brainsuite/tests/test_auto_BDP.py | 99 + .../brainsuite/tests/test_auto_Bfc.py | 75 + .../brainsuite/tests/test_auto_Bse.py | 92 + .../brainsuite/tests/test_auto_Cerebro.py | 71 + .../brainsuite/tests/test_auto_Cortex.py | 52 + .../brainsuite/tests/test_auto_Dewisp.py | 37 + .../brainsuite/tests/test_auto_Dfs.py | 63 + .../brainsuite/tests/test_auto_Hemisplit.py | 57 + .../brainsuite/tests/test_auto_Pialmesh.py | 78 + .../brainsuite/tests/test_auto_Pvc.py | 45 + .../brainsuite/tests/test_auto_SVReg.py | 55 + .../brainsuite/tests/test_auto_Scrubmask.py | 44 + .../brainsuite/tests/test_auto_Skullfinder.py | 47 + .../brainsuite/tests/test_auto_Tca.py | 44 + .../tests/test_auto_ThicknessPVC.py | 22 + nipype/interfaces/bru2nii.py | 68 + 
nipype/interfaces/c3.py | 202 + nipype/interfaces/camino/__init__.py | 18 + nipype/interfaces/camino/calib.py | 296 + nipype/interfaces/camino/connectivity.py | 178 + nipype/interfaces/camino/convert.py | 974 + nipype/interfaces/camino/dti.py | 1516 + nipype/interfaces/camino/odf.py | 544 + nipype/interfaces/camino/tests/__init__.py | 3 + .../camino/tests/test_auto_AnalyzeHeader.py | 106 + .../tests/test_auto_ComputeEigensystem.py | 45 + .../test_auto_ComputeFractionalAnisotropy.py | 42 + .../tests/test_auto_ComputeMeanDiffusivity.py | 42 + .../tests/test_auto_ComputeTensorTrace.py | 42 + .../camino/tests/test_auto_Conmat.py | 56 + .../camino/tests/test_auto_DT2NIfTI.py | 44 + .../camino/tests/test_auto_DTIFit.py | 45 + .../camino/tests/test_auto_DTLUTGen.py | 68 + .../camino/tests/test_auto_DTMetric.py | 46 + .../camino/tests/test_auto_FSL2Scheme.py | 57 + .../camino/tests/test_auto_Image2Voxel.py | 40 + .../camino/tests/test_auto_ImageStats.py | 43 + .../camino/tests/test_auto_LinRecon.py | 48 + .../interfaces/camino/tests/test_auto_MESD.py | 60 + .../camino/tests/test_auto_ModelFit.py | 54 + .../camino/tests/test_auto_NIfTIDT2Camino.py | 41 + .../camino/tests/test_auto_PicoPDFs.py | 58 + .../camino/tests/test_auto_ProcStreamlines.py | 119 + .../camino/tests/test_auto_QBallMX.py | 54 + .../camino/tests/test_auto_SFLUTGen.py | 62 + .../camino/tests/test_auto_SFPICOCalibData.py | 87 + .../camino/tests/test_auto_SFPeaks.py | 76 + .../camino/tests/test_auto_Shredder.py | 50 + .../camino/tests/test_auto_Track.py | 82 + .../camino/tests/test_auto_TrackBallStick.py | 82 + .../camino/tests/test_auto_TrackBayesDirac.py | 97 + .../tests/test_auto_TrackBedpostxDeter.py | 90 + .../tests/test_auto_TrackBedpostxProba.py | 94 + .../camino/tests/test_auto_TrackBootstrap.py | 96 + .../camino/tests/test_auto_TrackDT.py | 82 + .../camino/tests/test_auto_TrackPICo.py | 87 + .../camino/tests/test_auto_TractShredder.py | 50 + .../camino/tests/test_auto_VtkStreamlines.py | 59 + nipype/interfaces/camino/utils.py | 85 + nipype/interfaces/camino2trackvis/__init__.py | 7 + nipype/interfaces/camino2trackvis/convert.py | 181 + .../camino2trackvis/tests/__init__.py | 1 + .../tests/test_auto_Camino2Trackvis.py | 61 + .../tests/test_auto_Trackvis2Camino.py | 39 + nipype/interfaces/cmtk/__init__.py | 6 + nipype/interfaces/cmtk/base.py | 33 + nipype/interfaces/cmtk/cmtk.py | 1008 + nipype/interfaces/cmtk/convert.py | 280 + nipype/interfaces/cmtk/nbs.py | 182 + nipype/interfaces/cmtk/nx.py | 656 + nipype/interfaces/cmtk/parcellation.py | 616 + nipype/interfaces/cmtk/tests/__init__.py | 1 + .../cmtk/tests/test_auto_AverageNetworks.py | 29 + .../cmtk/tests/test_auto_CFFBaseInterface.py | 12 + .../cmtk/tests/test_auto_CFFConverter.py | 40 + .../cmtk/tests/test_auto_CreateMatrix.py | 50 + .../cmtk/tests/test_auto_CreateNodes.py | 23 + .../cmtk/tests/test_auto_MergeCNetworks.py | 22 + .../tests/test_auto_NetworkBasedStatistic.py | 33 + .../cmtk/tests/test_auto_NetworkXMetrics.py | 44 + .../cmtk/tests/test_auto_Parcellate.py | 35 + .../interfaces/cmtk/tests/test_auto_ROIGen.py | 29 + nipype/interfaces/cmtk/tests/test_nbs.py | 60 + nipype/interfaces/dcm2nii.py | 460 + nipype/interfaces/dcmstack.py | 418 + .../interfaces/diffusion_toolkit/__init__.py | 5 + nipype/interfaces/diffusion_toolkit/base.py | 60 + nipype/interfaces/diffusion_toolkit/dti.py | 276 + nipype/interfaces/diffusion_toolkit/odf.py | 387 + .../interfaces/diffusion_toolkit/postproc.py | 117 + .../diffusion_toolkit/tests/__init__.py | 1 + 
.../tests/test_auto_DTIRecon.py | 61 + .../tests/test_auto_DTITracker.py | 64 + .../tests/test_auto_HARDIMat.py | 42 + .../tests/test_auto_ODFRecon.py | 70 + .../tests/test_auto_ODFTracker.py | 68 + .../tests/test_auto_SplineFilter.py | 40 + .../tests/test_auto_TrackMerge.py | 35 + nipype/interfaces/dipy/__init__.py | 7 + nipype/interfaces/dipy/anisotropic_power.py | 74 + nipype/interfaces/dipy/base.py | 77 + nipype/interfaces/dipy/preprocess.py | 318 + nipype/interfaces/dipy/reconstruction.py | 373 + nipype/interfaces/dipy/setup.py | 19 + nipype/interfaces/dipy/simulate.py | 352 + nipype/interfaces/dipy/tensors.py | 148 + nipype/interfaces/dipy/tests/__init__.py | 1 + .../dipy/tests/test_auto_APMQball.py | 26 + nipype/interfaces/dipy/tests/test_auto_CSD.py | 33 + nipype/interfaces/dipy/tests/test_auto_DTI.py | 32 + .../dipy/tests/test_auto_Denoise.py | 31 + .../dipy/tests/test_auto_DipyBaseInterface.py | 12 + .../tests/test_auto_DipyDiffusionInterface.py | 18 + .../tests/test_auto_EstimateResponseSH.py | 36 + .../dipy/tests/test_auto_RESTORE.py | 35 + .../dipy/tests/test_auto_Resample.py | 26 + .../tests/test_auto_SimulateMultiTensor.py | 43 + .../tests/test_auto_StreamlineTractography.py | 56 + .../dipy/tests/test_auto_TensorMode.py | 26 + .../dipy/tests/test_auto_TrackDensityMap.py | 26 + nipype/interfaces/dipy/tracks.py | 324 + nipype/interfaces/dtitk/__init__.py | 13 + nipype/interfaces/dtitk/base.py | 103 + nipype/interfaces/dtitk/registration.py | 489 + nipype/interfaces/dtitk/tests/__init__.py | 3 + .../dtitk/tests/test_auto_AffScalarVol.py | 59 + .../tests/test_auto_AffSymTensor3DVol.py | 63 + .../dtitk/tests/test_auto_Affine.py | 63 + .../dtitk/tests/test_auto_AffineTask.py | 63 + .../dtitk/tests/test_auto_BinThresh.py | 61 + .../dtitk/tests/test_auto_BinThreshTask.py | 61 + .../dtitk/tests/test_auto_CommandLineDtitk.py | 18 + .../dtitk/tests/test_auto_ComposeXfm.py | 37 + .../dtitk/tests/test_auto_ComposeXfmTask.py | 37 + .../dtitk/tests/test_auto_Diffeo.py | 59 + .../dtitk/tests/test_auto_DiffeoScalarVol.py | 53 + .../tests/test_auto_DiffeoSymTensor3DVol.py | 61 + .../dtitk/tests/test_auto_DiffeoTask.py | 59 + .../interfaces/dtitk/tests/test_auto_Rigid.py | 63 + .../dtitk/tests/test_auto_RigidTask.py | 63 + .../dtitk/tests/test_auto_SVAdjustVoxSp.py | 47 + .../tests/test_auto_SVAdjustVoxSpTask.py | 47 + .../dtitk/tests/test_auto_SVResample.py | 52 + .../dtitk/tests/test_auto_SVResampleTask.py | 52 + .../tests/test_auto_TVAdjustOriginTask.py | 47 + .../dtitk/tests/test_auto_TVAdjustVoxSp.py | 47 + .../tests/test_auto_TVAdjustVoxSpTask.py | 47 + .../dtitk/tests/test_auto_TVResample.py | 53 + .../dtitk/tests/test_auto_TVResampleTask.py | 53 + .../dtitk/tests/test_auto_TVtool.py | 34 + .../dtitk/tests/test_auto_TVtoolTask.py | 34 + .../dtitk/tests/test_auto_affScalarVolTask.py | 59 + .../tests/test_auto_affSymTensor3DVolTask.py | 63 + .../tests/test_auto_diffeoScalarVolTask.py | 53 + .../test_auto_diffeoSymTensor3DVolTask.py | 61 + nipype/interfaces/dtitk/utils.py | 335 + nipype/interfaces/dynamic_slicer.py | 225 + nipype/interfaces/elastix/__init__.py | 10 + nipype/interfaces/elastix/base.py | 32 + nipype/interfaces/elastix/registration.py | 271 + nipype/interfaces/elastix/tests/__init__.py | 1 + .../elastix/tests/test_auto_AnalyzeWarp.py | 43 + .../elastix/tests/test_auto_ApplyWarp.py | 43 + .../elastix/tests/test_auto_EditTransform.py | 29 + .../elastix/tests/test_auto_PointsWarp.py | 43 + .../elastix/tests/test_auto_Registration.py | 55 + nipype/interfaces/elastix/utils.py | 
173 + nipype/interfaces/freesurfer/__init__.py | 29 + nipype/interfaces/freesurfer/base.py | 286 + nipype/interfaces/freesurfer/longitudinal.py | 246 + nipype/interfaces/freesurfer/model.py | 1646 + nipype/interfaces/freesurfer/preprocess.py | 3043 + nipype/interfaces/freesurfer/registration.py | 607 + .../interfaces/freesurfer/tests/__init__.py | 1 + .../freesurfer/tests/test_BBRegister.py | 139 + .../freesurfer/tests/test_FSSurfaceCommand.py | 43 + .../tests/test_auto_AddXFormToHeader.py | 43 + .../freesurfer/tests/test_auto_Aparc2Aseg.py | 53 + .../freesurfer/tests/test_auto_Apas2Aseg.py | 34 + .../freesurfer/tests/test_auto_ApplyMask.py | 52 + .../tests/test_auto_ApplyVolTransform.py | 112 + .../freesurfer/tests/test_auto_Binarize.py | 71 + .../freesurfer/tests/test_auto_CALabel.py | 55 + .../freesurfer/tests/test_auto_CANormalize.py | 55 + .../freesurfer/tests/test_auto_CARegister.py | 52 + .../test_auto_CheckTalairachAlignment.py | 42 + .../freesurfer/tests/test_auto_Concatenate.py | 49 + .../tests/test_auto_ConcatenateLTA.py | 59 + .../freesurfer/tests/test_auto_Contrast.py | 46 + .../freesurfer/tests/test_auto_Curvature.py | 41 + .../tests/test_auto_CurvatureStats.py | 58 + .../tests/test_auto_DICOMConvert.py | 28 + .../freesurfer/tests/test_auto_EMRegister.py | 49 + .../tests/test_auto_EditWMwithAseg.py | 47 + .../freesurfer/tests/test_auto_EulerNumber.py | 31 + .../tests/test_auto_ExtractMainComponent.py | 36 + .../freesurfer/tests/test_auto_FSCommand.py | 19 + .../tests/test_auto_FSCommandOpenMP.py | 20 + .../tests/test_auto_FSScriptCommand.py | 19 + .../freesurfer/tests/test_auto_FitMSParams.py | 44 + .../freesurfer/tests/test_auto_FixTopology.py | 46 + .../tests/test_auto_FuseSegmentations.py | 51 + .../freesurfer/tests/test_auto_GLMFit.py | 144 + .../freesurfer/tests/test_auto_ImageInfo.py | 42 + .../freesurfer/tests/test_auto_Jacobian.py | 44 + .../freesurfer/tests/test_auto_LTAConvert.py | 80 + .../freesurfer/tests/test_auto_Label2Annot.py | 48 + .../freesurfer/tests/test_auto_Label2Label.py | 59 + .../freesurfer/tests/test_auto_Label2Vol.py | 83 + .../tests/test_auto_MNIBiasCorrection.py | 48 + .../freesurfer/tests/test_auto_MPRtoMNI305.py | 41 + .../freesurfer/tests/test_auto_MRIConvert.py | 117 + .../freesurfer/tests/test_auto_MRICoreg.py | 86 + .../freesurfer/tests/test_auto_MRIFill.py | 42 + .../tests/test_auto_MRIMarchingCubes.py | 46 + .../freesurfer/tests/test_auto_MRIPretess.py | 52 + .../freesurfer/tests/test_auto_MRISPreproc.py | 83 + .../tests/test_auto_MRISPreprocReconAll.py | 95 + .../tests/test_auto_MRITessellate.py | 43 + .../freesurfer/tests/test_auto_MRIsCALabel.py | 63 + .../freesurfer/tests/test_auto_MRIsCalc.py | 55 + .../freesurfer/tests/test_auto_MRIsCombine.py | 37 + .../freesurfer/tests/test_auto_MRIsConvert.py | 59 + .../freesurfer/tests/test_auto_MRIsExpand.py | 60 + .../freesurfer/tests/test_auto_MRIsInflate.py | 48 + .../freesurfer/tests/test_auto_MS_LDA.py | 53 + .../tests/test_auto_MakeAverageSubject.py | 35 + .../tests/test_auto_MakeSurfaces.py | 71 + .../freesurfer/tests/test_auto_Normalize.py | 43 + .../tests/test_auto_OneSampleTTest.py | 144 + .../freesurfer/tests/test_auto_Paint.py | 46 + .../tests/test_auto_ParcellationStats.py | 81 + .../tests/test_auto_ParseDICOMDir.py | 36 + .../freesurfer/tests/test_auto_ReconAll.py | 190 + .../freesurfer/tests/test_auto_Register.py | 51 + .../tests/test_auto_RegisterAVItoTalairach.py | 49 + .../tests/test_auto_RelabelHypointensities.py | 52 + .../tests/test_auto_RemoveIntersection.py | 40 + 
.../freesurfer/tests/test_auto_RemoveNeck.py | 50 + .../freesurfer/tests/test_auto_Resample.py | 40 + .../tests/test_auto_RobustRegister.py | 81 + .../tests/test_auto_RobustTemplate.py | 60 + .../tests/test_auto_SampleToSurface.py | 116 + .../freesurfer/tests/test_auto_SegStats.py | 99 + .../tests/test_auto_SegStatsReconAll.py | 115 + .../freesurfer/tests/test_auto_SegmentCC.py | 52 + .../freesurfer/tests/test_auto_SegmentWM.py | 36 + .../freesurfer/tests/test_auto_Smooth.py | 58 + .../tests/test_auto_SmoothTessellation.py | 49 + .../freesurfer/tests/test_auto_Sphere.py | 43 + .../tests/test_auto_SphericalAverage.py | 63 + .../tests/test_auto_Surface2VolTransform.py | 67 + .../tests/test_auto_SurfaceSmooth.py | 55 + .../tests/test_auto_SurfaceSnapshots.py | 102 + .../tests/test_auto_SurfaceTransform.py | 60 + .../tests/test_auto_SynthesizeFLASH.py | 59 + .../tests/test_auto_TalairachAVI.py | 39 + .../freesurfer/tests/test_auto_TalairachQC.py | 31 + .../freesurfer/tests/test_auto_Tkregister2.py | 68 + .../tests/test_auto_UnpackSDICOMDir.py | 45 + .../freesurfer/tests/test_auto_VolumeMask.py | 63 + .../tests/test_auto_WatershedSkullStrip.py | 46 + .../interfaces/freesurfer/tests/test_model.py | 57 + .../freesurfer/tests/test_preprocess.py | 191 + .../interfaces/freesurfer/tests/test_utils.py | 227 + nipype/interfaces/freesurfer/utils.py | 3970 + nipype/interfaces/fsl/__init__.py | 36 + nipype/interfaces/fsl/aroma.py | 146 + nipype/interfaces/fsl/base.py | 282 + nipype/interfaces/fsl/dti.py | 1464 + nipype/interfaces/fsl/epi.py | 1224 + nipype/interfaces/fsl/fix.py | 370 + nipype/interfaces/fsl/maths.py | 621 + nipype/interfaces/fsl/model.py | 2432 + .../model_templates/feat_contrast_element.tcl | 2 + .../feat_contrast_ftest_element.tcl | 2 + .../model_templates/feat_contrast_header.tcl | 5 + .../model_templates/feat_contrast_prolog.tcl | 5 + .../feat_contrastmask_element.tcl | 3 + .../feat_contrastmask_footer.tcl | 2 + .../feat_contrastmask_header.tcl | 2 + .../fsl/model_templates/feat_contrasts.tcl | 688 + .../fsl/model_templates/feat_ev_custom.tcl | 40 + .../fsl/model_templates/feat_ev_gamma.tcl | 40 + .../fsl/model_templates/feat_ev_hrf.tcl | 34 + .../fsl/model_templates/feat_ev_none.tcl | 31 + .../fsl/model_templates/feat_ev_ortho.tcl | 2 + .../fsl/model_templates/feat_fe_copes.tcl | 3 + .../model_templates/feat_fe_ev_element.tcl | 5 + .../fsl/model_templates/feat_fe_ev_header.tcl | 45 + .../fsl/model_templates/feat_fe_featdirs.tcl | 3 + .../fsl/model_templates/feat_fe_footer.tcl | 41 + .../fsl/model_templates/feat_fe_header.tcl | 269 + .../fsl/model_templates/feat_header.tcl | 271 + .../fsl/model_templates/feat_header_l1.tcl | 271 + .../fsl/model_templates/feat_nongui.tcl | 20 + .../fsl/model_templates/featreg_header.tcl | 269 + nipype/interfaces/fsl/possum.py | 117 + nipype/interfaces/fsl/preprocess.py | 2056 + nipype/interfaces/fsl/tests/__init__.py | 1 + nipype/interfaces/fsl/tests/test_FILMGLS.py | 129 + .../fsl/tests/test_Level1Design_functions.py | 32 + .../fsl/tests/test_auto_AR1Image.py | 54 + .../fsl/tests/test_auto_AccuracyTester.py | 45 + .../fsl/tests/test_auto_ApplyMask.py | 54 + .../fsl/tests/test_auto_ApplyTOPUP.py | 56 + .../fsl/tests/test_auto_ApplyWarp.py | 62 + .../fsl/tests/test_auto_ApplyXFM.py | 160 + .../interfaces/fsl/tests/test_auto_AvScale.py | 45 + .../interfaces/fsl/tests/test_auto_B0Calc.py | 89 + .../fsl/tests/test_auto_BEDPOSTX5.py | 112 + nipype/interfaces/fsl/tests/test_auto_BET.py | 100 + .../fsl/tests/test_auto_BinaryMaths.py | 66 + 
.../fsl/tests/test_auto_ChangeDataType.py | 50 + .../fsl/tests/test_auto_Classifier.py | 42 + .../interfaces/fsl/tests/test_auto_Cleaner.py | 55 + .../interfaces/fsl/tests/test_auto_Cluster.py | 108 + .../interfaces/fsl/tests/test_auto_Complex.py | 163 + .../fsl/tests/test_auto_ContrastMgr.py | 62 + .../fsl/tests/test_auto_ConvertWarp.py | 67 + .../fsl/tests/test_auto_ConvertXFM.py | 58 + .../fsl/tests/test_auto_CopyGeom.py | 44 + .../interfaces/fsl/tests/test_auto_DTIFit.py | 74 + .../fsl/tests/test_auto_DilateImage.py | 68 + .../fsl/tests/test_auto_DistanceMap.py | 44 + .../fsl/tests/test_auto_DualRegression.py | 65 + .../fsl/tests/test_auto_EPIDeWarp.py | 75 + nipype/interfaces/fsl/tests/test_auto_Eddy.py | 97 + .../fsl/tests/test_auto_EddyCorrect.py | 44 + .../interfaces/fsl/tests/test_auto_EpiReg.py | 72 + .../fsl/tests/test_auto_ErodeImage.py | 68 + .../fsl/tests/test_auto_ExtractROI.py | 77 + nipype/interfaces/fsl/tests/test_auto_FAST.py | 65 + nipype/interfaces/fsl/tests/test_auto_FEAT.py | 31 + .../fsl/tests/test_auto_FEATModel.py | 44 + .../fsl/tests/test_auto_FEATRegister.py | 23 + .../interfaces/fsl/tests/test_auto_FIRST.py | 75 + .../interfaces/fsl/tests/test_auto_FLAMEO.py | 78 + .../interfaces/fsl/tests/test_auto_FLIRT.py | 157 + .../interfaces/fsl/tests/test_auto_FNIRT.py | 140 + .../fsl/tests/test_auto_FSLCommand.py | 19 + .../fsl/tests/test_auto_FSLXCommand.py | 116 + .../interfaces/fsl/tests/test_auto_FUGUE.py | 83 + .../fsl/tests/test_auto_FeatureExtractor.py | 35 + .../fsl/tests/test_auto_FilterRegressor.py | 57 + .../fsl/tests/test_auto_FindTheBiggest.py | 37 + nipype/interfaces/fsl/tests/test_auto_GLM.py | 74 + .../fsl/tests/test_auto_ICA_AROMA.py | 71 + .../fsl/tests/test_auto_ImageMaths.py | 51 + .../fsl/tests/test_auto_ImageMeants.py | 47 + .../fsl/tests/test_auto_ImageStats.py | 41 + .../interfaces/fsl/tests/test_auto_InvWarp.py | 53 + .../fsl/tests/test_auto_IsotropicSmooth.py | 61 + .../interfaces/fsl/tests/test_auto_L2Model.py | 23 + .../fsl/tests/test_auto_Level1Design.py | 29 + .../interfaces/fsl/tests/test_auto_MCFLIRT.py | 62 + .../interfaces/fsl/tests/test_auto_MELODIC.py | 85 + .../fsl/tests/test_auto_MakeDyadicVectors.py | 53 + .../fsl/tests/test_auto_MathsCommand.py | 49 + .../fsl/tests/test_auto_MaxImage.py | 54 + .../fsl/tests/test_auto_MaxnImage.py | 54 + .../fsl/tests/test_auto_MeanImage.py | 54 + .../fsl/tests/test_auto_MedianImage.py | 54 + .../interfaces/fsl/tests/test_auto_Merge.py | 47 + .../fsl/tests/test_auto_MinImage.py | 54 + .../fsl/tests/test_auto_MotionOutliers.py | 60 + .../fsl/tests/test_auto_MultiImageMaths.py | 55 + .../tests/test_auto_MultipleRegressDesign.py | 28 + .../interfaces/fsl/tests/test_auto_Overlay.py | 93 + .../interfaces/fsl/tests/test_auto_PRELUDE.py | 73 + .../fsl/tests/test_auto_PercentileImage.py | 58 + .../fsl/tests/test_auto_PlotMotionParams.py | 42 + .../fsl/tests/test_auto_PlotTimeSeries.py | 70 + .../fsl/tests/test_auto_PowerSpectrum.py | 37 + .../fsl/tests/test_auto_PrepareFieldmap.py | 56 + .../fsl/tests/test_auto_ProbTrackX.py | 92 + .../fsl/tests/test_auto_ProbTrackX2.py | 114 + .../fsl/tests/test_auto_ProjThresh.py | 36 + .../fsl/tests/test_auto_Randomise.py | 74 + .../fsl/tests/test_auto_Reorient2Std.py | 35 + .../fsl/tests/test_auto_RobustFOV.py | 47 + nipype/interfaces/fsl/tests/test_auto_SMM.py | 46 + .../interfaces/fsl/tests/test_auto_SUSAN.py | 62 + .../interfaces/fsl/tests/test_auto_SigLoss.py | 37 + .../interfaces/fsl/tests/test_auto_Slice.py | 36 + .../fsl/tests/test_auto_SliceTimer.py | 43 + 
.../interfaces/fsl/tests/test_auto_Slicer.py | 106 + .../interfaces/fsl/tests/test_auto_Smooth.py | 50 + .../fsl/tests/test_auto_SmoothEstimate.py | 47 + .../fsl/tests/test_auto_SpatialFilter.py | 68 + .../interfaces/fsl/tests/test_auto_Split.py | 40 + .../fsl/tests/test_auto_StdImage.py | 54 + .../fsl/tests/test_auto_SwapDimensions.py | 40 + .../interfaces/fsl/tests/test_auto_TOPUP.py | 132 + .../fsl/tests/test_auto_TemporalFilter.py | 59 + .../fsl/tests/test_auto_Threshold.py | 57 + .../fsl/tests/test_auto_TractSkeleton.py | 49 + .../fsl/tests/test_auto_Training.py | 38 + .../fsl/tests/test_auto_TrainingSetCreator.py | 29 + .../fsl/tests/test_auto_UnaryMaths.py | 54 + .../interfaces/fsl/tests/test_auto_VecReg.py | 46 + .../fsl/tests/test_auto_WarpPoints.py | 59 + .../fsl/tests/test_auto_WarpPointsFromStd.py | 54 + .../fsl/tests/test_auto_WarpPointsToStd.py | 60 + .../fsl/tests/test_auto_WarpUtils.py | 52 + .../fsl/tests/test_auto_XFibres5.py | 117 + nipype/interfaces/fsl/tests/test_base.py | 103 + nipype/interfaces/fsl/tests/test_dti.py | 424 + nipype/interfaces/fsl/tests/test_epi.py | 38 + nipype/interfaces/fsl/tests/test_maths.py | 470 + nipype/interfaces/fsl/tests/test_model.py | 68 + .../interfaces/fsl/tests/test_preprocess.py | 630 + nipype/interfaces/fsl/tests/test_utils.py | 327 + nipype/interfaces/fsl/utils.py | 2705 + nipype/interfaces/image.py | 234 + nipype/interfaces/io.py | 2864 + nipype/interfaces/matlab.py | 224 + nipype/interfaces/meshfix.py | 216 + nipype/interfaces/minc/__init__.py | 43 + nipype/interfaces/minc/base.py | 139 + nipype/interfaces/minc/minc.py | 3685 + nipype/interfaces/minc/testdata.py | 18 + nipype/interfaces/minc/tests/__init__.py | 1 + .../minc/tests/test_auto_Average.py | 155 + .../interfaces/minc/tests/test_auto_BBox.py | 54 + .../interfaces/minc/tests/test_auto_Beast.py | 86 + .../minc/tests/test_auto_BestLinReg.py | 61 + .../minc/tests/test_auto_BigAverage.py | 56 + .../interfaces/minc/tests/test_auto_Blob.py | 42 + .../interfaces/minc/tests/test_auto_Blur.py | 72 + .../interfaces/minc/tests/test_auto_Calc.py | 156 + .../minc/tests/test_auto_Convert.py | 46 + .../interfaces/minc/tests/test_auto_Copy.py | 46 + .../interfaces/minc/tests/test_auto_Dump.py | 65 + .../minc/tests/test_auto_Extract.py | 163 + .../minc/tests/test_auto_Gennlxfm.py | 44 + .../interfaces/minc/tests/test_auto_Math.py | 169 + .../interfaces/minc/tests/test_auto_NlpFit.py | 61 + .../interfaces/minc/tests/test_auto_Norm.py | 65 + nipype/interfaces/minc/tests/test_auto_Pik.py | 92 + .../minc/tests/test_auto_Resample.py | 251 + .../minc/tests/test_auto_Reshape.py | 44 + .../interfaces/minc/tests/test_auto_ToEcat.py | 50 + .../interfaces/minc/tests/test_auto_ToRaw.py | 89 + .../minc/tests/test_auto_VolSymm.py | 64 + .../minc/tests/test_auto_Volcentre.py | 46 + .../interfaces/minc/tests/test_auto_Voliso.py | 46 + .../interfaces/minc/tests/test_auto_Volpad.py | 48 + .../interfaces/minc/tests/test_auto_XfmAvg.py | 49 + .../minc/tests/test_auto_XfmConcat.py | 48 + .../minc/tests/test_auto_XfmInvert.py | 43 + nipype/interfaces/mipav/__init__.py | 12 + nipype/interfaces/mipav/developer.py | 1616 + nipype/interfaces/mipav/generate_classes.py | 55 + nipype/interfaces/mipav/tests/__init__.py | 1 + .../test_auto_JistBrainMgdmSegmentation.py | 69 + ...est_auto_JistBrainMp2rageDuraEstimation.py | 40 + ...est_auto_JistBrainMp2rageSkullStripping.py | 58 + .../test_auto_JistBrainPartialVolumeFilter.py | 39 + ...est_auto_JistCortexSurfaceMeshInflation.py | 50 + 
.../test_auto_JistIntensityMp2rageMasking.py | 59 + .../test_auto_JistLaminarProfileCalculator.py | 39 + .../test_auto_JistLaminarProfileGeometry.py | 41 + .../test_auto_JistLaminarProfileSampling.py | 46 + .../test_auto_JistLaminarROIAveraging.py | 40 + ...test_auto_JistLaminarVolumetricLayering.py | 59 + ...test_auto_MedicAlgorithmImageCalculator.py | 39 + .../test_auto_MedicAlgorithmLesionToads.py | 99 + .../test_auto_MedicAlgorithmMipavReorient.py | 48 + .../mipav/tests/test_auto_MedicAlgorithmN3.py | 52 + .../test_auto_MedicAlgorithmSPECTRE2010.py | 112 + ...uto_MedicAlgorithmThresholdToBinaryMask.py | 43 + .../mipav/tests/test_auto_RandomVol.py | 45 + nipype/interfaces/mixins/__init__.py | 2 + nipype/interfaces/mixins/reporting.py | 65 + nipype/interfaces/mixins/tests/__init__.py | 1 + .../tests/test_auto_ReportCapableInterface.py | 12 + nipype/interfaces/mne/__init__.py | 2 + nipype/interfaces/mne/base.py | 136 + nipype/interfaces/mne/tests/__init__.py | 1 + .../mne/tests/test_auto_WatershedBEM.py | 56 + nipype/interfaces/mrtrix/__init__.py | 15 + nipype/interfaces/mrtrix/convert.py | 265 + nipype/interfaces/mrtrix/defhdr.mat | Bin 0 -> 533 bytes nipype/interfaces/mrtrix/preprocess.py | 922 + nipype/interfaces/mrtrix/tensors.py | 612 + nipype/interfaces/mrtrix/tests/__init__.py | 1 + ..._auto_ConstrainedSphericalDeconvolution.py | 65 + .../test_auto_DWI2SphericalHarmonicsImage.py | 45 + .../mrtrix/tests/test_auto_DWI2Tensor.py | 59 + ...est_auto_DiffusionTensorStreamlineTrack.py | 125 + .../tests/test_auto_Directions2Amplitude.py | 47 + .../mrtrix/tests/test_auto_Erode.py | 48 + .../tests/test_auto_EstimateResponseForSH.py | 49 + .../mrtrix/tests/test_auto_FSL2MRTrix.py | 26 + .../mrtrix/tests/test_auto_FilterTracks.py | 73 + .../mrtrix/tests/test_auto_FindShPeaks.py | 53 + .../tests/test_auto_GenerateDirections.py | 42 + .../test_auto_GenerateWhiteMatterMask.py | 46 + .../mrtrix/tests/test_auto_MRConvert.py | 79 + .../mrtrix/tests/test_auto_MRMultiply.py | 43 + .../mrtrix/tests/test_auto_MRTransform.py | 67 + .../mrtrix/tests/test_auto_MRTrix2TrackVis.py | 28 + .../mrtrix/tests/test_auto_MRTrixInfo.py | 30 + .../mrtrix/tests/test_auto_MRTrixViewer.py | 38 + .../mrtrix/tests/test_auto_MedianFilter3D.py | 43 + ...cSphericallyDeconvolutedStreamlineTrack.py | 121 + ..._SphericallyDeconvolutedStreamlineTrack.py | 120 + .../mrtrix/tests/test_auto_StreamlineTrack.py | 120 + .../test_auto_Tensor2ApparentDiffusion.py | 43 + .../test_auto_Tensor2FractionalAnisotropy.py | 43 + .../mrtrix/tests/test_auto_Tensor2Vector.py | 43 + .../mrtrix/tests/test_auto_Threshold.py | 53 + .../mrtrix/tests/test_auto_Tracks2Prob.py | 61 + nipype/interfaces/mrtrix/tracking.py | 504 + nipype/interfaces/mrtrix3/__init__.py | 12 + nipype/interfaces/mrtrix3/base.py | 80 + nipype/interfaces/mrtrix3/connectivity.py | 309 + nipype/interfaces/mrtrix3/preprocess.py | 264 + nipype/interfaces/mrtrix3/reconst.py | 167 + nipype/interfaces/mrtrix3/tests/__init__.py | 1 + .../mrtrix3/tests/test_auto_ACTPrepareFSL.py | 36 + .../mrtrix3/tests/test_auto_BrainMask.py | 45 + .../tests/test_auto_BuildConnectome.py | 53 + .../mrtrix3/tests/test_auto_ComputeTDI.py | 58 + .../mrtrix3/tests/test_auto_DWIDenoise.py | 52 + .../mrtrix3/tests/test_auto_DWIExtract.py | 51 + .../mrtrix3/tests/test_auto_EstimateFOD.py | 87 + .../mrtrix3/tests/test_auto_FitTensor.py | 51 + .../mrtrix3/tests/test_auto_Generate5tt.py | 49 + .../mrtrix3/tests/test_auto_LabelConfig.py | 49 + .../mrtrix3/tests/test_auto_LabelConvert.py | 50 + 
.../mrtrix3/tests/test_auto_MRConvert.py | 61 + .../mrtrix3/tests/test_auto_MRMath.py | 50 + .../mrtrix3/tests/test_auto_MRTrix3Base.py | 18 + .../mrtrix3/tests/test_auto_Mesh2PVE.py | 42 + .../tests/test_auto_ReplaceFSwithFIRST.py | 45 + .../mrtrix3/tests/test_auto_ResponseSD.py | 71 + .../mrtrix3/tests/test_auto_TCK2VTK.py | 41 + .../mrtrix3/tests/test_auto_TensorMetrics.py | 46 + .../mrtrix3/tests/test_auto_Tractography.py | 100 + nipype/interfaces/mrtrix3/tracking.py | 285 + nipype/interfaces/mrtrix3/utils.py | 679 + nipype/interfaces/niftyfit/__init__.py | 12 + nipype/interfaces/niftyfit/asl.py | 164 + nipype/interfaces/niftyfit/base.py | 48 + nipype/interfaces/niftyfit/dwi.py | 497 + nipype/interfaces/niftyfit/qt1.py | 186 + nipype/interfaces/niftyfit/tests/__init__.py | 0 nipype/interfaces/niftyfit/tests/test_asl.py | 69 + .../niftyfit/tests/test_auto_DwiTool.py | 157 + .../niftyfit/tests/test_auto_FitAsl.py | 85 + .../niftyfit/tests/test_auto_FitDwi.py | 214 + .../niftyfit/tests/test_auto_FitQt1.py | 141 + .../tests/test_auto_NiftyFitCommand.py | 18 + nipype/interfaces/niftyfit/tests/test_dwi.py | 108 + nipype/interfaces/niftyfit/tests/test_qt1.py | 93 + nipype/interfaces/niftyreg/__init__.py | 14 + nipype/interfaces/niftyreg/base.py | 134 + nipype/interfaces/niftyreg/reg.py | 400 + nipype/interfaces/niftyreg/regutils.py | 831 + nipype/interfaces/niftyreg/tests/__init__.py | 1 + .../tests/test_auto_NiftyRegCommand.py | 22 + .../niftyreg/tests/test_auto_RegAladin.py | 73 + .../niftyreg/tests/test_auto_RegAverage.py | 94 + .../niftyreg/tests/test_auto_RegF3D.py | 99 + .../niftyreg/tests/test_auto_RegJacobian.py | 45 + .../niftyreg/tests/test_auto_RegMeasure.py | 46 + .../niftyreg/tests/test_auto_RegResample.py | 55 + .../niftyreg/tests/test_auto_RegTools.py | 53 + .../niftyreg/tests/test_auto_RegTransform.py | 153 + nipype/interfaces/niftyreg/tests/test_reg.py | 90 + .../niftyreg/tests/test_regutils.py | 469 + nipype/interfaces/niftyseg/__init__.py | 16 + nipype/interfaces/niftyseg/base.py | 36 + nipype/interfaces/niftyseg/em.py | 161 + nipype/interfaces/niftyseg/label_fusion.py | 339 + nipype/interfaces/niftyseg/lesions.py | 123 + nipype/interfaces/niftyseg/maths.py | 645 + nipype/interfaces/niftyseg/patchmatch.py | 105 + nipype/interfaces/niftyseg/stats.py | 284 + nipype/interfaces/niftyseg/tests/__init__.py | 0 .../niftyseg/tests/test_auto_BinaryMaths.py | 63 + .../tests/test_auto_BinaryMathsInteger.py | 50 + .../niftyseg/tests/test_auto_BinaryStats.py | 55 + .../niftyseg/tests/test_auto_CalcTopNCC.py | 46 + .../interfaces/niftyseg/tests/test_auto_EM.py | 85 + .../niftyseg/tests/test_auto_FillLesions.py | 53 + .../niftyseg/tests/test_auto_LabelFusion.py | 61 + .../niftyseg/tests/test_auto_MathsCommand.py | 40 + .../niftyseg/tests/test_auto_Merge.py | 46 + .../tests/test_auto_NiftySegCommand.py | 18 + .../niftyseg/tests/test_auto_PatchMatch.py | 51 + .../niftyseg/tests/test_auto_StatsCommand.py | 38 + .../niftyseg/tests/test_auto_TupleMaths.py | 69 + .../niftyseg/tests/test_auto_UnaryMaths.py | 45 + .../niftyseg/tests/test_auto_UnaryStats.py | 43 + .../niftyseg/tests/test_em_interfaces.py | 43 + .../niftyseg/tests/test_extra_PatchMatch.py | 44 + .../niftyseg/tests/test_label_fusion.py | 131 + .../interfaces/niftyseg/tests/test_lesions.py | 40 + .../interfaces/niftyseg/tests/test_maths.py | 159 + .../interfaces/niftyseg/tests/test_stats.py | 61 + nipype/interfaces/nilearn.py | 166 + nipype/interfaces/nipy/__init__.py | 4 + nipype/interfaces/nipy/base.py | 20 + 
nipype/interfaces/nipy/model.py | 343 + nipype/interfaces/nipy/preprocess.py | 259 + nipype/interfaces/nipy/tests/__init__.py | 1 + .../nipy/tests/test_auto_ComputeMask.py | 25 + .../nipy/tests/test_auto_EstimateContrast.py | 33 + .../interfaces/nipy/tests/test_auto_FitGLM.py | 40 + .../nipy/tests/test_auto_NipyBaseInterface.py | 12 + .../nipy/tests/test_auto_Similarity.py | 25 + .../tests/test_auto_SpaceTimeRealigner.py | 30 + .../interfaces/nipy/tests/test_auto_Trim.py | 25 + nipype/interfaces/nipy/utils.py | 95 + nipype/interfaces/nitime/__init__.py | 6 + nipype/interfaces/nitime/analysis.py | 283 + nipype/interfaces/nitime/base.py | 10 + nipype/interfaces/nitime/tests/__init__.py | 1 + .../tests/test_auto_CoherenceAnalyzer.py | 36 + .../tests/test_auto_NitimeBaseInterface.py | 12 + nipype/interfaces/nitime/tests/test_nitime.py | 82 + nipype/interfaces/petpvc.py | 236 + nipype/interfaces/quickshear.py | 91 + nipype/interfaces/semtools/__init__.py | 12 + nipype/interfaces/semtools/brains/__init__.py | 6 + nipype/interfaces/semtools/brains/classify.py | 76 + .../semtools/brains/segmentation.py | 185 + .../semtools/brains/tests/__init__.py | 1 + ...t_auto_BRAINSPosteriorToContinuousClass.py | 36 + .../brains/tests/test_auto_BRAINSTalairach.py | 57 + .../tests/test_auto_BRAINSTalairachMask.py | 34 + .../tests/test_auto_GenerateEdgeMapImage.py | 43 + .../tests/test_auto_GeneratePurePlugMask.py | 35 + .../test_auto_HistogramMatchingFilter.py | 38 + .../brains/tests/test_auto_SimilarityIndex.py | 29 + .../interfaces/semtools/brains/utilities.py | 192 + nipype/interfaces/semtools/converters.py | 95 + .../interfaces/semtools/diffusion/__init__.py | 15 + .../semtools/diffusion/diffusion.py | 604 + .../interfaces/semtools/diffusion/gtract.py | 1708 + .../semtools/diffusion/maxcurvature.py | 56 + .../semtools/diffusion/tests/__init__.py | 1 + .../diffusion/tests/test_auto_DWIConvert.py | 67 + .../tests/test_auto_compareTractInclusion.py | 33 + .../diffusion/tests/test_auto_dtiaverage.py | 32 + .../diffusion/tests/test_auto_dtiestim.py | 63 + .../diffusion/tests/test_auto_dtiprocess.py | 115 + .../tests/test_auto_extractNrrdVectorIndex.py | 33 + .../tests/test_auto_gtractAnisotropyMap.py | 32 + .../tests/test_auto_gtractAverageBvalues.py | 33 + .../tests/test_auto_gtractClipAnisotropy.py | 33 + .../tests/test_auto_gtractCoRegAnatomy.py | 55 + .../tests/test_auto_gtractConcatDwi.py | 32 + .../test_auto_gtractCopyImageOrientation.py | 32 + .../tests/test_auto_gtractCoregBvalues.py | 50 + .../tests/test_auto_gtractCostFastMarching.py | 45 + .../tests/test_auto_gtractCreateGuideFiber.py | 33 + .../test_auto_gtractFastMarchingTracking.py | 43 + .../tests/test_auto_gtractFiberTracking.py | 59 + .../tests/test_auto_gtractImageConformity.py | 32 + .../test_auto_gtractInvertBSplineTransform.py | 36 + ...test_auto_gtractInvertDisplacementField.py | 33 + .../test_auto_gtractInvertRigidTransform.py | 31 + .../test_auto_gtractResampleAnisotropy.py | 34 + .../tests/test_auto_gtractResampleB0.py | 35 + .../test_auto_gtractResampleCodeImage.py | 34 + .../test_auto_gtractResampleDWIInPlace.py | 46 + .../tests/test_auto_gtractResampleFibers.py | 36 + .../diffusion/tests/test_auto_gtractTensor.py | 47 + ...auto_gtractTransformToDisplacementField.py | 32 + .../diffusion/tests/test_auto_maxcurvature.py | 32 + .../diffusion/tractography/__init__.py | 6 + .../diffusion/tractography/commandlineonly.py | 50 + .../diffusion/tractography/fiberprocess.py | 110 + .../diffusion/tractography/fibertrack.py | 94 + 
.../diffusion/tractography/tests/__init__.py | 1 + .../tests/test_auto_UKFTractography.py | 70 + .../tests/test_auto_fiberprocess.py | 48 + .../tests/test_auto_fiberstats.py | 27 + .../tests/test_auto_fibertrack.py | 41 + .../diffusion/tractography/ukftractography.py | 167 + nipype/interfaces/semtools/featurecreator.py | 51 + .../interfaces/semtools/filtering/__init__.py | 10 + .../semtools/filtering/denoising.py | 83 + .../semtools/filtering/featuredetection.py | 830 + .../semtools/filtering/tests/__init__.py | 1 + .../filtering/tests/test_auto_CannyEdge.py | 33 + ...to_CannySegmentationLevelSetImageFilter.py | 43 + .../filtering/tests/test_auto_DilateImage.py | 32 + .../filtering/tests/test_auto_DilateMask.py | 33 + .../filtering/tests/test_auto_DistanceMaps.py | 32 + .../test_auto_DumpBinaryTrainingVectors.py | 27 + .../filtering/tests/test_auto_ErodeImage.py | 32 + .../tests/test_auto_FlippedDifference.py | 31 + .../test_auto_GenerateBrainClippedImage.py | 32 + .../test_auto_GenerateSummedGradientImage.py | 33 + .../tests/test_auto_GenerateTestImage.py | 35 + ...GradientAnisotropicDiffusionImageFilter.py | 33 + .../tests/test_auto_HammerAttributeCreator.py | 31 + .../tests/test_auto_NeighborhoodMean.py | 32 + .../tests/test_auto_NeighborhoodMedian.py | 32 + .../tests/test_auto_STAPLEAnalysis.py | 31 + .../test_auto_TextureFromNoiseImageFilter.py | 31 + .../tests/test_auto_TextureMeasureFilter.py | 33 + .../tests/test_auto_UnbiasedNonLocalMeans.py | 45 + nipype/interfaces/semtools/generated.sh | 1 + nipype/interfaces/semtools/legacy/__init__.py | 3 + .../semtools/legacy/registration.py | 74 + .../semtools/legacy/tests/__init__.py | 1 + .../legacy/tests/test_auto_scalartransform.py | 41 + .../semtools/registration/__init__.py | 7 + .../semtools/registration/brainsfit.py | 411 + .../semtools/registration/brainsresample.py | 106 + .../semtools/registration/brainsresize.py | 63 + .../semtools/registration/specialized.py | 568 + .../semtools/registration/tests/__init__.py | 1 + .../tests/test_auto_BRAINSDemonWarp.py | 103 + .../registration/tests/test_auto_BRAINSFit.py | 145 + .../tests/test_auto_BRAINSResample.py | 42 + .../tests/test_auto_BRAINSResize.py | 32 + .../test_auto_BRAINSTransformFromFiducials.py | 35 + .../tests/test_auto_VBRAINSDemonWarp.py | 107 + .../semtools/segmentation/__init__.py | 6 + .../semtools/segmentation/specialized.py | 929 + .../semtools/segmentation/tests/__init__.py | 1 + .../segmentation/tests/test_auto_BRAINSABC.py | 104 + .../test_auto_BRAINSConstellationDetector.py | 123 + ...BRAINSCreateLabelMapFromProbabilityMaps.py | 47 + .../segmentation/tests/test_auto_BRAINSCut.py | 44 + .../tests/test_auto_BRAINSMultiSTAPLE.py | 42 + .../tests/test_auto_BRAINSROIAuto.py | 46 + ...t_auto_BinaryMaskEditorBasedOnLandmarks.py | 47 + .../segmentation/tests/test_auto_ESLR.py | 37 + .../interfaces/semtools/testing/__init__.py | 5 + .../semtools/testing/featuredetection.py | 39 + .../testing/generateaveragelmkfile.py | 47 + .../semtools/testing/landmarkscompare.py | 44 + nipype/interfaces/semtools/tests/__init__.py | 1 + .../semtools/tests/test_auto_DWICompare.py | 27 + .../tests/test_auto_DWISimpleCompare.py | 28 + ...o_GenerateCsfClippedFromClassifiedImage.py | 30 + .../interfaces/semtools/utilities/__init__.py | 11 + .../interfaces/semtools/utilities/brains.py | 1293 + .../semtools/utilities/tests/__init__.py | 1 + .../tests/test_auto_BRAINSAlignMSP.py | 50 + .../tests/test_auto_BRAINSClipInferior.py | 33 + .../test_auto_BRAINSConstellationModeler.py | 52 + 
.../tests/test_auto_BRAINSEyeDetector.py | 32 + ...est_auto_BRAINSInitializedControlPoints.py | 40 + .../test_auto_BRAINSLandmarkInitializer.py | 34 + .../test_auto_BRAINSLinearModelerEPCA.py | 27 + .../tests/test_auto_BRAINSLmkTransform.py | 41 + .../utilities/tests/test_auto_BRAINSMush.py | 63 + .../tests/test_auto_BRAINSSnapShotWriter.py | 47 + .../tests/test_auto_BRAINSTransformConvert.py | 40 + ...st_auto_BRAINSTrimForegroundInDirection.py | 36 + .../tests/test_auto_CleanUpOverlapLabels.py | 30 + .../tests/test_auto_FindCenterOfBrain.py | 66 + ...auto_GenerateLabelMapFromProbabilityMap.py | 31 + .../tests/test_auto_ImageRegionPlotter.py | 35 + .../tests/test_auto_JointHistogram.py | 32 + .../tests/test_auto_ShuffleVectorsModule.py | 31 + .../utilities/tests/test_auto_fcsv_to_hdf5.py | 40 + .../tests/test_auto_insertMidACPCpoint.py | 30 + ...test_auto_landmarksConstellationAligner.py | 30 + ...test_auto_landmarksConstellationWeights.py | 32 + nipype/interfaces/slicer/__init__.py | 12 + nipype/interfaces/slicer/base.py | 6 + nipype/interfaces/slicer/converters.py | 161 + .../interfaces/slicer/diffusion/__init__.py | 6 + .../interfaces/slicer/diffusion/diffusion.py | 654 + .../slicer/diffusion/tests/__init__.py | 1 + .../diffusion/tests/test_auto_DTIexport.py | 34 + .../diffusion/tests/test_auto_DTIimport.py | 35 + .../test_auto_DWIJointRicianLMMSEFilter.py | 44 + .../tests/test_auto_DWIRicianLMMSEFilter.py | 50 + .../tests/test_auto_DWIToDTIEstimation.py | 45 + ..._auto_DiffusionTensorScalarMeasurements.py | 35 + ...est_auto_DiffusionWeightedVolumeMasking.py | 44 + .../tests/test_auto_ResampleDTIVolume.py | 71 + .../test_auto_TractographyLabelMapSeeding.py | 55 + .../interfaces/slicer/filtering/__init__.py | 18 + .../interfaces/slicer/filtering/arithmetic.py | 268 + .../slicer/filtering/checkerboardfilter.py | 54 + .../interfaces/slicer/filtering/denoising.py | 210 + .../slicer/filtering/extractskeleton.py | 59 + .../slicer/filtering/histogrammatching.py | 75 + .../slicer/filtering/imagelabelcombine.py | 50 + .../interfaces/slicer/filtering/morphology.py | 109 + .../filtering/n4itkbiasfieldcorrection.py | 98 + .../resamplescalarvectordwivolume.py | 153 + .../slicer/filtering/tests/__init__.py | 1 + .../tests/test_auto_AddScalarVolumes.py | 39 + .../tests/test_auto_CastScalarVolume.py | 35 + .../tests/test_auto_CheckerBoardFilter.py | 42 + ...test_auto_CurvatureAnisotropicDiffusion.py | 37 + .../tests/test_auto_ExtractSkeleton.py | 38 + .../test_auto_GaussianBlurImageFilter.py | 35 + .../test_auto_GradientAnisotropicDiffusion.py | 37 + .../test_auto_GrayscaleFillHoleImageFilter.py | 34 + ...test_auto_GrayscaleGrindPeakImageFilter.py | 34 + .../tests/test_auto_HistogramMatching.py | 41 + .../tests/test_auto_ImageLabelCombine.py | 39 + .../tests/test_auto_MaskScalarVolume.py | 40 + .../tests/test_auto_MedianImageFilter.py | 38 + .../tests/test_auto_MultiplyScalarVolumes.py | 39 + .../test_auto_N4ITKBiasFieldCorrection.py | 55 + ...test_auto_ResampleScalarVectorDWIVolume.py | 69 + .../tests/test_auto_SubtractScalarVolumes.py | 39 + .../tests/test_auto_ThresholdScalarVolume.py | 39 + ...auto_VotingBinaryHoleFillingImageFilter.py | 41 + .../slicer/filtering/thresholdscalarvolume.py | 61 + .../votingbinaryholefillingimagefilter.py | 64 + nipype/interfaces/slicer/generate_classes.py | 555 + nipype/interfaces/slicer/legacy/__init__.py | 10 + nipype/interfaces/slicer/legacy/converters.py | 41 + .../slicer/legacy/diffusion/__init__.py | 3 + .../slicer/legacy/diffusion/denoising.py | 76 + 
.../slicer/legacy/diffusion/tests/__init__.py | 1 + ...est_auto_DWIUnbiasedNonLocalMeansFilter.py | 48 + nipype/interfaces/slicer/legacy/filtering.py | 123 + .../interfaces/slicer/legacy/registration.py | 655 + .../interfaces/slicer/legacy/segmentation.py | 65 + .../slicer/legacy/tests/__init__.py | 1 + .../tests/test_auto_AffineRegistration.py | 51 + ...test_auto_BSplineDeformableRegistration.py | 57 + .../test_auto_BSplineToDeformationField.py | 31 + .../test_auto_ExpertAutomatedRegistration.py | 68 + .../tests/test_auto_LinearRegistration.py | 58 + ..._auto_MultiResolutionAffineRegistration.py | 51 + .../test_auto_OtsuThresholdImageFilter.py | 37 + .../test_auto_OtsuThresholdSegmentation.py | 38 + .../tests/test_auto_ResampleScalarVolume.py | 39 + .../tests/test_auto_RigidRegistration.py | 59 + .../slicer/quantification/__init__.py | 4 + .../quantification/changequantification.py | 86 + .../petstandarduptakevaluecomputation.py | 75 + .../slicer/quantification/tests/__init__.py | 1 + .../test_auto_IntensityDifferenceMetric.py | 51 + ..._auto_PETStandardUptakeValueComputation.py | 38 + .../slicer/registration/__init__.py | 6 + .../slicer/registration/brainsfit.py | 380 + .../slicer/registration/brainsresample.py | 103 + .../slicer/registration/specialized.py | 615 + .../slicer/registration/tests/__init__.py | 1 + .../tests/test_auto_ACPCTransform.py | 32 + .../tests/test_auto_BRAINSDemonWarp.py | 103 + .../registration/tests/test_auto_BRAINSFit.py | 142 + .../tests/test_auto_BRAINSResample.py | 42 + .../tests/test_auto_FiducialRegistration.py | 34 + .../tests/test_auto_VBRAINSDemonWarp.py | 107 + .../slicer/segmentation/__init__.py | 5 + .../simpleregiongrowingsegmentation.py | 72 + .../slicer/segmentation/specialized.py | 294 + .../slicer/segmentation/tests/__init__.py | 1 + .../tests/test_auto_BRAINSROIAuto.py | 44 + .../tests/test_auto_EMSegmentCommandLine.py | 64 + .../test_auto_RobustStatisticsSegmenter.py | 43 + ...st_auto_SimpleRegionGrowingSegmentation.py | 41 + nipype/interfaces/slicer/surface.py | 367 + nipype/interfaces/slicer/tests/__init__.py | 1 + .../tests/test_auto_DicomToNrrdConverter.py | 38 + ...test_auto_EMSegmentTransformToNewFormat.py | 31 + .../tests/test_auto_GrayscaleModelMaker.py | 40 + .../tests/test_auto_LabelMapSmoothing.py | 38 + .../slicer/tests/test_auto_MergeModels.py | 38 + .../slicer/tests/test_auto_ModelMaker.py | 52 + .../slicer/tests/test_auto_ModelToLabelMap.py | 39 + .../tests/test_auto_OrientScalarVolume.py | 35 + .../tests/test_auto_ProbeVolumeWithModel.py | 38 + .../tests/test_auto_SlicerCommandLine.py | 18 + nipype/interfaces/slicer/utilities.py | 54 + nipype/interfaces/spm/__init__.py | 15 + nipype/interfaces/spm/base.py | 626 + nipype/interfaces/spm/model.py | 1123 + nipype/interfaces/spm/preprocess.py | 2223 + nipype/interfaces/spm/tests/__init__.py | 1 + .../spm/tests/test_auto_Analyze2nii.py | 39 + .../spm/tests/test_auto_ApplyDeformations.py | 41 + .../test_auto_ApplyInverseDeformation.py | 44 + .../spm/tests/test_auto_ApplyTransform.py | 34 + .../spm/tests/test_auto_CalcCoregAffine.py | 38 + .../spm/tests/test_auto_Coregister.py | 57 + .../spm/tests/test_auto_CreateWarped.py | 41 + .../interfaces/spm/tests/test_auto_DARTEL.py | 44 + .../spm/tests/test_auto_DARTELNorm2MNI.py | 49 + .../spm/tests/test_auto_DicomImport.py | 48 + .../spm/tests/test_auto_EstimateContrast.py | 50 + .../spm/tests/test_auto_EstimateModel.py | 51 + .../spm/tests/test_auto_FactorialDesign.py | 58 + .../spm/tests/test_auto_FieldMap.py | 128 + 
.../spm/tests/test_auto_Level1Design.py | 53 + .../test_auto_MultipleRegressionDesign.py | 67 + .../spm/tests/test_auto_NewSegment.py | 49 + .../spm/tests/test_auto_Normalize.py | 78 + .../spm/tests/test_auto_Normalize12.py | 67 + .../tests/test_auto_OneSampleTTestDesign.py | 62 + .../spm/tests/test_auto_PairedTTestDesign.py | 64 + .../interfaces/spm/tests/test_auto_Realign.py | 59 + .../interfaces/spm/tests/test_auto_Reslice.py | 32 + .../spm/tests/test_auto_ResliceToReference.py | 36 + .../spm/tests/test_auto_SPMCommand.py | 21 + .../interfaces/spm/tests/test_auto_Segment.py | 64 + .../spm/tests/test_auto_SliceTiming.py | 57 + .../interfaces/spm/tests/test_auto_Smooth.py | 40 + .../spm/tests/test_auto_Threshold.py | 51 + .../tests/test_auto_ThresholdStatistics.py | 46 + .../tests/test_auto_TwoSampleTTestDesign.py | 68 + .../spm/tests/test_auto_VBMSegment.py | 164 + nipype/interfaces/spm/tests/test_base.py | 168 + nipype/interfaces/spm/tests/test_model.py | 44 + .../interfaces/spm/tests/test_preprocess.py | 116 + nipype/interfaces/spm/tests/test_utils.py | 83 + nipype/interfaces/spm/utils.py | 513 + nipype/interfaces/tests/__init__.py | 1 + .../tests/test_auto_BIDSDataGrabber.py | 24 + nipype/interfaces/tests/test_auto_Bru2.py | 37 + nipype/interfaces/tests/test_auto_C3d.py | 52 + .../tests/test_auto_C3dAffineTool.py | 46 + nipype/interfaces/tests/test_auto_CopyMeta.py | 24 + .../interfaces/tests/test_auto_DataFinder.py | 26 + .../interfaces/tests/test_auto_DataGrabber.py | 26 + nipype/interfaces/tests/test_auto_DataSink.py | 32 + nipype/interfaces/tests/test_auto_Dcm2nii.py | 101 + nipype/interfaces/tests/test_auto_Dcm2niix.py | 85 + nipype/interfaces/tests/test_auto_DcmStack.py | 28 + .../tests/test_auto_FreeSurferSource.py | 106 + .../tests/test_auto_GroupAndStack.py | 28 + nipype/interfaces/tests/test_auto_IOBase.py | 12 + .../tests/test_auto_JSONFileGrabber.py | 22 + .../tests/test_auto_JSONFileSink.py | 23 + .../interfaces/tests/test_auto_LookupMeta.py | 22 + .../tests/test_auto_MatlabCommand.py | 47 + .../interfaces/tests/test_auto_MergeNifti.py | 26 + nipype/interfaces/tests/test_auto_MeshFix.py | 98 + .../interfaces/tests/test_auto_MySQLSink.py | 27 + .../tests/test_auto_NiftiGeneratorBase.py | 12 + .../tests/test_auto_NilearnBaseInterface.py | 12 + nipype/interfaces/tests/test_auto_PETPVC.py | 74 + .../interfaces/tests/test_auto_Quickshear.py | 46 + nipype/interfaces/tests/test_auto_Reorient.py | 25 + nipype/interfaces/tests/test_auto_Rescale.py | 24 + .../tests/test_auto_S3DataGrabber.py | 29 + .../tests/test_auto_SEMLikeCommandLine.py | 18 + .../interfaces/tests/test_auto_SQLiteSink.py | 15 + .../tests/test_auto_SSHDataGrabber.py | 32 + .../interfaces/tests/test_auto_SelectFiles.py | 24 + .../tests/test_auto_SignalExtraction.py | 27 + .../tests/test_auto_SlicerCommandLine.py | 26 + .../interfaces/tests/test_auto_SplitNifti.py | 25 + nipype/interfaces/tests/test_auto_XNATSink.py | 32 + .../interfaces/tests/test_auto_XNATSource.py | 34 + nipype/interfaces/tests/test_extra_dcm2nii.py | 57 + nipype/interfaces/tests/test_image.py | 64 + nipype/interfaces/tests/test_io.py | 689 + nipype/interfaces/tests/test_matlab.py | 128 + nipype/interfaces/tests/test_nilearn.py | 200 + nipype/interfaces/utility/__init__.py | 13 + nipype/interfaces/utility/base.py | 431 + nipype/interfaces/utility/csv.py | 102 + nipype/interfaces/utility/tests/__init__.py | 1 + .../utility/tests/test_auto_AssertEqual.py | 15 + .../utility/tests/test_auto_CSVReader.py | 22 + .../utility/tests/test_auto_Function.py 
| 19 + .../tests/test_auto_IdentityInterface.py | 19 + .../utility/tests/test_auto_Merge.py | 23 + .../utility/tests/test_auto_Rename.py | 25 + .../utility/tests/test_auto_Select.py | 22 + .../utility/tests/test_auto_Split.py | 23 + nipype/interfaces/utility/tests/test_base.py | 86 + nipype/interfaces/utility/tests/test_csv.py | 30 + .../interfaces/utility/tests/test_wrappers.py | 138 + nipype/interfaces/utility/wrappers.py | 162 + nipype/interfaces/vista/__init__.py | 4 + nipype/interfaces/vista/tests/__init__.py | 1 + .../vista/tests/test_auto_Vnifti2Image.py | 42 + .../vista/tests/test_auto_VtoMat.py | 38 + nipype/interfaces/vista/vista.py | 89 + nipype/interfaces/vtkbase.py | 84 + nipype/interfaces/workbench/__init__.py | 5 + nipype/interfaces/workbench/base.py | 69 + nipype/interfaces/workbench/metric.py | 161 + nipype/interfaces/workbench/tests/__init__.py | 0 .../tests/test_auto_MetricResample.py | 85 + .../workbench/tests/test_auto_WBCommand.py | 18 + nipype/pipeline/__init__.py | 11 + nipype/pipeline/engine/__init__.py | 14 + nipype/pipeline/engine/base.py | 104 + nipype/pipeline/engine/nodes.py | 1272 + nipype/pipeline/engine/report_template.html | 264 + nipype/pipeline/engine/report_template2.html | 120 + nipype/pipeline/engine/tests/__init__.py | 3 + nipype/pipeline/engine/tests/test_base.py | 89 + nipype/pipeline/engine/tests/test_engine.py | 519 + nipype/pipeline/engine/tests/test_join.py | 661 + nipype/pipeline/engine/tests/test_nodes.py | 292 + nipype/pipeline/engine/tests/test_utils.py | 226 + .../pipeline/engine/tests/test_workflows.py | 275 + nipype/pipeline/engine/utils.py | 1674 + nipype/pipeline/engine/workflows.py | 1045 + nipype/pipeline/plugins/__init__.py | 24 + nipype/pipeline/plugins/base.py | 610 + nipype/pipeline/plugins/condor.py | 118 + nipype/pipeline/plugins/dagman.py | 179 + nipype/pipeline/plugins/debug.py | 39 + nipype/pipeline/plugins/ipython.py | 131 + nipype/pipeline/plugins/legacymultiproc.py | 382 + nipype/pipeline/plugins/linear.py | 61 + nipype/pipeline/plugins/lsf.py | 124 + nipype/pipeline/plugins/multiproc.py | 351 + nipype/pipeline/plugins/oar.py | 140 + nipype/pipeline/plugins/pbs.py | 123 + nipype/pipeline/plugins/pbsgraph.py | 65 + .../pipeline/plugins/semaphore_singleton.py | 5 + nipype/pipeline/plugins/sge.py | 450 + nipype/pipeline/plugins/sgegraph.py | 162 + nipype/pipeline/plugins/slurm.py | 136 + nipype/pipeline/plugins/slurmgraph.py | 162 + nipype/pipeline/plugins/somaflow.py | 45 + nipype/pipeline/plugins/tests/__init__.py | 3 + nipype/pipeline/plugins/tests/test_base.py | 41 + .../pipeline/plugins/tests/test_callback.py | 111 + nipype/pipeline/plugins/tests/test_debug.py | 57 + .../tests/test_legacymultiproc_nondaemon.py | 166 + nipype/pipeline/plugins/tests/test_linear.py | 47 + .../pipeline/plugins/tests/test_multiproc.py | 137 + nipype/pipeline/plugins/tests/test_oar.py | 55 + nipype/pipeline/plugins/tests/test_pbs.py | 56 + .../pipeline/plugins/tests/test_somaflow.py | 52 + nipype/pipeline/plugins/tests/test_tools.py | 63 + nipype/pipeline/plugins/tools.py | 165 + nipype/pkg_info.py | 110 + nipype/pytest.ini | 6 + nipype/refs.py | 20 + nipype/scripts/__init__.py | 0 nipype/scripts/cli.py | 258 + nipype/scripts/crash_files.py | 88 + nipype/scripts/instance.py | 46 + nipype/scripts/utils.py | 126 + nipype/sphinxext/__init__.py | 6 + nipype/sphinxext/plot_workflow.py | 768 + nipype/testing/__init__.py | 35 + nipype/testing/data/4d_dwi.nii | 0 nipype/testing/data/A.scheme | 0 nipype/testing/data/A_qmat.Bdouble | 0 
nipype/testing/data/A_recon_params.Bdouble | 0 .../data/BrainSegmentationPrior01.nii.gz | 0 .../data/BrainSegmentationPrior02.nii.gz | 0 .../data/BrainSegmentationPrior03.nii.gz | 0 .../data/BrainSegmentationPrior04.nii.gz | 0 nipype/testing/data/FLASH1.mgz | 0 nipype/testing/data/FLASH2.mgz | 0 nipype/testing/data/FLASH3.mgz | 0 nipype/testing/data/Fred+orig | 0 nipype/testing/data/FreeSurferColorLUT.txt | 1 + ...eSurferColorLUT_adapted_aparc+aseg_out.pck | 0 .../testing/data/MASK_average_thal_right.nii | 0 nipype/testing/data/NWARP | 0 nipype/testing/data/PD.mgz | 0 .../ProbabilityMaskOfStudyTemplate.nii.gz | 0 nipype/testing/data/Q25_warp+tlrc.HEAD | 0 nipype/testing/data/QSH_peaks.Bdouble | 0 nipype/testing/data/README | 6 + nipype/testing/data/ROI_scale500.nii.gz | 0 nipype/testing/data/SPM.mat | 0 nipype/testing/data/SubjectA.Bfloat | 0 nipype/testing/data/T1.mgz | 0 nipype/testing/data/T1.nii | 0 nipype/testing/data/T1.nii.gz | 0 nipype/testing/data/T1_brain.nii | 0 nipype/testing/data/T1map.nii.gz | 0 nipype/testing/data/TI4D.nii.gz | 0 nipype/testing/data/TPM.nii | 0 .../testing/data/Template_1_IXI550_MNI152.nii | 0 nipype/testing/data/Template_6.nii | 0 nipype/testing/data/TransformParameters.0.txt | 0 nipype/testing/data/afni_output.3D | 0 nipype/testing/data/allFA.nii | 0 nipype/testing/data/all_FA.nii.gz | 0 nipype/testing/data/anat_coreg.mif | 0 nipype/testing/data/anatomical.nii | 0 nipype/testing/data/ants_Affine.txt | 0 nipype/testing/data/ants_Warp.nii.gz | 0 nipype/testing/data/ants_deformed.nii.gz | 0 nipype/testing/data/aparc+aseg.nii | 0 nipype/testing/data/aseg.mgz | 0 nipype/testing/data/asl.nii.gz | 0 nipype/testing/data/atlas.nii.gz | 0 nipype/testing/data/b0.nii | 0 nipype/testing/data/b0.nii.gz | 0 nipype/testing/data/b0_b0rev.nii | 0 nipype/testing/data/ballstickfit_data.Bfloat | 0 .../data/bedpostxout/do_not_delete.txt | 1 + nipype/testing/data/brain_mask.nii | 0 .../testing/data/brain_study_template.nii.gz | 0 nipype/testing/data/brain_track.Bdouble | 0 nipype/testing/data/brukerdir/fid | 0 nipype/testing/data/brukerdir/pdata/1/2dseq | 0 nipype/testing/data/bvals | 0 nipype/testing/data/bvals.scheme | 0 nipype/testing/data/bvecs | 0 nipype/testing/data/bvecs.scheme | 0 nipype/testing/data/c1s1.nii | 0 nipype/testing/data/c1s3.nii | 0 nipype/testing/data/clustering.mat | 0 nipype/testing/data/cmatrix.mat | 0 nipype/testing/data/complex.nii | 0 nipype/testing/data/config.ini | 2 + nipype/testing/data/cont1.nii | 0 nipype/testing/data/cont1a.nii | 0 nipype/testing/data/cont2.nii | 0 nipype/testing/data/cont2a.nii | 0 nipype/testing/data/converted.trk | 0 nipype/testing/data/cope.nii.gz | 0 nipype/testing/data/cope1.nii.gz | 0 nipype/testing/data/cope1run1.nii.gz | 0 nipype/testing/data/cope1run2.nii.gz | 0 nipype/testing/data/cope2run1.nii.gz | 0 nipype/testing/data/cope2run2.nii.gz | 0 nipype/testing/data/cortex.label | 0 nipype/testing/data/cov_split.mat | 0 nipype/testing/data/csd.mif | 0 nipype/testing/data/data.Bfloat | 0 nipype/testing/data/db.xml | 0 nipype/testing/data/degree.csv | 0 nipype/testing/data/degree.mat | 0 nipype/testing/data/design.con | 0 nipype/testing/data/design.mat | 0 nipype/testing/data/dicomdir/123456-1-1.dcm | 0 nipype/testing/data/diffusion.nii | 0 nipype/testing/data/diffusion_weighted.nii | 0 nipype/testing/data/dilated_wm_mask.nii | 0 nipype/testing/data/dirs.txt | 0 nipype/testing/data/dofrun1 | 0 nipype/testing/data/dofrun2 | 0 nipype/testing/data/ds003_sub-01_mc.DVARS | 19 + nipype/testing/data/ds003_sub-01_mc.nii.gz | Bin 0 
-> 168086 bytes .../data/ds003_sub-01_mc_brainmask.nii.gz | Bin 0 -> 261 bytes nipype/testing/data/ds005/filler.txt | 0 nipype/testing/data/dteig.Bdouble | 0 nipype/testing/data/dti.mif | 0 nipype/testing/data/dwi.mif | 0 nipype/testing/data/dwi.nii.gz | 0 .../testing/data/dwi2anat_InverseWarp.nii.gz | 0 nipype/testing/data/dwi2anat_Warp.nii.gz | 0 nipype/testing/data/dwi2anat_coreg_Affine.txt | 0 nipype/testing/data/dwi_CSD_tracked.tck | 0 nipype/testing/data/dwi_FA.mif | 0 nipype/testing/data/dwi_WMProb.mif | 0 nipype/testing/data/dwi_evals.nii | 0 nipype/testing/data/dwi_tensor.mif | 0 nipype/testing/data/elastix.txt | 0 nipype/testing/data/encoding.txt | 0 nipype/testing/data/epi.nii | 0 nipype/testing/data/epi_acqp.txt | 0 nipype/testing/data/epi_index.txt | 0 nipype/testing/data/epi_mask.nii | 0 nipype/testing/data/epi_param.txt | 10 + nipype/testing/data/epi_phasediff.nii | 0 nipype/testing/data/epi_rev.nii | 0 nipype/testing/data/f1.1D | 0 nipype/testing/data/f2.1D | 0 nipype/testing/data/fa.nii.gz | 0 nipype/testing/data/fdir00.nii | 0 nipype/testing/data/fdir01.nii | 0 nipype/testing/data/ffra00.nii | 0 nipype/testing/data/ffra01.nii | 0 nipype/testing/data/fieldmap_mag.nii | 0 nipype/testing/data/fieldmap_mag_brain.nii | 0 .../data/fieldmap_phase_fslprepared.nii | 0 nipype/testing/data/first_merged.nii.gz | 0 nipype/testing/data/fitted_data1.Bfloat | 0 nipype/testing/data/fitted_data2.Bfloat | 0 nipype/testing/data/fixed1.nii | 0 nipype/testing/data/fixed2.nii | 0 nipype/testing/data/flash_05.mgz | 0 nipype/testing/data/flash_30.mgz | 0 nipype/testing/data/flirt.mat | 0 nipype/testing/data/fmri_timeseries.csv | 251 + .../testing/data/fmri_timeseries_nolabels.csv | 1 + nipype/testing/data/fods.mif | 0 nipype/testing/data/fsLUT_aparc+aseg.pck | 0 ...d_to-fsaverage.L.sphere.32k_fs_LR.surf.gii | 0 ....L.midthickness_va_avg.32k_fs_LR.shape.gii | 0 ....midthickness_va_avg.10k_fsavg_L.shape.gii | 0 ...average5_std_sphere.L.10k_fsavg_L.surf.gii | 0 nipype/testing/data/fsl_mcflirt_movpar.txt | 365 + .../testing/data/fsl_motion_outliers_fd.txt | 364 + .../testing/data/func2anat_InverseWarp.nii.gz | 0 .../testing/data/func2anat_coreg_Affine.txt | 0 .../data/func2anat_coreg_InverseWarp.nii.gz | 0 nipype/testing/data/func_epi_1_1.nii | 0 nipype/testing/data/func_to_struct.mat | 0 nipype/testing/data/functional.HEAD | 0 nipype/testing/data/functional.nii | 0 nipype/testing/data/functional.par | 0 nipype/testing/data/functional.rms | 0 nipype/testing/data/functional2.nii | 0 nipype/testing/data/functional3.nii | 0 nipype/testing/data/functional_1.dcm | 0 nipype/testing/data/functional_2.dcm | 0 nipype/testing/data/im1.nii | 0 nipype/testing/data/im2.nii | 0 nipype/testing/data/im3.nii | 0 nipype/testing/data/im_affine.aff | 0 nipype/testing/data/im_warp.df.nii | 0 nipype/testing/data/image.nii | 0 nipype/testing/data/image.v | 0 nipype/testing/data/indices-labels.txt | 0 nipype/testing/data/indices.txt | 0 nipype/testing/data/input1.xfm | 0 nipype/testing/data/jsongrabber.txt | 1 + nipype/testing/data/label.mgz | 0 nipype/testing/data/lh-pial.stl | 0 nipype/testing/data/lh.cope1.mgz | 0 nipype/testing/data/lh.cope1.nii.gz | 0 nipype/testing/data/lh.hippocampus.stl | 0 nipype/testing/data/lh.pial | 0 nipype/testing/data/lh.pial_converted.gii | 0 nipype/testing/data/lh.white | 0 nipype/testing/data/lta1.lta | 0 nipype/testing/data/lta2.lta | 0 nipype/testing/data/lut_file | 0 nipype/testing/data/magnitude.nii | 0 nipype/testing/data/maps.nii | 0 nipype/testing/data/mask.1D | 0 
nipype/testing/data/mask.mif | 0 nipype/testing/data/mask.nii | 0 nipype/testing/data/mask.nii.gz | 0 nipype/testing/data/mean_func.nii.gz | 0 nipype/testing/data/merged_f1samples.nii.gz | 0 nipype/testing/data/merged_fsamples.nii | 0 nipype/testing/data/merged_ph1samples.nii.gz | 0 nipype/testing/data/merged_phsamples.nii | 0 nipype/testing/data/merged_th1samples.nii.gz | 0 nipype/testing/data/merged_thsamples.nii | 0 nipype/testing/data/minc_initial.xfm | 0 nipype/testing/data/minc_nlp.conf | 0 nipype/testing/data/minc_test_2D_00.mnc | 0 nipype/testing/data/minc_test_2D_01.mnc | 0 nipype/testing/data/minc_test_2D_02.mnc | 0 nipype/testing/data/minc_test_2D_03.mnc | 0 nipype/testing/data/minc_test_2D_04.mnc | 0 nipype/testing/data/minc_test_2D_05.mnc | 0 nipype/testing/data/minc_test_2D_06.mnc | 0 nipype/testing/data/minc_test_2D_07.mnc | 0 nipype/testing/data/minc_test_2D_08.mnc | 0 nipype/testing/data/minc_test_2D_09.mnc | 0 nipype/testing/data/minc_test_3D_00.mnc | 0 nipype/testing/data/minc_test_3D_01.mnc | 0 nipype/testing/data/minc_test_3D_02.mnc | 0 nipype/testing/data/minc_test_3D_03.mnc | 0 nipype/testing/data/minc_test_3D_04.mnc | 0 nipype/testing/data/minc_test_3D_05.mnc | 0 nipype/testing/data/minc_test_3D_06.mnc | 0 nipype/testing/data/minc_test_3D_07.mnc | 0 nipype/testing/data/minc_test_3D_08.mnc | 0 nipype/testing/data/minc_test_3D_09.mnc | 0 nipype/testing/data/mni.nii | 0 nipype/testing/data/mni2t1.nii | 0 nipype/testing/data/model.pklz | 0 nipype/testing/data/moving.csv | 0 nipype/testing/data/moving1.nii | 0 nipype/testing/data/moving2.nii | 0 nipype/testing/data/mrtrix3_labelconfig.txt | 0 nipype/testing/data/my_database.db | 0 nipype/testing/data/network0.aparc+aseg.nii | 0 nipype/testing/data/network0.gpickle | 0 nipype/testing/data/nodif_brain_mask.nii.gz | 0 nipype/testing/data/norm.mgz | 0 nipype/testing/data/output.csv | 0 nipype/testing/data/pdfs.Bfloat | 0 nipype/testing/data/peak_directions.mif | 0 nipype/testing/data/pet.nii.gz | 0 nipype/testing/data/pet_resliced.nii | 0 nipype/testing/data/phase.nii | 0 nipype/testing/data/rc1s1.nii | 0 nipype/testing/data/rc1s2.nii | 0 nipype/testing/data/rc2s1.nii | 0 nipype/testing/data/rc2s2.nii | 0 nipype/testing/data/realign_json.json | 34 + nipype/testing/data/ref_class0.nii | 0 nipype/testing/data/ref_class1.nii | 0 nipype/testing/data/register.dat | 0 nipype/testing/data/register.mat | 0 nipype/testing/data/resp.1D | 0 nipype/testing/data/response.txt | 0 nipype/testing/data/resting.nii | 0 nipype/testing/data/resting2anat_Warp.nii.gz | 0 .../data/resting2anat_coreg_Affine.txt | 0 nipype/testing/data/rgb.nii.gz | 0 nipype/testing/data/rh-pial.stl | 0 nipype/testing/data/rh.pial | 0 nipype/testing/data/rh.pial_converted.gii | 0 nipype/testing/data/roi01.nii | 0 nipype/testing/data/roi01_idx.npz | 0 nipype/testing/data/roi02.nii | 0 nipype/testing/data/roi02_idx.npz | 0 nipype/testing/data/roi03.nii | 0 nipype/testing/data/roi03_idx.npz | 0 nipype/testing/data/roi04.nii | 0 nipype/testing/data/roi04_idx.npz | 0 nipype/testing/data/roi05.nii | 0 nipype/testing/data/roi05_idx.npz | 0 nipype/testing/data/run1+orig | 0 nipype/testing/data/run1+orig_model | 0 nipype/testing/data/run1_categories.1D | 0 nipype/testing/data/run2+orig | 0 nipype/testing/data/run2_categories.1D | 0 nipype/testing/data/seed.1D | 0 nipype/testing/data/seed_mask.nii | 0 nipype/testing/data/seed_source.nii.gz | 0 nipype/testing/data/seeds_to_M1.nii | 0 nipype/testing/data/seeds_to_M2.nii | 0 nipype/testing/data/segmentation0.nii.gz | Bin 0 -> 
28514 bytes nipype/testing/data/segmentation1.nii.gz | Bin 0 -> 26561 bytes nipype/testing/data/session_info.npz | 0 nipype/testing/data/skeleton_mask.nii.gz | 0 nipype/testing/data/slice_timing.1D | 1 + .../data/smri_ants_registration_settings.json | 179 + nipype/testing/data/spmT_0001.img | 0 nipype/testing/data/spminfo | 14 + nipype/testing/data/streamlines.trk | 0 nipype/testing/data/struct2mni.nii | 0 nipype/testing/data/struct_to_func.mat | 0 nipype/testing/data/struct_to_template.mat | 0 nipype/testing/data/structural.nii | 0 nipype/testing/data/study_template.nii.gz | 0 nipype/testing/data/sub-01_dir-LR_epi.nii.gz | 0 nipype/testing/data/sub-01_dir-RL_epi.nii.gz | 0 ...task-rest_bold_space-fsaverage5.L.func.gii | 0 nipype/testing/data/subj1.cff | 0 nipype/testing/data/subj1.pck | 0 nipype/testing/data/subj2.cff | 0 nipype/testing/data/subj2.pck | 0 nipype/testing/data/subjectDesign.con | 0 nipype/testing/data/subjectDesign.mat | 0 nipype/testing/data/surf.txt | 0 nipype/testing/data/surf01.vtk | 93104 ++++++++++++++++ nipype/testing/data/surf1.vtk | 0 nipype/testing/data/surf2.vtk | 0 nipype/testing/data/targets_MASK1.nii | 0 nipype/testing/data/targets_MASK2.nii | 0 .../testing/data/tbss_dir/do_not_delete.txt | 1 + nipype/testing/data/tdi.mif | 0 .../testing/data/tensor_fitted_data.Bdouble | 0 nipype/testing/data/timeDesign.con | 0 nipype/testing/data/timeDesign.mat | 0 nipype/testing/data/timeseries.txt | 0 nipype/testing/data/tissue+air_map.nii | 0 nipype/testing/data/tissues.nii.gz | 0 nipype/testing/data/topup_encoding.txt | 0 nipype/testing/data/topup_fieldcoef.nii.gz | 0 nipype/testing/data/topup_movpar.txt | 0 nipype/testing/data/tpm_00.nii.gz | Bin 0 -> 161167 bytes nipype/testing/data/tpm_01.nii.gz | Bin 0 -> 140347 bytes nipype/testing/data/tpm_02.nii.gz | Bin 0 -> 139761 bytes nipype/testing/data/tpms_msk.nii.gz | Bin 0 -> 20495 bytes nipype/testing/data/track1.trk | 0 nipype/testing/data/track2.trk | 0 nipype/testing/data/tracks.tck | 0 nipype/testing/data/tracks.trk | 0 nipype/testing/data/tract_data.Bfloat | 0 nipype/testing/data/tracts.Bdouble | 0 nipype/testing/data/trans.mat | 0 nipype/testing/data/tst_class0.nii | 0 nipype/testing/data/tst_class1.nii | 0 nipype/testing/data/u_rc1s1_Template.nii | 0 nipype/testing/data/u_rc1s2_Template.nii | 0 nipype/testing/data/u_rc1s3_Template.nii | 0 nipype/testing/data/varcope.nii.gz | 0 nipype/testing/data/varcope1run1.nii.gz | 0 nipype/testing/data/varcope1run2.nii.gz | 0 nipype/testing/data/varcope2run1.nii.gz | 0 nipype/testing/data/varcope2run2.nii.gz | 0 nipype/testing/data/voxel-order_data.Bfloat | 0 nipype/testing/data/vsm.nii | 0 nipype/testing/data/warpfield.nii | 0 nipype/testing/data/weights.txt | 0 nipype/testing/data/wm.mgz | 0 nipype/testing/data/wm_mask.mif | 0 nipype/testing/data/wm_undersampled.nii | 0 nipype/testing/data/zstat1.nii.gz | 0 nipype/testing/decorators.py | 99 + nipype/testing/fixtures.py | 120 + nipype/testing/tests/test_utils.py | 46 + nipype/testing/utils.py | 100 + nipype/tests/__init__.py | 0 nipype/tests/test_nipype.py | 19 + nipype/utils/README.txt | 11 + nipype/utils/__init__.py | 6 + nipype/utils/config.py | 364 + nipype/utils/docparse.py | 378 + nipype/utils/draw_gantt_chart.py | 565 + nipype/utils/filemanip.py | 981 + nipype/utils/functions.py | 48 + nipype/utils/logger.py | 113 + nipype/utils/matlabtools.py | 75 + nipype/utils/misc.py | 326 + nipype/utils/nipype2boutiques.py | 292 + nipype/utils/nipype_cmd.py | 88 + nipype/utils/onetime.py | 87 + nipype/utils/profiler.py | 382 + 
nipype/utils/provenance.py | 481 + nipype/utils/spm_docs.py | 63 + nipype/utils/spm_flat_config.m | 39 + nipype/utils/spm_get_doc.m | 21 + nipype/utils/tests/__init__.py | 21 + nipype/utils/tests/test_cmd.py | 93 + nipype/utils/tests/test_config.py | 292 + nipype/utils/tests/test_docparse.py | 65 + nipype/utils/tests/test_filemanip.py | 572 + nipype/utils/tests/test_functions.py | 47 + nipype/utils/tests/test_misc.py | 91 + nipype/utils/tests/test_nipype2boutiques.py | 17 + nipype/utils/tests/test_provenance.py | 40 + nipype/utils/tests/use_resources | 38 + nipype/utils/tmpdirs.py | 50 + nipype/workflows/__init__.py | 3 + nipype/workflows/data/__init__.py | 17 + nipype/workflows/data/ecc.sch | 67 + nipype/workflows/data/hmc.sch | 64 + nipype/workflows/dmri/__init__.py | 4 + nipype/workflows/dmri/camino/__init__.py | 5 + .../dmri/camino/connectivity_mapping.py | 534 + nipype/workflows/dmri/camino/diffusion.py | 245 + .../dmri/camino/group_connectivity.py | 115 + .../workflows/dmri/connectivity/__init__.py | 9 + .../dmri/connectivity/group_connectivity.py | 631 + nipype/workflows/dmri/connectivity/nx.py | 178 + nipype/workflows/dmri/dipy/__init__.py | 7 + nipype/workflows/dmri/dipy/denoise.py | 121 + nipype/workflows/dmri/dtitk/__init__.py | 8 + .../dmri/dtitk/tensor_registration.py | 144 + nipype/workflows/dmri/fsl/__init__.py | 15 + nipype/workflows/dmri/fsl/artifacts.py | 1061 + nipype/workflows/dmri/fsl/dti.py | 276 + nipype/workflows/dmri/fsl/epi.py | 885 + nipype/workflows/dmri/fsl/tbss.py | 590 + nipype/workflows/dmri/fsl/tests/__init__.py | 3 + nipype/workflows/dmri/fsl/tests/test_dti.py | 85 + nipype/workflows/dmri/fsl/tests/test_epi.py | 46 + nipype/workflows/dmri/fsl/tests/test_tbss.py | 211 + nipype/workflows/dmri/fsl/utils.py | 847 + nipype/workflows/dmri/mrtrix/__init__.py | 5 + .../dmri/mrtrix/connectivity_mapping.py | 639 + nipype/workflows/dmri/mrtrix/diffusion.py | 186 + .../dmri/mrtrix/group_connectivity.py | 139 + nipype/workflows/fmri/__init__.py | 4 + nipype/workflows/fmri/fsl/__init__.py | 8 + nipype/workflows/fmri/fsl/estimate.py | 298 + nipype/workflows/fmri/fsl/preprocess.py | 1293 + nipype/workflows/fmri/fsl/tests/__init__.py | 2 + .../fmri/fsl/tests/test_preprocess.py | 25 + nipype/workflows/fmri/spm/__init__.py | 3 + nipype/workflows/fmri/spm/estimate.py | 3 + nipype/workflows/fmri/spm/preprocess.py | 332 + nipype/workflows/fmri/spm/tests/__init__.py | 2 + nipype/workflows/graph/__init__.py | 3 + nipype/workflows/misc/__init__.py | 1 + nipype/workflows/misc/utils.py | 91 + nipype/workflows/rsfmri/__init__.py | 5 + nipype/workflows/rsfmri/fsl/__init__.py | 2 + nipype/workflows/rsfmri/fsl/resting.py | 163 + nipype/workflows/rsfmri/fsl/tests/__init__.py | 0 .../rsfmri/fsl/tests/test_resting.py | 106 + nipype/workflows/smri/__init__.py | 7 + .../workflows/smri/ants/ANTSBuildTemplate.py | 388 + nipype/workflows/smri/ants/__init__.py | 3 + .../ants/antsRegistrationBuildTemplate.py | 535 + nipype/workflows/smri/freesurfer/__init__.py | 5 + .../workflows/smri/freesurfer/autorecon1.py | 512 + .../workflows/smri/freesurfer/autorecon2.py | 720 + .../workflows/smri/freesurfer/autorecon3.py | 959 + nipype/workflows/smri/freesurfer/ba_maps.py | 172 + nipype/workflows/smri/freesurfer/bem.py | 81 + nipype/workflows/smri/freesurfer/recon.py | 604 + nipype/workflows/smri/freesurfer/utils.py | 498 + nipype/workflows/smri/niftyreg/__init__.py | 5 + nipype/workflows/smri/niftyreg/groupwise.py | 384 + nipype/workflows/warp/__init__.py | 1 + 1709 files changed, 286875 insertions(+) 
nipype/interfaces/minc/tests/test_auto_Gennlxfm.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Math.py create mode 100644 nipype/interfaces/minc/tests/test_auto_NlpFit.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Norm.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Pik.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Resample.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Reshape.py create mode 100644 nipype/interfaces/minc/tests/test_auto_ToEcat.py create mode 100644 nipype/interfaces/minc/tests/test_auto_ToRaw.py create mode 100644 nipype/interfaces/minc/tests/test_auto_VolSymm.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Volcentre.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Voliso.py create mode 100644 nipype/interfaces/minc/tests/test_auto_Volpad.py create mode 100644 nipype/interfaces/minc/tests/test_auto_XfmAvg.py create mode 100644 nipype/interfaces/minc/tests/test_auto_XfmConcat.py create mode 100644 nipype/interfaces/minc/tests/test_auto_XfmInvert.py create mode 100644 nipype/interfaces/mipav/__init__.py create mode 100644 nipype/interfaces/mipav/developer.py create mode 100644 nipype/interfaces/mipav/generate_classes.py create mode 100644 nipype/interfaces/mipav/tests/__init__.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py create mode 100644 nipype/interfaces/mipav/tests/test_auto_RandomVol.py create mode 100644 nipype/interfaces/mixins/__init__.py create mode 100644 nipype/interfaces/mixins/reporting.py create mode 100644 nipype/interfaces/mixins/tests/__init__.py create mode 100644 nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py create mode 100644 nipype/interfaces/mne/__init__.py create mode 100644 nipype/interfaces/mne/base.py create mode 100644 nipype/interfaces/mne/tests/__init__.py create mode 100644 nipype/interfaces/mne/tests/test_auto_WatershedBEM.py create mode 100644 nipype/interfaces/mrtrix/__init__.py create mode 100644 nipype/interfaces/mrtrix/convert.py create mode 100644 
nipype/interfaces/mrtrix/defhdr.mat create mode 100644 nipype/interfaces/mrtrix/preprocess.py create mode 100644 nipype/interfaces/mrtrix/tensors.py create mode 100644 nipype/interfaces/mrtrix/tests/__init__.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Erode.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Threshold.py create mode 100644 nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py create mode 100644 nipype/interfaces/mrtrix/tracking.py create mode 100644 nipype/interfaces/mrtrix3/__init__.py create mode 100644 nipype/interfaces/mrtrix3/base.py create mode 100644 nipype/interfaces/mrtrix3/connectivity.py create mode 100644 nipype/interfaces/mrtrix3/preprocess.py create mode 100644 nipype/interfaces/mrtrix3/reconst.py create mode 100644 nipype/interfaces/mrtrix3/tests/__init__.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py create mode 100644 
nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py create mode 100644 nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py create mode 100644 nipype/interfaces/mrtrix3/tracking.py create mode 100644 nipype/interfaces/mrtrix3/utils.py create mode 100644 nipype/interfaces/niftyfit/__init__.py create mode 100644 nipype/interfaces/niftyfit/asl.py create mode 100644 nipype/interfaces/niftyfit/base.py create mode 100644 nipype/interfaces/niftyfit/dwi.py create mode 100644 nipype/interfaces/niftyfit/qt1.py create mode 100644 nipype/interfaces/niftyfit/tests/__init__.py create mode 100644 nipype/interfaces/niftyfit/tests/test_asl.py create mode 100644 nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py create mode 100644 nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py create mode 100644 nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py create mode 100644 nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py create mode 100644 nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py create mode 100644 nipype/interfaces/niftyfit/tests/test_dwi.py create mode 100644 nipype/interfaces/niftyfit/tests/test_qt1.py create mode 100644 nipype/interfaces/niftyreg/__init__.py create mode 100644 nipype/interfaces/niftyreg/base.py create mode 100644 nipype/interfaces/niftyreg/reg.py create mode 100644 nipype/interfaces/niftyreg/regutils.py create mode 100644 nipype/interfaces/niftyreg/tests/__init__.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegResample.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegTools.py create mode 100644 nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py create mode 100644 nipype/interfaces/niftyreg/tests/test_reg.py create mode 100644 nipype/interfaces/niftyreg/tests/test_regutils.py create mode 100644 nipype/interfaces/niftyseg/__init__.py create mode 100644 nipype/interfaces/niftyseg/base.py create mode 100644 nipype/interfaces/niftyseg/em.py create mode 100644 nipype/interfaces/niftyseg/label_fusion.py create mode 100644 nipype/interfaces/niftyseg/lesions.py create mode 100644 nipype/interfaces/niftyseg/maths.py create mode 100644 nipype/interfaces/niftyseg/patchmatch.py create mode 100644 nipype/interfaces/niftyseg/stats.py create mode 100644 nipype/interfaces/niftyseg/tests/__init__.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py create mode 100644 
nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_EM.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_Merge.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py create mode 100644 nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py create mode 100644 nipype/interfaces/niftyseg/tests/test_em_interfaces.py create mode 100644 nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py create mode 100644 nipype/interfaces/niftyseg/tests/test_label_fusion.py create mode 100644 nipype/interfaces/niftyseg/tests/test_lesions.py create mode 100644 nipype/interfaces/niftyseg/tests/test_maths.py create mode 100644 nipype/interfaces/niftyseg/tests/test_stats.py create mode 100644 nipype/interfaces/nilearn.py create mode 100644 nipype/interfaces/nipy/__init__.py create mode 100644 nipype/interfaces/nipy/base.py create mode 100644 nipype/interfaces/nipy/model.py create mode 100644 nipype/interfaces/nipy/preprocess.py create mode 100644 nipype/interfaces/nipy/tests/__init__.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_ComputeMask.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_FitGLM.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_Similarity.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py create mode 100644 nipype/interfaces/nipy/tests/test_auto_Trim.py create mode 100644 nipype/interfaces/nipy/utils.py create mode 100644 nipype/interfaces/nitime/__init__.py create mode 100644 nipype/interfaces/nitime/analysis.py create mode 100644 nipype/interfaces/nitime/base.py create mode 100644 nipype/interfaces/nitime/tests/__init__.py create mode 100644 nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py create mode 100644 nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py create mode 100644 nipype/interfaces/nitime/tests/test_nitime.py create mode 100644 nipype/interfaces/petpvc.py create mode 100644 nipype/interfaces/quickshear.py create mode 100644 nipype/interfaces/semtools/__init__.py create mode 100644 nipype/interfaces/semtools/brains/__init__.py create mode 100644 nipype/interfaces/semtools/brains/classify.py create mode 100644 nipype/interfaces/semtools/brains/segmentation.py create mode 100644 nipype/interfaces/semtools/brains/tests/__init__.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py create mode 100644 
nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py create mode 100644 nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py create mode 100644 nipype/interfaces/semtools/brains/utilities.py create mode 100644 nipype/interfaces/semtools/converters.py create mode 100644 nipype/interfaces/semtools/diffusion/__init__.py create mode 100644 nipype/interfaces/semtools/diffusion/diffusion.py create mode 100644 nipype/interfaces/semtools/diffusion/gtract.py create mode 100644 nipype/interfaces/semtools/diffusion/maxcurvature.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/__init__.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py create mode 100644 nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py create mode 100644 
nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/__init__.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/fibertrack.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/tests/__init__.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py create mode 100644 nipype/interfaces/semtools/diffusion/tractography/ukftractography.py create mode 100644 nipype/interfaces/semtools/featurecreator.py create mode 100644 nipype/interfaces/semtools/filtering/__init__.py create mode 100644 nipype/interfaces/semtools/filtering/denoising.py create mode 100644 nipype/interfaces/semtools/filtering/featuredetection.py create mode 100644 nipype/interfaces/semtools/filtering/tests/__init__.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py create mode 100644 nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py create mode 100644 nipype/interfaces/semtools/generated.sh create mode 100644 nipype/interfaces/semtools/legacy/__init__.py create mode 100644 nipype/interfaces/semtools/legacy/registration.py create mode 100644 nipype/interfaces/semtools/legacy/tests/__init__.py create mode 100644 nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py create mode 100644 
nipype/interfaces/semtools/registration/__init__.py create mode 100644 nipype/interfaces/semtools/registration/brainsfit.py create mode 100644 nipype/interfaces/semtools/registration/brainsresample.py create mode 100644 nipype/interfaces/semtools/registration/brainsresize.py create mode 100644 nipype/interfaces/semtools/registration/specialized.py create mode 100644 nipype/interfaces/semtools/registration/tests/__init__.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py create mode 100644 nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py create mode 100644 nipype/interfaces/semtools/segmentation/__init__.py create mode 100644 nipype/interfaces/semtools/segmentation/specialized.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/__init__.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py create mode 100644 nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py create mode 100644 nipype/interfaces/semtools/testing/__init__.py create mode 100644 nipype/interfaces/semtools/testing/featuredetection.py create mode 100644 nipype/interfaces/semtools/testing/generateaveragelmkfile.py create mode 100644 nipype/interfaces/semtools/testing/landmarkscompare.py create mode 100644 nipype/interfaces/semtools/tests/__init__.py create mode 100644 nipype/interfaces/semtools/tests/test_auto_DWICompare.py create mode 100644 nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py create mode 100644 nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py create mode 100644 nipype/interfaces/semtools/utilities/__init__.py create mode 100644 nipype/interfaces/semtools/utilities/brains.py create mode 100644 nipype/interfaces/semtools/utilities/tests/__init__.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py create mode 100644 
nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py create mode 100644 nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py create mode 100644 nipype/interfaces/slicer/__init__.py create mode 100644 nipype/interfaces/slicer/base.py create mode 100644 nipype/interfaces/slicer/converters.py create mode 100644 nipype/interfaces/slicer/diffusion/__init__.py create mode 100644 nipype/interfaces/slicer/diffusion/diffusion.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/__init__.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py create mode 100644 nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py create mode 100644 nipype/interfaces/slicer/filtering/__init__.py create mode 100644 nipype/interfaces/slicer/filtering/arithmetic.py create mode 100644 nipype/interfaces/slicer/filtering/checkerboardfilter.py create mode 100644 nipype/interfaces/slicer/filtering/denoising.py create mode 100644 nipype/interfaces/slicer/filtering/extractskeleton.py create mode 100644 nipype/interfaces/slicer/filtering/histogrammatching.py create mode 100644 nipype/interfaces/slicer/filtering/imagelabelcombine.py create mode 100644 nipype/interfaces/slicer/filtering/morphology.py create mode 100644 nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py create mode 100644 nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py create mode 100644 nipype/interfaces/slicer/filtering/tests/__init__.py create mode 100644 
nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py create mode 100644 nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py create mode 100644 nipype/interfaces/slicer/filtering/thresholdscalarvolume.py create mode 100644 nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py create mode 100644 nipype/interfaces/slicer/generate_classes.py create mode 100644 nipype/interfaces/slicer/legacy/__init__.py create mode 100644 nipype/interfaces/slicer/legacy/converters.py create mode 100644 nipype/interfaces/slicer/legacy/diffusion/__init__.py create mode 100644 nipype/interfaces/slicer/legacy/diffusion/denoising.py create mode 100644 nipype/interfaces/slicer/legacy/diffusion/tests/__init__.py create mode 100644 nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py create mode 100644 nipype/interfaces/slicer/legacy/filtering.py create mode 100644 nipype/interfaces/slicer/legacy/registration.py create mode 100644 nipype/interfaces/slicer/legacy/segmentation.py create mode 100644 nipype/interfaces/slicer/legacy/tests/__init__.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py create mode 
100644 nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py create mode 100644 nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py create mode 100644 nipype/interfaces/slicer/quantification/__init__.py create mode 100644 nipype/interfaces/slicer/quantification/changequantification.py create mode 100644 nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py create mode 100644 nipype/interfaces/slicer/quantification/tests/__init__.py create mode 100644 nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py create mode 100644 nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py create mode 100644 nipype/interfaces/slicer/registration/__init__.py create mode 100644 nipype/interfaces/slicer/registration/brainsfit.py create mode 100644 nipype/interfaces/slicer/registration/brainsresample.py create mode 100644 nipype/interfaces/slicer/registration/specialized.py create mode 100644 nipype/interfaces/slicer/registration/tests/__init__.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py create mode 100644 nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py create mode 100644 nipype/interfaces/slicer/segmentation/__init__.py create mode 100644 nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py create mode 100644 nipype/interfaces/slicer/segmentation/specialized.py create mode 100644 nipype/interfaces/slicer/segmentation/tests/__init__.py create mode 100644 nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py create mode 100644 nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py create mode 100644 nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py create mode 100644 nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py create mode 100644 nipype/interfaces/slicer/surface.py create mode 100644 nipype/interfaces/slicer/tests/__init__.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_MergeModels.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_ModelMaker.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py create mode 100644 nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py create mode 100644 nipype/interfaces/slicer/utilities.py create mode 100644 nipype/interfaces/spm/__init__.py create mode 100644 nipype/interfaces/spm/base.py create mode 100644 nipype/interfaces/spm/model.py create mode 100644 nipype/interfaces/spm/preprocess.py create mode 100644 
nipype/interfaces/spm/tests/__init__.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Analyze2nii.py create mode 100644 nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py create mode 100644 nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py create mode 100644 nipype/interfaces/spm/tests/test_auto_ApplyTransform.py create mode 100644 nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Coregister.py create mode 100644 nipype/interfaces/spm/tests/test_auto_CreateWarped.py create mode 100644 nipype/interfaces/spm/tests/test_auto_DARTEL.py create mode 100644 nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py create mode 100644 nipype/interfaces/spm/tests/test_auto_DicomImport.py create mode 100644 nipype/interfaces/spm/tests/test_auto_EstimateContrast.py create mode 100644 nipype/interfaces/spm/tests/test_auto_EstimateModel.py create mode 100644 nipype/interfaces/spm/tests/test_auto_FactorialDesign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_FieldMap.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Level1Design.py create mode 100644 nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_NewSegment.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Normalize.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Normalize12.py create mode 100644 nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Realign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Reslice.py create mode 100644 nipype/interfaces/spm/tests/test_auto_ResliceToReference.py create mode 100644 nipype/interfaces/spm/tests/test_auto_SPMCommand.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Segment.py create mode 100644 nipype/interfaces/spm/tests/test_auto_SliceTiming.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Smooth.py create mode 100644 nipype/interfaces/spm/tests/test_auto_Threshold.py create mode 100644 nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py create mode 100644 nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py create mode 100644 nipype/interfaces/spm/tests/test_auto_VBMSegment.py create mode 100644 nipype/interfaces/spm/tests/test_base.py create mode 100644 nipype/interfaces/spm/tests/test_model.py create mode 100644 nipype/interfaces/spm/tests/test_preprocess.py create mode 100644 nipype/interfaces/spm/tests/test_utils.py create mode 100644 nipype/interfaces/spm/utils.py create mode 100644 nipype/interfaces/tests/__init__.py create mode 100644 nipype/interfaces/tests/test_auto_BIDSDataGrabber.py create mode 100644 nipype/interfaces/tests/test_auto_Bru2.py create mode 100644 nipype/interfaces/tests/test_auto_C3d.py create mode 100644 nipype/interfaces/tests/test_auto_C3dAffineTool.py create mode 100644 nipype/interfaces/tests/test_auto_CopyMeta.py create mode 100644 nipype/interfaces/tests/test_auto_DataFinder.py create mode 100644 nipype/interfaces/tests/test_auto_DataGrabber.py create mode 100644 nipype/interfaces/tests/test_auto_DataSink.py create mode 100644 nipype/interfaces/tests/test_auto_Dcm2nii.py create mode 100644 nipype/interfaces/tests/test_auto_Dcm2niix.py create mode 100644 nipype/interfaces/tests/test_auto_DcmStack.py create mode 100644 nipype/interfaces/tests/test_auto_FreeSurferSource.py 
create mode 100644 nipype/interfaces/tests/test_auto_GroupAndStack.py create mode 100644 nipype/interfaces/tests/test_auto_IOBase.py create mode 100644 nipype/interfaces/tests/test_auto_JSONFileGrabber.py create mode 100644 nipype/interfaces/tests/test_auto_JSONFileSink.py create mode 100644 nipype/interfaces/tests/test_auto_LookupMeta.py create mode 100644 nipype/interfaces/tests/test_auto_MatlabCommand.py create mode 100644 nipype/interfaces/tests/test_auto_MergeNifti.py create mode 100644 nipype/interfaces/tests/test_auto_MeshFix.py create mode 100644 nipype/interfaces/tests/test_auto_MySQLSink.py create mode 100644 nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py create mode 100644 nipype/interfaces/tests/test_auto_NilearnBaseInterface.py create mode 100644 nipype/interfaces/tests/test_auto_PETPVC.py create mode 100644 nipype/interfaces/tests/test_auto_Quickshear.py create mode 100644 nipype/interfaces/tests/test_auto_Reorient.py create mode 100644 nipype/interfaces/tests/test_auto_Rescale.py create mode 100644 nipype/interfaces/tests/test_auto_S3DataGrabber.py create mode 100644 nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py create mode 100644 nipype/interfaces/tests/test_auto_SQLiteSink.py create mode 100644 nipype/interfaces/tests/test_auto_SSHDataGrabber.py create mode 100644 nipype/interfaces/tests/test_auto_SelectFiles.py create mode 100644 nipype/interfaces/tests/test_auto_SignalExtraction.py create mode 100644 nipype/interfaces/tests/test_auto_SlicerCommandLine.py create mode 100644 nipype/interfaces/tests/test_auto_SplitNifti.py create mode 100644 nipype/interfaces/tests/test_auto_XNATSink.py create mode 100644 nipype/interfaces/tests/test_auto_XNATSource.py create mode 100644 nipype/interfaces/tests/test_extra_dcm2nii.py create mode 100644 nipype/interfaces/tests/test_image.py create mode 100644 nipype/interfaces/tests/test_io.py create mode 100644 nipype/interfaces/tests/test_matlab.py create mode 100644 nipype/interfaces/tests/test_nilearn.py create mode 100644 nipype/interfaces/utility/__init__.py create mode 100644 nipype/interfaces/utility/base.py create mode 100644 nipype/interfaces/utility/csv.py create mode 100644 nipype/interfaces/utility/tests/__init__.py create mode 100644 nipype/interfaces/utility/tests/test_auto_AssertEqual.py create mode 100644 nipype/interfaces/utility/tests/test_auto_CSVReader.py create mode 100644 nipype/interfaces/utility/tests/test_auto_Function.py create mode 100644 nipype/interfaces/utility/tests/test_auto_IdentityInterface.py create mode 100644 nipype/interfaces/utility/tests/test_auto_Merge.py create mode 100644 nipype/interfaces/utility/tests/test_auto_Rename.py create mode 100644 nipype/interfaces/utility/tests/test_auto_Select.py create mode 100644 nipype/interfaces/utility/tests/test_auto_Split.py create mode 100644 nipype/interfaces/utility/tests/test_base.py create mode 100644 nipype/interfaces/utility/tests/test_csv.py create mode 100644 nipype/interfaces/utility/tests/test_wrappers.py create mode 100644 nipype/interfaces/utility/wrappers.py create mode 100644 nipype/interfaces/vista/__init__.py create mode 100644 nipype/interfaces/vista/tests/__init__.py create mode 100644 nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py create mode 100644 nipype/interfaces/vista/tests/test_auto_VtoMat.py create mode 100644 nipype/interfaces/vista/vista.py create mode 100644 nipype/interfaces/vtkbase.py create mode 100644 nipype/interfaces/workbench/__init__.py create mode 100644 nipype/interfaces/workbench/base.py create 
mode 100644 nipype/interfaces/workbench/metric.py create mode 100644 nipype/interfaces/workbench/tests/__init__.py create mode 100644 nipype/interfaces/workbench/tests/test_auto_MetricResample.py create mode 100644 nipype/interfaces/workbench/tests/test_auto_WBCommand.py create mode 100644 nipype/pipeline/__init__.py create mode 100644 nipype/pipeline/engine/__init__.py create mode 100644 nipype/pipeline/engine/base.py create mode 100644 nipype/pipeline/engine/nodes.py create mode 100644 nipype/pipeline/engine/report_template.html create mode 100644 nipype/pipeline/engine/report_template2.html create mode 100644 nipype/pipeline/engine/tests/__init__.py create mode 100644 nipype/pipeline/engine/tests/test_base.py create mode 100644 nipype/pipeline/engine/tests/test_engine.py create mode 100644 nipype/pipeline/engine/tests/test_join.py create mode 100644 nipype/pipeline/engine/tests/test_nodes.py create mode 100644 nipype/pipeline/engine/tests/test_utils.py create mode 100644 nipype/pipeline/engine/tests/test_workflows.py create mode 100644 nipype/pipeline/engine/utils.py create mode 100644 nipype/pipeline/engine/workflows.py create mode 100644 nipype/pipeline/plugins/__init__.py create mode 100644 nipype/pipeline/plugins/base.py create mode 100644 nipype/pipeline/plugins/condor.py create mode 100644 nipype/pipeline/plugins/dagman.py create mode 100644 nipype/pipeline/plugins/debug.py create mode 100644 nipype/pipeline/plugins/ipython.py create mode 100644 nipype/pipeline/plugins/legacymultiproc.py create mode 100644 nipype/pipeline/plugins/linear.py create mode 100644 nipype/pipeline/plugins/lsf.py create mode 100644 nipype/pipeline/plugins/multiproc.py create mode 100644 nipype/pipeline/plugins/oar.py create mode 100644 nipype/pipeline/plugins/pbs.py create mode 100644 nipype/pipeline/plugins/pbsgraph.py create mode 100644 nipype/pipeline/plugins/semaphore_singleton.py create mode 100644 nipype/pipeline/plugins/sge.py create mode 100644 nipype/pipeline/plugins/sgegraph.py create mode 100644 nipype/pipeline/plugins/slurm.py create mode 100644 nipype/pipeline/plugins/slurmgraph.py create mode 100644 nipype/pipeline/plugins/somaflow.py create mode 100644 nipype/pipeline/plugins/tests/__init__.py create mode 100644 nipype/pipeline/plugins/tests/test_base.py create mode 100644 nipype/pipeline/plugins/tests/test_callback.py create mode 100644 nipype/pipeline/plugins/tests/test_debug.py create mode 100644 nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py create mode 100644 nipype/pipeline/plugins/tests/test_linear.py create mode 100644 nipype/pipeline/plugins/tests/test_multiproc.py create mode 100644 nipype/pipeline/plugins/tests/test_oar.py create mode 100644 nipype/pipeline/plugins/tests/test_pbs.py create mode 100644 nipype/pipeline/plugins/tests/test_somaflow.py create mode 100644 nipype/pipeline/plugins/tests/test_tools.py create mode 100644 nipype/pipeline/plugins/tools.py create mode 100644 nipype/pkg_info.py create mode 100644 nipype/pytest.ini create mode 100644 nipype/refs.py create mode 100644 nipype/scripts/__init__.py create mode 100644 nipype/scripts/cli.py create mode 100644 nipype/scripts/crash_files.py create mode 100644 nipype/scripts/instance.py create mode 100644 nipype/scripts/utils.py create mode 100644 nipype/sphinxext/__init__.py create mode 100644 nipype/sphinxext/plot_workflow.py create mode 100644 nipype/testing/__init__.py create mode 100644 nipype/testing/data/4d_dwi.nii create mode 100644 nipype/testing/data/A.scheme create mode 100644 
nipype/testing/data/A_qmat.Bdouble create mode 100644 nipype/testing/data/A_recon_params.Bdouble create mode 100644 nipype/testing/data/BrainSegmentationPrior01.nii.gz create mode 100644 nipype/testing/data/BrainSegmentationPrior02.nii.gz create mode 100644 nipype/testing/data/BrainSegmentationPrior03.nii.gz create mode 100644 nipype/testing/data/BrainSegmentationPrior04.nii.gz create mode 100644 nipype/testing/data/FLASH1.mgz create mode 100644 nipype/testing/data/FLASH2.mgz create mode 100644 nipype/testing/data/FLASH3.mgz create mode 100644 nipype/testing/data/Fred+orig create mode 100644 nipype/testing/data/FreeSurferColorLUT.txt create mode 100644 nipype/testing/data/FreeSurferColorLUT_adapted_aparc+aseg_out.pck create mode 100644 nipype/testing/data/MASK_average_thal_right.nii create mode 100644 nipype/testing/data/NWARP create mode 100644 nipype/testing/data/PD.mgz create mode 100644 nipype/testing/data/ProbabilityMaskOfStudyTemplate.nii.gz create mode 100644 nipype/testing/data/Q25_warp+tlrc.HEAD create mode 100644 nipype/testing/data/QSH_peaks.Bdouble create mode 100644 nipype/testing/data/README create mode 100644 nipype/testing/data/ROI_scale500.nii.gz create mode 100644 nipype/testing/data/SPM.mat create mode 100644 nipype/testing/data/SubjectA.Bfloat create mode 100644 nipype/testing/data/T1.mgz create mode 100644 nipype/testing/data/T1.nii create mode 100644 nipype/testing/data/T1.nii.gz create mode 100644 nipype/testing/data/T1_brain.nii create mode 100644 nipype/testing/data/T1map.nii.gz create mode 100644 nipype/testing/data/TI4D.nii.gz create mode 100644 nipype/testing/data/TPM.nii create mode 100644 nipype/testing/data/Template_1_IXI550_MNI152.nii create mode 100644 nipype/testing/data/Template_6.nii create mode 100644 nipype/testing/data/TransformParameters.0.txt create mode 100644 nipype/testing/data/afni_output.3D create mode 100644 nipype/testing/data/allFA.nii create mode 100644 nipype/testing/data/all_FA.nii.gz create mode 100644 nipype/testing/data/anat_coreg.mif create mode 100644 nipype/testing/data/anatomical.nii create mode 100644 nipype/testing/data/ants_Affine.txt create mode 100644 nipype/testing/data/ants_Warp.nii.gz create mode 100644 nipype/testing/data/ants_deformed.nii.gz create mode 100644 nipype/testing/data/aparc+aseg.nii create mode 100644 nipype/testing/data/aseg.mgz create mode 100644 nipype/testing/data/asl.nii.gz create mode 100644 nipype/testing/data/atlas.nii.gz create mode 100644 nipype/testing/data/b0.nii create mode 100644 nipype/testing/data/b0.nii.gz create mode 100644 nipype/testing/data/b0_b0rev.nii create mode 100644 nipype/testing/data/ballstickfit_data.Bfloat create mode 100644 nipype/testing/data/bedpostxout/do_not_delete.txt create mode 100644 nipype/testing/data/brain_mask.nii create mode 100644 nipype/testing/data/brain_study_template.nii.gz create mode 100644 nipype/testing/data/brain_track.Bdouble create mode 100644 nipype/testing/data/brukerdir/fid create mode 100644 nipype/testing/data/brukerdir/pdata/1/2dseq create mode 100644 nipype/testing/data/bvals create mode 100644 nipype/testing/data/bvals.scheme create mode 100644 nipype/testing/data/bvecs create mode 100644 nipype/testing/data/bvecs.scheme create mode 100644 nipype/testing/data/c1s1.nii create mode 100644 nipype/testing/data/c1s3.nii create mode 100644 nipype/testing/data/clustering.mat create mode 100644 nipype/testing/data/cmatrix.mat create mode 100644 nipype/testing/data/complex.nii create mode 100644 nipype/testing/data/config.ini create mode 100644 
nipype/testing/data/cont1.nii create mode 100644 nipype/testing/data/cont1a.nii create mode 100644 nipype/testing/data/cont2.nii create mode 100644 nipype/testing/data/cont2a.nii create mode 100644 nipype/testing/data/converted.trk create mode 100644 nipype/testing/data/cope.nii.gz create mode 100644 nipype/testing/data/cope1.nii.gz create mode 100644 nipype/testing/data/cope1run1.nii.gz create mode 100644 nipype/testing/data/cope1run2.nii.gz create mode 100644 nipype/testing/data/cope2run1.nii.gz create mode 100644 nipype/testing/data/cope2run2.nii.gz create mode 100644 nipype/testing/data/cortex.label create mode 100644 nipype/testing/data/cov_split.mat create mode 100644 nipype/testing/data/csd.mif create mode 100644 nipype/testing/data/data.Bfloat create mode 100644 nipype/testing/data/db.xml create mode 100644 nipype/testing/data/degree.csv create mode 100644 nipype/testing/data/degree.mat create mode 100644 nipype/testing/data/design.con create mode 100644 nipype/testing/data/design.mat create mode 100644 nipype/testing/data/dicomdir/123456-1-1.dcm create mode 100644 nipype/testing/data/diffusion.nii create mode 100644 nipype/testing/data/diffusion_weighted.nii create mode 100644 nipype/testing/data/dilated_wm_mask.nii create mode 100644 nipype/testing/data/dirs.txt create mode 100644 nipype/testing/data/dofrun1 create mode 100644 nipype/testing/data/dofrun2 create mode 100644 nipype/testing/data/ds003_sub-01_mc.DVARS create mode 100644 nipype/testing/data/ds003_sub-01_mc.nii.gz create mode 100644 nipype/testing/data/ds003_sub-01_mc_brainmask.nii.gz create mode 100644 nipype/testing/data/ds005/filler.txt create mode 100644 nipype/testing/data/dteig.Bdouble create mode 100644 nipype/testing/data/dti.mif create mode 100644 nipype/testing/data/dwi.mif create mode 100644 nipype/testing/data/dwi.nii.gz create mode 100644 nipype/testing/data/dwi2anat_InverseWarp.nii.gz create mode 100644 nipype/testing/data/dwi2anat_Warp.nii.gz create mode 100644 nipype/testing/data/dwi2anat_coreg_Affine.txt create mode 100644 nipype/testing/data/dwi_CSD_tracked.tck create mode 100644 nipype/testing/data/dwi_FA.mif create mode 100644 nipype/testing/data/dwi_WMProb.mif create mode 100644 nipype/testing/data/dwi_evals.nii create mode 100644 nipype/testing/data/dwi_tensor.mif create mode 100644 nipype/testing/data/elastix.txt create mode 100644 nipype/testing/data/encoding.txt create mode 100644 nipype/testing/data/epi.nii create mode 100644 nipype/testing/data/epi_acqp.txt create mode 100644 nipype/testing/data/epi_index.txt create mode 100644 nipype/testing/data/epi_mask.nii create mode 100644 nipype/testing/data/epi_param.txt create mode 100644 nipype/testing/data/epi_phasediff.nii create mode 100644 nipype/testing/data/epi_rev.nii create mode 100644 nipype/testing/data/f1.1D create mode 100644 nipype/testing/data/f2.1D create mode 100644 nipype/testing/data/fa.nii.gz create mode 100644 nipype/testing/data/fdir00.nii create mode 100644 nipype/testing/data/fdir01.nii create mode 100644 nipype/testing/data/ffra00.nii create mode 100644 nipype/testing/data/ffra01.nii create mode 100644 nipype/testing/data/fieldmap_mag.nii create mode 100644 nipype/testing/data/fieldmap_mag_brain.nii create mode 100644 nipype/testing/data/fieldmap_phase_fslprepared.nii create mode 100644 nipype/testing/data/first_merged.nii.gz create mode 100644 nipype/testing/data/fitted_data1.Bfloat create mode 100644 nipype/testing/data/fitted_data2.Bfloat create mode 100644 nipype/testing/data/fixed1.nii create mode 100644 
nipype/testing/data/fixed2.nii create mode 100644 nipype/testing/data/flash_05.mgz create mode 100644 nipype/testing/data/flash_30.mgz create mode 100644 nipype/testing/data/flirt.mat create mode 100644 nipype/testing/data/fmri_timeseries.csv create mode 100644 nipype/testing/data/fmri_timeseries_nolabels.csv create mode 100644 nipype/testing/data/fods.mif create mode 100644 nipype/testing/data/fsLUT_aparc+aseg.pck create mode 100644 nipype/testing/data/fs_LR-deformed_to-fsaverage.L.sphere.32k_fs_LR.surf.gii create mode 100644 nipype/testing/data/fs_LR.L.midthickness_va_avg.32k_fs_LR.shape.gii create mode 100644 nipype/testing/data/fsaverage5.L.midthickness_va_avg.10k_fsavg_L.shape.gii create mode 100644 nipype/testing/data/fsaverage5_std_sphere.L.10k_fsavg_L.surf.gii create mode 100644 nipype/testing/data/fsl_mcflirt_movpar.txt create mode 100644 nipype/testing/data/fsl_motion_outliers_fd.txt create mode 100644 nipype/testing/data/func2anat_InverseWarp.nii.gz create mode 100644 nipype/testing/data/func2anat_coreg_Affine.txt create mode 100644 nipype/testing/data/func2anat_coreg_InverseWarp.nii.gz create mode 100644 nipype/testing/data/func_epi_1_1.nii create mode 100644 nipype/testing/data/func_to_struct.mat create mode 100644 nipype/testing/data/functional.HEAD create mode 100644 nipype/testing/data/functional.nii create mode 100644 nipype/testing/data/functional.par create mode 100644 nipype/testing/data/functional.rms create mode 100644 nipype/testing/data/functional2.nii create mode 100644 nipype/testing/data/functional3.nii create mode 100644 nipype/testing/data/functional_1.dcm create mode 100644 nipype/testing/data/functional_2.dcm create mode 100644 nipype/testing/data/im1.nii create mode 100644 nipype/testing/data/im2.nii create mode 100644 nipype/testing/data/im3.nii create mode 100644 nipype/testing/data/im_affine.aff create mode 100644 nipype/testing/data/im_warp.df.nii create mode 100644 nipype/testing/data/image.nii create mode 100644 nipype/testing/data/image.v create mode 100644 nipype/testing/data/indices-labels.txt create mode 100644 nipype/testing/data/indices.txt create mode 100644 nipype/testing/data/input1.xfm create mode 100644 nipype/testing/data/jsongrabber.txt create mode 100644 nipype/testing/data/label.mgz create mode 100644 nipype/testing/data/lh-pial.stl create mode 100644 nipype/testing/data/lh.cope1.mgz create mode 100644 nipype/testing/data/lh.cope1.nii.gz create mode 100644 nipype/testing/data/lh.hippocampus.stl create mode 100644 nipype/testing/data/lh.pial create mode 100644 nipype/testing/data/lh.pial_converted.gii create mode 100644 nipype/testing/data/lh.white create mode 100644 nipype/testing/data/lta1.lta create mode 100644 nipype/testing/data/lta2.lta create mode 100644 nipype/testing/data/lut_file create mode 100644 nipype/testing/data/magnitude.nii create mode 100644 nipype/testing/data/maps.nii create mode 100644 nipype/testing/data/mask.1D create mode 100644 nipype/testing/data/mask.mif create mode 100644 nipype/testing/data/mask.nii create mode 100644 nipype/testing/data/mask.nii.gz create mode 100644 nipype/testing/data/mean_func.nii.gz create mode 100644 nipype/testing/data/merged_f1samples.nii.gz create mode 100644 nipype/testing/data/merged_fsamples.nii create mode 100644 nipype/testing/data/merged_ph1samples.nii.gz create mode 100644 nipype/testing/data/merged_phsamples.nii create mode 100644 nipype/testing/data/merged_th1samples.nii.gz create mode 100644 nipype/testing/data/merged_thsamples.nii create mode 100644 
nipype/testing/data/minc_initial.xfm create mode 100644 nipype/testing/data/minc_nlp.conf create mode 100644 nipype/testing/data/minc_test_2D_00.mnc create mode 100644 nipype/testing/data/minc_test_2D_01.mnc create mode 100644 nipype/testing/data/minc_test_2D_02.mnc create mode 100644 nipype/testing/data/minc_test_2D_03.mnc create mode 100644 nipype/testing/data/minc_test_2D_04.mnc create mode 100644 nipype/testing/data/minc_test_2D_05.mnc create mode 100644 nipype/testing/data/minc_test_2D_06.mnc create mode 100644 nipype/testing/data/minc_test_2D_07.mnc create mode 100644 nipype/testing/data/minc_test_2D_08.mnc create mode 100644 nipype/testing/data/minc_test_2D_09.mnc create mode 100644 nipype/testing/data/minc_test_3D_00.mnc create mode 100644 nipype/testing/data/minc_test_3D_01.mnc create mode 100644 nipype/testing/data/minc_test_3D_02.mnc create mode 100644 nipype/testing/data/minc_test_3D_03.mnc create mode 100644 nipype/testing/data/minc_test_3D_04.mnc create mode 100644 nipype/testing/data/minc_test_3D_05.mnc create mode 100644 nipype/testing/data/minc_test_3D_06.mnc create mode 100644 nipype/testing/data/minc_test_3D_07.mnc create mode 100644 nipype/testing/data/minc_test_3D_08.mnc create mode 100644 nipype/testing/data/minc_test_3D_09.mnc create mode 100644 nipype/testing/data/mni.nii create mode 100644 nipype/testing/data/mni2t1.nii create mode 100644 nipype/testing/data/model.pklz create mode 100644 nipype/testing/data/moving.csv create mode 100644 nipype/testing/data/moving1.nii create mode 100644 nipype/testing/data/moving2.nii create mode 100644 nipype/testing/data/mrtrix3_labelconfig.txt create mode 100644 nipype/testing/data/my_database.db create mode 100644 nipype/testing/data/network0.aparc+aseg.nii create mode 100644 nipype/testing/data/network0.gpickle create mode 100644 nipype/testing/data/nodif_brain_mask.nii.gz create mode 100644 nipype/testing/data/norm.mgz create mode 100644 nipype/testing/data/output.csv create mode 100644 nipype/testing/data/pdfs.Bfloat create mode 100644 nipype/testing/data/peak_directions.mif create mode 100644 nipype/testing/data/pet.nii.gz create mode 100644 nipype/testing/data/pet_resliced.nii create mode 100644 nipype/testing/data/phase.nii create mode 100644 nipype/testing/data/rc1s1.nii create mode 100644 nipype/testing/data/rc1s2.nii create mode 100644 nipype/testing/data/rc2s1.nii create mode 100644 nipype/testing/data/rc2s2.nii create mode 100644 nipype/testing/data/realign_json.json create mode 100644 nipype/testing/data/ref_class0.nii create mode 100644 nipype/testing/data/ref_class1.nii create mode 100644 nipype/testing/data/register.dat create mode 100644 nipype/testing/data/register.mat create mode 100644 nipype/testing/data/resp.1D create mode 100644 nipype/testing/data/response.txt create mode 100644 nipype/testing/data/resting.nii create mode 100644 nipype/testing/data/resting2anat_Warp.nii.gz create mode 100644 nipype/testing/data/resting2anat_coreg_Affine.txt create mode 100644 nipype/testing/data/rgb.nii.gz create mode 100644 nipype/testing/data/rh-pial.stl create mode 100644 nipype/testing/data/rh.pial create mode 100644 nipype/testing/data/rh.pial_converted.gii create mode 100644 nipype/testing/data/roi01.nii create mode 100644 nipype/testing/data/roi01_idx.npz create mode 100644 nipype/testing/data/roi02.nii create mode 100644 nipype/testing/data/roi02_idx.npz create mode 100644 nipype/testing/data/roi03.nii create mode 100644 nipype/testing/data/roi03_idx.npz create mode 100644 nipype/testing/data/roi04.nii create mode 
100644 nipype/testing/data/roi04_idx.npz create mode 100644 nipype/testing/data/roi05.nii create mode 100644 nipype/testing/data/roi05_idx.npz create mode 100644 nipype/testing/data/run1+orig create mode 100644 nipype/testing/data/run1+orig_model create mode 100644 nipype/testing/data/run1_categories.1D create mode 100644 nipype/testing/data/run2+orig create mode 100644 nipype/testing/data/run2_categories.1D create mode 100644 nipype/testing/data/seed.1D create mode 100644 nipype/testing/data/seed_mask.nii create mode 100644 nipype/testing/data/seed_source.nii.gz create mode 100644 nipype/testing/data/seeds_to_M1.nii create mode 100644 nipype/testing/data/seeds_to_M2.nii create mode 100644 nipype/testing/data/segmentation0.nii.gz create mode 100644 nipype/testing/data/segmentation1.nii.gz create mode 100644 nipype/testing/data/session_info.npz create mode 100644 nipype/testing/data/skeleton_mask.nii.gz create mode 100644 nipype/testing/data/slice_timing.1D create mode 100644 nipype/testing/data/smri_ants_registration_settings.json create mode 100644 nipype/testing/data/spmT_0001.img create mode 100644 nipype/testing/data/spminfo create mode 100644 nipype/testing/data/streamlines.trk create mode 100644 nipype/testing/data/struct2mni.nii create mode 100644 nipype/testing/data/struct_to_func.mat create mode 100644 nipype/testing/data/struct_to_template.mat create mode 100644 nipype/testing/data/structural.nii create mode 100644 nipype/testing/data/study_template.nii.gz create mode 100644 nipype/testing/data/sub-01_dir-LR_epi.nii.gz create mode 100644 nipype/testing/data/sub-01_dir-RL_epi.nii.gz create mode 100644 nipype/testing/data/sub-01_task-rest_bold_space-fsaverage5.L.func.gii create mode 100644 nipype/testing/data/subj1.cff create mode 100644 nipype/testing/data/subj1.pck create mode 100644 nipype/testing/data/subj2.cff create mode 100644 nipype/testing/data/subj2.pck create mode 100644 nipype/testing/data/subjectDesign.con create mode 100644 nipype/testing/data/subjectDesign.mat create mode 100644 nipype/testing/data/surf.txt create mode 100644 nipype/testing/data/surf01.vtk create mode 100644 nipype/testing/data/surf1.vtk create mode 100644 nipype/testing/data/surf2.vtk create mode 100644 nipype/testing/data/targets_MASK1.nii create mode 100644 nipype/testing/data/targets_MASK2.nii create mode 100644 nipype/testing/data/tbss_dir/do_not_delete.txt create mode 100644 nipype/testing/data/tdi.mif create mode 100644 nipype/testing/data/tensor_fitted_data.Bdouble create mode 100644 nipype/testing/data/timeDesign.con create mode 100644 nipype/testing/data/timeDesign.mat create mode 100644 nipype/testing/data/timeseries.txt create mode 100644 nipype/testing/data/tissue+air_map.nii create mode 100644 nipype/testing/data/tissues.nii.gz create mode 100644 nipype/testing/data/topup_encoding.txt create mode 100644 nipype/testing/data/topup_fieldcoef.nii.gz create mode 100644 nipype/testing/data/topup_movpar.txt create mode 100644 nipype/testing/data/tpm_00.nii.gz create mode 100644 nipype/testing/data/tpm_01.nii.gz create mode 100644 nipype/testing/data/tpm_02.nii.gz create mode 100644 nipype/testing/data/tpms_msk.nii.gz create mode 100644 nipype/testing/data/track1.trk create mode 100644 nipype/testing/data/track2.trk create mode 100644 nipype/testing/data/tracks.tck create mode 100644 nipype/testing/data/tracks.trk create mode 100644 nipype/testing/data/tract_data.Bfloat create mode 100644 nipype/testing/data/tracts.Bdouble create mode 100644 nipype/testing/data/trans.mat create mode 100644 
nipype/testing/data/tst_class0.nii create mode 100644 nipype/testing/data/tst_class1.nii create mode 100644 nipype/testing/data/u_rc1s1_Template.nii create mode 100644 nipype/testing/data/u_rc1s2_Template.nii create mode 100644 nipype/testing/data/u_rc1s3_Template.nii create mode 100644 nipype/testing/data/varcope.nii.gz create mode 100644 nipype/testing/data/varcope1run1.nii.gz create mode 100644 nipype/testing/data/varcope1run2.nii.gz create mode 100644 nipype/testing/data/varcope2run1.nii.gz create mode 100644 nipype/testing/data/varcope2run2.nii.gz create mode 100644 nipype/testing/data/voxel-order_data.Bfloat create mode 100644 nipype/testing/data/vsm.nii create mode 100644 nipype/testing/data/warpfield.nii create mode 100644 nipype/testing/data/weights.txt create mode 100644 nipype/testing/data/wm.mgz create mode 100644 nipype/testing/data/wm_mask.mif create mode 100644 nipype/testing/data/wm_undersampled.nii create mode 100644 nipype/testing/data/zstat1.nii.gz create mode 100644 nipype/testing/decorators.py create mode 100644 nipype/testing/fixtures.py create mode 100644 nipype/testing/tests/test_utils.py create mode 100644 nipype/testing/utils.py create mode 100644 nipype/tests/__init__.py create mode 100644 nipype/tests/test_nipype.py create mode 100644 nipype/utils/README.txt create mode 100644 nipype/utils/__init__.py create mode 100644 nipype/utils/config.py create mode 100644 nipype/utils/docparse.py create mode 100644 nipype/utils/draw_gantt_chart.py create mode 100644 nipype/utils/filemanip.py create mode 100644 nipype/utils/functions.py create mode 100644 nipype/utils/logger.py create mode 100644 nipype/utils/matlabtools.py create mode 100644 nipype/utils/misc.py create mode 100644 nipype/utils/nipype2boutiques.py create mode 100644 nipype/utils/nipype_cmd.py create mode 100644 nipype/utils/onetime.py create mode 100644 nipype/utils/profiler.py create mode 100644 nipype/utils/provenance.py create mode 100644 nipype/utils/spm_docs.py create mode 100644 nipype/utils/spm_flat_config.m create mode 100644 nipype/utils/spm_get_doc.m create mode 100644 nipype/utils/tests/__init__.py create mode 100644 nipype/utils/tests/test_cmd.py create mode 100644 nipype/utils/tests/test_config.py create mode 100644 nipype/utils/tests/test_docparse.py create mode 100644 nipype/utils/tests/test_filemanip.py create mode 100644 nipype/utils/tests/test_functions.py create mode 100644 nipype/utils/tests/test_misc.py create mode 100644 nipype/utils/tests/test_nipype2boutiques.py create mode 100644 nipype/utils/tests/test_provenance.py create mode 100755 nipype/utils/tests/use_resources create mode 100644 nipype/utils/tmpdirs.py create mode 100644 nipype/workflows/__init__.py create mode 100644 nipype/workflows/data/__init__.py create mode 100644 nipype/workflows/data/ecc.sch create mode 100644 nipype/workflows/data/hmc.sch create mode 100644 nipype/workflows/dmri/__init__.py create mode 100644 nipype/workflows/dmri/camino/__init__.py create mode 100644 nipype/workflows/dmri/camino/connectivity_mapping.py create mode 100644 nipype/workflows/dmri/camino/diffusion.py create mode 100644 nipype/workflows/dmri/camino/group_connectivity.py create mode 100644 nipype/workflows/dmri/connectivity/__init__.py create mode 100644 nipype/workflows/dmri/connectivity/group_connectivity.py create mode 100644 nipype/workflows/dmri/connectivity/nx.py create mode 100644 nipype/workflows/dmri/dipy/__init__.py create mode 100644 nipype/workflows/dmri/dipy/denoise.py create mode 100644 
nipype/workflows/dmri/dtitk/__init__.py create mode 100644 nipype/workflows/dmri/dtitk/tensor_registration.py create mode 100644 nipype/workflows/dmri/fsl/__init__.py create mode 100644 nipype/workflows/dmri/fsl/artifacts.py create mode 100644 nipype/workflows/dmri/fsl/dti.py create mode 100644 nipype/workflows/dmri/fsl/epi.py create mode 100644 nipype/workflows/dmri/fsl/tbss.py create mode 100644 nipype/workflows/dmri/fsl/tests/__init__.py create mode 100644 nipype/workflows/dmri/fsl/tests/test_dti.py create mode 100644 nipype/workflows/dmri/fsl/tests/test_epi.py create mode 100644 nipype/workflows/dmri/fsl/tests/test_tbss.py create mode 100644 nipype/workflows/dmri/fsl/utils.py create mode 100644 nipype/workflows/dmri/mrtrix/__init__.py create mode 100644 nipype/workflows/dmri/mrtrix/connectivity_mapping.py create mode 100644 nipype/workflows/dmri/mrtrix/diffusion.py create mode 100644 nipype/workflows/dmri/mrtrix/group_connectivity.py create mode 100644 nipype/workflows/fmri/__init__.py create mode 100644 nipype/workflows/fmri/fsl/__init__.py create mode 100644 nipype/workflows/fmri/fsl/estimate.py create mode 100644 nipype/workflows/fmri/fsl/preprocess.py create mode 100644 nipype/workflows/fmri/fsl/tests/__init__.py create mode 100644 nipype/workflows/fmri/fsl/tests/test_preprocess.py create mode 100644 nipype/workflows/fmri/spm/__init__.py create mode 100644 nipype/workflows/fmri/spm/estimate.py create mode 100644 nipype/workflows/fmri/spm/preprocess.py create mode 100644 nipype/workflows/fmri/spm/tests/__init__.py create mode 100644 nipype/workflows/graph/__init__.py create mode 100644 nipype/workflows/misc/__init__.py create mode 100644 nipype/workflows/misc/utils.py create mode 100644 nipype/workflows/rsfmri/__init__.py create mode 100644 nipype/workflows/rsfmri/fsl/__init__.py create mode 100644 nipype/workflows/rsfmri/fsl/resting.py create mode 100644 nipype/workflows/rsfmri/fsl/tests/__init__.py create mode 100644 nipype/workflows/rsfmri/fsl/tests/test_resting.py create mode 100644 nipype/workflows/smri/__init__.py create mode 100644 nipype/workflows/smri/ants/ANTSBuildTemplate.py create mode 100644 nipype/workflows/smri/ants/__init__.py create mode 100644 nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py create mode 100644 nipype/workflows/smri/freesurfer/__init__.py create mode 100644 nipype/workflows/smri/freesurfer/autorecon1.py create mode 100644 nipype/workflows/smri/freesurfer/autorecon2.py create mode 100644 nipype/workflows/smri/freesurfer/autorecon3.py create mode 100644 nipype/workflows/smri/freesurfer/ba_maps.py create mode 100644 nipype/workflows/smri/freesurfer/bem.py create mode 100644 nipype/workflows/smri/freesurfer/recon.py create mode 100644 nipype/workflows/smri/freesurfer/utils.py create mode 100644 nipype/workflows/smri/niftyreg/__init__.py create mode 100644 nipype/workflows/smri/niftyreg/groupwise.py create mode 100644 nipype/workflows/warp/__init__.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..1c9f5df3fc --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +* +!.gitignore +!./nipype/* diff --git a/nipype/COMMIT_INFO.txt b/nipype/COMMIT_INFO.txt new file mode 100644 index 0000000000..7a32053bd4 --- /dev/null +++ b/nipype/COMMIT_INFO.txt @@ -0,0 +1,6 @@ +# This is an ini file that may contain information about the code state +[commit hash] +# The line below may contain a valid hash if it has been substituted during 'git archive' +archive_subst_hash=$Format:%%h$ +# This line may be modified by the install process 
+install_hash= diff --git a/nipype/__init__.py b/nipype/__init__.py new file mode 100644 index 0000000000..ad961e7df4 --- /dev/null +++ b/nipype/__init__.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +from distutils.version import LooseVersion + +from .info import (LONG_DESCRIPTION as __doc__, URL as __url__, STATUS as + __status__, __version__) +from .utils.config import NipypeConfig +from .utils.logger import Logging +from .refs import due +from .pkg_info import get_pkg_info as _get_pkg_info + +try: + import faulthandler + faulthandler.enable() +except (ImportError, IOError) as e: + pass + +config = NipypeConfig() +logging = Logging(config) + + +class NipypeTester(object): + def __call__(self, doctests=True, parallel=True): + try: + import pytest + except ImportError: + raise RuntimeError( + 'py.test not installed, run: pip install pytest') + args = [] + if not doctests: + args.extend(['-p', 'no:doctest']) + if not parallel: + args.append('-n0') + args.append(os.path.dirname(__file__)) + pytest.main(args=args) + + +test = NipypeTester() + + +def get_info(): + """Returns package information""" + return _get_pkg_info(os.path.dirname(__file__)) + + +from .pipeline import Node, MapNode, JoinNode, Workflow +from .interfaces import (DataGrabber, DataSink, SelectFiles, IdentityInterface, + Rename, Function, Select, Merge) diff --git a/nipype/algorithms/__init__.py b/nipype/algorithms/__init__.py new file mode 100644 index 0000000000..a2909a3501 --- /dev/null +++ b/nipype/algorithms/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Package contains pure Python neuroimaging algorithms + +Examples: artifactdetect + +""" +__docformat__ = 'restructuredtext' diff --git a/nipype/algorithms/confounds.py b/nipype/algorithms/confounds.py new file mode 100644 index 0000000000..d0f9a5733a --- /dev/null +++ b/nipype/algorithms/confounds.py @@ -0,0 +1,1274 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +''' +Algorithms to compute confounds in :abbr:`fMRI (functional MRI)` +''' +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +import os +import os.path as op + +import nibabel as nb +import numpy as np +from numpy.polynomial import Legendre +from scipy import linalg + +from ..
import config, logging +from ..external.due import BibTeX +from ..interfaces.base import (traits, TraitedSpec, BaseInterface, + BaseInterfaceInputSpec, File, isdefined, + InputMultiPath, OutputMultiPath) +from ..utils import NUMPY_MMAP +from ..utils.misc import normalize_mc_params + +IFLOGGER = logging.getLogger('nipype.interface') + + +class ComputeDVARSInputSpec(BaseInterfaceInputSpec): + in_file = File( + exists=True, mandatory=True, desc='functional data, after HMC') + in_mask = File(exists=True, mandatory=True, desc='a brain mask') + remove_zerovariance = traits.Bool( + True, usedefault=True, desc='remove voxels with zero variance') + save_std = traits.Bool( + True, usedefault=True, desc='save standardized DVARS') + save_nstd = traits.Bool( + False, usedefault=True, desc='save non-standardized DVARS') + save_vxstd = traits.Bool( + False, usedefault=True, desc='save voxel-wise standardized DVARS') + save_all = traits.Bool(False, usedefault=True, desc='output all DVARS') + + series_tr = traits.Float(desc='repetition time in sec.') + save_plot = traits.Bool(False, usedefault=True, desc='write DVARS plot') + figdpi = traits.Int(100, usedefault=True, desc='output dpi for the plot') + figsize = traits.Tuple( + traits.Float(11.7), + traits.Float(2.3), + usedefault=True, + desc='output figure size') + figformat = traits.Enum( + 'png', 'pdf', 'svg', usedefault=True, desc='output format for figures') + intensity_normalization = traits.Float( + 1000.0, + usedefault=True, + desc='Divide value in each voxel at each timepoint ' + 'by the median calculated across all voxels ' + 'and timepoints within the mask (if specified) ' + 'and then multiply by the value specified by ' + 'this parameter. By using the default (1000) ' + 'output DVARS will be expressed in ' + 'x10 % BOLD units compatible with Power et al. ' + '2012. Set this to 0 to disable intensity ' + 'normalization altogether.') + + +class ComputeDVARSOutputSpec(TraitedSpec): + out_std = File(exists=True, desc='output text file') + out_nstd = File(exists=True, desc='output text file') + out_vxstd = File(exists=True, desc='output text file') + out_all = File(exists=True, desc='output text file') + avg_std = traits.Float() + avg_nstd = traits.Float() + avg_vxstd = traits.Float() + fig_std = File(exists=True, desc='output DVARS plot') + fig_nstd = File(exists=True, desc='output DVARS plot') + fig_vxstd = File(exists=True, desc='output DVARS plot') + + +class ComputeDVARS(BaseInterface): + """ + Computes the DVARS. + """ + input_spec = ComputeDVARSInputSpec + output_spec = ComputeDVARSOutputSpec + references_ = [{ + 'entry': + BibTeX("""\ +@techreport{nichols_notes_2013, + address = {Coventry, UK}, + title = {Notes on {Creating} a {Standardized} {Version} of {DVARS}}, + url = {http://www2.warwick.ac.uk/fac/sci/statistics/staff/academic-\ +research/nichols/scripts/fsl/standardizeddvars.pdf}, + urldate = {2016-08-16}, + institution = {University of Warwick}, + author = {Nichols, Thomas}, + year = {2013} +}"""), + 'tags': ['method'] + }, { + 'entry': + BibTeX("""\ +@article{power_spurious_2012, + title = {Spurious but systematic correlations in functional connectivity {MRI} networks \
arise from subject motion}, + volume = {59}, + doi = {10.1016/j.neuroimage.2011.10.018}, + number = {3}, + urldate = {2016-08-16}, + journal = {NeuroImage}, + author = {Power, Jonathan D. and Barnes, Kelly A. and Snyder, Abraham Z. and Schlaggar, \
Bradley L.
and Petersen, Steven E.}, + year = {2012}, + pages = {2142--2154}, +} +"""), + 'tags': ['method'] + }] + + def __init__(self, **inputs): + self._results = {} + super(ComputeDVARS, self).__init__(**inputs) + + def _gen_fname(self, suffix, ext=None): + fname, in_ext = op.splitext(op.basename(self.inputs.in_file)) + + if in_ext == '.gz': + fname, in_ext2 = op.splitext(fname) + in_ext = in_ext2 + in_ext + + if ext is None: + ext = in_ext + + if ext.startswith('.'): + ext = ext[1:] + + return op.abspath('{}_{}.{}'.format(fname, suffix, ext)) + + def _run_interface(self, runtime): + dvars = compute_dvars( + self.inputs.in_file, + self.inputs.in_mask, + remove_zerovariance=self.inputs.remove_zerovariance, + intensity_normalization=self.inputs.intensity_normalization) + + (self._results['avg_std'], self._results['avg_nstd'], + self._results['avg_vxstd']) = np.mean( + dvars, axis=1).astype(float) + + tr = None + if isdefined(self.inputs.series_tr): + tr = self.inputs.series_tr + + if self.inputs.save_std: + out_file = self._gen_fname('dvars_std', ext='tsv') + np.savetxt(out_file, dvars[0], fmt=b'%0.6f') + self._results['out_std'] = out_file + + if self.inputs.save_plot: + self._results['fig_std'] = self._gen_fname( + 'dvars_std', ext=self.inputs.figformat) + fig = plot_confound( + dvars[0], + self.inputs.figsize, + 'Standardized DVARS', + series_tr=tr) + fig.savefig( + self._results['fig_std'], + dpi=float(self.inputs.figdpi), + format=self.inputs.figformat, + bbox_inches='tight') + fig.clf() + + if self.inputs.save_nstd: + out_file = self._gen_fname('dvars_nstd', ext='tsv') + np.savetxt(out_file, dvars[1], fmt=b'%0.6f') + self._results['out_nstd'] = out_file + + if self.inputs.save_plot: + self._results['fig_nstd'] = self._gen_fname( + 'dvars_nstd', ext=self.inputs.figformat) + fig = plot_confound( + dvars[1], self.inputs.figsize, 'DVARS', series_tr=tr) + fig.savefig( + self._results['fig_nstd'], + dpi=float(self.inputs.figdpi), + format=self.inputs.figformat, + bbox_inches='tight') + fig.clf() + + if self.inputs.save_vxstd: + out_file = self._gen_fname('dvars_vxstd', ext='tsv') + np.savetxt(out_file, dvars[2], fmt=b'%0.6f') + self._results['out_vxstd'] = out_file + + if self.inputs.save_plot: + self._results['fig_vxstd'] = self._gen_fname( + 'dvars_vxstd', ext=self.inputs.figformat) + fig = plot_confound( + dvars[2], + self.inputs.figsize, + 'Voxelwise std DVARS', + series_tr=tr) + fig.savefig( + self._results['fig_vxstd'], + dpi=float(self.inputs.figdpi), + format=self.inputs.figformat, + bbox_inches='tight') + fig.clf() + + if self.inputs.save_all: + out_file = self._gen_fname('dvars', ext='tsv') + np.savetxt( + out_file, + np.vstack(dvars).T, + fmt=b'%0.8f', + delimiter=b'\t', + header='std DVARS\tnon-std DVARS\tvx-wise std DVARS', + comments='') + self._results['out_all'] = out_file + + return runtime + + def _list_outputs(self): + return self._results + + +class FramewiseDisplacementInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc='motion parameters') + parameter_source = traits.Enum( + "FSL", + "AFNI", + "SPM", + "FSFAST", + "NIPY", + desc="Source of movement parameters", + mandatory=True) + radius = traits.Float( + 50, + usedefault=True, + desc='radius in mm to calculate angular FDs, 50mm is the ' + 'default since it is used in Power et al. 
2012') + out_file = File( + 'fd_power_2012.txt', usedefault=True, desc='output file name') + out_figure = File( + 'fd_power_2012.pdf', usedefault=True, desc='output figure name') + series_tr = traits.Float(desc='repetition time in sec.') + save_plot = traits.Bool(False, usedefault=True, desc='write FD plot') + normalize = traits.Bool( + False, usedefault=True, desc='calculate FD in mm/s') + figdpi = traits.Int( + 100, usedefault=True, desc='output dpi for the FD plot') + figsize = traits.Tuple( + traits.Float(11.7), + traits.Float(2.3), + usedefault=True, + desc='output figure size') + + +class FramewiseDisplacementOutputSpec(TraitedSpec): + out_file = File(desc='calculated FD per timestep') + out_figure = File(desc='output image file') + fd_average = traits.Float(desc='average FD') + + +class FramewiseDisplacement(BaseInterface): + """ + Calculate the :abbr:`FD (framewise displacement)` as in [Power2012]_. + This implementation reproduces the calculation in fsl_motion_outliers. + + .. [Power2012] Power et al., Spurious but systematic correlations in functional + connectivity MRI networks arise from subject motion, NeuroImage 59(3), + 2012. doi:`10.1016/j.neuroimage.2011.10.018 + `_. + + + """ + + input_spec = FramewiseDisplacementInputSpec + output_spec = FramewiseDisplacementOutputSpec + + references_ = [{ + 'entry': + BibTeX("""\ +@article{power_spurious_2012, + title = {Spurious but systematic correlations in functional connectivity {MRI} networks \
arise from subject motion}, + volume = {59}, + doi = {10.1016/j.neuroimage.2011.10.018}, + number = {3}, + urldate = {2016-08-16}, + journal = {NeuroImage}, + author = {Power, Jonathan D. and Barnes, Kelly A. and Snyder, Abraham Z. and Schlaggar, \
Bradley L. and Petersen, Steven E.}, + year = {2012}, + pages = {2142--2154}, +} +"""), + 'tags': ['method'] + }] + + def _run_interface(self, runtime): + mpars = np.loadtxt(self.inputs.in_file) # mpars is N_t x 6 + mpars = np.apply_along_axis( + func1d=normalize_mc_params, + axis=1, + arr=mpars, + source=self.inputs.parameter_source) + diff = mpars[:-1, :6] - mpars[1:, :6] + diff[:, 3:6] *= self.inputs.radius + fd_res = np.abs(diff).sum(axis=1) + + self._results = { + 'out_file': op.abspath(self.inputs.out_file), + 'fd_average': float(fd_res.mean()) + } + np.savetxt( + self.inputs.out_file, + fd_res, + header='FramewiseDisplacement', + comments='') + + if self.inputs.save_plot: + tr = None + if isdefined(self.inputs.series_tr): + tr = self.inputs.series_tr + + if self.inputs.normalize and tr is None: + IFLOGGER.warn('FD plot cannot be normalized if TR is not set') + + self._results['out_figure'] = op.abspath(self.inputs.out_figure) + fig = plot_confound( + fd_res, + self.inputs.figsize, + 'FD', + units='mm', + series_tr=tr, + normalize=self.inputs.normalize) + fig.savefig( + self._results['out_figure'], + dpi=float(self.inputs.figdpi), + format=self.inputs.out_figure[-3:], + bbox_inches='tight') + fig.clf() + + return runtime + + def _list_outputs(self): + return self._results + + +class CompCorInputSpec(BaseInterfaceInputSpec): + realigned_file = File( + exists=True, mandatory=True, desc='already realigned brain image (4D)') + mask_files = InputMultiPath( + File(exists=True), + desc=('One or more mask files that determine ' + 'ROI (3D).
When more than one file is ' 'provided `merge_method` or ' '`mask_index` must be provided')) + merge_method = traits.Enum( + 'union', + 'intersect', + 'none', + xor=['mask_index'], + requires=['mask_files'], + desc=('Merge method if multiple masks are ' + 'present - `union` uses voxels included in' + ' at least one input mask, `intersect` ' + 'uses only voxels present in all input ' + 'masks, `none` performs CompCor on ' + 'each mask individually')) + mask_index = traits.Range( + low=0, + xor=['merge_method'], + requires=['mask_files'], + desc=('Position of mask in `mask_files` to use - ' + 'first is the default.')) + components_file = traits.Str( + 'components_file.txt', + usedefault=True, + desc='Filename to store physiological components') + num_components = traits.Int(6, usedefault=True) # 6 for BOLD, 4 for ASL + pre_filter = traits.Enum( + 'polynomial', + 'cosine', + False, + usedefault=True, + desc='Detrend time series prior to component ' + 'extraction') + use_regress_poly = traits.Bool( + deprecated='0.15.0', + new_name='pre_filter', + desc=('use polynomial regression ' + 'pre-component extraction')) + regress_poly_degree = traits.Range( + low=1, value=1, usedefault=True, desc='the degree polynomial to use') + header_prefix = traits.Str( + desc=('the desired header for the output tsv ' + 'file (one column). If undefined, will ' + 'default to "CompCor"')) + high_pass_cutoff = traits.Float( + 128, + usedefault=True, + desc='Cutoff (in seconds) for "cosine" pre-filter') + repetition_time = traits.Float( + desc='Repetition time (TR) of series - derived from image header if ' + 'unspecified') + save_pre_filter = traits.Either( + traits.Bool, File, desc='Save pre-filter basis as text file') + ignore_initial_volumes = traits.Range( + low=0, + usedefault=True, + desc='Number of volumes at start of series to ignore') + + +class CompCorOutputSpec(TraitedSpec): + components_file = File( + exists=True, desc='text file containing the noise components') + pre_filter_file = File(desc='text file containing high-pass filter basis') + + +class CompCor(BaseInterface): + """ + Interface with core CompCor computation, used in aCompCor and tCompCor + + CompCor provides three pre-filter options, all of which include per-voxel + mean removal: + - polynomial: Legendre polynomial basis + - cosine: Discrete cosine basis + - False: mean-removal only + + In the case of ``polynomial`` and ``cosine`` filters, a pre-filter file may + be saved with a row for each volume/timepoint, and a column for each + non-constant regressor. + If no non-constant (mean-removal) columns are used, this file may be empty. + + If ``ignore_initial_volumes`` is set, then the specified number of initial + volumes are excluded both from pre-filtering and CompCor component + extraction. + Each column in the components and pre-filter files is prefixed with zeros + for each excluded volume so that the number of rows continues to match the + number of volumes in the input file. + In addition, for each excluded volume, a column is added to the pre-filter + file with a 1 in the corresponding row.
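+
+    A minimal NumPy sketch of this padding rule (the shapes here are
+    hypothetical and chosen only for illustration; this is not part of
+    the interface API):
+
+    >>> import numpy as np
+    >>> comp = np.ones((95, 6))  # components from 95 retained volumes
+    >>> skip_vols = 5  # number of excluded initial volumes
+    >>> padded = np.zeros((skip_vols + comp.shape[0], comp.shape[1]))
+    >>> padded[skip_vols:] = comp  # rows for excluded volumes stay zero
+    >>> padded.shape
+    (100, 6)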
+ + Example + ------- + + >>> ccinterface = CompCor() + >>> ccinterface.inputs.realigned_file = 'functional.nii' + >>> ccinterface.inputs.mask_files = 'mask.nii' + >>> ccinterface.inputs.num_components = 1 + >>> ccinterface.inputs.pre_filter = 'polynomial' + >>> ccinterface.inputs.regress_poly_degree = 2 + + """ + input_spec = CompCorInputSpec + output_spec = CompCorOutputSpec + references_ = [{ + 'entry': + BibTeX( + "@article{compcor_2007," + "title = {A component based noise correction method (CompCor) for BOLD and perfusion based}," + "volume = {37}," + "number = {1}," + "doi = {10.1016/j.neuroimage.2007.04.042}," + "urldate = {2016-08-13}," + "journal = {NeuroImage}," + "author = {Behzadi, Yashar and Restom, Khaled and Liau, Joy and Liu, Thomas T.}," + "year = {2007}," + "pages = {90-101},}"), + 'tags': ['method', 'implementation'] + }] + + def __init__(self, *args, **kwargs): + ''' exactly the same as compcor except the header ''' + super(CompCor, self).__init__(*args, **kwargs) + self._header = 'CompCor' + + def _run_interface(self, runtime): + mask_images = [] + if isdefined(self.inputs.mask_files): + mask_images = combine_mask_files(self.inputs.mask_files, + self.inputs.merge_method, + self.inputs.mask_index) + + if self.inputs.use_regress_poly: + self.inputs.pre_filter = 'polynomial' + + # Degree 0 == remove mean; see compute_noise_components + degree = (self.inputs.regress_poly_degree + if self.inputs.pre_filter == 'polynomial' else 0) + + imgseries = nb.load(self.inputs.realigned_file, mmap=NUMPY_MMAP) + + if len(imgseries.shape) != 4: + raise ValueError('{} expected a 4-D nifti file. Input {} has ' + '{} dimensions (shape {})'.format( + self._header, self.inputs.realigned_file, + len(imgseries.shape), imgseries.shape)) + + if len(mask_images) == 0: + img = nb.Nifti1Image( + np.ones(imgseries.shape[:3], dtype=np.bool), + affine=imgseries.affine, + header=imgseries.header) + mask_images = [img] + + skip_vols = self.inputs.ignore_initial_volumes + if skip_vols: + imgseries = imgseries.__class__( + imgseries.get_data()[..., skip_vols:], imgseries.affine, + imgseries.header) + + mask_images = self._process_masks(mask_images, imgseries.get_data()) + + TR = 0 + if self.inputs.pre_filter == 'cosine': + if isdefined(self.inputs.repetition_time): + TR = self.inputs.repetition_time + else: + # Derive TR from NIfTI header, if possible + try: + TR = imgseries.header.get_zooms()[3] + if imgseries.header.get_xyzt_units()[1] == 'msec': + TR /= 1000 + except (AttributeError, IndexError): + TR = 0 + + if TR == 0: + raise ValueError( + '{} cannot detect repetition time from image - ' + 'Set the repetition_time input'.format(self._header)) + + components, filter_basis = compute_noise_components( + imgseries.get_data(), mask_images, self.inputs.num_components, + self.inputs.pre_filter, degree, self.inputs.high_pass_cutoff, TR) + + if skip_vols: + old_comp = components + nrows = skip_vols + components.shape[0] + components = np.zeros( + (nrows, components.shape[1]), dtype=components.dtype) + components[skip_vols:] = old_comp + + components_file = os.path.join(os.getcwd(), + self.inputs.components_file) + np.savetxt( + components_file, + components, + fmt=b"%.10f", + delimiter='\t', + header=self._make_headers(components.shape[1]), + comments='') + + if self.inputs.pre_filter and self.inputs.save_pre_filter: + pre_filter_file = self._list_outputs()['pre_filter_file'] + ftype = { + 'polynomial': 'Legendre', + 'cosine': 'Cosine' + }[self.inputs.pre_filter] + ncols = filter_basis.shape[1] if 
filter_basis.size > 0 else 0 + header = ['{}{:02d}'.format(ftype, i) for i in range(ncols)] + if skip_vols: + old_basis = filter_basis + # nrows defined above + filter_basis = np.zeros( + (nrows, ncols + skip_vols), dtype=filter_basis.dtype) + if old_basis.size > 0: + filter_basis[skip_vols:, :ncols] = old_basis + filter_basis[:skip_vols, -skip_vols:] = np.eye(skip_vols) + header.extend([ + 'NonSteadyStateOutlier{:02d}'.format(i) + for i in range(skip_vols) + ]) + np.savetxt( + pre_filter_file, + filter_basis, + fmt=b'%.10f', + delimiter='\t', + header='\t'.join(header), + comments='') + + return runtime + + def _process_masks(self, mask_images, timeseries=None): + return mask_images + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['components_file'] = os.path.abspath( + self.inputs.components_file) + + save_pre_filter = self.inputs.save_pre_filter + if save_pre_filter: + if isinstance(save_pre_filter, bool): + save_pre_filter = os.path.abspath('pre_filter.tsv') + outputs['pre_filter_file'] = save_pre_filter + + return outputs + + def _make_headers(self, num_col): + header = self.inputs.header_prefix if \ + isdefined(self.inputs.header_prefix) else self._header + headers = ['{}{:02d}'.format(header, i) for i in range(num_col)] + return '\t'.join(headers) + + +class ACompCor(CompCor): + """ + Anatomical compcor: for inputs and outputs, see CompCor. + When the mask provided is an anatomical mask, then CompCor + is equivalent to ACompCor. + """ + + def __init__(self, *args, **kwargs): + ''' exactly the same as compcor except the header ''' + super(ACompCor, self).__init__(*args, **kwargs) + self._header = 'aCompCor' + + +class TCompCorInputSpec(CompCorInputSpec): + # and all the fields in CompCorInputSpec + percentile_threshold = traits.Range( + low=0., + high=1., + value=.02, + exclude_low=True, + exclude_high=True, + usedefault=True, + desc='the percentile ' + 'used to select highest-variance ' + 'voxels, represented by a number ' + 'between 0 and 1, exclusive. By ' + 'default, this value is set to .02. ' + 'That is, the 2% of voxels ' + 'with the highest variance are used.') + + +class TCompCorOutputSpec(CompCorOutputSpec): + # and all the fields in CompCorOutputSpec + high_variance_masks = OutputMultiPath( + File(exists=True), + desc=(("voxels exceeding the variance" + " threshold"))) + + +class TCompCor(CompCor): + """ + Interface for tCompCor. Computes a ROI mask based on variance of voxels. + + Example + ------- + + >>> ccinterface = TCompCor() + >>> ccinterface.inputs.realigned_file = 'functional.nii' + >>> ccinterface.inputs.mask_files = 'mask.nii' + >>> ccinterface.inputs.num_components = 1 + >>> ccinterface.inputs.pre_filter = 'polynomial' + >>> ccinterface.inputs.regress_poly_degree = 2 + >>> ccinterface.inputs.percentile_threshold = .03 + + """ + + input_spec = TCompCorInputSpec + output_spec = TCompCorOutputSpec + + def __init__(self, *args, **kwargs): + ''' exactly the same as compcor except the header ''' + super(TCompCor, self).__init__(*args, **kwargs) + self._header = 'tCompCor' + self._mask_files = [] + + def _process_masks(self, mask_images, timeseries=None): + out_images = [] + self._mask_files = [] + for i, img in enumerate(mask_images): + mask = img.get_data().astype(np.bool) + imgseries = timeseries[mask, :] + imgseries = regress_poly(2, imgseries)[0] + tSTD = _compute_tSTD(imgseries, 0, axis=-1) + threshold_std = np.percentile( + tSTD, + np.round(100. * + (1. 
- self.inputs.percentile_threshold)).astype(int)) + mask_data = np.zeros_like(mask) + mask_data[mask != 0] = tSTD >= threshold_std + out_image = nb.Nifti1Image( + mask_data, affine=img.affine, header=img.header) + + # save mask + mask_file = os.path.abspath('mask_{:03d}.nii.gz'.format(i)) + out_image.to_filename(mask_file) + IFLOGGER.debug('tCompcor computed and saved mask of shape %s to ' + 'mask_file %s', str(mask.shape), mask_file) + self._mask_files.append(mask_file) + out_images.append(out_image) + return out_images + + def _list_outputs(self): + outputs = super(TCompCor, self)._list_outputs() + outputs['high_variance_masks'] = self._mask_files + return outputs + + +class TSNRInputSpec(BaseInterfaceInputSpec): + in_file = InputMultiPath( + File(exists=True), + mandatory=True, + desc='realigned 4D file or a list of 3D files') + regress_poly = traits.Range(low=1, desc='Remove polynomials') + tsnr_file = File( + 'tsnr.nii.gz', + usedefault=True, + hash_files=False, + desc='output tSNR file') + mean_file = File( + 'mean.nii.gz', + usedefault=True, + hash_files=False, + desc='output mean file') + stddev_file = File( + 'stdev.nii.gz', + usedefault=True, + hash_files=False, + desc='output standard deviation file') + detrended_file = File( + 'detrend.nii.gz', + usedefault=True, + hash_files=False, + desc='input file after detrending') + + +class TSNROutputSpec(TraitedSpec): + tsnr_file = File(exists=True, desc='tsnr image file') + mean_file = File(exists=True, desc='mean image file') + stddev_file = File(exists=True, desc='std dev image file') + detrended_file = File(desc='detrended input file') + + +class TSNR(BaseInterface): + """ + Computes the time-course SNR for a time series + + Typically you want to run this on a realigned time-series. + + Example + ------- + + >>> tsnr = TSNR() + >>> tsnr.inputs.in_file = 'functional.nii' + >>> res = tsnr.run() # doctest: +SKIP + + """ + input_spec = TSNRInputSpec + output_spec = TSNROutputSpec + + def _run_interface(self, runtime): + img = nb.load(self.inputs.in_file[0], mmap=NUMPY_MMAP) + header = img.header.copy() + vollist = [ + nb.load(filename, mmap=NUMPY_MMAP) + for filename in self.inputs.in_file + ] + data = np.concatenate( + [ + vol.get_data().reshape(vol.shape[:3] + (-1, )) + for vol in vollist + ], + axis=3) + data = np.nan_to_num(data) + + if data.dtype.kind == 'i': + header.set_data_dtype(np.float32) + data = data.astype(np.float32) + + if isdefined(self.inputs.regress_poly): + data = regress_poly( + self.inputs.regress_poly, data, remove_mean=False)[0] + img = nb.Nifti1Image(data, img.affine, header) + nb.save(img, op.abspath(self.inputs.detrended_file)) + + meanimg = np.mean(data, axis=3) + stddevimg = np.std(data, axis=3) + tsnr = np.zeros_like(meanimg) + tsnr[stddevimg > 1.e-3] = meanimg[stddevimg > 1.e-3] / stddevimg[ + stddevimg > 1.e-3] + img = nb.Nifti1Image(tsnr, img.affine, header) + nb.save(img, op.abspath(self.inputs.tsnr_file)) + img = nb.Nifti1Image(meanimg, img.affine, header) + nb.save(img, op.abspath(self.inputs.mean_file)) + img = nb.Nifti1Image(stddevimg, img.affine, header) + nb.save(img, op.abspath(self.inputs.stddev_file)) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + for k in ['tsnr_file', 'mean_file', 'stddev_file']: + outputs[k] = op.abspath(getattr(self.inputs, k)) + + if isdefined(self.inputs.regress_poly): + outputs['detrended_file'] = op.abspath(self.inputs.detrended_file) + return outputs + + +class NonSteadyStateDetectorInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True,
mandatory=True, desc='4D NIFTI EPI file') + + +class NonSteadyStateDetectorOutputSpec(TraitedSpec): + n_volumes_to_discard = traits.Int(desc='Number of non-steady state volumes ' + 'detected at the beginning of the scan.') + + +class NonSteadyStateDetector(BaseInterface): + """ + Returns the number of non-steady state volumes detected at the beginning + of the scan. + """ + + input_spec = NonSteadyStateDetectorInputSpec + output_spec = NonSteadyStateDetectorOutputSpec + + def _run_interface(self, runtime): + in_nii = nb.load(self.inputs.in_file) + global_signal = in_nii.get_data()[:, :, :, :50].mean(axis=0).mean( + axis=0).mean(axis=0) + + self._results = {'n_volumes_to_discard': is_outlier(global_signal)} + + return runtime + + def _list_outputs(self): + return self._results + + +def compute_dvars(in_file, + in_mask, + remove_zerovariance=False, + intensity_normalization=1000): + """ + Compute the :abbr:`DVARS (D referring to temporal + derivative of timecourses, VARS referring to RMS variance over voxels)` + [Power2012]_. + + Particularly, the *standardized* :abbr:`DVARS (D referring to temporal + derivative of timecourses, VARS referring to RMS variance over voxels)` + [Nichols2013]_ are computed. + + .. [Nichols2013] Nichols T, `Notes on creating a standardized version of + DVARS `_, 2013. + + .. note:: Implementation details + + Uses the implementation of the `Yule-Walker equations + from nitime + `_ + for the :abbr:`AR (auto-regressive)` filtering of the fMRI signal. + + :param str in_file: 4D functional data, after head-motion correction. + :param str in_mask: a 3D brain mask. + :param bool remove_zerovariance: exclude voxels with zero temporal variance. + :param float intensity_normalization: target median intensity (0 disables + normalization). + :return: a tuple of standardized, non-standardized, and voxel-wise + standardized DVARS + + """ + import numpy as np + import nibabel as nb + from nitime.algorithms import AR_est_YW + import warnings + + func = nb.load(in_file, mmap=NUMPY_MMAP).get_data().astype(np.float32) + mask = nb.load(in_mask, mmap=NUMPY_MMAP).get_data().astype(np.uint8) + + if len(func.shape) != 4: + raise RuntimeError("Input fMRI dataset should be 4-dimensional") + + idx = np.where(mask > 0) + mfunc = func[idx[0], idx[1], idx[2], :] + + if intensity_normalization != 0: + mfunc = (mfunc / np.median(mfunc)) * intensity_normalization + + # Robust standard deviation (we are using "lower" interpolation + # because this is what FSL is doing) + func_sd = (np.percentile(mfunc, 75, axis=1, interpolation="lower") - + np.percentile(mfunc, 25, axis=1, interpolation="lower")) / 1.349 + + if remove_zerovariance: + mfunc = mfunc[func_sd != 0, :] + func_sd = func_sd[func_sd != 0] + + # Compute (non-robust) estimate of lag-1 autocorrelation + ar1 = np.apply_along_axis(AR_est_YW, 1, + regress_poly(0, mfunc, + remove_mean=True)[0].astype( + np.float32), 1)[:, 0] + + # Compute (predicted) standard deviation of temporal difference time series + diff_sdhat = np.squeeze(np.sqrt(((1 - ar1) * 2).tolist())) * func_sd + diff_sd_mean = diff_sdhat.mean() + + # Compute temporal difference time series + func_diff = np.diff(mfunc, axis=1) + + # DVARS (no standardization) + dvars_nstd = np.sqrt(np.square(func_diff).mean(axis=0)) + + # standardization + dvars_stdz = dvars_nstd / diff_sd_mean + + with warnings.catch_warnings(): # catch, e.g., divide by zero errors + warnings.filterwarnings('error') + + # voxelwise standardization + diff_vx_stdz = np.square( + func_diff / np.array([diff_sdhat] * func_diff.shape[-1]).T) + dvars_vx_stdz = np.sqrt(diff_vx_stdz.mean(axis=0)) + + return
(dvars_stdz, dvars_nstd, dvars_vx_stdz) + + +def plot_confound(tseries, + figsize, + name, + units=None, + series_tr=None, + normalize=False): + """ + A helper function to plot :abbr:`fMRI (functional MRI)` confounds. + + """ + import matplotlib + matplotlib.use(config.get('execution', 'matplotlib_backend')) + import matplotlib.pyplot as plt + from matplotlib.gridspec import GridSpec + from matplotlib.backends.backend_pdf import FigureCanvasPdf as FigureCanvas + import seaborn as sns + + fig = plt.Figure(figsize=figsize) + FigureCanvas(fig) + grid = GridSpec(1, 2, width_ratios=[3, 1], wspace=0.025) + grid.update(hspace=1.0, right=0.95, left=0.1, bottom=0.2) + + ax = fig.add_subplot(grid[0, :-1]) + if normalize and series_tr is not None: + tseries /= series_tr + + ax.plot(tseries) + ax.set_xlim((0, len(tseries))) + ylabel = name + if units is not None: + ylabel += (' speed [{}/s]' if normalize else ' [{}]').format(units) + ax.set_ylabel(ylabel) + + xlabel = 'Frame #' + if series_tr is not None: + xlabel = 'Frame # ({} sec TR)'.format(series_tr) + ax.set_xlabel(xlabel) + ylim = ax.get_ylim() + + ax = fig.add_subplot(grid[0, -1]) + sns.distplot(tseries, vertical=True, ax=ax) + ax.set_xlabel('Frames') + ax.set_ylim(ylim) + ax.set_yticklabels([]) + return fig + + +def is_outlier(points, thresh=3.5): + """ + Returns the number of leading timepoints to discard, i.e. how many + observations at the start of the series are outliers under the + modified z-score criterion. + + :param nparray points: a numobservations by numdimensions numpy array of observations + :param float thresh: the modified z-score to use as a threshold. Observations with + a modified z-score (based on the median absolute deviation) greater + than this value will be classified as outliers. + + :return: the number of initial timepoints to discard (an int). + + .. note:: References + + Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and + Handle Outliers", The ASQC Basic References in Quality Control: + Statistical Techniques, Edward F. Mykytka, Ph.D., Editor. + + """ + if len(points.shape) == 1: + points = points[:, None] + median = np.median(points, axis=0) + diff = np.sum((points - median)**2, axis=-1) + diff = np.sqrt(diff) + med_abs_deviation = np.median(diff) + + modified_z_score = 0.6745 * diff / med_abs_deviation + + timepoints_to_discard = 0 + for i in range(len(modified_z_score)): + if modified_z_score[i] <= thresh: + break + else: + timepoints_to_discard += 1 + + return timepoints_to_discard + + +def cosine_filter(data, timestep, period_cut, remove_mean=True, axis=-1): + datashape = data.shape + timepoints = datashape[axis] + + data = data.reshape((-1, timepoints)) + + frametimes = timestep * np.arange(timepoints) + X = _full_rank(_cosine_drift(period_cut, frametimes))[0] + non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([]) + + betas = np.linalg.lstsq(X, data.T)[0] + + if not remove_mean: + X = X[:, :-1] + betas = betas[:-1] + + residuals = data - X.dot(betas).T + + return residuals.reshape(datashape), non_constant_regressors + + +def regress_poly(degree, data, remove_mean=True, axis=-1): + """ + Returns data with degree polynomial regressed out. + + :param bool remove_mean: whether or not to demean data (i.e.
degree 0), + :param int axis: numpy array axis along which regression is performed + + """ + IFLOGGER.debug('Performing polynomial regression on data of shape %s', + str(data.shape)) + + datashape = data.shape + timepoints = datashape[axis] + + # Rearrange all voxel-wise time-series in rows + data = data.reshape((-1, timepoints)) + + # Generate design matrix + X = np.ones((timepoints, 1)) # quick way to calc degree 0 + for i in range(degree): + polynomial_func = Legendre.basis(i + 1) + value_array = np.linspace(-1, 1, timepoints) + X = np.hstack((X, polynomial_func(value_array)[:, np.newaxis])) + + non_constant_regressors = X[:, :-1] if X.shape[1] > 1 else np.array([]) + + # Calculate coefficients + betas = np.linalg.pinv(X).dot(data.T) + + # Estimation + if remove_mean: + datahat = X.dot(betas).T + else: # disregard the first layer of X, which is degree 0 + datahat = X[:, 1:].dot(betas[1:, ...]).T + regressed_data = data - datahat + + # Back to original shape + return regressed_data.reshape(datashape), non_constant_regressors + + +def combine_mask_files(mask_files, mask_method=None, mask_index=None): + """Combines input mask files into a single nibabel image + + A helper function for CompCor + + mask_files: a list + one or more binary mask files + mask_method: enum ('union', 'intersect', 'none') + determines how to combine masks + mask_index: an integer + determines which file to return (mutually exclusive with mask_method) + + returns: a list of nibabel images + """ + + if isdefined(mask_index) or not isdefined(mask_method): + if not isdefined(mask_index): + if len(mask_files) == 1: + mask_index = 0 + else: + raise ValueError(('When more than one mask file is provided, ' + 'one of merge_method or mask_index must be ' + 'set')) + if mask_index < len(mask_files): + mask = nb.load(mask_files[mask_index], mmap=NUMPY_MMAP) + return [mask] + raise ValueError(('mask_index {0} must be less than number of mask ' + 'files {1}').format(mask_index, len(mask_files))) + masks = [] + if mask_method == 'none': + for filename in mask_files: + masks.append(nb.load(filename, mmap=NUMPY_MMAP)) + return masks + + if mask_method == 'union': + mask = None + for filename in mask_files: + img = nb.load(filename, mmap=NUMPY_MMAP) + if mask is None: + mask = img.get_data() > 0 + np.logical_or(mask, img.get_data() > 0, mask) + img = nb.Nifti1Image(mask, img.affine, header=img.header) + return [img] + + if mask_method == 'intersect': + mask = None + for filename in mask_files: + img = nb.load(filename, mmap=NUMPY_MMAP) + if mask is None: + mask = img.get_data() > 0 + np.logical_and(mask, img.get_data() > 0, mask) + img = nb.Nifti1Image(mask, img.affine, header=img.header) + return [img] + + +def compute_noise_components(imgseries, mask_images, num_components, + filter_type, degree, period_cut, repetition_time): + """Compute the noise components from the imgseries for each mask + + imgseries: a nibabel img + mask_images: a list of nibabel images + num_components: number of noise components to return + filter_type: type of filter to apply to time series before computing + noise components.
+ 'polynomial' - Legendre polynomial basis + 'cosine' - Discrete cosine (DCT) basis + False - None (mean-removal only) + + Filter options: + + degree: order of polynomial used to remove trends from the timeseries + period_cut: minimum period (in sec) for DCT high-pass filter + repetition_time: time (in sec) between volume acquisitions + + returns: + + components: a numpy array + basis: a numpy array containing the (non-constant) filter regressors + + """ + components = None + basis = np.array([]) + for img in mask_images: + mask = img.get_data().astype(np.bool) + if imgseries.shape[:3] != mask.shape: + raise ValueError( + 'Inputs for CompCor, timeseries and mask, do not have ' + 'matching spatial dimensions ({} and {}, respectively)'.format( + imgseries.shape[:3], mask.shape)) + + voxel_timecourses = imgseries[mask, :] + + # Zero-out any bad values + voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0 + + # Currently supported filters: Legendre polynomial, cosine, or none + # With no filter, the mean is nonetheless removed (poly w/ degree 0) + if filter_type == 'cosine': + voxel_timecourses, basis = cosine_filter( + voxel_timecourses, repetition_time, period_cut) + elif filter_type in ('polynomial', False): + # from paper: + # "The constant and linear trends of the columns in the matrix M were + # removed [prior to ...]" + voxel_timecourses, basis = regress_poly(degree, voxel_timecourses) + + # "Voxel time series from the noise ROI (either anatomical or tSTD) were + # placed in a matrix M of size Nxm, with time along the row dimension + # and voxels along the column dimension." + M = voxel_timecourses.T + + # "[... were removed] prior to column-wise variance normalization." + M = M / _compute_tSTD(M, 1.) + + # "The covariance matrix C = MMT was constructed and decomposed into its + # principal components using a singular value decomposition." + u, _, _ = linalg.svd(M, full_matrices=False) + if components is None: + components = u[:, :num_components] + else: + components = np.hstack((components, u[:, :num_components])) + if components is None and num_components > 0: + raise ValueError('No components found') + return components, basis + + +def _compute_tSTD(M, x, axis=0): + stdM = np.std(M, axis=axis) + # set bad values to x + stdM[stdM == 0] = x + stdM[np.isnan(stdM)] = x + return stdM + + +# _cosine_drift and _full_rank copied from nipy/modalities/fmri/design_matrix +# +# Nipy release: 0.4.1 +# Modified for smooth integration in CompCor classes + + +def _cosine_drift(period_cut, frametimes): + """Create a cosine drift matrix with periods greater than or equal to period_cut + + Parameters + ---------- + period_cut: float + Cut period of the low-pass filter (in sec) + frametimes: array of shape(nscans) + The sampling times (in sec) + + Returns + ------- + cdrift: array of shape(n_scans, n_drifts) + cosine drifts plus a constant regressor at cdrift[:,0] + + Ref: http://en.wikipedia.org/wiki/Discrete_cosine_transform DCT-II + """ + len_tim = len(frametimes) + n_times = np.arange(len_tim) + hfcut = 1. / period_cut # input parameter is the period + + # frametimes.max() should be (len_tim-1)*dt + dt = frametimes[1] - frametimes[0] + # hfcut = 1/(2*dt) yields len_time + # If series is too short, return constant regressor + order = max(int(np.floor(2 * len_tim * hfcut * dt)), 1) + cdrift = np.zeros((len_tim, order)) + nfct = np.sqrt(2.0 / len_tim) + + for k in range(1, order): + cdrift[:, k - 1] = nfct * np.cos( + (np.pi / len_tim) * (n_times + .5) * k) + + cdrift[:, order - 1] = 1.
# or 1./sqrt(len_tim) to normalize
+ return cdrift
+
+
+def _full_rank(X, cmax=1e15):
+ """
+ This function possibly adds a scalar matrix to X
+ to guarantee that the condition number is smaller than a given threshold.
+
+ Parameters
+ ----------
+ X: array of shape(nrows, ncols)
+ cmax=1.e15, float tolerance for condition number
+
+ Returns
+ -------
+ X: array of shape(nrows, ncols) after regularization
+ cond: float, the resulting condition number (capped at cmax)
+ """
+ U, s, V = np.linalg.svd(X, 0)
+ smax, smin = s.max(), s.min()
+ c = smax / smin
+ if c < cmax:
+ return X, c
+ IFLOGGER.warning('Matrix is singular at working precision, regularizing...')
+ lda = (smax - cmax * smin) / (cmax - 1)
+ s = s + lda
+ X = np.dot(U, np.dot(np.diag(s), V))
+ return X, cmax
diff --git a/nipype/algorithms/icc.py b/nipype/algorithms/icc.py
new file mode 100644
index 0000000000..5d5ec1c39f
--- /dev/null
+++ b/nipype/algorithms/icc.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+ absolute_import)
+from builtins import range
+import os
+import numpy as np
+from numpy import ones, kron, mean, eye, hstack, dot, tile
+import nibabel as nb
+from scipy.linalg import pinv
+from ..interfaces.base import BaseInterfaceInputSpec, TraitedSpec, \
+ BaseInterface, traits, File
+from ..utils import NUMPY_MMAP
+
+
+class ICCInputSpec(BaseInterfaceInputSpec):
+ subjects_sessions = traits.List(
+ traits.List(File(exists=True)),
+ desc="n subjects m sessions 3D stat files",
+ mandatory=True)
+ mask = File(exists=True, mandatory=True)
+
+
+class ICCOutputSpec(TraitedSpec):
+ icc_map = File(exists=True)
+ session_var_map = File(exists=True, desc="variance between sessions")
+ subject_var_map = File(exists=True, desc="variance between subjects")
+
+
+class ICC(BaseInterface):
+ '''
+ Calculates the Intraclass Correlation Coefficient (3,1) as defined in
+ P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass Correlations: Uses in
+ Assessing Rater Reliability". Psychological Bulletin 86 (2): 420-428. This
+ particular implementation is aimed at reliability (test-retest) studies. 
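+
+ For n subjects scanned over k sessions, the voxel-wise map follows the
+ implementation of ``ICC_rep_anova`` below::
+
+ ICC(3,1) = (MSR - MSE) / (MSR + (k - 1) * MSE)
+
+ where MSR and MSE are the between-subject and residual (error) mean
+ squares of a one-sample repeated-measures ANOVA.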
+ '''
+ input_spec = ICCInputSpec
+ output_spec = ICCOutputSpec
+
+ def _run_interface(self, runtime):
+ maskdata = nb.load(self.inputs.mask).get_data()
+ maskdata = np.logical_not(
+ np.logical_or(maskdata == 0, np.isnan(maskdata)))
+
+ session_datas = [[
+ nb.load(fname, mmap=NUMPY_MMAP).get_data()[maskdata].reshape(
+ -1, 1) for fname in sessions
+ ] for sessions in self.inputs.subjects_sessions]
+ list_of_sessions = [
+ np.dstack(session_data) for session_data in session_datas
+ ]
+ all_data = np.hstack(list_of_sessions)
+ icc = np.zeros(session_datas[0][0].shape)
+ session_F = np.zeros(session_datas[0][0].shape)
+ session_var = np.zeros(session_datas[0][0].shape)
+ subject_var = np.zeros(session_datas[0][0].shape)
+
+ for x in range(icc.shape[0]):
+ Y = all_data[x, :, :]
+ icc[x], subject_var[x], session_var[x], session_F[
+ x], _, _ = ICC_rep_anova(Y)
+
+ nim = nb.load(self.inputs.subjects_sessions[0][0])
+ new_data = np.zeros(nim.shape)
+ new_data[maskdata] = icc.reshape(-1, )
+ new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
+ nb.save(new_img, 'icc_map.nii')
+
+ new_data = np.zeros(nim.shape)
+ new_data[maskdata] = session_var.reshape(-1, )
+ new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
+ nb.save(new_img, 'session_var_map.nii')
+
+ new_data = np.zeros(nim.shape)
+ new_data[maskdata] = subject_var.reshape(-1, )
+ new_img = nb.Nifti1Image(new_data, nim.affine, nim.header)
+ nb.save(new_img, 'subject_var_map.nii')
+
+ return runtime
+
+ def _list_outputs(self):
+ outputs = self._outputs().get()
+ outputs['icc_map'] = os.path.abspath('icc_map.nii')
+ outputs['session_var_map'] = os.path.abspath('session_var_map.nii')
+ outputs['subject_var_map'] = os.path.abspath('subject_var_map.nii')
+ return outputs
+
+
+def ICC_rep_anova(Y):
+ '''
+ the data Y are entered as a 'table', i.e. subjects are in rows and repeated
+ measures in columns
+
+ One Sample Repeated measure ANOVA
+
+ Y = XB + E with X = [Factor / Subjects]
+ '''
+
+ [nb_subjects, nb_conditions] = Y.shape
+ dfc = nb_conditions - 1
+ dfe = (nb_subjects - 1) * dfc
+ dfr = nb_subjects - 1
+
+ # Compute the repeated measure effect
+ # ------------------------------------
+
+ # Sum Square Total
+ mean_Y = mean(Y)
+ SST = ((Y - mean_Y)**2).sum()
+
+ # create the design matrix for the different levels
+ x = kron(eye(nb_conditions), ones((nb_subjects, 1))) # sessions
+ x0 = tile(eye(nb_subjects), (nb_conditions, 1)) # subjects
+ X = hstack([x, x0])
+
+ # Sum Square Error
+ predicted_Y = dot(dot(dot(X, pinv(dot(X.T, X))), X.T), Y.flatten('F'))
+ residuals = Y.flatten('F') - predicted_Y
+ SSE = (residuals**2).sum()
+
+ residuals.shape = Y.shape
+
+ MSE = SSE / dfe
+
+ # Sum square session effect - between columns/sessions
+ SSC = ((mean(Y, 0) - mean_Y)**2).sum() * nb_subjects
+ MSC = SSC / dfc / nb_subjects
+
+ session_effect_F = MSC / MSE
+
+ # Sum Square subject effect - between rows/subjects
+ SSR = SST - SSC - SSE
+ MSR = SSR / dfr
+
+ # ICC(3,1) = (mean square subject - mean square error) /
+ # (mean square subject + (k-1)*mean square error)
+ ICC = (MSR - MSE) / (MSR + dfc * MSE)
+
+ e_var = MSE # variance of error
+ r_var = (MSR - MSE) / nb_conditions # variance between subjects
+
+ return ICC, r_var, e_var, session_effect_F, dfc, dfe
diff --git a/nipype/algorithms/mesh.py b/nipype/algorithms/mesh.py
new file mode 100644
index 0000000000..eeb2ddbb80
--- /dev/null
+++ b/nipype/algorithms/mesh.py
@@ -0,0 +1,426 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# 
vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Miscellaneous algorithms for 2D contours and 3D triangularized meshes handling +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import zip, str, bytes + +import os.path as op +import numpy as np +from numpy import linalg as nla + +from .. import logging +from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File, + BaseInterfaceInputSpec) +from ..interfaces.vtkbase import tvtk +from ..interfaces import vtkbase as VTKInfo +IFLOGGER = logging.getLogger('nipype.interface') + + +class TVTKBaseInterface(BaseInterface): + """ A base class for interfaces using VTK """ + + _redirect_x = True + + def __init__(self, **inputs): + if VTKInfo.no_tvtk(): + raise ImportError('This interface requires tvtk to run.') + super(TVTKBaseInterface, self).__init__(**inputs) + + +class WarpPointsInputSpec(BaseInterfaceInputSpec): + points = File( + exists=True, mandatory=True, desc='file containing the point set') + warp = File( + exists=True, + mandatory=True, + desc='dense deformation field to be applied') + interp = traits.Enum( + 'cubic', + 'nearest', + 'linear', + usedefault=True, + mandatory=True, + desc='interpolation') + out_points = File( + name_source='points', + name_template='%s_warped', + output_name='out_points', + keep_extension=True, + desc='the warped point set') + + +class WarpPointsOutputSpec(TraitedSpec): + out_points = File(desc='the warped point set') + + +class WarpPoints(TVTKBaseInterface): + """ + Applies a displacement field to a point set given in vtk format. + Any discrete deformation field, given in physical coordinates and + which volume covers the extent of the vtk point set, is a valid + ``warp`` file. FSL interfaces are compatible, for instance any + field computed with :class:`nipype.interfaces.fsl.utils.ConvertWarp`. 
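+
+ In brief, each point ``p`` (given in physical coordinates) is mapped
+ into the voxel grid of the field through the inverse of its affine,
+ the displacement ``d(p)`` is sampled there component-wise with
+ ``scipy.ndimage.map_coordinates``, and the output point is written
+ as ``p + d(p)`` (see ``_run_interface`` below).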
+ + Example:: + + from nipype.algorithms.mesh import WarpPoints + wp = WarpPoints() + wp.inputs.points = 'surf1.vtk' + wp.inputs.warp = 'warpfield.nii' + res = wp.run() + + """ + input_spec = WarpPointsInputSpec + output_spec = WarpPointsOutputSpec + + def _gen_fname(self, in_file, suffix='generated', ext=None): + fname, fext = op.splitext(op.basename(in_file)) + + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + + if ext is None: + ext = fext + + if ext[0] == '.': + ext = ext[1:] + return op.abspath('%s_%s.%s' % (fname, suffix, ext)) + + def _run_interface(self, runtime): + import nibabel as nb + from scipy import ndimage + + r = tvtk.PolyDataReader(file_name=self.inputs.points) + r.update() + mesh = VTKInfo.vtk_output(r) + points = np.array(mesh.points) + warp_dims = nb.funcs.four_to_three(nb.load(self.inputs.warp)) + + affine = warp_dims[0].affine + # voxsize = warp_dims[0].header.get_zooms() + vox2ras = affine[0:3, 0:3] + ras2vox = np.linalg.inv(vox2ras) + origin = affine[0:3, 3] + voxpoints = np.array([np.dot(ras2vox, (p - origin)) for p in points]) + + warps = [] + for axis in warp_dims: + wdata = axis.get_data() + if np.any(wdata != 0): + + warp = ndimage.map_coordinates(wdata, voxpoints.transpose()) + else: + warp = np.zeros((points.shape[0], )) + + warps.append(warp) + + disps = np.squeeze(np.dstack(warps)) + newpoints = [p + d for p, d in zip(points, disps)] + mesh.points = newpoints + w = tvtk.PolyDataWriter() + VTKInfo.configure_input_data(w, mesh) + w.file_name = self._gen_fname( + self.inputs.points, suffix='warped', ext='.vtk') + w.write() + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_points'] = self._gen_fname( + self.inputs.points, suffix='warped', ext='.vtk') + return outputs + + +class ComputeMeshWarpInputSpec(BaseInterfaceInputSpec): + surface1 = File( + exists=True, + mandatory=True, + desc=('Reference surface (vtk format) to which compute ' + 'distance.')) + surface2 = File( + exists=True, + mandatory=True, + desc=('Test surface (vtk format) from which compute ' + 'distance.')) + metric = traits.Enum( + 'euclidean', + 'sqeuclidean', + usedefault=True, + desc='norm used to report distance') + weighting = traits.Enum( + 'none', + 'area', + usedefault=True, + desc=('"none": no weighting is performed, surface": edge distance is ' + 'weighted by the corresponding surface area')) + out_warp = File( + 'surfwarp.vtk', + usedefault=True, + desc='vtk file based on surface1 and warpings mapping it ' + 'to surface2') + out_file = File( + 'distance.npy', + usedefault=True, + desc='numpy file keeping computed distances and weights') + + +class ComputeMeshWarpOutputSpec(TraitedSpec): + distance = traits.Float(desc="computed distance") + out_warp = File( + exists=True, + desc=('vtk file with the vertex-wise ' + 'mapping of surface1 to surface2')) + out_file = File( + exists=True, desc='numpy file keeping computed distances and weights') + + +class ComputeMeshWarp(TVTKBaseInterface): + """ + Calculates a the vertex-wise warping to get surface2 from surface1. + It also reports the average distance of vertices, using the norm specified + as input. + + .. 
warning: + + A point-to-point correspondence between surfaces is required + + + Example:: + + import nipype.algorithms.mesh as m + dist = m.ComputeMeshWarp() + dist.inputs.surface1 = 'surf1.vtk' + dist.inputs.surface2 = 'surf2.vtk' + res = dist.run() + + """ + + input_spec = ComputeMeshWarpInputSpec + output_spec = ComputeMeshWarpOutputSpec + + def _triangle_area(self, A, B, C): + A = np.array(A) + B = np.array(B) + C = np.array(C) + ABxAC = nla.norm(A - B) * nla.norm(A - C) + prod = np.dot(B - A, C - A) + angle = np.arccos(prod / ABxAC) + area = 0.5 * ABxAC * np.sin(angle) + return area + + def _run_interface(self, runtime): + r1 = tvtk.PolyDataReader(file_name=self.inputs.surface1) + r2 = tvtk.PolyDataReader(file_name=self.inputs.surface2) + vtk1 = VTKInfo.vtk_output(r1) + vtk2 = VTKInfo.vtk_output(r2) + r1.update() + r2.update() + assert (len(vtk1.points) == len(vtk2.points)) + + points1 = np.array(vtk1.points) + points2 = np.array(vtk2.points) + + diff = points2 - points1 + weights = np.ones(len(diff)) + + try: + errvector = nla.norm(diff, axis=1) + except TypeError: # numpy < 1.9 + errvector = np.apply_along_axis(nla.norm, 1, diff) + + if self.inputs.metric == 'sqeuclidean': + errvector **= 2 + + if self.inputs.weighting == 'area': + faces = vtk1.polys.to_array().reshape(-1, 4).astype(int)[:, 1:] + + for i, p1 in enumerate(points2): + # compute surfaces, set in weight + w = 0.0 + point_faces = faces[(faces[:, :] == i).any(axis=1)] + + for idset in point_faces: + fp1 = points1[int(idset[0])] + fp2 = points1[int(idset[1])] + fp3 = points1[int(idset[2])] + w += self._triangle_area(fp1, fp2, fp3) + weights[i] = w + + result = np.vstack([errvector, weights]) + np.save(op.abspath(self.inputs.out_file), result.transpose()) + + out_mesh = tvtk.PolyData() + out_mesh.points = vtk1.points + out_mesh.polys = vtk1.polys + out_mesh.point_data.vectors = diff + out_mesh.point_data.vectors.name = 'warpings' + writer = tvtk.PolyDataWriter( + file_name=op.abspath(self.inputs.out_warp)) + VTKInfo.configure_input_data(writer, out_mesh) + writer.write() + + self._distance = np.average(errvector, weights=weights) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs['out_warp'] = op.abspath(self.inputs.out_warp) + outputs['distance'] = self._distance + return outputs + + +class MeshWarpMathsInputSpec(BaseInterfaceInputSpec): + in_surf = File( + exists=True, + mandatory=True, + desc=('Input surface in vtk format, with associated warp ' + 'field as point data (ie. 
from ComputeMeshWarp)'))
+ float_trait = traits.Either(
+ traits.Float(1.0),
+ traits.Tuple(traits.Float(1.0), traits.Float(1.0), traits.Float(1.0)))
+
+ operator = traits.Either(
+ float_trait,
+ File(exists=True),
+ default=1.0,
+ usedefault=True,
+ mandatory=True,
+ desc='image, float or tuple of floats to act as operator')
+
+ operation = traits.Enum(
+ 'sum',
+ 'sub',
+ 'mul',
+ 'div',
+ usedefault=True,
+ desc='operation to be performed')
+
+ out_warp = File(
+ 'warp_maths.vtk',
+ usedefault=True,
+ desc='vtk file based on in_surf and warpings mapping it '
+ 'to out_file')
+ out_file = File(
+ 'warped_surf.vtk', usedefault=True, desc='vtk with surface warped')
+
+
+class MeshWarpMathsOutputSpec(TraitedSpec):
+ out_warp = File(
+ exists=True,
+ desc=('vtk file with the vertex-wise '
+ 'mapping of surface1 to surface2'))
+ out_file = File(exists=True, desc='vtk with surface warped')
+
+
+class MeshWarpMaths(TVTKBaseInterface):
+ """
+ Performs the most basic mathematical operations on the warping field
+ defined at each vertex of the input surface. A surface with scalar
+ or vector data can be used as operator for non-uniform operations.
+
+ .. warning::
+
+ A point-to-point correspondence between surfaces is required
+
+
+ Example::
+
+ import nipype.algorithms.mesh as m
+ mmath = m.MeshWarpMaths()
+ mmath.inputs.in_surf = 'surf1.vtk'
+ mmath.inputs.operator = 'surf2.vtk'
+ mmath.inputs.operation = 'mul'
+ res = mmath.run()
+
+ """
+
+ input_spec = MeshWarpMathsInputSpec
+ output_spec = MeshWarpMathsOutputSpec
+
+ def _run_interface(self, runtime):
+ r1 = tvtk.PolyDataReader(file_name=self.inputs.in_surf)
+ vtk1 = VTKInfo.vtk_output(r1)
+ r1.update()
+ points1 = np.array(vtk1.points)
+
+ if vtk1.point_data.vectors is None:
+ raise RuntimeError('No warping field was found in in_surf')
+
+ operator = self.inputs.operator
+ opfield = np.ones_like(points1)
+
+ if isinstance(operator, (str, bytes)):
+ # a string operator is the path of the surface that carries
+ # the operator values (this spec has no ``surface2`` input)
+ r2 = tvtk.PolyDataReader(file_name=operator)
+ vtk2 = VTKInfo.vtk_output(r2)
+ r2.update()
+ assert (len(points1) == len(vtk2.points))
+
+ opfield = vtk2.point_data.vectors
+
+ if opfield is None:
+ opfield = vtk2.point_data.scalars
+
+ if opfield is None:
+ raise RuntimeError('No operator values found in operator file')
+
+ opfield = np.array(opfield)
+
+ if opfield.shape[1] < points1.shape[1]:
+ opfield = np.array([opfield.tolist()] * points1.shape[1]).T
+ else:
+ operator = np.atleast_1d(operator)
+ opfield *= operator
+
+ warping = np.array(vtk1.point_data.vectors)
+
+ if self.inputs.operation == 'sum':
+ warping += opfield
+ elif self.inputs.operation == 'sub':
+ warping -= opfield
+ elif self.inputs.operation == 'mul':
+ warping *= opfield
+ elif self.inputs.operation == 'div':
+ warping /= opfield
+
+ vtk1.point_data.vectors = warping
+ writer = tvtk.PolyDataWriter(
+ file_name=op.abspath(self.inputs.out_warp))
+ VTKInfo.configure_input_data(writer, vtk1)
+ writer.write()
+
+ vtk1.point_data.vectors = None
+ vtk1.points = points1 + warping
+ writer = tvtk.PolyDataWriter(
+ file_name=op.abspath(self.inputs.out_file))
+ VTKInfo.configure_input_data(writer, vtk1)
+ writer.write()
+ return runtime
+
+ def _list_outputs(self):
+ outputs = self._outputs().get()
+ outputs['out_file'] = op.abspath(self.inputs.out_file)
+ outputs['out_warp'] = op.abspath(self.inputs.out_warp)
+ return outputs
+
+
+class P2PDistance(ComputeMeshWarp):
+ """
+ Calculates a point-to-point (p2p) distance between two corresponding
+ VTK-readable meshes or contours. 
+
+ A point-to-point correspondence between nodes is required
+
+ .. deprecated:: 1.0-dev
+ Use :py:class:`ComputeMeshWarp` instead.
+ """
+
+ def __init__(self, **inputs):
+ super(P2PDistance, self).__init__(**inputs)
+ IFLOGGER.warning('This interface has been deprecated since 1.0, please '
+ 'use ComputeMeshWarp')
diff --git a/nipype/algorithms/metrics.py b/nipype/algorithms/metrics.py
new file mode 100644
index 0000000000..d9074c48d3
--- /dev/null
+++ b/nipype/algorithms/metrics.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+'''
+Image assessment algorithms. Typical overlap and error computation
+measures to evaluate results from other processing units.
+'''
+from __future__ import (print_function, division, unicode_literals,
+ absolute_import)
+from builtins import zip, range
+
+import os
+import os.path as op
+
+import nibabel as nb
+import numpy as np
+from scipy.ndimage.morphology import binary_erosion
+from scipy.spatial.distance import cdist, euclidean, dice, jaccard
+from scipy.ndimage.measurements import center_of_mass, label
+
+from .. import config, logging
+
+from ..interfaces.base import (
+ SimpleInterface, BaseInterface, traits, TraitedSpec, File,
+ InputMultiPath, BaseInterfaceInputSpec,
+ isdefined)
+from ..interfaces.nipy.base import NipyBaseInterface
+
+iflogger = logging.getLogger('nipype.interface')
+
+
+class DistanceInputSpec(BaseInterfaceInputSpec):
+ volume1 = File(
+ exists=True,
+ mandatory=True,
+ desc="Has to have the same dimensions as volume2.")
+ volume2 = File(
+ exists=True,
+ mandatory=True,
+ desc="Has to have the same dimensions as volume1.")
+ method = traits.Enum(
+ "eucl_min",
+ "eucl_cog",
+ "eucl_mean",
+ "eucl_wmean",
+ "eucl_max",
+ desc='"eucl_min": Euclidean distance between two closest points\
+ "eucl_cog": mean Euclidean distance between the Center of Gravity\
+ of volume1 and CoGs of volume2\
+ "eucl_mean": mean Euclidean minimum distance of all volume2 voxels\
+ to volume1\
+ "eucl_wmean": mean Euclidean minimum distance of all volume2 voxels\
+ to volume1 weighted by their values\
+ "eucl_max": maximum over minimum Euclidean distances of all volume2\
+ voxels to volume1 (also known as the Hausdorff distance)',
+ usedefault=True)
+ mask_volume = File(
+ exists=True, desc="calculate overlap only within this mask.")
+
+
+class DistanceOutputSpec(TraitedSpec):
+ distance = traits.Float()
+ point1 = traits.Array(shape=(3, ))
+ point2 = traits.Array(shape=(3, ))
+ histogram = File()
+
+
+class Distance(BaseInterface):
+ """Calculates distance between two volumes. 
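+
+ A minimal usage sketch (filenames are illustrative; ``eucl_max``
+ reports the Hausdorff distance)::
+
+ from nipype.algorithms.metrics import Distance
+ dist = Distance()
+ dist.inputs.volume1 = 'cont1.nii'
+ dist.inputs.volume2 = 'cont2.nii'
+ dist.inputs.method = 'eucl_max'
+ res = dist.run()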
+ """ + input_spec = DistanceInputSpec + output_spec = DistanceOutputSpec + + _hist_filename = "hist.pdf" + + def _find_border(self, data): + eroded = binary_erosion(data) + border = np.logical_and(data, np.logical_not(eroded)) + return border + + def _get_coordinates(self, data, affine): + if len(data.shape) == 4: + data = data[:, :, :, 0] + indices = np.vstack(np.nonzero(data)) + indices = np.vstack((indices, np.ones(indices.shape[1]))) + coordinates = np.dot(affine, indices) + return coordinates[:3, :] + + def _eucl_min(self, nii1, nii2): + origdata1 = nii1.get_data().astype(np.bool) + border1 = self._find_border(origdata1) + + origdata2 = nii2.get_data().astype(np.bool) + border2 = self._find_border(origdata2) + + set1_coordinates = self._get_coordinates(border1, nii1.affine) + + set2_coordinates = self._get_coordinates(border2, nii2.affine) + + dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) + (point1, point2) = np.unravel_index( + np.argmin(dist_matrix), dist_matrix.shape) + return (euclidean(set1_coordinates.T[point1, :], + set2_coordinates.T[point2, :]), + set1_coordinates.T[point1, :], set2_coordinates.T[point2, :]) + + def _eucl_cog(self, nii1, nii2): + origdata1 = np.logical_and(nii1.get_data() != 0, + np.logical_not(np.isnan(nii1.get_data()))) + cog_t = np.array(center_of_mass(origdata1.copy())).reshape(-1, 1) + cog_t = np.vstack((cog_t, np.array([1]))) + cog_t_coor = np.dot(nii1.affine, cog_t)[:3, :] + + origdata2 = np.logical_and(nii2.get_data() != 0, + np.logical_not(np.isnan(nii2.get_data()))) + (labeled_data, n_labels) = label(origdata2) + + cogs = np.ones((4, n_labels)) + + for i in range(n_labels): + cogs[:3, i] = np.array( + center_of_mass(origdata2, labeled_data, i + 1)) + + cogs_coor = np.dot(nii2.affine, cogs)[:3, :] + + dist_matrix = cdist(cog_t_coor.T, cogs_coor.T) + + return np.mean(dist_matrix) + + def _eucl_mean(self, nii1, nii2, weighted=False): + origdata1 = nii1.get_data().astype(np.bool) + border1 = self._find_border(origdata1) + + origdata2 = nii2.get_data().astype(np.bool) + + set1_coordinates = self._get_coordinates(border1, nii1.affine) + set2_coordinates = self._get_coordinates(origdata2, nii2.affine) + + dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T) + min_dist_matrix = np.amin(dist_matrix, axis=0) + import matplotlib + matplotlib.use(config.get('execution', 'matplotlib_backend')) + import matplotlib.pyplot as plt + plt.figure() + plt.hist(min_dist_matrix, 50, normed=1, facecolor='green') + plt.savefig(self._hist_filename) + plt.clf() + plt.close() + + if weighted: + return np.average( + min_dist_matrix, weights=nii2.get_data()[origdata2].flat) + else: + return np.mean(min_dist_matrix) + + def _eucl_max(self, nii1, nii2): + origdata1 = nii1.get_data() + origdata1 = np.logical_not( + np.logical_or(origdata1 == 0, np.isnan(origdata1))) + origdata2 = nii2.get_data() + origdata2 = np.logical_not( + np.logical_or(origdata2 == 0, np.isnan(origdata2))) + + if isdefined(self.inputs.mask_volume): + maskdata = nb.load(self.inputs.mask_volume).get_data() + maskdata = np.logical_not( + np.logical_or(maskdata == 0, np.isnan(maskdata))) + origdata1 = np.logical_and(maskdata, origdata1) + origdata2 = np.logical_and(maskdata, origdata2) + + if origdata1.max() == 0 or origdata2.max() == 0: + return np.NaN + + border1 = self._find_border(origdata1) + border2 = self._find_border(origdata2) + + set1_coordinates = self._get_coordinates(border1, nii1.affine) + set2_coordinates = self._get_coordinates(border2, nii2.affine) + distances = 
cdist(set1_coordinates.T, set2_coordinates.T) + mins = np.concatenate((np.amin(distances, axis=0), + np.amin(distances, axis=1))) + + return np.max(mins) + + def _run_interface(self, runtime): + # there is a bug in some scipy ndimage methods that gets tripped by memory mapped objects + nii1 = nb.load(self.inputs.volume1, mmap=False) + nii2 = nb.load(self.inputs.volume2, mmap=False) + + if self.inputs.method == "eucl_min": + self._distance, self._point1, self._point2 = self._eucl_min( + nii1, nii2) + + elif self.inputs.method == "eucl_cog": + self._distance = self._eucl_cog(nii1, nii2) + + elif self.inputs.method == "eucl_mean": + self._distance = self._eucl_mean(nii1, nii2) + + elif self.inputs.method == "eucl_wmean": + self._distance = self._eucl_mean(nii1, nii2, weighted=True) + elif self.inputs.method == "eucl_max": + self._distance = self._eucl_max(nii1, nii2) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['distance'] = self._distance + if self.inputs.method == "eucl_min": + outputs['point1'] = self._point1 + outputs['point2'] = self._point2 + elif self.inputs.method in ["eucl_mean", "eucl_wmean"]: + outputs['histogram'] = os.path.abspath(self._hist_filename) + return outputs + + +class OverlapInputSpec(BaseInterfaceInputSpec): + volume1 = File( + exists=True, + mandatory=True, + desc='Has to have the same dimensions as volume2.') + volume2 = File( + exists=True, + mandatory=True, + desc='Has to have the same dimensions as volume1.') + mask_volume = File( + exists=True, desc='calculate overlap only within this mask.') + bg_overlap = traits.Bool( + False, + usedefault=True, + mandatory=True, + desc='consider zeros as a label') + out_file = File('diff.nii', usedefault=True) + weighting = traits.Enum( + 'none', + 'volume', + 'squared_vol', + usedefault=True, + desc=('\'none\': no class-overlap weighting is ' + 'performed. \'volume\': computed class-' + 'overlaps are weighted by class volume ' + '\'squared_vol\': computed class-overlaps ' + 'are weighted by the squared volume of ' + 'the class')) + vol_units = traits.Enum( + 'voxel', + 'mm', + mandatory=True, + usedefault=True, + desc='units for volumes') + + +class OverlapOutputSpec(TraitedSpec): + jaccard = traits.Float(desc='averaged jaccard index') + dice = traits.Float(desc='averaged dice index') + roi_ji = traits.List( + traits.Float(), desc=('the Jaccard index (JI) per ROI')) + roi_di = traits.List(traits.Float(), desc=('the Dice index (DI) per ROI')) + volume_difference = traits.Float(desc=('averaged volume difference')) + roi_voldiff = traits.List( + traits.Float(), desc=('volume differences of ROIs')) + labels = traits.List(traits.Int(), desc=('detected labels')) + diff_file = File(exists=True, desc='error map of differences') + + +class Overlap(BaseInterface): + """ + Calculates Dice and Jaccard's overlap measures between two ROI maps. + The interface is backwards compatible with the former version in + which only binary files were accepted. + + The averaged values of overlap indices can be weighted. Volumes + now can be reported in :math:`mm^3`, although they are given in voxels + to keep backwards compatibility. 
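+
+ Dice indices are derived from the Jaccard indices computed for each
+ label through the standard identity::
+
+ DI = 2 * JI / (JI + 1)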
+ + Example + ------- + + >>> overlap = Overlap() + >>> overlap.inputs.volume1 = 'cont1.nii' + >>> overlap.inputs.volume2 = 'cont2.nii' + >>> res = overlap.run() # doctest: +SKIP + + """ + input_spec = OverlapInputSpec + output_spec = OverlapOutputSpec + + def _bool_vec_dissimilarity(self, booldata1, booldata2, method): + methods = {'dice': dice, 'jaccard': jaccard} + if not (np.any(booldata1) or np.any(booldata2)): + return 0 + return 1 - methods[method](booldata1.flat, booldata2.flat) + + def _run_interface(self, runtime): + nii1 = nb.load(self.inputs.volume1) + nii2 = nb.load(self.inputs.volume2) + + scale = 1.0 + + if self.inputs.vol_units == 'mm': + voxvol = nii1.header.get_zooms() + for i in range(nii1.get_data().ndim - 1): + scale = scale * voxvol[i] + + data1 = nii1.get_data() + data1[np.logical_or(data1 < 0, np.isnan(data1))] = 0 + max1 = int(data1.max()) + data1 = data1.astype(np.min_scalar_type(max1)) + data2 = nii2.get_data().astype(np.min_scalar_type(max1)) + data2[np.logical_or(data1 < 0, np.isnan(data1))] = 0 + + if isdefined(self.inputs.mask_volume): + maskdata = nb.load(self.inputs.mask_volume).get_data() + maskdata = ~np.logical_or(maskdata == 0, np.isnan(maskdata)) + data1[~maskdata] = 0 + data2[~maskdata] = 0 + + res = [] + volumes1 = [] + volumes2 = [] + + labels = np.unique(data1[data1 > 0].reshape(-1)).tolist() + if self.inputs.bg_overlap: + labels.insert(0, 0) + + for l in labels: + res.append( + self._bool_vec_dissimilarity( + data1 == l, data2 == l, method='jaccard')) + volumes1.append(scale * len(data1[data1 == l])) + volumes2.append(scale * len(data2[data2 == l])) + + results = dict(jaccard=[], dice=[]) + results['jaccard'] = np.array(res) + results['dice'] = 2.0 * results['jaccard'] / (results['jaccard'] + 1.0) + + weights = np.ones((len(volumes1), ), dtype=np.float32) + if self.inputs.weighting != 'none': + weights = weights / np.array(volumes1) + if self.inputs.weighting == 'squared_vol': + weights = weights**2 + weights = weights / np.sum(weights) + + both_data = np.zeros(data1.shape) + both_data[(data1 - data2) != 0] = 1 + + nb.save( + nb.Nifti1Image(both_data, nii1.affine, nii1.header), + self.inputs.out_file) + + self._labels = labels + self._ove_rois = results + self._vol_rois = ( + np.array(volumes1) - np.array(volumes2)) / np.array(volumes1) + + self._dice = round(np.sum(weights * results['dice']), 5) + self._jaccard = round(np.sum(weights * results['jaccard']), 5) + self._volume = np.sum(weights * self._vol_rois) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['labels'] = self._labels + outputs['jaccard'] = self._jaccard + outputs['dice'] = self._dice + outputs['volume_difference'] = self._volume + + outputs['roi_ji'] = self._ove_rois['jaccard'].tolist() + outputs['roi_di'] = self._ove_rois['dice'].tolist() + outputs['roi_voldiff'] = self._vol_rois.tolist() + outputs['diff_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class FuzzyOverlapInputSpec(BaseInterfaceInputSpec): + in_ref = InputMultiPath( + File(exists=True), + mandatory=True, + desc='Reference image. Requires the same dimensions as in_tst.') + in_tst = InputMultiPath( + File(exists=True), + mandatory=True, + desc='Test image. Requires the same dimensions as in_ref.') + in_mask = File(exists=True, desc='calculate overlap only within mask') + weighting = traits.Enum( + 'none', + 'volume', + 'squared_vol', + usedefault=True, + desc=('\'none\': no class-overlap weighting is ' + 'performed. 
\'volume\': computed class-' + 'overlaps are weighted by class volume ' + '\'squared_vol\': computed class-overlaps ' + 'are weighted by the squared volume of ' + 'the class')) + out_file = File( + 'diff.nii', + desc='alternative name for resulting difference-map', + usedefault=True) + + +class FuzzyOverlapOutputSpec(TraitedSpec): + jaccard = traits.Float(desc='Fuzzy Jaccard Index (fJI), all the classes') + dice = traits.Float(desc='Fuzzy Dice Index (fDI), all the classes') + class_fji = traits.List( + traits.Float(), + desc='Array containing the fJIs of each computed class') + class_fdi = traits.List( + traits.Float(), + desc='Array containing the fDIs of each computed class') + + +class FuzzyOverlap(SimpleInterface): + """Calculates various overlap measures between two maps, using the fuzzy + definition proposed in: Crum et al., Generalized Overlap Measures for + Evaluation and Validation in Medical Image Analysis, IEEE Trans. Med. + Ima. 25(11),pp 1451-1461, Nov. 2006. + + in_ref and in_tst are lists of 2/3D images, each element on the list + containing one volume fraction map of a class in a fuzzy partition + of the domain. + + Example + ------- + + >>> overlap = FuzzyOverlap() + >>> overlap.inputs.in_ref = [ 'ref_class0.nii', 'ref_class1.nii' ] + >>> overlap.inputs.in_tst = [ 'tst_class0.nii', 'tst_class1.nii' ] + >>> overlap.inputs.weighting = 'volume' + >>> res = overlap.run() # doctest: +SKIP + """ + + input_spec = FuzzyOverlapInputSpec + output_spec = FuzzyOverlapOutputSpec + + def _run_interface(self, runtime): + # Load data + refdata = nb.concat_images(self.inputs.in_ref).get_data() + tstdata = nb.concat_images(self.inputs.in_tst).get_data() + + # Data must have same shape + if not refdata.shape == tstdata.shape: + raise RuntimeError( + 'Size of "in_tst" %s must match that of "in_ref" %s.' 
% + (tstdata.shape, refdata.shape)) + + ncomp = refdata.shape[-1] + + # Load mask + mask = np.ones_like(refdata, dtype=bool) + if isdefined(self.inputs.in_mask): + mask = nb.load(self.inputs.in_mask).get_data() + mask = mask > 0 + mask = np.repeat(mask[..., np.newaxis], ncomp, -1) + assert mask.shape == refdata.shape + + # Drop data outside mask + refdata = refdata[mask] + tstdata = tstdata[mask] + + if np.any(refdata < 0.0): + iflogger.warning('Negative values encountered in "in_ref" input, ' + 'taking absolute values.') + refdata = np.abs(refdata) + + if np.any(tstdata < 0.0): + iflogger.warning('Negative values encountered in "in_tst" input, ' + 'taking absolute values.') + tstdata = np.abs(tstdata) + + if np.any(refdata > 1.0): + iflogger.warning('Values greater than 1.0 found in "in_ref" input, ' + 'scaling values.') + refdata /= refdata.max() + + if np.any(tstdata > 1.0): + iflogger.warning('Values greater than 1.0 found in "in_tst" input, ' + 'scaling values.') + tstdata /= tstdata.max() + + numerators = np.atleast_2d( + np.minimum(refdata, tstdata).reshape((-1, ncomp))) + denominators = np.atleast_2d( + np.maximum(refdata, tstdata).reshape((-1, ncomp))) + + jaccards = numerators.sum(axis=0) / denominators.sum(axis=0) + + # Calculate weights + weights = np.ones_like(jaccards, dtype=float) + if self.inputs.weighting != "none": + volumes = np.sum((refdata + tstdata) > 0, axis=1).reshape((-1, ncomp)) + weights = 1.0 / volumes + if self.inputs.weighting == "squared_vol": + weights = weights**2 + + weights = weights / np.sum(weights) + dices = 2.0 * jaccards / (jaccards + 1.0) + + # Fill-in the results object + self._results['jaccard'] = float(weights.dot(jaccards)) + self._results['dice'] = float(weights.dot(dices)) + self._results['class_fji'] = [float(v) for v in jaccards] + self._results['class_fdi'] = [float(v) for v in dices] + return runtime + + +class ErrorMapInputSpec(BaseInterfaceInputSpec): + in_ref = File( + exists=True, + mandatory=True, + desc="Reference image. Requires the same dimensions as in_tst.") + in_tst = File( + exists=True, + mandatory=True, + desc="Test image. Requires the same dimensions as in_ref.") + mask = File(exists=True, desc="calculate overlap only within this mask.") + metric = traits.Enum( + "sqeuclidean", + "euclidean", + desc='error map metric (as implemented in scipy cdist)', + usedefault=True, + mandatory=True) + out_map = File(desc="Name for the output file") + + +class ErrorMapOutputSpec(TraitedSpec): + out_map = File(exists=True, desc="resulting error map") + distance = traits.Float(desc="Average distance between volume 1 and 2") + + +class ErrorMap(BaseInterface): + """ Calculates the error (distance) map between two input volumes. 
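+
+ With the default ``sqeuclidean`` metric the map holds, voxel-wise,
+ the squared intensity difference between ``in_ref`` and ``in_tst``
+ (summed over components for 4D inputs); the ``distance`` output is
+ the average of the map within the mask.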
+ + Example + ------- + + >>> errormap = ErrorMap() + >>> errormap.inputs.in_ref = 'cont1.nii' + >>> errormap.inputs.in_tst = 'cont2.nii' + >>> res = errormap.run() # doctest: +SKIP + """ + input_spec = ErrorMapInputSpec + output_spec = ErrorMapOutputSpec + _out_file = '' + + def _run_interface(self, runtime): + # Get two numpy data matrices + nii_ref = nb.load(self.inputs.in_ref) + ref_data = np.squeeze(nii_ref.get_data()) + tst_data = np.squeeze(nb.load(self.inputs.in_tst).get_data()) + assert (ref_data.ndim == tst_data.ndim) + + # Load mask + comps = 1 + mapshape = ref_data.shape + + if (ref_data.ndim == 4): + comps = ref_data.shape[-1] + mapshape = ref_data.shape[:-1] + + if isdefined(self.inputs.mask): + msk = nb.load(self.inputs.mask).get_data() + if (mapshape != msk.shape): + raise RuntimeError("Mask should match volume shape, \ + mask is %s and volumes are %s" % + (list(msk.shape), list(mapshape))) + else: + msk = np.ones(shape=mapshape) + + # Flatten both volumes and make the pixel differennce + mskvector = msk.reshape(-1) + msk_idxs = np.where(mskvector == 1) + refvector = ref_data.reshape(-1, comps)[msk_idxs].astype(np.float32) + tstvector = tst_data.reshape(-1, comps)[msk_idxs].astype(np.float32) + diffvector = (refvector - tstvector) + + # Scale the difference + if self.inputs.metric == 'sqeuclidean': + errvector = diffvector**2 + if (comps > 1): + errvector = np.sum(errvector, axis=1) + else: + errvector = np.squeeze(errvector) + elif self.inputs.metric == 'euclidean': + errvector = np.linalg.norm(diffvector, axis=1) + + errvectorexp = np.zeros_like( + mskvector, dtype=np.float32) # The default type is uint8 + errvectorexp[msk_idxs] = errvector + + # Get averaged error + self._distance = np.average( + errvector) # Only average the masked voxels + + errmap = errvectorexp.reshape(mapshape) + + hdr = nii_ref.header.copy() + hdr.set_data_dtype(np.float32) + hdr['data_type'] = 16 + hdr.set_data_shape(mapshape) + + if not isdefined(self.inputs.out_map): + fname, ext = op.splitext(op.basename(self.inputs.in_tst)) + if ext == '.gz': + fname, ext2 = op.splitext(fname) + ext = ext2 + ext + self._out_file = op.abspath(fname + "_errmap" + ext) + else: + self._out_file = self.inputs.out_map + + nb.Nifti1Image(errmap.astype(np.float32), nii_ref.affine, + hdr).to_filename(self._out_file) + + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_map'] = self._out_file + outputs['distance'] = self._distance + return outputs + + +class SimilarityInputSpec(BaseInterfaceInputSpec): + volume1 = File(exists=True, desc="3D/4D volume", mandatory=True) + volume2 = File(exists=True, desc="3D/4D volume", mandatory=True) + mask1 = File(exists=True, desc="3D volume") + mask2 = File(exists=True, desc="3D volume") + metric = traits.Either( + traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'), + traits.Callable(), + desc="""str or callable +Cost-function for assessing image similarity. If a string, +one of 'cc': correlation coefficient, 'cr': correlation +ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual +information, 'nmi': normalized mutual information, 'slr': +supervised log-likelihood ratio. 
If a callable, it should +take a two-dimensional array representing the image joint +histogram as an input and return a float.""", + usedefault=True) + + +class SimilarityOutputSpec(TraitedSpec): + similarity = traits.List( + traits.Float(desc="Similarity between volume 1 and 2, frame by frame")) + + +class Similarity(NipyBaseInterface): + """Calculates similarity between two 3D or 4D volumes. Both volumes have to be in + the same coordinate system, same space within that coordinate system and + with the same voxel dimensions. + + .. note:: This interface is an extension of + :py:class:`nipype.interfaces.nipy.utils.Similarity` to support 4D files. + Requires :py:mod:`nipy` + + Example + ------- + >>> from nipype.algorithms.metrics import Similarity + >>> similarity = Similarity() + >>> similarity.inputs.volume1 = 'rc1s1.nii' + >>> similarity.inputs.volume2 = 'rc1s2.nii' + >>> similarity.inputs.mask1 = 'mask.nii' + >>> similarity.inputs.mask2 = 'mask.nii' + >>> similarity.inputs.metric = 'cr' + >>> res = similarity.run() # doctest: +SKIP + """ + + input_spec = SimilarityInputSpec + output_spec = SimilarityOutputSpec + + def _run_interface(self, runtime): + from nipy.algorithms.registration.histogram_registration import HistogramRegistration + from nipy.algorithms.registration.affine import Affine + + vol1_nii = nb.load(self.inputs.volume1) + vol2_nii = nb.load(self.inputs.volume2) + + dims = vol1_nii.get_data().ndim + + if dims == 3 or dims == 2: + vols1 = [vol1_nii] + vols2 = [vol2_nii] + if dims == 4: + vols1 = nb.four_to_three(vol1_nii) + vols2 = nb.four_to_three(vol2_nii) + + if dims < 2 or dims > 4: + raise RuntimeError( + 'Image dimensions not supported (detected %dD file)' % dims) + + if isdefined(self.inputs.mask1): + mask1 = nb.load(self.inputs.mask1).get_data() == 1 + else: + mask1 = None + + if isdefined(self.inputs.mask2): + mask2 = nb.load(self.inputs.mask2).get_data() == 1 + else: + mask2 = None + + self._similarity = [] + + for ts1, ts2 in zip(vols1, vols2): + histreg = HistogramRegistration( + from_img=ts1, + to_img=ts2, + similarity=self.inputs.metric, + from_mask=mask1, + to_mask=mask2) + self._similarity.append(histreg.eval(Affine())) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['similarity'] = self._similarity + return outputs diff --git a/nipype/algorithms/misc.py b/nipype/algorithms/misc.py new file mode 100644 index 0000000000..97906c1d69 --- /dev/null +++ b/nipype/algorithms/misc.py @@ -0,0 +1,1580 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +''' +Miscellaneous algorithms +''' +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, zip, range, open +from future.utils import raise_from + +import os +import os.path as op + +import nibabel as nb +import numpy as np +from math import floor, ceil +from scipy.ndimage.morphology import grey_dilation +import scipy.io as sio +import itertools +import scipy.stats as stats +import warnings + +from .. import logging +from . import metrics as nam +from ..interfaces.base import ( + BaseInterface, traits, TraitedSpec, File, InputMultiPath, OutputMultiPath, + BaseInterfaceInputSpec, isdefined, DynamicTraitedSpec, Undefined) +from ..utils.filemanip import fname_presuffix, split_filename, ensure_list +from ..utils import NUMPY_MMAP + +from . 
import confounds + +iflogger = logging.getLogger('nipype.interface') + + +class PickAtlasInputSpec(BaseInterfaceInputSpec): + atlas = File( + exists=True, + desc="Location of the atlas that will be used.", + mandatory=True) + labels = traits.Either( + traits.Int, + traits.List(traits.Int), + desc=("Labels of regions that will be included in the mask. Must be\ + compatible with the atlas used."), + mandatory=True) + hemi = traits.Enum( + 'both', + 'left', + 'right', + desc="Restrict the mask to only one hemisphere: left or right", + usedefault=True) + dilation_size = traits.Int( + usedefault=True, + desc="Defines how much the mask will be dilated (expanded in 3D).") + output_file = File(desc="Where to store the output mask.") + + +class PickAtlasOutputSpec(TraitedSpec): + mask_file = File(exists=True, desc="output mask file") + + +class PickAtlas(BaseInterface): + """Returns ROI masks given an atlas and a list of labels. Supports dilation + and left right masking (assuming the atlas is properly aligned). + """ + + input_spec = PickAtlasInputSpec + output_spec = PickAtlasOutputSpec + + def _run_interface(self, runtime): + nim = self._get_brodmann_area() + nb.save(nim, self._gen_output_filename()) + + return runtime + + def _gen_output_filename(self): + if not isdefined(self.inputs.output_file): + output = fname_presuffix( + fname=self.inputs.atlas, + suffix="_mask", + newpath=os.getcwd(), + use_ext=True) + else: + output = os.path.realpath(self.inputs.output_file) + return output + + def _get_brodmann_area(self): + nii = nb.load(self.inputs.atlas) + origdata = nii.get_data() + newdata = np.zeros(origdata.shape) + + if not isinstance(self.inputs.labels, list): + labels = [self.inputs.labels] + else: + labels = self.inputs.labels + for lab in labels: + newdata[origdata == lab] = 1 + if self.inputs.hemi == 'right': + newdata[int(floor(float(origdata.shape[0]) / 2)):, :, :] = 0 + elif self.inputs.hemi == 'left': + newdata[:int(ceil(float(origdata.shape[0]) / 2)), :, :] = 0 + + if self.inputs.dilation_size != 0: + newdata = grey_dilation(newdata, + (2 * self.inputs.dilation_size + 1, + 2 * self.inputs.dilation_size + 1, + 2 * self.inputs.dilation_size + 1)) + + return nb.Nifti1Image(newdata, nii.affine, nii.header) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['mask_file'] = self._gen_output_filename() + return outputs + + +class SimpleThresholdInputSpec(BaseInterfaceInputSpec): + volumes = InputMultiPath( + File(exists=True), desc='volumes to be thresholded', mandatory=True) + threshold = traits.Float( + desc='volumes to be thresholdedeverything below this value will be set\ + to zero', + mandatory=True) + + +class SimpleThresholdOutputSpec(TraitedSpec): + thresholded_volumes = OutputMultiPath( + File(exists=True), desc="thresholded volumes") + + +class SimpleThreshold(BaseInterface): + """Applies a threshold to input volumes + """ + input_spec = SimpleThresholdInputSpec + output_spec = SimpleThresholdOutputSpec + + def _run_interface(self, runtime): + for fname in self.inputs.volumes: + img = nb.load(fname, mmap=NUMPY_MMAP) + data = np.array(img.get_data()) + + active_map = data > self.inputs.threshold + + thresholded_map = np.zeros(data.shape) + thresholded_map[active_map] = data[active_map] + + new_img = nb.Nifti1Image(thresholded_map, img.affine, img.header) + _, base, _ = split_filename(fname) + nb.save(new_img, base + '_thresholded.nii') + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["thresholded_volumes"] = [] + for 
fname in self.inputs.volumes: + _, base, _ = split_filename(fname) + outputs["thresholded_volumes"].append( + os.path.abspath(base + '_thresholded.nii')) + return outputs + + +class ModifyAffineInputSpec(BaseInterfaceInputSpec): + volumes = InputMultiPath( + File(exists=True), + desc='volumes which affine matrices will be modified', + mandatory=True) + transformation_matrix = traits.Array( + value=np.eye(4), + shape=(4, 4), + desc="transformation matrix that will be left multiplied by the\ + affine matrix", + usedefault=True) + + +class ModifyAffineOutputSpec(TraitedSpec): + transformed_volumes = OutputMultiPath(File(exist=True)) + + +class ModifyAffine(BaseInterface): + """Left multiplies the affine matrix with a specified values. Saves the volume + as a nifti file. + """ + input_spec = ModifyAffineInputSpec + output_spec = ModifyAffineOutputSpec + + def _gen_output_filename(self, name): + _, base, _ = split_filename(name) + return os.path.abspath(base + "_transformed.nii") + + def _run_interface(self, runtime): + for fname in self.inputs.volumes: + img = nb.load(fname, mmap=NUMPY_MMAP) + + affine = img.affine + affine = np.dot(self.inputs.transformation_matrix, affine) + + nb.save( + nb.Nifti1Image(img.get_data(), affine, img.header), + self._gen_output_filename(fname)) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['transformed_volumes'] = [] + for fname in self.inputs.volumes: + outputs['transformed_volumes'].append( + self._gen_output_filename(fname)) + return outputs + + +class CreateNiftiInputSpec(BaseInterfaceInputSpec): + data_file = File(exists=True, mandatory=True, desc="ANALYZE img file") + header_file = File( + exists=True, mandatory=True, desc="corresponding ANALYZE hdr file") + affine = traits.Array(desc="affine transformation array") + + +class CreateNiftiOutputSpec(TraitedSpec): + nifti_file = File(exists=True) + + +class CreateNifti(BaseInterface): + """Creates a nifti volume + """ + input_spec = CreateNiftiInputSpec + output_spec = CreateNiftiOutputSpec + + def _gen_output_file_name(self): + _, base, _ = split_filename(self.inputs.data_file) + return os.path.abspath(base + ".nii") + + def _run_interface(self, runtime): + with open(self.inputs.header_file, 'rb') as hdr_file: + hdr = nb.AnalyzeHeader.from_fileobj(hdr_file) + + if isdefined(self.inputs.affine): + affine = self.inputs.affine + else: + affine = None + + with open(self.inputs.data_file, 'rb') as data_file: + data = hdr.data_from_fileobj(data_file) + + img = nb.Nifti1Image(data, affine, hdr) + nb.save(img, self._gen_output_file_name()) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['nifti_file'] = self._gen_output_file_name() + return outputs + + +class GunzipInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True) + + +class GunzipOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class Gunzip(BaseInterface): + """Gunzip wrapper + + >>> from nipype.algorithms.misc import Gunzip + >>> gunzip = Gunzip(in_file='tpms_msk.nii.gz') + >>> res = gunzip.run() + >>> res.outputs.out_file # doctest: +ELLIPSIS + '.../tpms_msk.nii' + + .. 
testcleanup:: + + >>> os.unlink('tpms_msk.nii') + """ + input_spec = GunzipInputSpec + output_spec = GunzipOutputSpec + + def _gen_output_file_name(self): + _, base, ext = split_filename(self.inputs.in_file) + if ext[-3:].lower() == ".gz": + ext = ext[:-3] + return os.path.abspath(base + ext) + + def _run_interface(self, runtime): + import gzip + import shutil + with gzip.open(self.inputs.in_file, 'rb') as in_file: + with open(self._gen_output_file_name(), 'wb') as out_file: + shutil.copyfileobj(in_file, out_file) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._gen_output_file_name() + return outputs + + +def replaceext(in_list, ext): + out_list = list() + for filename in in_list: + path, name, _ = split_filename(op.abspath(filename)) + out_name = op.join(path, name) + ext + out_list.append(out_name) + return out_list + + +def matlab2csv(in_array, name, reshape): + output_array = np.asarray(in_array) + if reshape: + if len(np.shape(output_array)) > 1: + output_array = np.reshape( + output_array, + (np.shape(output_array)[0] * np.shape(output_array)[1], 1)) + iflogger.info(np.shape(output_array)) + output_name = op.abspath(name + '.csv') + np.savetxt(output_name, output_array, delimiter=',') + return output_name + + +class Matlab2CSVInputSpec(TraitedSpec): + in_file = File(exists=True, mandatory=True, desc='Input MATLAB .mat file') + reshape_matrix = traits.Bool( + True, + usedefault=True, + desc='The output of this interface is meant for R, so matrices will be\ + reshaped to vectors by default.') + + +class Matlab2CSVOutputSpec(TraitedSpec): + csv_files = OutputMultiPath( + File(desc='Output CSV files for each variable saved in the input .mat\ + file')) + + +class Matlab2CSV(BaseInterface): + """Simple interface to save the components of a MATLAB .mat file as a text + file with comma-separated values (CSVs). + + CSV files are easily loaded in R, for use in statistical processing. + For further information, see cran.r-project.org/doc/manuals/R-data.pdf + + Example + ------- + + >>> from nipype.algorithms import misc + >>> mat2csv = misc.Matlab2CSV() + >>> mat2csv.inputs.in_file = 'cmatrix.mat' + >>> mat2csv.run() # doctest: +SKIP + """ + input_spec = Matlab2CSVInputSpec + output_spec = Matlab2CSVOutputSpec + + def _run_interface(self, runtime): + in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) + + # Check if the file has multiple variables in it. If it does, loop + # through them and save them as individual CSV files. + # If not, save the variable as a single CSV file using the input file + # name and a .csv extension. 
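+ # For instance, a hypothetical input saving a single matrix under the
+ # key 'cmatrix' (alongside the '__header__', '__version__' and
+ # '__globals__' entries that MATLAB adds) would yield a single CSV
+ # named after the .mat file.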
+ + saved_variables = list() + for key in list(in_dict.keys()): + if not key.startswith('__'): + if isinstance(in_dict[key][0], np.ndarray): + saved_variables.append(key) + else: + iflogger.info('One of the keys in the input file, %s, is ' + 'not a Numpy array', key) + + if len(saved_variables) > 1: + iflogger.info('%i variables found:', len(saved_variables)) + iflogger.info(saved_variables) + for variable in saved_variables: + iflogger.info('...Converting %s - type %s - to CSV', variable, + type(in_dict[variable])) + matlab2csv(in_dict[variable], variable, + self.inputs.reshape_matrix) + elif len(saved_variables) == 1: + _, name, _ = split_filename(self.inputs.in_file) + variable = saved_variables[0] + iflogger.info('Single variable found %s, type %s:', variable, + type(in_dict[variable])) + iflogger.info('...Converting %s to CSV from %s', variable, + self.inputs.in_file) + matlab2csv(in_dict[variable], name, self.inputs.reshape_matrix) + else: + iflogger.error('No values in the MATLAB file?!') + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + in_dict = sio.loadmat(op.abspath(self.inputs.in_file)) + saved_variables = list() + for key in list(in_dict.keys()): + if not key.startswith('__'): + if isinstance(in_dict[key][0], np.ndarray): + saved_variables.append(key) + else: + iflogger.error('One of the keys in the input file, %s, is ' + 'not a Numpy array', key) + + if len(saved_variables) > 1: + outputs['csv_files'] = replaceext(saved_variables, '.csv') + elif len(saved_variables) == 1: + _, name, ext = split_filename(self.inputs.in_file) + outputs['csv_files'] = op.abspath(name + '.csv') + else: + iflogger.error('No values in the MATLAB file?!') + return outputs + + +def merge_csvs(in_list): + for idx, in_file in enumerate(in_list): + try: + in_array = np.loadtxt(in_file, delimiter=',') + except ValueError: + try: + in_array = np.loadtxt(in_file, delimiter=',', skiprows=1) + except ValueError: + with open(in_file, 'r') as first: + header_line = first.readline() + + header_list = header_line.split(',') + n_cols = len(header_list) + try: + in_array = np.loadtxt( + in_file, + delimiter=',', + skiprows=1, + usecols=list(range(1, n_cols))) + except ValueError: + in_array = np.loadtxt( + in_file, + delimiter=',', + skiprows=1, + usecols=list(range(1, n_cols - 1))) + if idx == 0: + out_array = in_array + else: + out_array = np.dstack((out_array, in_array)) + out_array = np.squeeze(out_array) + iflogger.info('Final output array shape:') + iflogger.info(np.shape(out_array)) + return out_array + + +def remove_identical_paths(in_files): + import os.path as op + from ..utils.filemanip import split_filename + if len(in_files) > 1: + out_names = list() + commonprefix = op.commonprefix(in_files) + lastslash = commonprefix.rfind('/') + commonpath = commonprefix[0:(lastslash + 1)] + for fileidx, in_file in enumerate(in_files): + path, name, ext = split_filename(in_file) + in_file = op.join(path, name) + name = in_file.replace(commonpath, '') + name = name.replace('_subject_id_', '') + out_names.append(name) + else: + path, name, ext = split_filename(in_files[0]) + out_names = [name] + return out_names + + +def maketypelist(rowheadings, shape, extraheadingBool, extraheading): + typelist = [] + if rowheadings: + typelist.append(('heading', 'a40')) + if len(shape) > 1: + for idx in range(1, (min(shape) + 1)): + typelist.append((str(idx), float)) + else: + for idx in range(1, (shape[0] + 1)): + typelist.append((str(idx), float)) + if extraheadingBool: + 
typelist.append((extraheading, 'a40')) + iflogger.info(typelist) + return typelist + + +def makefmtlist(output_array, typelist, rowheadingsBool, shape, + extraheadingBool): + fmtlist = [] + if rowheadingsBool: + fmtlist.append('%s') + if len(shape) > 1: + output = np.zeros(max(shape), typelist) + for idx in range(1, min(shape) + 1): + output[str(idx)] = output_array[:, idx - 1] + fmtlist.append('%f') + else: + output = np.zeros(1, typelist) + for idx in range(1, len(output_array) + 1): + output[str(idx)] = output_array[idx - 1] + fmtlist.append('%f') + if extraheadingBool: + fmtlist.append('%s') + fmt = ','.join(fmtlist) + return fmt, output + + +class MergeCSVFilesInputSpec(TraitedSpec): + in_files = InputMultiPath( + File(exists=True), + mandatory=True, + desc='Input comma-separated value (CSV) files') + out_file = File( + 'merged.csv', + usedefault=True, + desc='Output filename for merged CSV file') + column_headings = traits.List( + traits.Str, + desc='List of column headings to save in merged CSV file\ + (must be equal to number of input files). If left undefined, these\ + will be pulled from the input filenames.') + row_headings = traits.List( + traits.Str, + desc='List of row headings to save in merged CSV file\ + (must be equal to number of rows in the input files).') + row_heading_title = traits.Str( + 'label', + usedefault=True, + desc='Column heading for the row headings\ + added') + extra_column_heading = traits.Str( + desc='New heading to add for the added field.') + extra_field = traits.Str( + desc='New field to add to each row. This is useful for saving the\ + group or subject ID in the file.') + + +class MergeCSVFilesOutputSpec(TraitedSpec): + csv_file = File(desc='Output CSV file containing columns ') + + +class MergeCSVFiles(BaseInterface): + """This interface is designed to facilitate data loading in the R environment. + It takes input CSV files and merges them into a single CSV file. + If provided, it will also incorporate column heading names into the + resulting CSV file. + + CSV files are easily loaded in R, for use in statistical processing. + For further information, see cran.r-project.org/doc/manuals/R-data.pdf + + Example + ------- + + >>> from nipype.algorithms import misc + >>> mat2csv = misc.MergeCSVFiles() + >>> mat2csv.inputs.in_files = ['degree.mat','clustering.mat'] + >>> mat2csv.inputs.column_headings = ['degree','clustering'] + >>> mat2csv.run() # doctest: +SKIP + """ + input_spec = MergeCSVFilesInputSpec + output_spec = MergeCSVFilesOutputSpec + + def _run_interface(self, runtime): + extraheadingBool = False + extraheading = '' + rowheadingsBool = False + """ + This block defines the column headings. + """ + if isdefined(self.inputs.column_headings): + iflogger.info('Column headings have been provided:') + headings = self.inputs.column_headings + else: + iflogger.info( + 'Column headings not provided! Pulled from input filenames:') + headings = remove_identical_paths(self.inputs.in_files) + + if isdefined(self.inputs.extra_field): + if isdefined(self.inputs.extra_column_heading): + extraheading = self.inputs.extra_column_heading + iflogger.info('Extra column heading provided: %s', + extraheading) + else: + extraheading = 'type' + iflogger.info( + 'Extra column heading was not defined. Using "type"') + headings.append(extraheading) + extraheadingBool = True + + if len(self.inputs.in_files) == 1: + iflogger.warn('Only one file input!') + + if isdefined(self.inputs.row_headings): + iflogger.info('Row headings have been provided. 
Adding "labels"' + 'column header.') + prefix = '"{p}","'.format(p=self.inputs.row_heading_title) + csv_headings = prefix + '","'.join( + itertools.chain(headings)) + '"\n' + rowheadingsBool = True + else: + iflogger.info('Row headings have not been provided.') + csv_headings = '"' + '","'.join(itertools.chain(headings)) + '"\n' + + iflogger.info('Final Headings:') + iflogger.info(csv_headings) + """ + Next we merge the arrays and define the output text file + """ + + output_array = merge_csvs(self.inputs.in_files) + _, name, ext = split_filename(self.inputs.out_file) + if not ext == '.csv': + ext = '.csv' + + out_file = op.abspath(name + ext) + with open(out_file, 'w') as file_handle: + file_handle.write(csv_headings) + + shape = np.shape(output_array) + typelist = maketypelist(rowheadingsBool, shape, extraheadingBool, + extraheading) + fmt, output = makefmtlist(output_array, typelist, rowheadingsBool, + shape, extraheadingBool) + + if rowheadingsBool: + row_heading_list = self.inputs.row_headings + row_heading_list_with_quotes = [] + for row_heading in row_heading_list: + row_heading_with_quotes = '"' + row_heading + '"' + row_heading_list_with_quotes.append(row_heading_with_quotes) + row_headings = np.array(row_heading_list_with_quotes, dtype='|S40') + output['heading'] = row_headings + + if isdefined(self.inputs.extra_field): + extrafieldlist = [] + if len(shape) > 1: + mx = shape[0] + else: + mx = 1 + for idx in range(0, mx): + extrafieldlist.append(self.inputs.extra_field) + iflogger.info(len(extrafieldlist)) + output[extraheading] = extrafieldlist + iflogger.info(output) + iflogger.info(fmt) + with open(out_file, 'a') as file_handle: + np.savetxt(file_handle, output, fmt, delimiter=',') + + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + _, name, ext = split_filename(self.inputs.out_file) + if not ext == '.csv': + ext = '.csv' + out_file = op.abspath(name + ext) + outputs['csv_file'] = out_file + return outputs + + +class AddCSVColumnInputSpec(TraitedSpec): + in_file = File( + exists=True, + mandatory=True, + desc='Input comma-separated value (CSV) files') + out_file = File( + 'extra_heading.csv', + usedefault=True, + desc='Output filename for merged CSV file') + extra_column_heading = traits.Str( + desc='New heading to add for the added field.') + extra_field = traits.Str( + desc='New field to add to each row. 
+class AddCSVColumnInputSpec(TraitedSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc='Input comma-separated value (CSV) file')
+    out_file = File(
+        'extra_heading.csv',
+        usedefault=True,
+        desc='Output filename for merged CSV file')
+    extra_column_heading = traits.Str(
+        desc='New heading to add for the added field.')
+    extra_field = traits.Str(
+        desc='New field to add to each row. This is useful for saving the '
+             'group or subject ID in the file.')
+
+
+class AddCSVColumnOutputSpec(TraitedSpec):
+    csv_file = File(desc='Output CSV file containing columns')
+
+
+class AddCSVColumn(BaseInterface):
+    """Short interface to add an extra column and field to a text file
+
+    Example
+    -------
+
+    >>> from nipype.algorithms import misc
+    >>> addcol = misc.AddCSVColumn()
+    >>> addcol.inputs.in_file = 'degree.csv'
+    >>> addcol.inputs.extra_column_heading = 'group'
+    >>> addcol.inputs.extra_field = 'male'
+    >>> addcol.run() # doctest: +SKIP
+    """
+    input_spec = AddCSVColumnInputSpec
+    output_spec = AddCSVColumnOutputSpec
+
+    def _run_interface(self, runtime):
+        in_file = open(self.inputs.in_file, 'r')
+        _, name, ext = split_filename(self.inputs.out_file)
+        if not ext == '.csv':
+            ext = '.csv'
+        out_file = op.abspath(name + ext)
+
+        out_file = open(out_file, 'w')
+        firstline = in_file.readline()
+        firstline = firstline.replace('\n', '')
+        new_firstline = firstline + ',"' + \
+            self.inputs.extra_column_heading + '"\n'
+        out_file.write(new_firstline)
+        for line in in_file:
+            new_line = line.replace('\n', '')
+            new_line = new_line + ',' + self.inputs.extra_field + '\n'
+            out_file.write(new_line)
+        in_file.close()
+        out_file.close()
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        _, name, ext = split_filename(self.inputs.out_file)
+        if not ext == '.csv':
+            ext = '.csv'
+        out_file = op.abspath(name + ext)
+        outputs['csv_file'] = out_file
+        return outputs
+
+
+class AddCSVRowInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
+    in_file = traits.File(
+        mandatory=True, desc='Input comma-separated value (CSV) file')
+    _outputs = traits.Dict(traits.Any, value={}, usedefault=True)
+
+    def __setattr__(self, key, value):
+        if key not in self.copyable_trait_names():
+            if not isdefined(value):
+                super(AddCSVRowInputSpec, self).__setattr__(key, value)
+            self._outputs[key] = value
+        else:
+            if key in self._outputs:
+                self._outputs[key] = value
+            super(AddCSVRowInputSpec, self).__setattr__(key, value)
+
+
+class AddCSVRowOutputSpec(TraitedSpec):
+    csv_file = File(desc='Output CSV file containing rows')
+
+
+class AddCSVRow(BaseInterface):
+    """Simple interface to add an extra row to a csv file
+
+    .. note:: Requires `pandas <http://pandas.pydata.org/>`_
+
+    .. warning:: Multi-platform thread-safe execution is possible with
+      `lockfile <https://pythonhosted.org/lockfile/index.html>`_. Please
+      recall that (1) this module is alpha software; and (2) it should be
+      installed for thread-safe writing.
+      If lockfile is not installed, then the interface is not thread-safe.
+ + + Example + ------- + + >>> from nipype.algorithms import misc + >>> addrow = misc.AddCSVRow() + >>> addrow.inputs.in_file = 'scores.csv' + >>> addrow.inputs.si = 0.74 + >>> addrow.inputs.di = 0.93 + >>> addrow.inputs.subject_id = 'S400' + >>> addrow.inputs.list_of_values = [ 0.4, 0.7, 0.3 ] + >>> addrow.run() # doctest: +SKIP + """ + input_spec = AddCSVRowInputSpec + output_spec = AddCSVRowOutputSpec + + def __init__(self, infields=None, force_run=True, **kwargs): + super(AddCSVRow, self).__init__(**kwargs) + undefined_traits = {} + self._infields = infields + self._have_lock = False + self._lock = None + + if infields: + for key in infields: + self.inputs.add_trait(key, traits.Any) + self.inputs._outputs[key] = Undefined + undefined_traits[key] = Undefined + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + if force_run: + self._always_run = True + + def _run_interface(self, runtime): + try: + import pandas as pd + except ImportError as e: + raise_from( + ImportError('This interface requires pandas ' + '(http://pandas.pydata.org/) to run.'), e) + + try: + import lockfile as pl + self._have_lock = True + except ImportError: + from warnings import warn + warn(('Python module lockfile was not found: AddCSVRow will not be' + ' thread-safe in multi-processor execution')) + + input_dict = {} + for key, val in list(self.inputs._outputs.items()): + # expand lists to several columns + if key == 'trait_added' and val in self.inputs.copyable_trait_names( + ): + continue + + if isinstance(val, list): + for i, v in enumerate(val): + input_dict['%s_%d' % (key, i)] = v + else: + input_dict[key] = val + + df = pd.DataFrame([input_dict]) + + if self._have_lock: + self._lock = pl.FileLock(self.inputs.in_file) + + # Acquire lock + self._lock.acquire() + + if op.exists(self.inputs.in_file): + formerdf = pd.read_csv(self.inputs.in_file, index_col=0) + df = pd.concat([formerdf, df], ignore_index=True) + + with open(self.inputs.in_file, 'w') as f: + df.to_csv(f) + + if self._have_lock: + self._lock.release() + + # Using nipype.external.portalocker this might be something like: + # with pl.Lock(self.inputs.in_file, timeout=1) as fh: + # if op.exists(fh): + # formerdf = pd.read_csv(fh, index_col=0) + # df = pd.concat([formerdf, df], ignore_index=True) + # df.to_csv(fh) + + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['csv_file'] = self.inputs.in_file + return outputs + + def _outputs(self): + return self._add_output_traits(super(AddCSVRow, self)._outputs()) + + def _add_output_traits(self, base): + return base + + +class CalculateNormalizedMomentsInputSpec(TraitedSpec): + timeseries_file = File( + exists=True, + mandatory=True, + desc='Text file with timeseries in columns and timepoints in rows,\ + whitespace separated') + moment = traits.Int( + mandatory=True, + desc="Define which moment should be calculated, 3 for skewness, 4 for\ + kurtosis.") + + +class CalculateNormalizedMomentsOutputSpec(TraitedSpec): + moments = traits.List(traits.Float(), desc='Moments') + + +class CalculateNormalizedMoments(BaseInterface): + """Calculates moments of timeseries. 
+
+    Example
+    -------
+
+    >>> from nipype.algorithms import misc
+    >>> skew = misc.CalculateNormalizedMoments()
+    >>> skew.inputs.moment = 3
+    >>> skew.inputs.timeseries_file = 'timeseries.txt'
+    >>> skew.run() # doctest: +SKIP
+    """
+    input_spec = CalculateNormalizedMomentsInputSpec
+    output_spec = CalculateNormalizedMomentsOutputSpec
+
+    def _run_interface(self, runtime):
+        self._moments = calc_moments(self.inputs.timeseries_file,
+                                     self.inputs.moment)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['moments'] = self._moments
+        return outputs
+
+
+def calc_moments(timeseries_file, moment):
+    """Returns the nth moment (3 for skewness, 4 for kurtosis) of each
+    timeseries (one value per timeseries).
+
+    Keyword arguments:
+    timeseries_file -- text file with white space separated timepoints in rows
+
+    """
+    timeseries = np.genfromtxt(timeseries_file)
+
+    m2 = stats.moment(timeseries, 2, axis=0)
+    m3 = stats.moment(timeseries, moment, axis=0)
+    zero = (m2 == 0)
+    return np.where(zero, 0, m3 / m2**(moment / 2.0))
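+
+
+# A minimal usage sketch for ``calc_moments`` (illustrative only; the file
+# name is hypothetical). Each column of the text file is one timeseries:
+#
+#     >>> import numpy as np
+#     >>> np.savetxt('ts.txt', np.random.randn(50, 3))    # 3 timeseries
+#     >>> calc_moments('ts.txt', moment=3)                # per-column skewness
+#     array([..., ..., ...])
+
+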
+class AddNoiseInputSpec(TraitedSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc='input image that will be corrupted with noise')
+    in_mask = File(
+        exists=True,
+        desc=('input mask, voxels outside this mask '
+              'will be considered background'))
+    snr = traits.Float(10.0, desc='desired output SNR in dB', usedefault=True)
+    dist = traits.Enum(
+        'normal',
+        'rician',
+        usedefault=True,
+        mandatory=True,
+        desc=('desired noise distribution'))
+    bg_dist = traits.Enum(
+        'normal',
+        'rayleigh',
+        usedefault=True,
+        mandatory=True,
+        desc=('desired noise distribution for the background'))
+    out_file = File(desc='desired output filename')
+
+
+class AddNoiseOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='corrupted image')
+
+
+class AddNoise(BaseInterface):
+    """
+    Corrupts the input image with noise
+
+
+    Example
+    -------
+    >>> from nipype.algorithms.misc import AddNoise
+    >>> noise = AddNoise()
+    >>> noise.inputs.in_file = 'T1.nii'
+    >>> noise.inputs.in_mask = 'mask.nii'
+    >>> noise.inputs.snr = 30.0
+    >>> noise.run() # doctest: +SKIP
+
+    """
+    input_spec = AddNoiseInputSpec
+    output_spec = AddNoiseOutputSpec
+
+    def _run_interface(self, runtime):
+        in_image = nb.load(self.inputs.in_file)
+        in_data = in_image.get_data()
+        snr = self.inputs.snr
+
+        if isdefined(self.inputs.in_mask):
+            in_mask = nb.load(self.inputs.in_mask).get_data()
+        else:
+            in_mask = np.ones_like(in_data)
+
+        result = self.gen_noise(
+            in_data,
+            mask=in_mask,
+            snr_db=snr,
+            dist=self.inputs.dist,
+            bg_dist=self.inputs.bg_dist)
+        res_im = nb.Nifti1Image(result, in_image.affine, in_image.header)
+        res_im.to_filename(self._gen_output_filename())
+        return runtime
+
+    def _gen_output_filename(self):
+        if not isdefined(self.inputs.out_file):
+            _, base, ext = split_filename(self.inputs.in_file)
+            out_file = os.path.abspath('%s_SNR%03.2f%s' %
+                                       (base, self.inputs.snr, ext))
+        else:
+            out_file = self.inputs.out_file
+
+        return out_file
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = self._gen_output_filename()
+        return outputs
+
+    def gen_noise(self,
+                  image,
+                  mask=None,
+                  snr_db=10.0,
+                  dist='normal',
+                  bg_dist='normal'):
+        """
+        Generates a copy of an image with a certain amount of added Gaussian
+        noise (Rayleigh noise in the background, i.e. outside the mask)
+        """
+        from math import sqrt
+        snr = sqrt(np.power(10.0, snr_db / 10.0))
+
+        if mask is None:
+            mask = np.ones_like(image)
+        else:
+            mask[mask > 0] = 1
+            mask[mask < 1] = 0
+
+            if mask.ndim < image.ndim:
+                mask = np.rollaxis(np.array([mask] * image.shape[3]), 0, 4)
+
+        signal = image[mask > 0].reshape(-1)
+
+        if dist == 'normal':
+            signal = signal - signal.mean()
+            sigma_n = sqrt(signal.var() / snr)
+            noise = np.random.normal(size=image.shape, scale=sigma_n)
+
+            if (np.any(mask == 0)) and (bg_dist == 'rayleigh'):
+                bg_noise = np.random.rayleigh(size=image.shape, scale=sigma_n)
+                noise[mask == 0] = bg_noise[mask == 0]
+
+            im_noise = image + noise
+
+        elif dist == 'rician':
+            sigma_n = signal.mean() / snr
+            n_1 = np.random.normal(size=image.shape, scale=sigma_n)
+            n_2 = np.random.normal(size=image.shape, scale=sigma_n)
+            stde_1 = n_1 / sqrt(2.0)
+            stde_2 = n_2 / sqrt(2.0)
+            im_noise = np.sqrt((image + stde_1)**2 + (stde_2)**2)
+        else:
+            raise NotImplementedError(('Only normal and rician distributions '
+                                       'are supported'))
+
+        return im_noise
+
+
+class NormalizeProbabilityMapSetInputSpec(TraitedSpec):
+    in_files = InputMultiPath(
+        File(exists=True, mandatory=True, desc='The tpms to be normalized'))
+    in_mask = File(
+        exists=True, desc='Masked voxels must sum up to 1.0, 0.0 otherwise.')
+
+
+class NormalizeProbabilityMapSetOutputSpec(TraitedSpec):
+    out_files = OutputMultiPath(File(exists=True), desc="normalized maps")
+
+
+class NormalizeProbabilityMapSet(BaseInterface):
+    """ Returns the input tissue probability maps (tpms, aka volume fractions)
+    normalized to sum up to 1.0 at each voxel within the mask.
+
+    .. note:: Please recall this is not a spatial normalization algorithm
+
+
+    Example
+    -------
+
+    >>> from nipype.algorithms import misc
+    >>> normalize = misc.NormalizeProbabilityMapSet()
+    >>> normalize.inputs.in_files = [ 'tpm_00.nii.gz', 'tpm_01.nii.gz', \
+'tpm_02.nii.gz' ]
+    >>> normalize.inputs.in_mask = 'tpms_msk.nii.gz'
+    >>> normalize.run() # doctest: +SKIP
+    """
+    input_spec = NormalizeProbabilityMapSetInputSpec
+    output_spec = NormalizeProbabilityMapSetOutputSpec
+
+    def _run_interface(self, runtime):
+        mask = None
+
+        if isdefined(self.inputs.in_mask):
+            mask = self.inputs.in_mask
+
+        self._out_filenames = normalize_tpms(self.inputs.in_files, mask)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_files'] = self._out_filenames
+        return outputs
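+
+
+# A sketch of the voxelwise operation behind NormalizeProbabilityMapSet
+# (illustrative only; see normalize_tpms() below for the actual code). For
+# tissue fractions p_1..p_n at a given voxel:
+#
+#     w = p_1 + ... + p_n        # total tissue fraction at the voxel
+#     p_i  ->  p_i / w           # normalized fractions now sum to 1.0
+#
+# Voxels where the maps sum to zero are left at 0.0.
+
+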
+class SplitROIsInputSpec(TraitedSpec):
+    in_file = File(exists=True, mandatory=True, desc='file to be split')
+    in_mask = File(exists=True, desc='only process files inside mask')
+    roi_size = traits.Tuple(
+        traits.Int, traits.Int, traits.Int, desc='desired ROI size')
+
+
+class SplitROIsOutputSpec(TraitedSpec):
+    out_files = OutputMultiPath(File(exists=True), desc='the resulting ROIs')
+    out_masks = OutputMultiPath(
+        File(exists=True), desc='a mask indicating valid values')
+    out_index = OutputMultiPath(
+        File(exists=True), desc='arrays keeping original locations')
+
+
+class SplitROIs(BaseInterface):
+    """
+    Splits a 3D image into small chunks to enable parallel processing.
+    ROIs keep the time series structure of 4D images.
+
+    Example
+    -------
+
+    >>> from nipype.algorithms import misc
+    >>> rois = misc.SplitROIs()
+    >>> rois.inputs.in_file = 'diffusion.nii'
+    >>> rois.inputs.in_mask = 'mask.nii'
+    >>> rois.run() # doctest: +SKIP
+
+    """
+    input_spec = SplitROIsInputSpec
+    output_spec = SplitROIsOutputSpec
+
+    def _run_interface(self, runtime):
+        mask = None
+        roisize = None
+        self._outnames = {}
+
+        if isdefined(self.inputs.in_mask):
+            mask = self.inputs.in_mask
+        if isdefined(self.inputs.roi_size):
+            roisize = self.inputs.roi_size
+
+        res = split_rois(self.inputs.in_file, mask, roisize)
+        self._outnames['out_files'] = res[0]
+        self._outnames['out_masks'] = res[1]
+        self._outnames['out_index'] = res[2]
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        for k, v in list(self._outnames.items()):
+            outputs[k] = v
+        return outputs
+
+
+class MergeROIsInputSpec(TraitedSpec):
+    in_files = InputMultiPath(
+        File(exists=True, mandatory=True, desc='files to be re-merged'))
+    in_index = InputMultiPath(
+        File(exists=True, mandatory=True),
+        desc='array keeping original locations')
+    in_reference = File(exists=True, desc='reference file')
+
+
+class MergeROIsOutputSpec(TraitedSpec):
+    merged_file = File(exists=True, desc='the recomposed file')
+
+
+class MergeROIs(BaseInterface):
+    """
+    Recomposes a single image from the ROI chunks produced by SplitROIs,
+    preserving the time series structure of 4D images.
+
+    Example
+    -------
+
+    >>> from nipype.algorithms import misc
+    >>> rois = misc.MergeROIs()
+    >>> rois.inputs.in_files = ['roi%02d.nii' % i for i in range(1, 6)]
+    >>> rois.inputs.in_reference = 'mask.nii'
+    >>> rois.inputs.in_index = ['roi%02d_idx.npz' % i for i in range(1, 6)]
+    >>> rois.run() # doctest: +SKIP
+
+    """
+    input_spec = MergeROIsInputSpec
+    output_spec = MergeROIsOutputSpec
+
+    def _run_interface(self, runtime):
+        res = merge_rois(self.inputs.in_files, self.inputs.in_index,
+                         self.inputs.in_reference)
+        self._merged = res
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['merged_file'] = self._merged
+        return outputs
+
+
+def normalize_tpms(in_files, in_mask=None, out_files=None):
+    """
+    Returns the input tissue probability maps (tpms, aka volume fractions)
+    normalized to sum up to 1.0 at each voxel within the mask.
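+
+    A minimal usage sketch (the filenames here are hypothetical)::
+
+        out_files = normalize_tpms(['gm.nii', 'wm.nii', 'csf.nii'],
+                                   in_mask='brainmask.nii')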
+ """ + import nibabel as nb + import numpy as np + import os.path as op + + in_files = np.atleast_1d(in_files).tolist() + + if out_files is None: + out_files = [] + + if len(out_files) != len(in_files): + for i, finname in enumerate(in_files): + fname, fext = op.splitext(op.basename(finname)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + + out_file = op.abspath('%s_norm_%02d%s' % (fname, i, fext)) + out_files += [out_file] + + imgs = [nb.load(fim, mmap=NUMPY_MMAP) for fim in in_files] + + if len(in_files) == 1: + img_data = imgs[0].get_data() + img_data[img_data > 0.0] = 1.0 + hdr = imgs[0].header.copy() + hdr['data_type'] = 16 + hdr.set_data_dtype(np.float32) + nb.save( + nb.Nifti1Image(img_data.astype(np.float32), imgs[0].affine, hdr), + out_files[0]) + return out_files[0] + + img_data = np.array([im.get_data() for im in imgs]).astype(np.float32) + # img_data[img_data>1.0] = 1.0 + img_data[img_data < 0.0] = 0.0 + weights = np.sum(img_data, axis=0) + + msk = np.ones_like(imgs[0].get_data()) + msk[weights <= 0] = 0 + + if in_mask is not None: + msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() + msk[msk <= 0] = 0 + msk[msk > 0] = 1 + + msk = np.ma.masked_equal(msk, 0) + + for i, out_file in enumerate(out_files): + data = np.ma.masked_equal(img_data[i], 0) + probmap = data / weights + hdr = imgs[i].header.copy() + hdr['data_type'] = 16 + hdr.set_data_dtype('float32') + nb.save( + nb.Nifti1Image(probmap.astype(np.float32), imgs[i].affine, hdr), + out_file) + + return out_files + + +def split_rois(in_file, mask=None, roishape=None): + """ + Splits an image in ROIs for parallel processing + """ + import nibabel as nb + import numpy as np + from math import sqrt, ceil + import os.path as op + + if roishape is None: + roishape = (10, 10, 1) + + im = nb.load(in_file, mmap=NUMPY_MMAP) + imshape = im.shape + dshape = imshape[:3] + nvols = imshape[-1] + roisize = roishape[0] * roishape[1] * roishape[2] + droishape = (roishape[0], roishape[1], roishape[2], nvols) + + if mask is not None: + mask = nb.load(mask, mmap=NUMPY_MMAP).get_data() + mask[mask > 0] = 1 + mask[mask < 1] = 0 + else: + mask = np.ones(dshape) + + mask = mask.reshape(-1).astype(np.uint8) + nzels = np.nonzero(mask) + els = np.sum(mask) + nrois = int(ceil(els / float(roisize))) + + data = im.get_data().reshape((mask.size, -1)) + data = np.squeeze(data.take(nzels, axis=0)) + nvols = data.shape[-1] + + roidefname = op.abspath('onesmask.nii.gz') + nb.Nifti1Image(np.ones(roishape, dtype=np.uint8), None, + None).to_filename(roidefname) + + out_files = [] + out_mask = [] + out_idxs = [] + + for i in range(nrois): + first = i * roisize + last = (i + 1) * roisize + fill = 0 + + if last > els: + fill = last - els + last = els + + droi = data[first:last, ...] 
+
+        iname = op.abspath('roi%010d_idx' % i)
+        out_idxs.append(iname + '.npz')
+        np.savez(iname, (nzels[0][first:last], ))
+
+        if fill > 0:
+            droi = np.vstack((droi,
+                              np.zeros(
+                                  (int(fill), int(nvols)), dtype=np.float32)))
+            partialmsk = np.ones((roisize, ), dtype=np.uint8)
+            partialmsk[-int(fill):] = 0
+            partname = op.abspath('partialmask.nii.gz')
+            nb.Nifti1Image(partialmsk.reshape(roishape), None,
+                           None).to_filename(partname)
+            out_mask.append(partname)
+        else:
+            out_mask.append(roidefname)
+
+        fname = op.abspath('roi%010d.nii.gz' % i)
+        nb.Nifti1Image(droi.reshape(droishape), None, None).to_filename(fname)
+        out_files.append(fname)
+    return out_files, out_mask, out_idxs
+
+
+def merge_rois(in_files, in_idxs, in_ref, dtype=None, out_file=None):
+    """
+    Re-builds an image resulting from a parallelized processing
+    """
+    import nibabel as nb
+    import numpy as np
+    import os.path as op
+    import subprocess as sp
+
+    if out_file is None:
+        out_file = op.abspath('merged.nii.gz')
+
+    if dtype is None:
+        dtype = np.float32
+
+    # if file is compressed, uncompress using os
+    # to avoid memory errors
+    if op.splitext(in_ref)[1] == '.gz':
+        try:
+            iflogger.info('uncompress %s', in_ref)
+            sp.check_call(['gunzip', in_ref], stdout=sp.PIPE)
+            in_ref = op.splitext(in_ref)[0]
+        except Exception:
+            pass
+
+    ref = nb.load(in_ref, mmap=NUMPY_MMAP)
+    aff = ref.affine
+    hdr = ref.header.copy()
+    rsh = ref.shape
+    del ref
+    npix = rsh[0] * rsh[1] * rsh[2]
+    fcdata = nb.load(in_files[0]).get_data()
+
+    if fcdata.ndim == 4:
+        ndirs = fcdata.shape[-1]
+    else:
+        ndirs = 1
+    newshape = (rsh[0], rsh[1], rsh[2], ndirs)
+    hdr.set_data_dtype(dtype)
+    hdr.set_xyzt_units('mm', 'sec')
+
+    if ndirs < 300:
+        data = np.zeros((npix, ndirs))
+        for cname, iname in zip(in_files, in_idxs):
+            f = np.load(iname)
+            idxs = np.squeeze(f['arr_0'])
+            cdata = nb.load(
+                cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs)
+            nels = len(idxs)
+            idata = (idxs, )
+            try:
+                data[idata, ...] = cdata[0:nels, ...]
+ except: + print(('Consistency between indexes and chunks was ' + 'lost: data=%s, chunk=%s') % (str(data.shape), + str(cdata.shape))) + raise + + hdr.set_data_shape(newshape) + nb.Nifti1Image(data.reshape(newshape).astype(dtype), aff, + hdr).to_filename(out_file) + + else: + hdr.set_data_shape(rsh[:3]) + nii = [] + for d in range(ndirs): + fname = op.abspath('vol%06d.nii' % d) + nb.Nifti1Image(np.zeros(rsh[:3]), aff, hdr).to_filename(fname) + nii.append(fname) + + for cname, iname in zip(in_files, in_idxs): + f = np.load(iname) + idxs = np.squeeze(f['arr_0']) + + for d, fname in enumerate(nii): + data = nb.load(fname, mmap=NUMPY_MMAP).get_data().reshape(-1) + cdata = nb.load( + cname, mmap=NUMPY_MMAP).get_data().reshape(-1, ndirs)[:, d] + nels = len(idxs) + idata = (idxs, ) + data[idata] = cdata[0:nels] + nb.Nifti1Image(data.reshape(rsh[:3]), aff, + hdr).to_filename(fname) + + imgs = [nb.load(im, mmap=NUMPY_MMAP) for im in nii] + allim = nb.concat_images(imgs) + allim.to_filename(out_file) + + return out_file + + +class CalculateMedianInputSpec(BaseInterfaceInputSpec): + in_files = InputMultiPath( + File( + exists=True, + mandatory=True, + desc="One or more realigned Nifti 4D timeseries")) + median_file = traits.Str(desc="Filename prefix to store median images") + median_per_file = traits.Bool( + False, usedefault=True, desc="Calculate a median file for each Nifti") + + +class CalculateMedianOutputSpec(TraitedSpec): + median_files = OutputMultiPath( + File(exists=True), desc="One or more median images") + + +class CalculateMedian(BaseInterface): + """ + Computes an average of the median across one or more 4D Nifti timeseries + + Example + ------- + + >>> from nipype.algorithms.misc import CalculateMedian + >>> mean = CalculateMedian() + >>> mean.inputs.in_files = 'functional.nii' + >>> mean.run() # doctest: +SKIP + + """ + input_spec = CalculateMedianInputSpec + output_spec = CalculateMedianOutputSpec + + def __init__(self, *args, **kwargs): + super(CalculateMedian, self).__init__(*args, **kwargs) + self._median_files = [] + + def _gen_fname(self, suffix, idx=None, ext=None): + if idx: + in_file = self.inputs.in_files[idx] + else: + if isinstance(self.inputs.in_files, list): + in_file = self.inputs.in_files[0] + else: + in_file = self.inputs.in_files + fname, in_ext = op.splitext(op.basename(in_file)) + if in_ext == '.gz': + fname, in_ext2 = op.splitext(fname) + in_ext = in_ext2 + in_ext + if ext is None: + ext = in_ext + if ext.startswith('.'): + ext = ext[1:] + if self.inputs.median_file: + outname = self.inputs.median_file + else: + outname = '{}_{}'.format(fname, suffix) + if idx: + outname += str(idx) + return op.abspath('{}.{}'.format(outname, ext)) + + def _run_interface(self, runtime): + total = None + self._median_files = [] + for idx, fname in enumerate(ensure_list(self.inputs.in_files)): + img = nb.load(fname, mmap=NUMPY_MMAP) + data = np.median(img.get_data(), axis=3) + if self.inputs.median_per_file: + self._median_files.append(self._write_nifti(img, data, idx)) + else: + if total is None: + total = data + else: + total += data + if not self.inputs.median_per_file: + self._median_files.append(self._write_nifti(img, total, idx)) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['median_files'] = self._median_files + return outputs + + def _write_nifti(self, img, data, idx, suffix='median'): + if self.inputs.median_per_file: + median_img = nb.Nifti1Image(data, img.affine, img.header) + filename = self._gen_fname(suffix, idx=idx) + else: + 
median_img = nb.Nifti1Image(data / (idx + 1), img.affine, + img.header) + filename = self._gen_fname(suffix) + median_img.to_filename(filename) + return filename + + +# Deprecated interfaces ------------------------------------------------------ + + +class Distance(nam.Distance): + """Calculates distance between two volumes. + + .. deprecated:: 0.10.0 + Use :py:class:`nipype.algorithms.metrics.Distance` instead. + """ + + def __init__(self, **inputs): + super(nam.Distance, self).__init__(**inputs) + warnings.warn(("This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.Distance"), + DeprecationWarning) + + +class Overlap(nam.Overlap): + """Calculates various overlap measures between two maps. + + .. deprecated:: 0.10.0 + Use :py:class:`nipype.algorithms.metrics.Overlap` instead. + """ + + def __init__(self, **inputs): + super(nam.Overlap, self).__init__(**inputs) + warnings.warn(("This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.Overlap"), + DeprecationWarning) + + +class FuzzyOverlap(nam.FuzzyOverlap): + """Calculates various overlap measures between two maps, using a fuzzy + definition. + + .. deprecated:: 0.10.0 + Use :py:class:`nipype.algorithms.metrics.FuzzyOverlap` instead. + """ + + def __init__(self, **inputs): + super(nam.FuzzyOverlap, self).__init__(**inputs) + warnings.warn(("This interface has been deprecated since 0.10.0," + " please use nipype.algorithms.metrics.FuzzyOverlap"), + DeprecationWarning) + + +class TSNR(confounds.TSNR): + """ + .. deprecated:: 0.12.1 + Use :py:class:`nipype.algorithms.confounds.TSNR` instead + """ + + def __init__(self, **inputs): + super(confounds.TSNR, self).__init__(**inputs) + warnings.warn(("This interface has been moved since 0.12.0," + " please use nipype.algorithms.confounds.TSNR"), + UserWarning) diff --git a/nipype/algorithms/modelgen.py b/nipype/algorithms/modelgen.py new file mode 100644 index 0000000000..7741139201 --- /dev/null +++ b/nipype/algorithms/modelgen.py @@ -0,0 +1,896 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The modelgen module provides classes for specifying designs for individual +subject analysis of task-based fMRI experiments. In particular it also includes +algorithms for generating regressors for sparse and sparse-clustered acquisition +experiments. + +These functions include: + + * SpecifyModel: allows specification of sparse and non-sparse models +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, str, bytes, int + +from copy import deepcopy +import os + +from nibabel import load +import numpy as np +from scipy.special import gammaln + +from ..utils import NUMPY_MMAP +from ..interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath, + traits, File, Bunch, BaseInterfaceInputSpec, + isdefined) +from ..utils.filemanip import ensure_list +from ..utils.misc import normalize_mc_params +from .. 
import config, logging +iflogger = logging.getLogger('nipype.interface') + + +def gcd(a, b): + """Returns the greatest common divisor of two integers + + uses Euclid's algorithm + + >>> gcd(4, 5) + 1 + >>> gcd(4, 8) + 4 + >>> gcd(22, 55) + 11 + + """ + while b > 0: + a, b = b, a % b + return a + + +def spm_hrf(RT, P=None, fMRI_T=16): + """ python implementation of spm_hrf + + see spm_hrf for implementation details + + % RT - scan repeat time + % p - parameters of the response function (two gamma + % functions) + % defaults (seconds) + % p(0) - delay of response (relative to onset) 6 + % p(1) - delay of undershoot (relative to onset) 16 + % p(2) - dispersion of response 1 + % p(3) - dispersion of undershoot 1 + % p(4) - ratio of response to undershoot 6 + % p(5) - onset (seconds) 0 + % p(6) - length of kernel (seconds) 32 + % + % hrf - hemodynamic response function + % p - parameters of the response function + + the following code using scipy.stats.distributions.gamma + doesn't return the same result as the spm_Gpdf function :: + + hrf = gamma.pdf(u, p[0]/p[2], scale=dt/p[2]) - + gamma.pdf(u, p[1]/p[3], scale=dt/p[3])/p[4] + + >>> print(spm_hrf(2)) + [ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01 + 2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02 + -3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02 + -5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04 + -1.46257507e-04] + + """ + p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float) + if P is not None: + p[0:len(P)] = P + + _spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h)) + # modelled hemodynamic response function - {mixture of Gammas} + dt = RT / float(fMRI_T) + u = np.arange(0, int(p[6] / dt + 1)) - p[5] / dt + with np.errstate(divide='ignore'): # Known division-by-zero + hrf = _spm_Gpdf(u, p[0] / p[2], dt / p[2]) - _spm_Gpdf( + u, p[1] / p[3], dt / p[3]) / p[4] + idx = np.arange(0, int((p[6] / RT) + 1)) * fMRI_T + hrf = hrf[idx] + hrf = hrf / np.sum(hrf) + return hrf + + +def orth(x_in, y_in): + """Orthogonalize y_in with respect to x_in. + + >>> orth_expected = np.array([1.7142857142857144, 0.42857142857142883, \ + -0.85714285714285676]) + >>> err = np.abs(np.array(orth([1, 2, 3],[4, 5, 6]) - orth_expected)) + >>> all(err < np.finfo(float).eps) + True + + """ + x = np.array(x_in)[:, None] + y = np.array(y_in)[:, None] + y = y - np.dot(x, np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y))) + if np.linalg.norm(y, 1) > np.exp(-32): + y = y[:, 0].tolist() + else: + y = y_in + return y + + +def scale_timings(timelist, input_units, output_units, time_repetition): + """Scales timings given input and output units (scans/secs) + + Parameters + ---------- + + timelist: list of times to scale + input_units: 'secs' or 'scans' + output_units: Ibid. + time_repetition: float in seconds + + """ + if input_units == output_units: + _scalefactor = 1. + + if (input_units == 'scans') and (output_units == 'secs'): + _scalefactor = time_repetition + + if (input_units == 'secs') and (output_units == 'scans'): + _scalefactor = 1. 
/ time_repetition + timelist = [np.max([0., _scalefactor * t]) for t in timelist] + return timelist + + +def gen_info(run_event_files): + """Generate subject_info structure from a list of event files + """ + info = [] + for i, event_files in enumerate(run_event_files): + runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) + for event_file in event_files: + _, name = os.path.split(event_file) + if '.run' in name: + name, _ = name.split('.run%03d' % (i + 1)) + elif '.txt' in name: + name, _ = name.split('.txt') + + runinfo.conditions.append(name) + event_info = np.atleast_2d(np.loadtxt(event_file)) + runinfo.onsets.append(event_info[:, 0].tolist()) + if event_info.shape[1] > 1: + runinfo.durations.append(event_info[:, 1].tolist()) + else: + runinfo.durations.append([0]) + + if event_info.shape[1] > 2: + runinfo.amplitudes.append(event_info[:, 2].tolist()) + else: + delattr(runinfo, 'amplitudes') + info.append(runinfo) + return info + + +class SpecifyModelInputSpec(BaseInterfaceInputSpec): + subject_info = InputMultiPath( + Bunch, + mandatory=True, + xor=['subject_info', 'event_files'], + desc='Bunch or List(Bunch) subject-specific ' + 'condition information. see ' + ':ref:`SpecifyModel` or ' + 'SpecifyModel.__doc__ for details') + event_files = InputMultiPath( + traits.List(File(exists=True)), + mandatory=True, + xor=['subject_info', 'event_files'], + desc='List of event description files 1, 2 or 3 ' + 'column format corresponding to onsets, ' + 'durations and amplitudes') + realignment_parameters = InputMultiPath( + File(exists=True), + desc='Realignment parameters returned ' + 'by motion correction algorithm', + copyfile=False) + parameter_source = traits.Enum( + "SPM", + "FSL", + "AFNI", + "FSFAST", + "NIPY", + usedefault=True, + desc="Source of motion parameters") + outlier_files = InputMultiPath( + File(exists=True), + desc='Files containing scan outlier indices ' + 'that should be tossed', + copyfile=False) + functional_runs = InputMultiPath( + traits.Either(traits.List(File(exists=True)), File(exists=True)), + mandatory=True, + desc='Data files for model. List of 4D ' + 'files or list of list of 3D ' + 'files per session', + copyfile=False) + input_units = traits.Enum( + 'secs', + 'scans', + mandatory=True, + desc='Units of event onsets and durations (secs ' + 'or scans). Output units are always in secs') + high_pass_filter_cutoff = traits.Float( + mandatory=True, desc='High-pass filter cutoff in secs') + time_repetition = traits.Float( + mandatory=True, + desc='Time between the start of one volume ' + 'to the start of the next image volume.') + # Not implemented yet + # polynomial_order = traits.Range(0, low=0, + # desc ='Number of polynomial functions to model high pass filter.') + + +class SpecifyModelOutputSpec(TraitedSpec): + session_info = traits.Any(desc='Session info for level1designs') + + +class SpecifyModel(BaseInterface): + """Makes a model specification compatible with spm/fsl designers. + + The subject_info field should contain paradigm information in the form of + a Bunch or a list of Bunch. The Bunch should contain the following + information:: + + [Mandatory] + - conditions : list of names + - onsets : lists of onsets corresponding to each condition + - durations : lists of durations corresponding to each condition. Should be + left to a single 0 if all events are being modelled as impulses. + + [Optional] + - regressor_names : list of str + list of names corresponding to each column. Should be None if + automatically assigned. 
+ - regressors : list of lists + values for each regressor - must correspond to the number of + volumes in the functional run + - amplitudes : lists of amplitudes for each event. This will be ignored by + SPM's Level1Design. + + The following two (tmod, pmod) will be ignored by any Level1Design class + other than SPM: + + - tmod : lists of conditions that should be temporally modulated. Should + default to None if not being used. + - pmod : list of Bunch corresponding to conditions + - name : name of parametric modulator + - param : values of the modulator + - poly : degree of modulation + + Alternatively, you can provide information through event files. + + The event files have to be in 1, 2 or 3 column format with the columns + corresponding to Onsets, Durations and Amplitudes and they have to have the + name event_name.runXXX... e.g.: Words.run001.txt. The event_name part will + be used to create the condition names. + + Examples + -------- + + >>> from nipype.algorithms import modelgen + >>> from nipype.interfaces.base import Bunch + >>> s = modelgen.SpecifyModel() + >>> s.inputs.input_units = 'secs' + >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] + >>> s.inputs.time_repetition = 6 + >>> s.inputs.high_pass_filter_cutoff = 128. + >>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]) + >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + >>> s.inputs.subject_info = [evs_run2, evs_run3] + + Using pmod: + + >>> evs_run2 = Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 50], [100, 180]], \ +durations=[[0], [0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), \ +None]) + >>> evs_run3 = Bunch(conditions=['cond1', 'cond2'], onsets=[[20, 120], [80, 160]], \ +durations=[[0], [0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), \ +None]) + >>> s.inputs.subject_info = [evs_run2, evs_run3] + + """ + input_spec = SpecifyModelInputSpec + output_spec = SpecifyModelOutputSpec + + def _generate_standard_design(self, + infolist, + functional_runs=None, + realignment_parameters=None, + outliers=None): + """ Generates a standard design matrix paradigm given information about + each run + """ + sessinfo = [] + output_units = 'secs' + if 'output_units' in self.inputs.traits(): + output_units = self.inputs.output_units + + for i, info in enumerate(infolist): + sessinfo.insert(i, dict(cond=[])) + if isdefined(self.inputs.high_pass_filter_cutoff): + sessinfo[i]['hpf'] = \ + np.float(self.inputs.high_pass_filter_cutoff) + + if hasattr(info, 'conditions') and info.conditions is not None: + for cid, cond in enumerate(info.conditions): + sessinfo[i]['cond'].insert(cid, dict()) + sessinfo[i]['cond'][cid]['name'] = info.conditions[cid] + scaled_onset = scale_timings( + info.onsets[cid], self.inputs.input_units, + output_units, self.inputs.time_repetition) + sessinfo[i]['cond'][cid]['onset'] = scaled_onset + scaled_duration = scale_timings( + info.durations[cid], self.inputs.input_units, + output_units, self.inputs.time_repetition) + sessinfo[i]['cond'][cid]['duration'] = scaled_duration + if hasattr(info, 'amplitudes') and info.amplitudes: + sessinfo[i]['cond'][cid]['amplitudes'] = \ + info.amplitudes[cid] + + if hasattr(info, 'tmod') and info.tmod and \ + len(info.tmod) > cid: + sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid] + + if hasattr(info, 'pmod') and info.pmod and \ + len(info.pmod) > cid: + if info.pmod[cid]: + sessinfo[i]['cond'][cid]['pmod'] = [] + for j, name in enumerate(info.pmod[cid].name): + 
sessinfo[i]['cond'][cid]['pmod'].insert(j, {}) + sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \ + name + sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \ + info.pmod[cid].poly[j] + sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \ + info.pmod[cid].param[j] + + sessinfo[i]['regress'] = [] + if hasattr(info, 'regressors') and info.regressors is not None: + for j, r in enumerate(info.regressors): + sessinfo[i]['regress'].insert(j, dict(name='', val=[])) + if hasattr(info, 'regressor_names') and \ + info.regressor_names is not None: + sessinfo[i]['regress'][j]['name'] = \ + info.regressor_names[j] + else: + sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j + 1) + sessinfo[i]['regress'][j]['val'] = info.regressors[j] + sessinfo[i]['scans'] = functional_runs[i] + + if realignment_parameters is not None: + for i, rp in enumerate(realignment_parameters): + mc = realignment_parameters[i] + for col in range(mc.shape[1]): + colidx = len(sessinfo[i]['regress']) + sessinfo[i]['regress'].insert(colidx, dict( + name='', val=[])) + sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % ( + col + 1) + sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist() + + if outliers is not None: + for i, out in enumerate(outliers): + numscans = 0 + for f in ensure_list(sessinfo[i]['scans']): + shape = load(f, mmap=NUMPY_MMAP).shape + if len(shape) == 3 or shape[3] == 1: + iflogger.warning('You are using 3D instead of 4D ' + 'files. Are you sure this was ' + 'intended?') + numscans += 1 + else: + numscans += shape[3] + + for j, scanno in enumerate(out): + colidx = len(sessinfo[i]['regress']) + sessinfo[i]['regress'].insert(colidx, dict( + name='', val=[])) + sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d' % ( + j + 1) + sessinfo[i]['regress'][colidx]['val'] = \ + np.zeros((1, numscans))[0].tolist() + sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1 + return sessinfo + + def _generate_design(self, infolist=None): + """Generate design specification for a typical fmri paradigm + """ + realignment_parameters = [] + if isdefined(self.inputs.realignment_parameters): + for parfile in self.inputs.realignment_parameters: + realignment_parameters.append( + np.apply_along_axis( + func1d=normalize_mc_params, + axis=1, + arr=np.loadtxt(parfile), + source=self.inputs.parameter_source)) + outliers = [] + if isdefined(self.inputs.outlier_files): + for filename in self.inputs.outlier_files: + try: + outindices = np.loadtxt(filename, dtype=int) + except IOError: + outliers.append([]) + else: + if outindices.size == 1: + outliers.append([outindices.tolist()]) + else: + outliers.append(outindices.tolist()) + + if infolist is None: + if isdefined(self.inputs.subject_info): + infolist = self.inputs.subject_info + else: + infolist = gen_info(self.inputs.event_files) + self._sessinfo = self._generate_standard_design( + infolist, + functional_runs=self.inputs.functional_runs, + realignment_parameters=realignment_parameters, + outliers=outliers) + + def _run_interface(self, runtime): + """ + """ + self._sessioninfo = None + self._generate_design() + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + if not hasattr(self, '_sessinfo'): + self._generate_design() + outputs['session_info'] = self._sessinfo + + return outputs + + +class SpecifySPMModelInputSpec(SpecifyModelInputSpec): + concatenate_runs = traits.Bool( + False, + usedefault=True, + desc='Concatenate all runs to look like a ' + 'single session.') + output_units = traits.Enum( + 'secs', + 'scans', + usedefault=True, + desc='Units of design event 
onsets and durations ' + '(secs or scans)') + + +class SpecifySPMModel(SpecifyModel): + """Adds SPM specific options to SpecifyModel + + adds: + - concatenate_runs + - output_units + + Examples + -------- + + >>> from nipype.algorithms import modelgen + >>> from nipype.interfaces.base import Bunch + >>> s = modelgen.SpecifySPMModel() + >>> s.inputs.input_units = 'secs' + >>> s.inputs.output_units = 'scans' + >>> s.inputs.high_pass_filter_cutoff = 128. + >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] + >>> s.inputs.time_repetition = 6 + >>> s.inputs.concatenate_runs = True + >>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]) + >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + >>> s.inputs.subject_info = [evs_run2, evs_run3] + + """ + + input_spec = SpecifySPMModelInputSpec + + def _concatenate_info(self, infolist): + nscans = [] + for i, f in enumerate(self.inputs.functional_runs): + if isinstance(f, list): + numscans = len(f) + elif isinstance(f, (str, bytes)): + img = load(f, mmap=NUMPY_MMAP) + numscans = img.shape[3] + else: + raise Exception('Functional input not specified correctly') + nscans.insert(i, numscans) + + # now combine all fields into 1 + # names, onsets, durations, amplitudes, pmod, tmod, regressor_names, + # regressors + infoout = infolist[0] + for j, val in enumerate(infolist[0].durations): + if len(infolist[0].onsets[j]) > 1 and len(val) == 1: + infoout.durations[j] = ( + infolist[0].durations[j] * len(infolist[0].onsets[j])) + + for i, info in enumerate(infolist[1:]): + # info.[conditions, tmod] remain the same + if info.onsets: + for j, val in enumerate(info.onsets): + if self.inputs.input_units == 'secs': + onsets = np.array(info.onsets[j]) +\ + self.inputs.time_repetition * \ + sum(nscans[0:(i + 1)]) + infoout.onsets[j].extend(onsets.tolist()) + else: + onsets = np.array(info.onsets[j]) + \ + sum(nscans[0:(i + 1)]) + infoout.onsets[j].extend(onsets.tolist()) + + for j, val in enumerate(info.durations): + if len(info.onsets[j]) > 1 and len(val) == 1: + infoout.durations[j].extend( + info.durations[j] * len(info.onsets[j])) + elif len(info.onsets[j]) == len(val): + infoout.durations[j].extend(info.durations[j]) + else: + raise ValueError('Mismatch in number of onsets and \ + durations for run {0}, condition \ + {1}'.format(i + 2, j + 1)) + + if hasattr(info, 'amplitudes') and info.amplitudes: + for j, val in enumerate(info.amplitudes): + infoout.amplitudes[j].extend(info.amplitudes[j]) + + if hasattr(info, 'pmod') and info.pmod: + for j, val in enumerate(info.pmod): + if val: + for key, data in enumerate(val.param): + infoout.pmod[j].param[key].extend(data) + + if hasattr(info, 'regressors') and info.regressors: + # assumes same ordering of regressors across different + # runs and the same names for the regressors + for j, v in enumerate(info.regressors): + infoout.regressors[j].extend(info.regressors[j]) + + # insert session regressors + if not hasattr(infoout, 'regressors') or not infoout.regressors: + infoout.regressors = [] + onelist = np.zeros((1, sum(nscans))) + onelist[0, sum(nscans[0:i]):sum(nscans[0:(i + 1)])] = 1 + infoout.regressors.insert( + len(infoout.regressors), + onelist.tolist()[0]) + return [infoout], nscans + + def _generate_design(self, infolist=None): + if not isdefined(self.inputs.concatenate_runs) or \ + not self.inputs.concatenate_runs: + super(SpecifySPMModel, self)._generate_design(infolist=infolist) + return + + if 
isdefined(self.inputs.subject_info): + infolist = self.inputs.subject_info + else: + infolist = gen_info(self.inputs.event_files) + concatlist, nscans = self._concatenate_info(infolist) + functional_runs = [ensure_list(self.inputs.functional_runs)] + realignment_parameters = [] + if isdefined(self.inputs.realignment_parameters): + realignment_parameters = [] + for parfile in self.inputs.realignment_parameters: + mc = np.apply_along_axis( + func1d=normalize_mc_params, + axis=1, + arr=np.loadtxt(parfile), + source=self.inputs.parameter_source) + if not realignment_parameters: + realignment_parameters.insert(0, mc) + else: + realignment_parameters[0] = \ + np.concatenate((realignment_parameters[0], mc)) + outliers = [] + if isdefined(self.inputs.outlier_files): + outliers = [[]] + for i, filename in enumerate(self.inputs.outlier_files): + try: + out = np.loadtxt(filename) + except IOError: + iflogger.warn('Error reading outliers file %s', filename) + out = np.array([]) + + if out.size > 0: + iflogger.debug('fname=%s, out=%s, nscans=%d', filename, + out, sum(nscans[0:i])) + sumscans = out.astype(int) + sum(nscans[0:i]) + + if out.size == 1: + outliers[0] += [np.array(sumscans, dtype=int).tolist()] + else: + outliers[0] += np.array(sumscans, dtype=int).tolist() + + self._sessinfo = self._generate_standard_design( + concatlist, + functional_runs=functional_runs, + realignment_parameters=realignment_parameters, + outliers=outliers) + + +class SpecifySparseModelInputSpec(SpecifyModelInputSpec): + time_acquisition = traits.Float( + 0, + mandatory=True, + desc='Time in seconds to acquire a single ' + 'image volume') + volumes_in_cluster = traits.Range( + 1, usedefault=True, desc='Number of scan volumes in a cluster') + model_hrf = traits.Bool(desc='Model sparse events with hrf') + stimuli_as_impulses = traits.Bool( + True, desc='Treat each stimulus to be impulse-like', usedefault=True) + use_temporal_deriv = traits.Bool( + requires=['model_hrf'], + desc='Create a temporal derivative in ' + 'addition to regular regressor') + scale_regressors = traits.Bool( + True, desc='Scale regressors by the peak', usedefault=True) + scan_onset = traits.Float( + 0.0, + desc='Start of scanning relative to onset of run in secs', + usedefault=True) + save_plot = traits.Bool( + desc=('Save plot of sparse design calculation ' + '(requires matplotlib)')) + + +class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): + sparse_png_file = File(desc='PNG file showing sparse design') + sparse_svg_file = File(desc='SVG file showing sparse design') + + +class SpecifySparseModel(SpecifyModel): + """ Specify a sparse model that is compatible with spm/fsl designers + + References + ---------- + + .. [1] Perrachione TK and Ghosh SS (2013) Optimized design and analysis of + sparse-sampling fMRI experiments. Front. Neurosci. 7:55 + http://journal.frontiersin.org/Journal/10.3389/fnins.2013.00055/abstract + + Examples + -------- + + >>> from nipype.algorithms import modelgen + >>> from nipype.interfaces.base import Bunch + >>> s = modelgen.SpecifySparseModel() + >>> s.inputs.input_units = 'secs' + >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] + >>> s.inputs.time_repetition = 6 + >>> s.inputs.time_acquisition = 2 + >>> s.inputs.high_pass_filter_cutoff = 128. 
+ >>> s.inputs.model_hrf = True + >>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], \ +durations=[[1]]) + >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], \ +durations=[[1]]) + >>> s.inputs.subject_info = [evs_run2, evs_run3] + + """ + input_spec = SpecifySparseModelInputSpec + output_spec = SpecifySparseModelOutputSpec + + def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): + """Generates a regressor for a sparse/clustered-sparse acquisition + """ + bplot = False + if isdefined(self.inputs.save_plot) and self.inputs.save_plot: + bplot = True + import matplotlib + matplotlib.use(config.get('execution', 'matplotlib_backend')) + import matplotlib.pyplot as plt + + TR = np.round(self.inputs.time_repetition * 1000) # in ms + if self.inputs.time_acquisition: + TA = np.round(self.inputs.time_acquisition * 1000) # in ms + else: + TA = TR # in ms + nvol = self.inputs.volumes_in_cluster + SCANONSET = np.round(self.inputs.scan_onset * 1000) + total_time = TR * (nscans - nvol) / nvol + TA * nvol + SCANONSET + SILENCE = TR - TA * nvol + dt = TA / 10.0 + durations = np.round(np.array(i_durations) * 1000) + if len(durations) == 1: + durations = durations * np.ones((len(i_onsets))) + onsets = np.round(np.array(i_onsets) * 1000) + dttemp = gcd(TA, gcd(SILENCE, TR)) + if dt < dttemp: + if dttemp % dt != 0: + dt = float(gcd(dttemp, dt)) + + if dt < 1: + raise Exception('Time multiple less than 1 ms') + iflogger.info('Setting dt = %d ms\n', dt) + npts = int(np.ceil(total_time / dt)) + times = np.arange(0, total_time, dt) * 1e-3 + timeline = np.zeros((npts)) + timeline2 = np.zeros((npts)) + if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: + hrf = spm_hrf(dt * 1e-3) + reg_scale = 1.0 + if self.inputs.scale_regressors: + boxcar = np.zeros(int(50.0 * 1e3 / dt)) + if self.inputs.stimuli_as_impulses: + boxcar[int(1.0 * 1e3 / dt)] = 1.0 + reg_scale = float(TA / dt) + else: + boxcar[int(1.0 * 1e3 / dt):int(2.0 * 1e3 / dt)] = 1.0 + + if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: + response = np.convolve(boxcar, hrf) + reg_scale = 1.0 / response.max() + iflogger.info('response sum: %.4f max: %.4f', response.sum(), + response.max()) + iflogger.info('reg_scale: %.4f', reg_scale) + + for i, t in enumerate(onsets): + idx = int(np.round(t / dt)) + if i_amplitudes: + if len(i_amplitudes) > 1: + timeline2[idx] = i_amplitudes[i] + else: + timeline2[idx] = i_amplitudes[0] + else: + timeline2[idx] = 1 + + if bplot: + plt.subplot(4, 1, 1) + plt.plot(times, timeline2) + + if not self.inputs.stimuli_as_impulses: + if durations[i] == 0: + durations[i] = TA * nvol + stimdur = np.ones((int(durations[i] / dt))) + timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)] + timeline += timeline2 + timeline2[:] = 0 + + if bplot: + plt.subplot(4, 1, 2) + plt.plot(times, timeline) + + if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: + timeline = np.convolve(timeline, hrf)[0:len(timeline)] + if isdefined(self.inputs.use_temporal_deriv) and \ + self.inputs.use_temporal_deriv: + # create temporal deriv + timederiv = np.concatenate(([0], np.diff(timeline))) + + if bplot: + plt.subplot(4, 1, 3) + plt.plot(times, timeline) + if isdefined(self.inputs.use_temporal_deriv) and \ + self.inputs.use_temporal_deriv: + plt.plot(times, timederiv) + # sample timeline + timeline2 = np.zeros((npts)) + reg = [] + regderiv = [] + for i, trial in enumerate(np.arange(nscans) / nvol): + scanstart = int((SCANONSET + trial * TR + (i % nvol) * TA) / dt) + 
scanidx = scanstart + np.arange(int(TA / dt))
+            timeline2[scanidx] = np.max(timeline)
+            reg.insert(i, np.mean(timeline[scanidx]) * reg_scale)
+            if isdefined(self.inputs.use_temporal_deriv) and \
+                    self.inputs.use_temporal_deriv:
+                regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale)
+
+        if isdefined(self.inputs.use_temporal_deriv) and \
+                self.inputs.use_temporal_deriv:
+            iflogger.info('orthogonalizing derivative w.r.t. main regressor')
+            regderiv = orth(reg, regderiv)
+
+        if bplot:
+            plt.subplot(4, 1, 3)
+            plt.plot(times, timeline2)
+            plt.subplot(4, 1, 4)
+            plt.bar(np.arange(len(reg)), reg, width=0.5)
+            plt.savefig('sparse.png')
+            plt.savefig('sparse.svg')
+
+        if regderiv:
+            return [reg, regderiv]
+        else:
+            return reg
+
+    def _cond_to_regress(self, info, nscans):
+        """Converts condition information to full regressors
+        """
+        reg = []
+        regnames = []
+        for i, cond in enumerate(info.conditions):
+            if hasattr(info, 'amplitudes') and info.amplitudes:
+                amplitudes = info.amplitudes[i]
+            else:
+                amplitudes = None
+            regnames.insert(len(regnames), cond)
+            scaled_onsets = scale_timings(info.onsets[i],
+                                          self.inputs.input_units, 'secs',
+                                          self.inputs.time_repetition)
+            scaled_durations = scale_timings(info.durations[i],
+                                             self.inputs.input_units, 'secs',
+                                             self.inputs.time_repetition)
+            regressor = self._gen_regress(scaled_onsets, scaled_durations,
+                                          amplitudes, nscans)
+            if isdefined(self.inputs.use_temporal_deriv) and \
+                    self.inputs.use_temporal_deriv:
+                reg.insert(len(reg), regressor[0])
+                regnames.insert(len(regnames), cond + '_D')
+                reg.insert(len(reg), regressor[1])
+            else:
+                reg.insert(len(reg), regressor)
+        # need to deal with temporal and parametric modulators
+        # for sparse-clustered acquisitions enter T1-effect regressors
+        nvol = self.inputs.volumes_in_cluster
+        if nvol > 1:
+            for i in range(nvol - 1):
+                treg = np.zeros((nscans // nvol, nvol))
+                treg[:, i] = 1
+                reg.insert(len(reg), treg.ravel().tolist())
+                regnames.insert(len(regnames), 'T1effect_%d' % i)
+        return reg, regnames
+
+    def _generate_clustered_design(self, infolist):
+        """Generates condition information for sparse-clustered designs.
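+
+        Each condition is expanded into a dense regressor sampled at the
+        scan acquisition times (see ``_cond_to_regress``); for clustered
+        acquisitions (``volumes_in_cluster`` > 1), additional T1-effect
+        regressors are appended to model signal differences between the
+        volumes of a cluster.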
+ + """ + infoout = deepcopy(infolist) + for i, info in enumerate(infolist): + infoout[i].conditions = None + infoout[i].onsets = None + infoout[i].durations = None + if info.conditions: + img = load(self.inputs.functional_runs[i], mmap=NUMPY_MMAP) + nscans = img.shape[3] + reg, regnames = self._cond_to_regress(info, nscans) + if hasattr(infoout[i], 'regressors') and infoout[i].regressors: + if not infoout[i].regressor_names: + infoout[i].regressor_names = \ + ['R%d' % j for j in range(len(infoout[i].regressors))] + else: + infoout[i].regressors = [] + infoout[i].regressor_names = [] + + for j, r in enumerate(reg): + regidx = len(infoout[i].regressors) + infoout[i].regressor_names.insert(regidx, regnames[j]) + infoout[i].regressors.insert(regidx, r) + return infoout + + def _generate_design(self, infolist=None): + if isdefined(self.inputs.subject_info): + infolist = self.inputs.subject_info + else: + infolist = gen_info(self.inputs.event_files) + sparselist = self._generate_clustered_design(infolist) + super(SpecifySparseModel, self)._generate_design(infolist=sparselist) + + def _list_outputs(self): + outputs = self._outputs().get() + if not hasattr(self, '_sessinfo'): + self._generate_design() + outputs['session_info'] = self._sessinfo + + if isdefined(self.inputs.save_plot) and self.inputs.save_plot: + outputs['sparse_png_file'] = os.path.join(os.getcwd(), + 'sparse.png') + outputs['sparse_svg_file'] = os.path.join(os.getcwd(), + 'sparse.svg') + return outputs diff --git a/nipype/algorithms/rapidart.py b/nipype/algorithms/rapidart.py new file mode 100644 index 0000000000..a4d5b592c9 --- /dev/null +++ b/nipype/algorithms/rapidart.py @@ -0,0 +1,777 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The rapidart module provides routines for artifact detection and region of +interest analysis. + +These functions include: + + * ArtifactDetect: performs artifact detection on functional images + + * StimulusCorrelation: determines correlation between stimuli + schedule and movement/intensity parameters +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open, range, str, bytes + +import os +from copy import deepcopy + +from nibabel import load, funcs, Nifti1Image +import numpy as np +from scipy import signal +import scipy.io as sio + +from ..utils import NUMPY_MMAP +from ..interfaces.base import (BaseInterface, traits, InputMultiPath, + OutputMultiPath, TraitedSpec, File, + BaseInterfaceInputSpec, isdefined) +from ..utils.filemanip import ensure_list, save_json, split_filename +from ..utils.misc import find_indices, normalize_mc_params +from .. 
import logging, config
+iflogger = logging.getLogger('nipype.interface')
+
+
+def _get_affine_matrix(params, source):
+    """Return affine matrix given a set of translation and rotation parameters
+
+    params : np.array (up to 12 long) in native package format
+    source : the package that generated the parameters
+             supports SPM, AFNI, FSFAST, FSL, NIPY
+    """
+    if source == 'NIPY':
+        # nipy does not store typical Euler angles, use nipy to convert
+        from nipy.algorithms.registration import to_matrix44
+        return to_matrix44(params)
+
+    params = normalize_mc_params(params, source)
+    # process for FSL, SPM, AFNI and FSFAST
+    rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
+                                  [-np.sin(x), np.cos(x)]])
+    q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
+    if len(params) < 12:
+        params = np.hstack((params, q[len(params):]))
+    params.shape = (len(params), )
+    # Translation
+    T = np.eye(4)
+    T[0:3, -1] = params[0:3]
+    # Rotation
+    Rx = np.eye(4)
+    Rx[1:3, 1:3] = rotfunc(params[3])
+    Ry = np.eye(4)
+    Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
+    Rz = np.eye(4)
+    Rz[0:2, 0:2] = rotfunc(params[5])
+    # Scaling
+    S = np.eye(4)
+    S[0:3, 0:3] = np.diag(params[6:9])
+    # Shear
+    Sh = np.eye(4)
+    Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
+    if source in ('AFNI', 'FSFAST'):
+        return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
+    return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
+
+
+def _calc_norm(mc, use_differences, source, brain_pts=None):
+    """Calculates the maximum overall displacement of the midpoints
+    of the faces of a cube due to translation and rotation.
+
+    Parameters
+    ----------
+    mc : motion parameter estimates
+        [3 translation, 3 rotation (radians)]
+    use_differences : boolean
+    brain_pts : [4 x n_points] of coordinates
+
+    Returns
+    -------
+
+    norm : at each time point
+    displacement : Euclidean distance (mm) of displacement at each coordinate
+
+    """
+
+    affines = [
+        _get_affine_matrix(mc[i, :], source) for i in range(mc.shape[0])
+    ]
+    return _calc_norm_affine(affines, use_differences, brain_pts)
+
+
+def _calc_norm_affine(affines, use_differences, brain_pts=None):
+    """Calculates the maximum overall displacement of the midpoints
+    of the faces of a cube due to translation and rotation.
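+
+    A quick, self-contained sanity check (illustrative only): identical
+    affines imply no motion, so the norm is zero at every time point and,
+    without ``brain_pts``, no per-voxel displacement is returned.
+
+    >>> import numpy as np
+    >>> norm, disp = _calc_norm_affine([np.eye(4), np.eye(4)],
+    ...                                use_differences=True)
+    >>> bool(np.allclose(norm, 0)) and disp is None
+    True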
+
+    Parameters
+    ----------
+    affines : list of [4 x 4] affine matrices
+    use_differences : boolean
+    brain_pts : [4 x n_points] of coordinates
+
+    Returns
+    -------
+
+    norm : at each time point
+    displacement : Euclidean distance (mm) of displacement at each coordinate
+
+    """
+
+    if brain_pts is None:
+        respos = np.diag([70, 70, 75])
+        resneg = np.diag([-70, -110, -45])
+        all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
+        displacement = None
+    else:
+        all_pts = brain_pts
+    n_pts = all_pts.size - all_pts.shape[1]
+    newpos = np.zeros((len(affines), n_pts))
+    if brain_pts is not None:
+        displacement = np.zeros((len(affines), int(n_pts / 3)))
+    for i, affine in enumerate(affines):
+        newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel()
+        if brain_pts is not None:
+            displacement[i, :] = np.sqrt(
+                np.sum(
+                    np.power(
+                        np.reshape(newpos[i, :],
+                                   (3, all_pts.shape[1])) - all_pts[0:3, :],
+                        2),
+                    axis=0))
+    # np.savez('displacement.npz', newpos=newpos, pts=all_pts)
+    normdata = np.zeros(len(affines))
+    if use_differences:
+        newpos = np.concatenate(
+            (np.zeros((1, n_pts)), np.diff(newpos, n=1, axis=0)), axis=0)
+        for i in range(newpos.shape[0]):
+            normdata[i] = \
+                np.max(np.sqrt(np.sum(
+                    np.reshape(np.power(np.abs(newpos[i, :]), 2),
+                               (3, all_pts.shape[1])),
+                    axis=0)))
+    else:
+        newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
+        normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
+    return normdata, displacement
+
+
+class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
+    realigned_files = InputMultiPath(
+        File(exists=True),
+        desc=("Names of realigned functional data "
+              "files"),
+        mandatory=True)
+    realignment_parameters = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc=("Names of realignment "
+              "parameters corresponding to "
+              "the functional data files"))
+    parameter_source = traits.Enum(
+        "SPM",
+        "FSL",
+        "AFNI",
+        "NiPy",
+        "FSFAST",
+        desc="Source of movement parameters",
+        mandatory=True)
+    use_differences = traits.ListBool(
+        [True, False],
+        minlen=2,
+        maxlen=2,
+        usedefault=True,
+        desc=("Use differences between successive "
+              "motion (first element) and "
+              "intensity parameter (second "
+              "element) estimates in order to "
+              "determine outliers. "
+              "(default is [True, False])"))
+    use_norm = traits.Bool(
+        True,
+        usedefault=True,
+        requires=['norm_threshold'],
+        desc=("Uses a composite of the motion parameters in "
+              "order to determine outliers."))
+    norm_threshold = traits.Float(
+        xor=['rotation_threshold', 'translation_threshold'],
+        mandatory=True,
+        desc=("Threshold to use to detect motion-related "
+              "outliers when composite motion is "
+              "being used"))
+    rotation_threshold = traits.Float(
+        mandatory=True,
+        xor=['norm_threshold'],
+        desc=("Threshold (in radians) to use to "
+              "detect rotation-related outliers"))
+    translation_threshold = traits.Float(
+        mandatory=True,
+        xor=['norm_threshold'],
+        desc=("Threshold (in mm) to use to "
+              "detect translation-related "
+              "outliers"))
+    zintensity_threshold = traits.Float(
+        mandatory=True,
+        desc=("Intensity Z-threshold used to "
+              "detect images that deviate "
+              "from the mean"))
+    mask_type = traits.Enum(
+        'spm_global',
+        'file',
+        'thresh',
+        mandatory=True,
+        desc=("Type of mask that should be used to mask the"
+              " functional data. *spm_global* uses an "
+              "spm_global like calculation to determine the"
+              " brain mask. *file* specifies a brain mask "
+              "file (should be an image file consisting of "
+              "0s and 1s). *thresh* specifies a threshold "
+              "to use. By default all voxels are used, "
+              "unless one of these mask types is defined"))
+    mask_file = File(
+        exists=True, desc="Mask file to be used if mask_type is 'file'.")
+    mask_threshold = traits.Float(
+        desc=("Mask threshold to be used if mask_type"
+              " is 'thresh'."))
+    intersect_mask = traits.Bool(
+        True, usedefault=True,
+        desc=("Intersect the masks when computed from "
+              "spm_global."))
+    save_plot = traits.Bool(
+        True, desc="save plots containing outliers", usedefault=True)
+    plot_type = traits.Enum(
+        'png',
+        'svg',
+        'eps',
+        'pdf',
+        desc="file type of the outlier plot",
+        usedefault=True)
+    bound_by_brainmask = traits.Bool(
+        False,
+        desc=("use the brain mask to "
+              "determine the bounding box "
+              "for composite norm (works "
+              "for SPM and Nipy; currently "
+              "inaccurate for FSL, AFNI)"),
+        usedefault=True)
+    global_threshold = traits.Float(
+        8.0,
+        desc=("use this threshold when mask "
+              "type equals spm_global"),
+        usedefault=True)
+
+
+class ArtifactDetectOutputSpec(TraitedSpec):
+    outlier_files = OutputMultiPath(
+        File(exists=True),
+        desc=("One file for each functional run "
+              "containing a list of 0-based indices"
+              " corresponding to outlier volumes"))
+    intensity_files = OutputMultiPath(
+        File(exists=True),
+        desc=("One file for each functional run "
+              "containing the global intensity "
+              "values determined from the "
+              "brainmask"))
+    norm_files = OutputMultiPath(
+        File,
+        desc=("One file for each functional run "
+              "containing the composite norm"))
+    statistic_files = OutputMultiPath(
+        File(exists=True),
+        desc=("One file for each functional run "
+              "containing information about the "
+              "different types of artifacts and "
+              "if design info is provided then "
+              "details of stimulus correlated "
+              "motion and a listing of artifacts "
+              "by event type."))
+    plot_files = OutputMultiPath(
+        File,
+        desc=("One image file for each functional run "
+              "containing the detected outliers"))
+    mask_files = OutputMultiPath(
+        File,
+        desc=("One image file for each functional run "
+              "containing the mask used for global "
+              "signal calculation"))
+    displacement_files = OutputMultiPath(
+        File,
+        desc=("One image file for each "
+              "functional run containing the "
+              "voxel displacement timeseries"))
+
+
+class ArtifactDetect(BaseInterface):
+    """Detects outliers in a functional imaging series
+
+    Uses intensity and motion parameters to infer outliers. If `use_norm` is
+    True, it computes the movement of the center of each face of a cuboid
+    centered around the head and returns the maximal movement across the
+    centers.
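+    (With the default bounding box in ``_calc_norm_affine``, this cuboid
+    spans roughly -70..70 mm in x, -110..70 mm in y and -45..75 mm in z,
+    i.e. an approximate adult head.)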
If you + wish to use individual thresholds instead, import `Undefined` from + `nipype.interfaces.base` and set `....inputs.use_norm = Undefined` + + + Examples + -------- + + >>> ad = ArtifactDetect() + >>> ad.inputs.realigned_files = 'functional.nii' + >>> ad.inputs.realignment_parameters = 'functional.par' + >>> ad.inputs.parameter_source = 'FSL' + >>> ad.inputs.norm_threshold = 1 + >>> ad.inputs.use_differences = [True, False] + >>> ad.inputs.zintensity_threshold = 3 + >>> ad.run() # doctest: +SKIP + """ + + input_spec = ArtifactDetectInputSpec + output_spec = ArtifactDetectOutputSpec + + def __init__(self, **inputs): + super(ArtifactDetect, self).__init__(**inputs) + + def _get_output_filenames(self, motionfile, output_dir): + """Generate output files based on motion filenames + + Parameters + ---------- + + motionfile: file/string + Filename for motion parameter file + output_dir: string + output directory in which the files will be generated + """ + if isinstance(motionfile, (str, bytes)): + infile = motionfile + elif isinstance(motionfile, list): + infile = motionfile[0] + else: + raise Exception("Unknown type of file") + _, filename, ext = split_filename(infile) + artifactfile = os.path.join(output_dir, ''.join(('art.', filename, + '_outliers.txt'))) + intensityfile = os.path.join(output_dir, ''.join(('global_intensity.', + filename, '.txt'))) + statsfile = os.path.join(output_dir, ''.join(('stats.', filename, + '.txt'))) + normfile = os.path.join(output_dir, ''.join(('norm.', filename, + '.txt'))) + plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.', + self.inputs.plot_type))) + displacementfile = os.path.join(output_dir, ''.join(('disp.', filename, + ext))) + maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext))) + return (artifactfile, intensityfile, statsfile, normfile, plotfile, + displacementfile, maskfile) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['outlier_files'] = [] + outputs['intensity_files'] = [] + outputs['statistic_files'] = [] + outputs['mask_files'] = [] + if isdefined(self.inputs.use_norm) and self.inputs.use_norm: + outputs['norm_files'] = [] + if self.inputs.bound_by_brainmask: + outputs['displacement_files'] = [] + if isdefined(self.inputs.save_plot) and self.inputs.save_plot: + outputs['plot_files'] = [] + for i, f in enumerate(ensure_list(self.inputs.realigned_files)): + (outlierfile, intensityfile, statsfile, normfile, plotfile, + displacementfile, maskfile) = \ + self._get_output_filenames(f, os.getcwd()) + outputs['outlier_files'].insert(i, outlierfile) + outputs['intensity_files'].insert(i, intensityfile) + outputs['statistic_files'].insert(i, statsfile) + outputs['mask_files'].insert(i, maskfile) + if isdefined(self.inputs.use_norm) and self.inputs.use_norm: + outputs['norm_files'].insert(i, normfile) + if self.inputs.bound_by_brainmask: + outputs['displacement_files'].insert(i, displacementfile) + if isdefined(self.inputs.save_plot) and self.inputs.save_plot: + outputs['plot_files'].insert(i, plotfile) + return outputs + + def _plot_outliers_with_wave(self, wave, outliers, name): + import matplotlib + matplotlib.use(config.get("execution", "matplotlib_backend")) + import matplotlib.pyplot as plt + plt.plot(wave) + plt.ylim([wave.min(), wave.max()]) + plt.xlim([0, len(wave) - 1]) + if len(outliers): + plt.plot( + np.tile(outliers[:, None], (1, 2)).T, + np.tile([wave.min(), wave.max()], (len(outliers), 1)).T, 'r') + plt.xlabel('Scans - 0-based') + plt.ylabel(name) + + def 
_detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
+        """
+        Core routine for detecting outliers
+        """
+        if not cwd:
+            cwd = os.getcwd()
+
+        # read in functional image
+        if isinstance(imgfile, (str, bytes)):
+            nim = load(imgfile, mmap=NUMPY_MMAP)
+        elif isinstance(imgfile, list):
+            if len(imgfile) == 1:
+                nim = load(imgfile[0], mmap=NUMPY_MMAP)
+            else:
+                images = [load(f, mmap=NUMPY_MMAP) for f in imgfile]
+                nim = funcs.concat_images(images)
+
+        # compute global intensity signal
+        (x, y, z, timepoints) = nim.shape
+
+        data = nim.get_data()
+        affine = nim.affine
+        g = np.zeros((timepoints, 1))
+        masktype = self.inputs.mask_type
+        if masktype == 'spm_global':  # spm_global like calculation
+            iflogger.debug('art: using spm global')
+            intersect_mask = self.inputs.intersect_mask
+            if intersect_mask:
+                mask = np.ones((x, y, z), dtype=bool)
+                for t0 in range(timepoints):
+                    vol = data[:, :, :, t0]
+                    # Use an SPM like approach
+                    mask_tmp = vol > \
+                        (np.nanmean(vol) / self.inputs.global_threshold)
+                    mask = mask * mask_tmp
+                for t0 in range(timepoints):
+                    vol = data[:, :, :, t0]
+                    g[t0] = np.nanmean(vol[mask])
+                if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
+                    intersect_mask = False
+                    g = np.zeros((timepoints, 1))
+            if not intersect_mask:
+                iflogger.info('intersect_mask is False: computing a mask '
+                              'per volume')
+                mask = np.zeros((x, y, z, timepoints))
+                for t0 in range(timepoints):
+                    vol = data[:, :, :, t0]
+                    mask_tmp = vol > \
+                        (np.nanmean(vol) / self.inputs.global_threshold)
+                    mask[:, :, :, t0] = mask_tmp
+                    g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp)
+        elif masktype == 'file':  # uses a mask image to determine intensity
+            maskimg = load(self.inputs.mask_file, mmap=NUMPY_MMAP)
+            mask = maskimg.get_data()
+            affine = maskimg.affine
+            mask = mask > 0.5
+            for t0 in range(timepoints):
+                vol = data[:, :, :, t0]
+                g[t0] = np.nanmean(vol[mask])
+        elif masktype == 'thresh':  # uses a fixed signal threshold
+            for t0 in range(timepoints):
+                vol = data[:, :, :, t0]
+                mask = vol > self.inputs.mask_threshold
+                g[t0] = np.nanmean(vol[mask])
+        else:
+            mask = np.ones((x, y, z))
+            g = np.nanmean(data[mask > 0, :], 1)
+
+        # compute normalized intensity values
+        gz = signal.detrend(g, axis=0)  # detrend the signal
+        if self.inputs.use_differences[1]:
+            gz = np.concatenate(
+                (np.zeros((1, 1)), np.diff(gz, n=1, axis=0)), axis=0)
+        gz = (gz - np.mean(gz)) / np.std(gz)  # normalize the detrended signal
+        iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
+
+        # read in motion parameters
+        mc_in = np.loadtxt(motionfile)
+        mc = deepcopy(mc_in)
+
+        (artifactfile, intensityfile, statsfile, normfile, plotfile,
+         displacementfile, maskfile) = self._get_output_filenames(
+             imgfile, cwd)
+        mask_img = Nifti1Image(mask.astype(np.uint8), affine)
+        mask_img.to_filename(maskfile)
+
+        if self.inputs.use_norm:
+            brain_pts = None
+            if self.inputs.bound_by_brainmask:
+                voxel_coords = np.nonzero(mask)
+                coords = np.vstack((voxel_coords[0],
+                                    np.vstack((voxel_coords[1],
+                                               voxel_coords[2])))).T
+                brain_pts = np.dot(affine,
+                                   np.hstack((coords,
+                                              np.ones((coords.shape[0],
+                                                       1)))).T)
+            # calculate the norm of the motion parameters
+            normval, displacement = _calc_norm(
+                mc,
+                self.inputs.use_differences[0],
+                self.inputs.parameter_source,
+                brain_pts=brain_pts)
+            tidx = find_indices(normval > self.inputs.norm_threshold)
+            ridx = find_indices(normval < 0)
+            if displacement is not None:
+                # np.float is a deprecated alias of the builtin float
+                dmap = np.zeros((x, y, z, timepoints), dtype=float)
+                for i in range(timepoints):
+                    dmap[voxel_coords[0], voxel_coords[1], voxel_coords[2],
+                         i] = displacement[i, :]
+                dimg = Nifti1Image(dmap, affine)
+                dimg.to_filename(displacementfile)
+        else:
+            if self.inputs.use_differences[0]:
+                mc = np.concatenate(
+                    (np.zeros((1, 6)), np.diff(mc_in, n=1, axis=0)), axis=0)
+            traval = mc[:, 0:3]  # translation parameters (mm)
+            rotval = mc[:, 3:6]  # rotation parameters (rad)
+            tidx = find_indices(
+                np.sum(abs(traval) > self.inputs.translation_threshold, 1) > 0)
+            ridx = find_indices(
+                np.sum(abs(rotval) > self.inputs.rotation_threshold, 1) > 0)
+
+        outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
+
+        # write output to outputfile
+        np.savetxt(artifactfile, outliers, fmt=b'%d', delimiter=' ')
+        np.savetxt(intensityfile, g, fmt=b'%.2f', delimiter=' ')
+        if self.inputs.use_norm:
+            np.savetxt(normfile, normval, fmt=b'%.4f', delimiter=' ')
+
+        if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
+            import matplotlib
+            matplotlib.use(config.get("execution", "matplotlib_backend"))
+            import matplotlib.pyplot as plt
+            fig = plt.figure()
+            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
+                plt.subplot(211)
+            else:
+                plt.subplot(311)
+            self._plot_outliers_with_wave(gz, iidx, 'Intensity')
+            if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
+                plt.subplot(212)
+                self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
+                                              'Norm (mm)')
+            else:
+                diff = ''
+                if self.inputs.use_differences[0]:
+                    # label the plots when differences are shown
+                    diff = ' (diff)'
+                plt.subplot(312)
+                self._plot_outliers_with_wave(traval, tidx,
+                                              'Translation (mm)' + diff)
+                plt.subplot(313)
+                self._plot_outliers_with_wave(rotval, ridx,
+                                              'Rotation (rad)' + diff)
+            plt.savefig(plotfile)
+            plt.close(fig)
+
+        motion_outliers = np.union1d(tidx, ridx)
+        stats = [
+            {
+                'motion_file': motionfile,
+                'functional_file': imgfile
+            },
+            {
+                'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
+                'intensity_outliers': len(np.setdiff1d(iidx,
+                                                       motion_outliers)),
+                'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
+            },
+            {
+                'motion': [
+                    {
+                        'using differences': self.inputs.use_differences[0]
+                    },
+                    {
+                        'mean': np.mean(mc_in, axis=0).tolist(),
+                        'min': np.min(mc_in, axis=0).tolist(),
+                        'max': np.max(mc_in, axis=0).tolist(),
+                        'std': np.std(mc_in, axis=0).tolist()
+                    },
+                ]
+            },
+            {
+                'intensity': [
+                    {
+                        'using differences': self.inputs.use_differences[1]
+                    },
+                    {
+                        'mean': np.mean(gz, axis=0).tolist(),
+                        'min': np.min(gz, axis=0).tolist(),
+                        'max': np.max(gz, axis=0).tolist(),
+                        'std': np.std(gz, axis=0).tolist()
+                    },
+                ]
+            },
+        ]
+        if self.inputs.use_norm:
+            stats.insert(
+                3, {
+                    'motion_norm': {
+                        'mean': np.mean(normval, axis=0).tolist(),
+                        'min': np.min(normval, axis=0).tolist(),
+                        'max': np.max(normval, axis=0).tolist(),
+                        'std': np.std(normval, axis=0).tolist(),
+                    }
+                })
+        save_json(statsfile, stats)
+
+    def _run_interface(self, runtime):
+        """Execute this module.
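+
+        Outlier detection runs separately for each realigned file /
+        realignment-parameters pair; the per-run artifact, intensity,
+        statistics, mask and (optionally) norm, plot and displacement
+        files are written to the current working directory.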
+        """
+        funcfilelist = ensure_list(self.inputs.realigned_files)
+        motparamlist = ensure_list(self.inputs.realignment_parameters)
+        for i, imgf in enumerate(funcfilelist):
+            self._detect_outliers_core(
+                imgf, motparamlist[i], i, cwd=os.getcwd())
+        return runtime
+
+
+class StimCorrInputSpec(BaseInterfaceInputSpec):
+    realignment_parameters = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc=("Names of realignment "
+              "parameters corresponding to "
+              "the functional data files"))
+    intensity_values = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc=("Name of file containing intensity "
+              "values"))
+    spm_mat_file = File(
+        exists=True,
+        mandatory=True,
+        desc="SPM mat file (use the pre-estimation SPM.mat file)")
+    concatenated_design = traits.Bool(
+        mandatory=True,
+        desc=("state if the design matrix "
+              "contains concatenated sessions"))
+
+
+class StimCorrOutputSpec(TraitedSpec):
+    stimcorr_files = OutputMultiPath(
+        File(exists=True),
+        desc=("List of files containing "
+              "correlation values"))
+
+
+class StimulusCorrelation(BaseInterface):
+    """Determines if stimuli are correlated with motion or intensity
+    parameters.
+
+    Currently this class supports an SPM generated design matrix and requires
+    intensity parameters. This implies that one must run
+    :ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
+    and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior
+    to running this or provide an SPM.mat file and intensity parameters
+    through some other means.
+
+    Examples
+    --------
+
+    >>> sc = StimulusCorrelation()
+    >>> sc.inputs.realignment_parameters = 'functional.par'
+    >>> sc.inputs.intensity_values = 'functional.rms'
+    >>> sc.inputs.spm_mat_file = 'SPM.mat'
+    >>> sc.inputs.concatenated_design = False
+    >>> sc.run() # doctest: +SKIP
+
+    """
+
+    input_spec = StimCorrInputSpec
+    output_spec = StimCorrOutputSpec
+
+    def _get_output_filenames(self, motionfile, output_dir):
+        """Generate output files based on motion filenames
+
+        Parameters
+        ----------
+        motionfile: file/string
+            Filename for motion parameter file
+        output_dir: string
+            output directory in which the files will be generated
+        """
+        (_, filename) = os.path.split(motionfile)
+        (filename, _) = os.path.splitext(filename)
+        corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
+                                                     '_stimcorr.txt')))
+        return corrfile
+
+    def _stimcorr_core(self, motionfile, intensityfile, designmatrix,
+                       cwd=None):
+        """
+        Core routine for determining stimulus correlation
+
+        """
+        if not cwd:
+            cwd = os.getcwd()
+        # read in motion parameters
+        mc_in = np.loadtxt(motionfile)
+        g_in = np.loadtxt(intensityfile)
+        g_in.shape = g_in.shape[0], 1
+        dcol = designmatrix.shape[1]
+        mccol = mc_in.shape[1]
+        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
+        cm = np.corrcoef(concat_matrix, rowvar=0)
+        corrfile = self._get_output_filenames(motionfile, cwd)
+        # write output to outputfile
+        file = open(corrfile, 'w')
+        file.write("Stats for:\n")
+        file.write("Stimulus correlated motion:\n%s\n" % motionfile)
+        for i in range(dcol):
+            file.write("SCM.%d:" % i)
+            for v in cm[i, dcol + np.arange(mccol)]:
+                file.write(" %.2f" % v)
+            file.write('\n')
+        file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
+        for i in range(dcol):
+            file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
+        file.close()
+
+    def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
+        """
+        Parameters
+        ----------
+        spmmat: scipy matlab object
+            full SPM.mat file loaded into a scipy object
+        sessidx: int
+            index to session that needs to be extracted.
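+        rows: array-like, optional
+            0-based row indices of this session's scans; if None, the
+            rows recorded for the session in SPM.mat are used
+
+        Returns
+        -------
+        outmatrix: np.ndarray
+            columns of the design matrix for this session's conditions,
+            restricted to the requested rows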
+ """ + designmatrix = spmmat['SPM'][0][0].xX[0][0].X + U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0] + if rows is None: + rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1 + cols = (spmmat['SPM'][0][0].Sess[0][sessidx].col[0][list( + range(len(U)))] - 1) + outmatrix = designmatrix.take( + rows.tolist(), axis=0).take( + cols.tolist(), axis=1) + return outmatrix + + def _run_interface(self, runtime): + """Execute this module. + """ + motparamlist = self.inputs.realignment_parameters + intensityfiles = self.inputs.intensity_values + spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) + nrows = [] + for i in range(len(motparamlist)): + sessidx = i + rows = None + if self.inputs.concatenated_design: + sessidx = 0 + mc_in = np.loadtxt(motparamlist[i]) + rows = np.sum(nrows) + np.arange(mc_in.shape[0]) + nrows.append(mc_in.shape[0]) + matrix = self._get_spm_submatrix(spmmat, sessidx, rows) + self._stimcorr_core(motparamlist[i], intensityfiles[i], matrix, + os.getcwd()) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + files = [] + for i, f in enumerate(self.inputs.realignment_parameters): + files.insert(i, self._get_output_filenames(f, os.getcwd())) + if files: + outputs['stimcorr_files'] = files + return outputs diff --git a/nipype/algorithms/stats.py b/nipype/algorithms/stats.py new file mode 100644 index 0000000000..51a3bc9088 --- /dev/null +++ b/nipype/algorithms/stats.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Managing statistical maps +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +import nibabel as nb +import numpy as np + +from ..interfaces.base import ( + BaseInterfaceInputSpec, TraitedSpec, SimpleInterface, + traits, InputMultiPath, File +) +from ..utils.filemanip import split_filename + + +class ActivationCountInputSpec(BaseInterfaceInputSpec): + in_files = InputMultiPath(File(exists=True), mandatory=True, + desc='input file, generally a list of z-stat maps') + threshold = traits.Float( + mandatory=True, desc='binarization threshold. E.g. 
a threshold of 1.65 '
+        'corresponds to a two-sided Z-test of p<.10')
+
+
+class ActivationCountOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='output activation count map')
+    acm_pos = File(exists=True, desc='positive activation count map')
+    acm_neg = File(exists=True, desc='negative activation count map')
+
+
+class ActivationCount(SimpleInterface):
+    """
+    Calculate simple Activation Count Maps
+
+    Adapted from: https://github.com/poldracklab/CNP_task_analysis/\
+    blob/61c27f5992db9d8800884f8ffceb73e6957db8af/CNP_2nd_level_ACM.py
+    """
+    input_spec = ActivationCountInputSpec
+    output_spec = ActivationCountOutputSpec
+
+    def _run_interface(self, runtime):
+        allmaps = nb.concat_images(self.inputs.in_files).get_data()
+        acm_pos = np.mean(allmaps > self.inputs.threshold,
+                          axis=3, dtype=np.float32)
+        acm_neg = np.mean(allmaps < -1.0 * self.inputs.threshold,
+                          axis=3, dtype=np.float32)
+        acm_diff = acm_pos - acm_neg
+
+        template_fname = self.inputs.in_files[0]
+        ext = split_filename(template_fname)[2]
+        fname_fmt = os.path.join(runtime.cwd, 'acm_{}' + ext).format
+
+        self._results['out_file'] = fname_fmt('diff')
+        self._results['acm_pos'] = fname_fmt('pos')
+        self._results['acm_neg'] = fname_fmt('neg')
+
+        img = nb.load(template_fname)
+        img.__class__(acm_diff, img.affine, img.header).to_filename(
+            self._results['out_file'])
+        img.__class__(acm_pos, img.affine, img.header).to_filename(
+            self._results['acm_pos'])
+        img.__class__(acm_neg, img.affine, img.header).to_filename(
+            self._results['acm_neg'])
+
+        return runtime
diff --git a/nipype/algorithms/tests/__init__.py b/nipype/algorithms/tests/__init__.py
new file mode 100644
index 0000000000..99fb243f19
--- /dev/null
+++ b/nipype/algorithms/tests/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
diff --git a/nipype/algorithms/tests/test_CompCor.py b/nipype/algorithms/tests/test_CompCor.py
new file mode 100644
index 0000000000..488ad3c960
--- /dev/null
+++ b/nipype/algorithms/tests/test_CompCor.py
@@ -0,0 +1,195 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+import os
+
+import nibabel as nb
+import numpy as np
+
+import pytest
+from ...testing import utils
+from ..confounds import CompCor, TCompCor, ACompCor
+
+
+class TestCompCor():
+    ''' Note: Tests currently do a poor job of testing functionality '''
+
+    filenames = {
+        'functionalnii': 'compcorfunc.nii',
+        'masknii': 'compcormask.nii',
+        'masknii2': 'compcormask2.nii',
+        'components_file': None
+    }
+
+    @pytest.fixture(autouse=True)
+    def setup_class(self, tmpdir):
+        # setup
+        tmpdir.chdir()
+        noise = np.fromfunction(self.fake_noise_fun, self.fake_data.shape)
+        self.realigned_file = utils.save_toy_nii(
+            self.fake_data + noise, self.filenames['functionalnii'])
+        mask = np.ones(self.fake_data.shape[:3])
+        mask[0, 0, 0] = 0
+        mask[0, 0, 1] = 0
+        mask1 = utils.save_toy_nii(mask, self.filenames['masknii'])
+
+        other_mask = np.ones(self.fake_data.shape[:3])
+        other_mask[0, 1, 0] = 0
+        other_mask[1, 1, 0] = 0
+        mask2 = utils.save_toy_nii(other_mask, self.filenames['masknii2'])
+
+        self.mask_files = [mask1, mask2]
+
+    def test_compcor(self):
+        expected_components = [
+            ['-0.1989607212', '-0.5753813646'],
+            ['0.5692369697', '0.5674945949'],
+            ['-0.6662573243', '0.4675843432'],
+            ['0.4206466244', '-0.3361270124'],
+            ['-0.1246655485', '-0.1235705610'],
+        ]
+
+        self.run_cc(
+            CompCor(
+                
realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_index=0), expected_components) + + self.run_cc( + ACompCor( + realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_index=0, + components_file='acc_components_file'), expected_components, + 'aCompCor') + + def test_tcompcor(self): + ccinterface = TCompCor( + realigned_file=self.realigned_file, percentile_threshold=0.75) + self.run_cc(ccinterface, [['-0.1114536190', '-0.4632908609'], [ + '0.4566907310', '0.6983205193' + ], ['-0.7132557407', '0.1340170559'], [ + '0.5022537643', '-0.5098322262' + ], ['-0.1342351356', '0.1407855119']], 'tCompCor') + + def test_tcompcor_no_percentile(self): + ccinterface = TCompCor(realigned_file=self.realigned_file) + ccinterface.run() + + mask = nb.load('mask_000.nii.gz').get_data() + num_nonmasked_voxels = np.count_nonzero(mask) + assert num_nonmasked_voxels == 1 + + def test_compcor_no_regress_poly(self): + self.run_cc( + CompCor( + realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_index=0, + pre_filter=False), [['0.4451946442', '-0.7683311482'], [ + '-0.4285129505', '-0.0926034137' + ], ['0.5721540256', '0.5608764842'], [ + '-0.5367548139', '0.0059943226' + ], ['-0.0520809054', '0.2940637551']]) + + def test_tcompcor_asymmetric_dim(self): + asymmetric_shape = (2, 3, 4, 5) + asymmetric_data = utils.save_toy_nii( + np.zeros(asymmetric_shape), 'asymmetric.nii') + + TCompCor(realigned_file=asymmetric_data).run() + assert nb.load( + 'mask_000.nii.gz').get_data().shape == asymmetric_shape[:3] + + def test_compcor_bad_input_shapes(self): + # dim 0 is < dim 0 of self.mask_files (2) + shape_less_than = (1, 2, 2, 5) + # dim 0 is > dim 0 of self.mask_files (2) + shape_more_than = (3, 3, 3, 5) + + for data_shape in (shape_less_than, shape_more_than): + data_file = utils.save_toy_nii(np.zeros(data_shape), 'temp.nii') + interface = CompCor( + realigned_file=data_file, mask_files=self.mask_files[0]) + with pytest.raises(ValueError, message="Dimension mismatch"): + interface.run() + + def test_tcompcor_bad_input_dim(self): + bad_dims = (2, 2, 2) + data_file = utils.save_toy_nii(np.zeros(bad_dims), 'temp.nii') + interface = TCompCor(realigned_file=data_file) + with pytest.raises(ValueError, message='Not a 4D file'): + interface.run() + + def test_tcompcor_merge_intersect_masks(self): + for method in ['union', 'intersect']: + TCompCor( + realigned_file=self.realigned_file, + mask_files=self.mask_files, + merge_method=method).run() + if method == 'union': + assert np.array_equal( + nb.load('mask_000.nii.gz').get_data(), + ([[[0, 0], [0, 0]], [[0, 0], [1, 0]]])) + if method == 'intersect': + assert np.array_equal( + nb.load('mask_000.nii.gz').get_data(), + ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]])) + + def test_tcompcor_index_mask(self): + TCompCor( + realigned_file=self.realigned_file, + mask_files=self.mask_files, + mask_index=1).run() + assert np.array_equal( + nb.load('mask_000.nii.gz').get_data(), + ([[[0, 0], [0, 0]], [[0, 1], [0, 0]]])) + + def test_tcompcor_multi_mask_no_index(self): + interface = TCompCor( + realigned_file=self.realigned_file, mask_files=self.mask_files) + with pytest.raises(ValueError, message='more than one mask file'): + interface.run() + + def run_cc(self, + ccinterface, + expected_components, + expected_header='CompCor'): + # run + ccresult = ccinterface.run() + + # assert + expected_file = ccinterface._list_outputs()['components_file'] + assert ccresult.outputs.components_file == expected_file + assert os.path.exists(expected_file) + 
assert os.path.getsize(expected_file) > 0
+        assert ccinterface.inputs.num_components == 6
+
+        with open(ccresult.outputs.components_file, 'r') as components_file:
+            expected_n_components = min(ccinterface.inputs.num_components,
+                                        self.fake_data.shape[3])
+
+            components_data = [line.split('\t') for line in components_file]
+
+            # the first item will be '#', we can throw it out
+            header = components_data.pop(0)
+            expected_header = [
+                expected_header + '{:02d}'.format(i)
+                for i in range(expected_n_components)
+            ]
+            for i, heading in enumerate(header):
+                assert expected_header[i] in heading
+
+            num_got_timepoints = len(components_data)
+            assert num_got_timepoints == self.fake_data.shape[3]
+            for index, timepoint in enumerate(components_data):
+                assert (len(timepoint) == ccinterface.inputs.num_components
+                        or len(timepoint) == self.fake_data.shape[3])
+                assert timepoint[:2] == expected_components[index]
+        return ccresult
+
+    @staticmethod
+    def fake_noise_fun(i, j, l, m):
+        return m * i + l - j
+
+    fake_data = np.array([[[[8, 5, 3, 8, 0], [6, 7, 4, 7, 1]],
+                           [[7, 9, 1, 6, 5], [0, 7, 4, 7, 7]]],
+                          [[[2, 4, 5, 7, 0], [1, 7, 0, 5, 4]],
+                           [[7, 3, 9, 0, 4], [9, 4, 1, 5, 0]]]])
diff --git a/nipype/algorithms/tests/test_ErrorMap.py b/nipype/algorithms/tests/test_ErrorMap.py
new file mode 100644
index 0000000000..cfd30b0b74
--- /dev/null
+++ b/nipype/algorithms/tests/test_ErrorMap.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import pytest
+from nipype.testing import example_data
+from nipype.algorithms.metrics import ErrorMap
+import nibabel as nb
+import numpy as np
+import os
+
+
+def test_errormap(tmpdir):
+
+    # Single-Spectral
+    # Make two fake 2*2*2 voxel volumes
+    # John von Neumann's birthday
+    volume1 = np.array([[[2.0, 8.0], [1.0, 2.0]], [[1.0, 9.0], [0.0, 3.0]]])
+    # Alan Turing's birthday
+    volume2 = np.array([[[0.0, 7.0], [2.0, 3.0]], [[1.0, 9.0], [1.0, 2.0]]])
+    mask = np.array([[[1, 0], [0, 1]], [[1, 0], [0, 1]]])
+
+    img1 = nb.Nifti1Image(volume1, np.eye(4))
+    img2 = nb.Nifti1Image(volume2, np.eye(4))
+    maskimg = nb.Nifti1Image(mask, np.eye(4))
+
+    nb.save(img1, tmpdir.join('von.nii.gz').strpath)
+    nb.save(img2, tmpdir.join('alan.nii.gz').strpath)
+    nb.save(maskimg, tmpdir.join('mask.nii.gz').strpath)
+
+    # Default metric
+    errmap = ErrorMap()
+    errmap.inputs.in_tst = tmpdir.join('von.nii.gz').strpath
+    errmap.inputs.in_ref = tmpdir.join('alan.nii.gz').strpath
+    # set the output filename on the input spec, not on the interface itself
+    errmap.inputs.out_map = tmpdir.join('out_map.nii.gz').strpath
+    result = errmap.run()
+    assert result.outputs.distance == 1.125
+
+    # Square metric
+    errmap.inputs.metric = 'sqeuclidean'
+    result = errmap.run()
+    assert result.outputs.distance == 1.125
+
+    # Linear metric
+    errmap.inputs.metric = 'euclidean'
+    result = errmap.run()
+    assert result.outputs.distance == 0.875
+
+    # Masked
+    errmap.inputs.mask = tmpdir.join('mask.nii.gz').strpath
+    result = errmap.run()
+    assert result.outputs.distance == 1.0
+
+    # Multi-Spectral
+    # Raymond Vahan Damadian's birthday
+    volume3 = np.array([[[1.0, 6.0], [0.0, 3.0]], [[1.0, 9.0], [3.0, 6.0]]])
+
+    msvolume1 = np.zeros(shape=(2, 2, 2, 2))
+    msvolume1[:, :, :, 0] = volume1
+    msvolume1[:, :, :, 1] = volume3
+    msimg1 = nb.Nifti1Image(msvolume1, np.eye(4))
+
+    msvolume2 = np.zeros(shape=(2, 2, 2, 2))
+    msvolume2[:, :, :, 0] = volume3
+    msvolume2[:, :, :, 1] = volume1
+    msimg2 = nb.Nifti1Image(msvolume2, np.eye(4))
+
+    nb.save(msimg1, tmpdir.join('von-ray.nii.gz').strpath)
+    nb.save(msimg2, tmpdir.join('alan-ray.nii.gz').strpath)
+
+    errmap.inputs.in_tst = 
tmpdir.join('von-ray.nii.gz').strpath + errmap.inputs.in_ref = tmpdir.join('alan-ray.nii.gz').strpath + errmap.inputs.metric = 'sqeuclidean' + result = errmap.run() + assert result.outputs.distance == 5.5 + + errmap.inputs.metric = 'euclidean' + result = errmap.run() + assert result.outputs.distance == np.float32(1.25 * (2**0.5)) diff --git a/nipype/algorithms/tests/test_Overlap.py b/nipype/algorithms/tests/test_Overlap.py new file mode 100644 index 0000000000..786a7328b8 --- /dev/null +++ b/nipype/algorithms/tests/test_Overlap.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os + +from nipype.testing import (example_data) + +import numpy as np + + +def test_overlap(tmpdir): + from nipype.algorithms.metrics import Overlap + + def check_close(val1, val2): + import numpy.testing as npt + return npt.assert_almost_equal(val1, val2, decimal=3) + + in1 = example_data('segmentation0.nii.gz') + in2 = example_data('segmentation1.nii.gz') + + tmpdir.chdir() + overlap = Overlap() + overlap.inputs.volume1 = in1 + overlap.inputs.volume2 = in1 + res = overlap.run() + check_close(res.outputs.jaccard, 1.0) + + overlap = Overlap() + overlap.inputs.volume1 = in1 + overlap.inputs.volume2 = in2 + res = overlap.run() + check_close(res.outputs.jaccard, 0.99705) + + overlap = Overlap() + overlap.inputs.volume1 = in1 + overlap.inputs.volume2 = in2 + overlap.inputs.vol_units = 'mm' + res = overlap.run() + check_close(res.outputs.jaccard, 0.99705) + check_close(res.outputs.roi_voldiff, np.array([0.0063086, -0.0025506, + 0.0])) diff --git a/nipype/algorithms/tests/test_TSNR.py b/nipype/algorithms/tests/test_TSNR.py new file mode 100644 index 0000000000..1d192ec056 --- /dev/null +++ b/nipype/algorithms/tests/test_TSNR.py @@ -0,0 +1,130 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from ...testing import utils +from ..confounds import TSNR +from .. 
import misc + +import pytest +import numpy.testing as npt +import mock +import nibabel as nb +import numpy as np +import os + + +class TestTSNR(): + ''' Note: Tests currently do a poor job of testing functionality ''' + + in_filenames = { + 'in_file': 'tsnrinfile.nii', + } + + out_filenames = { # default output file names + 'detrended_file': 'detrend.nii.gz', + 'mean_file': 'mean.nii.gz', + 'stddev_file': 'stdev.nii.gz', + 'tsnr_file': 'tsnr.nii.gz' + } + + @pytest.fixture(autouse=True) + def setup_class(self, tmpdir): + # setup temp folder + tmpdir.chdir() + + utils.save_toy_nii(self.fake_data, self.in_filenames['in_file']) + + def test_tsnr(self): + # run + tsnrresult = TSNR(in_file=self.in_filenames['in_file']).run() + + # assert + self.assert_expected_outputs( + tsnrresult, { + 'mean_file': (2.8, 7.4), + 'stddev_file': (0.8, 2.9), + 'tsnr_file': (1.3, 9.25) + }) + + def test_tsnr_withpoly1(self): + # run + tsnrresult = TSNR( + in_file=self.in_filenames['in_file'], regress_poly=1).run() + + # assert + self.assert_expected_outputs_poly( + tsnrresult, { + 'detrended_file': (-0.1, 8.7), + 'mean_file': (2.8, 7.4), + 'stddev_file': (0.75, 2.75), + 'tsnr_file': (1.4, 9.9) + }) + + def test_tsnr_withpoly2(self): + # run + tsnrresult = TSNR( + in_file=self.in_filenames['in_file'], regress_poly=2).run() + + # assert + self.assert_expected_outputs_poly( + tsnrresult, { + 'detrended_file': (-0.22, 8.55), + 'mean_file': (2.8, 7.7), + 'stddev_file': (0.21, 2.4), + 'tsnr_file': (1.7, 35.9) + }) + + def test_tsnr_withpoly3(self): + # run + tsnrresult = TSNR( + in_file=self.in_filenames['in_file'], regress_poly=3).run() + + # assert + self.assert_expected_outputs_poly( + tsnrresult, { + 'detrended_file': (1.8, 7.95), + 'mean_file': (2.8, 7.7), + 'stddev_file': (0.1, 1.7), + 'tsnr_file': (2.6, 57.3) + }) + + @mock.patch('warnings.warn') + def test_warning(self, mock_warn): + ''' test that usage of misc.TSNR trips a warning to use + confounds.TSNR instead ''' + # run + misc.TSNR(in_file=self.in_filenames['in_file']) + + # assert + assert True in [ + args[0].count('confounds') > 0 + for _, args, _ in mock_warn.mock_calls + ] + + def assert_expected_outputs_poly(self, tsnrresult, expected_ranges): + assert os.path.basename(tsnrresult.outputs.detrended_file) == \ + self.out_filenames['detrended_file'] + self.assert_expected_outputs(tsnrresult, expected_ranges) + + def assert_expected_outputs(self, tsnrresult, expected_ranges): + self.assert_default_outputs(tsnrresult.outputs) + self.assert_unchanged(expected_ranges) + + def assert_default_outputs(self, outputs): + assert os.path.basename(outputs.mean_file) == \ + self.out_filenames['mean_file'] + assert os.path.basename(outputs.stddev_file) == \ + self.out_filenames['stddev_file'] + assert os.path.basename(outputs.tsnr_file) == \ + self.out_filenames['tsnr_file'] + + def assert_unchanged(self, expected_ranges): + for key, (min_, max_) in expected_ranges.items(): + data = np.asarray(nb.load(self.out_filenames[key]).dataobj) + npt.assert_almost_equal(np.amin(data), min_, decimal=1) + npt.assert_almost_equal(np.amax(data), max_, decimal=1) + + fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], + [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], + [[[9, 7, 5, 5, 7], [7, 8, 4, 8, 4]], + [[0, 4, 7, 1, 7], [6, 8, 8, 8, 7]]]]) diff --git a/nipype/algorithms/tests/test_auto_ACompCor.py b/nipype/algorithms/tests/test_auto_ACompCor.py new file mode 100644 index 0000000000..eadbf3e126 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ACompCor.py @@ -0,0 +1,46 @@ +# 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..confounds import ACompCor + + +def test_ACompCor_inputs(): + input_map = dict( + components_file=dict(usedefault=True, ), + header_prefix=dict(), + high_pass_cutoff=dict(usedefault=True, ), + ignore_initial_volumes=dict(usedefault=True, ), + mask_files=dict(), + mask_index=dict( + requires=['mask_files'], + xor=['merge_method'], + ), + merge_method=dict( + requires=['mask_files'], + xor=['mask_index'], + ), + num_components=dict(usedefault=True, ), + pre_filter=dict(usedefault=True, ), + realigned_file=dict(mandatory=True, ), + regress_poly_degree=dict(usedefault=True, ), + repetition_time=dict(), + save_pre_filter=dict(), + use_regress_poly=dict( + deprecated='0.15.0', + new_name='pre_filter', + ), + ) + inputs = ACompCor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ACompCor_outputs(): + output_map = dict( + components_file=dict(), + pre_filter_file=dict(), + ) + outputs = ACompCor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ActivationCount.py b/nipype/algorithms/tests/test_auto_ActivationCount.py new file mode 100644 index 0000000000..d6e3ff7165 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ActivationCount.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..stats import ActivationCount + + +def test_ActivationCount_inputs(): + input_map = dict( + in_files=dict(mandatory=True, ), + threshold=dict(mandatory=True, ), + ) + inputs = ActivationCount.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ActivationCount_outputs(): + output_map = dict( + acm_neg=dict(), + acm_pos=dict(), + out_file=dict(), + ) + outputs = ActivationCount.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_AddCSVColumn.py b/nipype/algorithms/tests/test_auto_AddCSVColumn.py new file mode 100644 index 0000000000..feedcf46e8 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_AddCSVColumn.py @@ -0,0 +1,24 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import AddCSVColumn + + +def test_AddCSVColumn_inputs(): + input_map = dict( + extra_column_heading=dict(), + extra_field=dict(), + in_file=dict(mandatory=True, ), + out_file=dict(usedefault=True, ), + ) + inputs = AddCSVColumn.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AddCSVColumn_outputs(): + output_map = dict(csv_file=dict(), ) + outputs = AddCSVColumn.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_AddCSVRow.py b/nipype/algorithms/tests/test_auto_AddCSVRow.py new file mode 100644 index 0000000000..4666a147d2 --- /dev/null +++ 
b/nipype/algorithms/tests/test_auto_AddCSVRow.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import AddCSVRow + + +def test_AddCSVRow_inputs(): + input_map = dict( + _outputs=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + ) + inputs = AddCSVRow.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AddCSVRow_outputs(): + output_map = dict(csv_file=dict(), ) + outputs = AddCSVRow.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_AddNoise.py b/nipype/algorithms/tests/test_auto_AddNoise.py new file mode 100644 index 0000000000..6e0655a93e --- /dev/null +++ b/nipype/algorithms/tests/test_auto_AddNoise.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import AddNoise + + +def test_AddNoise_inputs(): + input_map = dict( + bg_dist=dict( + mandatory=True, + usedefault=True, + ), + dist=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict(mandatory=True, ), + in_mask=dict(), + out_file=dict(), + snr=dict(usedefault=True, ), + ) + inputs = AddNoise.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AddNoise_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AddNoise.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ArtifactDetect.py b/nipype/algorithms/tests/test_auto_ArtifactDetect.py new file mode 100644 index 0000000000..85c57b8823 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ArtifactDetect.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..rapidart import ArtifactDetect + + +def test_ArtifactDetect_inputs(): + input_map = dict( + bound_by_brainmask=dict(usedefault=True, ), + global_threshold=dict(usedefault=True, ), + intersect_mask=dict(usedefault=True, ), + mask_file=dict(), + mask_threshold=dict(), + mask_type=dict(mandatory=True, ), + norm_threshold=dict( + mandatory=True, + xor=['rotation_threshold', 'translation_threshold'], + ), + parameter_source=dict(mandatory=True, ), + plot_type=dict(usedefault=True, ), + realigned_files=dict(mandatory=True, ), + realignment_parameters=dict(mandatory=True, ), + rotation_threshold=dict( + mandatory=True, + xor=['norm_threshold'], + ), + save_plot=dict(usedefault=True, ), + translation_threshold=dict( + mandatory=True, + xor=['norm_threshold'], + ), + use_differences=dict( + maxlen=2, + minlen=2, + usedefault=True, + ), + use_norm=dict( + requires=['norm_threshold'], + usedefault=True, + ), + zintensity_threshold=dict(mandatory=True, ), + ) + inputs = ArtifactDetect.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ArtifactDetect_outputs(): + output_map = dict( + displacement_files=dict(), + intensity_files=dict(), + mask_files=dict(), + norm_files=dict(), + outlier_files=dict(), 
+ plot_files=dict(), + statistic_files=dict(), + ) + outputs = ArtifactDetect.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_CalculateMedian.py b/nipype/algorithms/tests/test_auto_CalculateMedian.py new file mode 100644 index 0000000000..1f9aa6cd4d --- /dev/null +++ b/nipype/algorithms/tests/test_auto_CalculateMedian.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import CalculateMedian + + +def test_CalculateMedian_inputs(): + input_map = dict( + in_files=dict(), + median_file=dict(), + median_per_file=dict(usedefault=True, ), + ) + inputs = CalculateMedian.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CalculateMedian_outputs(): + output_map = dict(median_files=dict(), ) + outputs = CalculateMedian.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py new file mode 100644 index 0000000000..3dbbd772c8 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_CalculateNormalizedMoments.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import CalculateNormalizedMoments + + +def test_CalculateNormalizedMoments_inputs(): + input_map = dict( + moment=dict(mandatory=True, ), + timeseries_file=dict(mandatory=True, ), + ) + inputs = CalculateNormalizedMoments.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CalculateNormalizedMoments_outputs(): + output_map = dict(moments=dict(), ) + outputs = CalculateNormalizedMoments.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ComputeDVARS.py b/nipype/algorithms/tests/test_auto_ComputeDVARS.py new file mode 100644 index 0000000000..ca263c77ac --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ComputeDVARS.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..confounds import ComputeDVARS + + +def test_ComputeDVARS_inputs(): + input_map = dict( + figdpi=dict(usedefault=True, ), + figformat=dict(usedefault=True, ), + figsize=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + in_mask=dict(mandatory=True, ), + intensity_normalization=dict(usedefault=True, ), + remove_zerovariance=dict(usedefault=True, ), + save_all=dict(usedefault=True, ), + save_nstd=dict(usedefault=True, ), + save_plot=dict(usedefault=True, ), + save_std=dict(usedefault=True, ), + save_vxstd=dict(usedefault=True, ), + series_tr=dict(), + ) + inputs = ComputeDVARS.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeDVARS_outputs(): + output_map = dict( + avg_nstd=dict(), + 
avg_std=dict(), + avg_vxstd=dict(), + fig_nstd=dict(), + fig_std=dict(), + fig_vxstd=dict(), + out_all=dict(), + out_nstd=dict(), + out_std=dict(), + out_vxstd=dict(), + ) + outputs = ComputeDVARS.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py new file mode 100644 index 0000000000..0308653786 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ComputeMeshWarp.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..mesh import ComputeMeshWarp + + +def test_ComputeMeshWarp_inputs(): + input_map = dict( + metric=dict(usedefault=True, ), + out_file=dict(usedefault=True, ), + out_warp=dict(usedefault=True, ), + surface1=dict(mandatory=True, ), + surface2=dict(mandatory=True, ), + weighting=dict(usedefault=True, ), + ) + inputs = ComputeMeshWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeMeshWarp_outputs(): + output_map = dict( + distance=dict(), + out_file=dict(), + out_warp=dict(), + ) + outputs = ComputeMeshWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_CreateNifti.py b/nipype/algorithms/tests/test_auto_CreateNifti.py new file mode 100644 index 0000000000..f8bf8a405f --- /dev/null +++ b/nipype/algorithms/tests/test_auto_CreateNifti.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import CreateNifti + + +def test_CreateNifti_inputs(): + input_map = dict( + affine=dict(), + data_file=dict(mandatory=True, ), + header_file=dict(mandatory=True, ), + ) + inputs = CreateNifti.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateNifti_outputs(): + output_map = dict(nifti_file=dict(), ) + outputs = CreateNifti.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_Distance.py b/nipype/algorithms/tests/test_auto_Distance.py new file mode 100644 index 0000000000..2c5d098d73 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_Distance.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import Distance + + +def test_Distance_inputs(): + input_map = dict( + mask_volume=dict(), + method=dict(usedefault=True, ), + volume1=dict(mandatory=True, ), + volume2=dict(mandatory=True, ), + ) + inputs = Distance.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Distance_outputs(): + output_map = dict( + distance=dict(), + histogram=dict(), + point1=dict(), + point2=dict(), + ) + outputs = Distance.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py new file mode 100644 index 0000000000..685dec61e8 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_FramewiseDisplacement.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..confounds import FramewiseDisplacement + + +def test_FramewiseDisplacement_inputs(): + input_map = dict( + figdpi=dict(usedefault=True, ), + figsize=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + normalize=dict(usedefault=True, ), + out_figure=dict(usedefault=True, ), + out_file=dict(usedefault=True, ), + parameter_source=dict(mandatory=True, ), + radius=dict(usedefault=True, ), + save_plot=dict(usedefault=True, ), + series_tr=dict(), + ) + inputs = FramewiseDisplacement.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FramewiseDisplacement_outputs(): + output_map = dict( + fd_average=dict(), + out_figure=dict(), + out_file=dict(), + ) + outputs = FramewiseDisplacement.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_FuzzyOverlap.py b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py new file mode 100644 index 0000000000..e9e28aaa44 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_FuzzyOverlap.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import FuzzyOverlap + + +def test_FuzzyOverlap_inputs(): + input_map = dict( + in_mask=dict(), + in_ref=dict(mandatory=True, ), + in_tst=dict(mandatory=True, ), + out_file=dict(usedefault=True, ), + weighting=dict(usedefault=True, ), + ) + inputs = FuzzyOverlap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FuzzyOverlap_outputs(): + output_map = dict( + class_fdi=dict(), + class_fji=dict(), + dice=dict(), + jaccard=dict(), + ) + outputs = FuzzyOverlap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_Gunzip.py b/nipype/algorithms/tests/test_auto_Gunzip.py new file mode 100644 index 0000000000..f12e1f9b45 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_Gunzip.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import Gunzip + + +def test_Gunzip_inputs(): + input_map = dict(in_file=dict(mandatory=True, ), ) + inputs = Gunzip.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Gunzip_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Gunzip.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ICC.py b/nipype/algorithms/tests/test_auto_ICC.py new file mode 100644 index 
0000000000..1a4a2b1517 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ICC.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..icc import ICC + + +def test_ICC_inputs(): + input_map = dict( + mask=dict(mandatory=True, ), + subjects_sessions=dict(mandatory=True, ), + ) + inputs = ICC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ICC_outputs(): + output_map = dict( + icc_map=dict(), + session_var_map=dict(), + subject_var_map=dict(), + ) + outputs = ICC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_Matlab2CSV.py b/nipype/algorithms/tests/test_auto_Matlab2CSV.py new file mode 100644 index 0000000000..fcc1648bf9 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_Matlab2CSV.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import Matlab2CSV + + +def test_Matlab2CSV_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + reshape_matrix=dict(usedefault=True, ), + ) + inputs = Matlab2CSV.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Matlab2CSV_outputs(): + output_map = dict(csv_files=dict(), ) + outputs = Matlab2CSV.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_MergeCSVFiles.py b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py new file mode 100644 index 0000000000..fd882e850a --- /dev/null +++ b/nipype/algorithms/tests/test_auto_MergeCSVFiles.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import MergeCSVFiles + + +def test_MergeCSVFiles_inputs(): + input_map = dict( + column_headings=dict(), + extra_column_heading=dict(), + extra_field=dict(), + in_files=dict(mandatory=True, ), + out_file=dict(usedefault=True, ), + row_heading_title=dict(usedefault=True, ), + row_headings=dict(), + ) + inputs = MergeCSVFiles.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MergeCSVFiles_outputs(): + output_map = dict(csv_file=dict(), ) + outputs = MergeCSVFiles.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_MergeROIs.py b/nipype/algorithms/tests/test_auto_MergeROIs.py new file mode 100644 index 0000000000..01b2b097a8 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_MergeROIs.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import MergeROIs + + +def test_MergeROIs_inputs(): + input_map = dict( + in_files=dict(), + in_index=dict(), + in_reference=dict(), + ) + inputs = MergeROIs.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + 
assert getattr(inputs.traits()[key], metakey) == value +def test_MergeROIs_outputs(): + output_map = dict(merged_file=dict(), ) + outputs = MergeROIs.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_MeshWarpMaths.py b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py new file mode 100644 index 0000000000..f89b16017b --- /dev/null +++ b/nipype/algorithms/tests/test_auto_MeshWarpMaths.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..mesh import MeshWarpMaths + + +def test_MeshWarpMaths_inputs(): + input_map = dict( + float_trait=dict(), + in_surf=dict(mandatory=True, ), + operation=dict(usedefault=True, ), + operator=dict( + mandatory=True, + usedefault=True, + ), + out_file=dict(usedefault=True, ), + out_warp=dict(usedefault=True, ), + ) + inputs = MeshWarpMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MeshWarpMaths_outputs(): + output_map = dict( + out_file=dict(), + out_warp=dict(), + ) + outputs = MeshWarpMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_ModifyAffine.py b/nipype/algorithms/tests/test_auto_ModifyAffine.py new file mode 100644 index 0000000000..a0c4150a98 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_ModifyAffine.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import ModifyAffine + + +def test_ModifyAffine_inputs(): + input_map = dict( + transformation_matrix=dict(usedefault=True, ), + volumes=dict(mandatory=True, ), + ) + inputs = ModifyAffine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ModifyAffine_outputs(): + output_map = dict(transformed_volumes=dict(), ) + outputs = ModifyAffine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py new file mode 100644 index 0000000000..b86fe3df03 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_NonSteadyStateDetector.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..confounds import NonSteadyStateDetector + + +def test_NonSteadyStateDetector_inputs(): + input_map = dict(in_file=dict(mandatory=True, ), ) + inputs = NonSteadyStateDetector.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NonSteadyStateDetector_outputs(): + output_map = dict(n_volumes_to_discard=dict(), ) + outputs = NonSteadyStateDetector.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py new file mode 100644 index 0000000000..4c9a5584d0 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_NormalizeProbabilityMapSet.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import NormalizeProbabilityMapSet + + +def test_NormalizeProbabilityMapSet_inputs(): + input_map = dict( + in_files=dict(), + in_mask=dict(), + ) + inputs = NormalizeProbabilityMapSet.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NormalizeProbabilityMapSet_outputs(): + output_map = dict(out_files=dict(), ) + outputs = NormalizeProbabilityMapSet.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_P2PDistance.py b/nipype/algorithms/tests/test_auto_P2PDistance.py new file mode 100644 index 0000000000..9948e3675e --- /dev/null +++ b/nipype/algorithms/tests/test_auto_P2PDistance.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..mesh import P2PDistance + + +def test_P2PDistance_inputs(): + input_map = dict( + metric=dict(usedefault=True, ), + out_file=dict(usedefault=True, ), + out_warp=dict(usedefault=True, ), + surface1=dict(mandatory=True, ), + surface2=dict(mandatory=True, ), + weighting=dict(usedefault=True, ), + ) + inputs = P2PDistance.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_P2PDistance_outputs(): + output_map = dict( + distance=dict(), + out_file=dict(), + out_warp=dict(), + ) + outputs = P2PDistance.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_PickAtlas.py b/nipype/algorithms/tests/test_auto_PickAtlas.py new file mode 100644 index 0000000000..3b15c302e5 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_PickAtlas.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import PickAtlas + + +def test_PickAtlas_inputs(): + input_map = dict( + atlas=dict(mandatory=True, ), + dilation_size=dict(usedefault=True, ), + hemi=dict(usedefault=True, ), + labels=dict(mandatory=True, ), + output_file=dict(), + ) + inputs = PickAtlas.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PickAtlas_outputs(): + output_map = dict(mask_file=dict(), ) + outputs = PickAtlas.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_Similarity.py b/nipype/algorithms/tests/test_auto_Similarity.py new file mode 100644 index 0000000000..b2ad79b5eb --- /dev/null +++ b/nipype/algorithms/tests/test_auto_Similarity.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ 
import unicode_literals +from ..metrics import Similarity + + +def test_Similarity_inputs(): + input_map = dict( + mask1=dict(), + mask2=dict(), + metric=dict(usedefault=True, ), + volume1=dict(mandatory=True, ), + volume2=dict(mandatory=True, ), + ) + inputs = Similarity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Similarity_outputs(): + output_map = dict(similarity=dict(), ) + outputs = Similarity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_SimpleThreshold.py b/nipype/algorithms/tests/test_auto_SimpleThreshold.py new file mode 100644 index 0000000000..4e34d86799 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_SimpleThreshold.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import SimpleThreshold + + +def test_SimpleThreshold_inputs(): + input_map = dict( + threshold=dict(mandatory=True, ), + volumes=dict(mandatory=True, ), + ) + inputs = SimpleThreshold.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SimpleThreshold_outputs(): + output_map = dict(thresholded_volumes=dict(), ) + outputs = SimpleThreshold.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_SpecifyModel.py b/nipype/algorithms/tests/test_auto_SpecifyModel.py new file mode 100644 index 0000000000..452a048764 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_SpecifyModel.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..modelgen import SpecifyModel + + +def test_SpecifyModel_inputs(): + input_map = dict( + event_files=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict(mandatory=True, ), + input_units=dict(mandatory=True, ), + outlier_files=dict(copyfile=False, ), + parameter_source=dict(usedefault=True, ), + realignment_parameters=dict(copyfile=False, ), + subject_info=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + time_repetition=dict(mandatory=True, ), + ) + inputs = SpecifyModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SpecifyModel_outputs(): + output_map = dict(session_info=dict(), ) + outputs = SpecifyModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_SpecifySPMModel.py b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py new file mode 100644 index 0000000000..1f3ec7058d --- /dev/null +++ b/nipype/algorithms/tests/test_auto_SpecifySPMModel.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..modelgen import SpecifySPMModel + + +def 
test_SpecifySPMModel_inputs(): + input_map = dict( + concatenate_runs=dict(usedefault=True, ), + event_files=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict(mandatory=True, ), + input_units=dict(mandatory=True, ), + outlier_files=dict(copyfile=False, ), + output_units=dict(usedefault=True, ), + parameter_source=dict(usedefault=True, ), + realignment_parameters=dict(copyfile=False, ), + subject_info=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + time_repetition=dict(mandatory=True, ), + ) + inputs = SpecifySPMModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SpecifySPMModel_outputs(): + output_map = dict(session_info=dict(), ) + outputs = SpecifySPMModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_SpecifySparseModel.py b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py new file mode 100644 index 0000000000..93fc035fc2 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_SpecifySparseModel.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..modelgen import SpecifySparseModel + + +def test_SpecifySparseModel_inputs(): + input_map = dict( + event_files=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + functional_runs=dict( + copyfile=False, + mandatory=True, + ), + high_pass_filter_cutoff=dict(mandatory=True, ), + input_units=dict(mandatory=True, ), + model_hrf=dict(), + outlier_files=dict(copyfile=False, ), + parameter_source=dict(usedefault=True, ), + realignment_parameters=dict(copyfile=False, ), + save_plot=dict(), + scale_regressors=dict(usedefault=True, ), + scan_onset=dict(usedefault=True, ), + stimuli_as_impulses=dict(usedefault=True, ), + subject_info=dict( + mandatory=True, + xor=['subject_info', 'event_files'], + ), + time_acquisition=dict(mandatory=True, ), + time_repetition=dict(mandatory=True, ), + use_temporal_deriv=dict(requires=['model_hrf'], ), + volumes_in_cluster=dict(usedefault=True, ), + ) + inputs = SpecifySparseModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SpecifySparseModel_outputs(): + output_map = dict( + session_info=dict(), + sparse_png_file=dict(), + sparse_svg_file=dict(), + ) + outputs = SpecifySparseModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_SplitROIs.py b/nipype/algorithms/tests/test_auto_SplitROIs.py new file mode 100644 index 0000000000..963926666d --- /dev/null +++ b/nipype/algorithms/tests/test_auto_SplitROIs.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..misc import SplitROIs + + +def test_SplitROIs_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + in_mask=dict(), + roi_size=dict(), + ) + inputs = SplitROIs.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SplitROIs_outputs(): + output_map = dict( + out_files=dict(), + out_index=dict(), + out_masks=dict(), + ) + outputs = SplitROIs.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_StimulusCorrelation.py b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py new file mode 100644 index 0000000000..8c7ef276d9 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_StimulusCorrelation.py @@ -0,0 +1,24 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..rapidart import StimulusCorrelation + + +def test_StimulusCorrelation_inputs(): + input_map = dict( + concatenated_design=dict(mandatory=True, ), + intensity_values=dict(mandatory=True, ), + realignment_parameters=dict(mandatory=True, ), + spm_mat_file=dict(mandatory=True, ), + ) + inputs = StimulusCorrelation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_StimulusCorrelation_outputs(): + output_map = dict(stimcorr_files=dict(), ) + outputs = StimulusCorrelation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_TCompCor.py b/nipype/algorithms/tests/test_auto_TCompCor.py new file mode 100644 index 0000000000..44b01b2972 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_TCompCor.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..confounds import TCompCor + + +def test_TCompCor_inputs(): + input_map = dict( + components_file=dict(usedefault=True, ), + header_prefix=dict(), + high_pass_cutoff=dict(usedefault=True, ), + ignore_initial_volumes=dict(usedefault=True, ), + mask_files=dict(), + mask_index=dict( + requires=['mask_files'], + xor=['merge_method'], + ), + merge_method=dict( + requires=['mask_files'], + xor=['mask_index'], + ), + num_components=dict(usedefault=True, ), + percentile_threshold=dict(usedefault=True, ), + pre_filter=dict(usedefault=True, ), + realigned_file=dict(mandatory=True, ), + regress_poly_degree=dict(usedefault=True, ), + repetition_time=dict(), + save_pre_filter=dict(), + use_regress_poly=dict( + deprecated='0.15.0', + new_name='pre_filter', + ), + ) + inputs = TCompCor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCompCor_outputs(): + output_map = dict( + components_file=dict(), + high_variance_masks=dict(), + pre_filter_file=dict(), + ) + outputs = TCompCor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py new file mode 100644 index 0000000000..9fc2d17aba --- /dev/null +++ b/nipype/algorithms/tests/test_auto_TVTKBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..mesh import 
TVTKBaseInterface + + +def test_TVTKBaseInterface_inputs(): + input_map = dict() + inputs = TVTKBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_auto_WarpPoints.py b/nipype/algorithms/tests/test_auto_WarpPoints.py new file mode 100644 index 0000000000..b6965065a2 --- /dev/null +++ b/nipype/algorithms/tests/test_auto_WarpPoints.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..mesh import WarpPoints + + +def test_WarpPoints_inputs(): + input_map = dict( + interp=dict( + mandatory=True, + usedefault=True, + ), + out_points=dict( + keep_extension=True, + name_source='points', + name_template='%s_warped', + output_name='out_points', + ), + points=dict(mandatory=True, ), + warp=dict(mandatory=True, ), + ) + inputs = WarpPoints.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpPoints_outputs(): + output_map = dict(out_points=dict(), ) + outputs = WarpPoints.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/algorithms/tests/test_confounds.py b/nipype/algorithms/tests/test_confounds.py new file mode 100644 index 0000000000..2c601374ab --- /dev/null +++ b/nipype/algorithms/tests/test_confounds.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +import os + +from io import open + +import pytest +from nipype.testing import example_data +from nipype.algorithms.confounds import FramewiseDisplacement, ComputeDVARS, \ + is_outlier +import numpy as np + +nonitime = True +try: + import nitime + nonitime = False +except ImportError: + pass + + +def test_fd(tmpdir): + tempdir = tmpdir.strpath + ground_truth = np.loadtxt(example_data('fsl_motion_outliers_fd.txt')) + fdisplacement = FramewiseDisplacement( + in_file=example_data('fsl_mcflirt_movpar.txt'), + out_file=tempdir + '/fd.txt', + parameter_source="FSL") + res = fdisplacement.run() + + with open(res.outputs.out_file) as all_lines: + for line in all_lines: + assert 'FramewiseDisplacement' in line + break + + assert np.allclose( + ground_truth, np.loadtxt(res.outputs.out_file, skiprows=1), atol=.16) + assert np.abs(ground_truth.mean() - res.outputs.fd_average) < 1e-2 + + +@pytest.mark.skipif(nonitime, reason="nitime is not installed") +def test_dvars(tmpdir): + ground_truth = np.loadtxt(example_data('ds003_sub-01_mc.DVARS')) + dvars = ComputeDVARS( + in_file=example_data('ds003_sub-01_mc.nii.gz'), + in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'), + save_all=True, + intensity_normalization=0) + tmpdir.chdir() + res = dvars.run() + + dv1 = np.loadtxt(res.outputs.out_all, skiprows=1) + assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum() / len(dv1)) < 0.05 + + assert (np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) < 0.05 + + assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 + + dvars = ComputeDVARS( + in_file=example_data('ds003_sub-01_mc.nii.gz'), + in_mask=example_data('ds003_sub-01_mc_brainmask.nii.gz'), + save_all=True) + res = dvars.run() + + dv1 = np.loadtxt(res.outputs.out_all, skiprows=1) + assert (np.abs(dv1[:, 0] - ground_truth[:, 0]).sum() / len(dv1)) < 0.05 + + assert 
(np.abs(dv1[:, 1] - ground_truth[:, 1]).sum() / len(dv1)) > 0.05 + + assert (np.abs(dv1[:, 2] - ground_truth[:, 2]).sum() / len(dv1)) < 0.05 + + +def test_outliers(): + np.random.seed(0) + in_data = np.random.randn(100) + in_data[0] += 10 + + assert is_outlier(in_data) == 1 diff --git a/nipype/algorithms/tests/test_icc_anova.py b/nipype/algorithms/tests/test_icc_anova.py new file mode 100644 index 0000000000..ef19b7f410 --- /dev/null +++ b/nipype/algorithms/tests/test_icc_anova.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import division +import numpy as np +from nipype.algorithms.icc import ICC_rep_anova + + +def test_ICC_rep_anova(): + # see table 2 in P. E. Shrout & Joseph L. Fleiss (1979). "Intraclass + # Correlations: Uses in Assessing Rater Reliability". Psychological + # Bulletin 86 (2): 420-428 + Y = np.array([[9, 2, 5, 8], [6, 1, 3, 2], [8, 4, 6, 8], [7, 1, 2, 6], + [10, 5, 6, 9], [6, 2, 4, 7]]) + + icc, r_var, e_var, _, dfc, dfe = ICC_rep_anova(Y) + # see table 4 + assert round(icc, 2) == 0.71 + assert dfc == 3 + assert dfe == 15 + assert np.isclose(r_var / (r_var + e_var), icc) diff --git a/nipype/algorithms/tests/test_mesh_ops.py b/nipype/algorithms/tests/test_mesh_ops.py new file mode 100644 index 0000000000..a08a5a97c3 --- /dev/null +++ b/nipype/algorithms/tests/test_mesh_ops.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os + +import pytest +import nipype.testing as npt +from nipype.testing import example_data +import numpy as np +from nipype.algorithms import mesh as m +from ...interfaces import vtkbase as VTKInfo + + +@pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") +def test_ident_distances(tmpdir): + tmpdir.chdir() + + in_surf = example_data('surf01.vtk') + dist_ident = m.ComputeMeshWarp() + dist_ident.inputs.surface1 = in_surf + dist_ident.inputs.surface2 = in_surf + dist_ident.inputs.out_file = tmpdir.join('distance.npy').strpath + res = dist_ident.run() + assert res.outputs.distance == 0.0 + + dist_ident.inputs.weighting = 'area' + res = dist_ident.run() + assert res.outputs.distance == 0.0 + + +@pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") +def test_trans_distances(tmpdir): + from ...interfaces.vtkbase import tvtk + + in_surf = example_data('surf01.vtk') + warped_surf = tmpdir.join('warped.vtk').strpath + + inc = np.array([0.7, 0.3, -0.2]) + + r1 = tvtk.PolyDataReader(file_name=in_surf) + vtk1 = VTKInfo.vtk_output(r1) + r1.update() + vtk1.points = np.array(vtk1.points) + inc + + writer = tvtk.PolyDataWriter(file_name=warped_surf) + VTKInfo.configure_input_data(writer, vtk1) + writer.write() + + dist = m.ComputeMeshWarp() + dist.inputs.surface1 = in_surf + dist.inputs.surface2 = warped_surf + dist.inputs.out_file = tmpdir.join('distance.npy').strpath + res = dist.run() + assert np.allclose(res.outputs.distance, np.linalg.norm(inc), 4) + dist.inputs.weighting = 'area' + res = dist.run() + assert np.allclose(res.outputs.distance, np.linalg.norm(inc), 4) + + +@pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") +def test_warppoints(tmpdir): + tmpdir.chdir() + + # TODO: include regression tests for when tvtk is installed + + +@pytest.mark.skipif(VTKInfo.no_tvtk(), reason="tvtk is not installed") +def test_meshwarpmaths(tmpdir): + tmpdir.chdir() + + # TODO: include regression tests for when tvtk is installed + + +@pytest.mark.skipif(not 
VTKInfo.no_tvtk(), reason="tvtk is installed") +def test_importerror(): + with pytest.raises(ImportError): + m.ComputeMeshWarp() + + with pytest.raises(ImportError): + m.WarpPoints() + + with pytest.raises(ImportError): + m.MeshWarpMaths() diff --git a/nipype/algorithms/tests/test_metrics.py b/nipype/algorithms/tests/test_metrics.py new file mode 100644 index 0000000000..fb876b3c72 --- /dev/null +++ b/nipype/algorithms/tests/test_metrics.py @@ -0,0 +1,58 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import numpy as np +import nibabel as nb +from nipype.testing import example_data +from ..metrics import FuzzyOverlap + + +def test_fuzzy_overlap(tmpdir): + tmpdir.chdir() + + # Tests with tissue probability maps + in_mask = example_data('tpms_msk.nii.gz') + tpms = [example_data('tpm_%02d.nii.gz' % i) for i in range(3)] + out = FuzzyOverlap(in_ref=tpms[0], in_tst=tpms[0]).run().outputs + assert out.dice == 1 + + out = FuzzyOverlap( + in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[0]).run().outputs + assert out.dice == 1 + + out = FuzzyOverlap( + in_mask=in_mask, in_ref=tpms[0], in_tst=tpms[1]).run().outputs + assert 0 < out.dice < 1 + + out = FuzzyOverlap(in_ref=tpms, in_tst=tpms).run().outputs + assert out.dice == 1.0 + + out = FuzzyOverlap( + in_mask=in_mask, in_ref=tpms, in_tst=tpms).run().outputs + assert out.dice == 1.0 + + # Tests with synthetic 3x3x3 images + data = np.zeros((3, 3, 3), dtype=float) + data[0, 0, 0] = 0.5 + data[2, 2, 2] = 0.25 + data[1, 1, 1] = 0.3 + nb.Nifti1Image(data, np.eye(4)).to_filename('test1.nii.gz') + + data = np.zeros((3, 3, 3), dtype=float) + data[0, 0, 0] = 0.6 + data[1, 1, 1] = 0.3 + nb.Nifti1Image(data, np.eye(4)).to_filename('test2.nii.gz') + + out = FuzzyOverlap(in_ref='test1.nii.gz', in_tst='test2.nii.gz').run().outputs + assert np.allclose(out.dice, 0.82051) + + # Just considering the mask, the central pixel + # that raised the index now is left aside. 
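+ # The expected values follow from the fuzzy Dice definition,
+ # 2 * sum(min(ref, tst)) / (sum(ref) + sum(tst)): unmasked this gives
+ # 2 * (0.5 + 0.3 + 0.0) / (1.05 + 0.9) = 1.6 / 1.95 ~= 0.82051, as asserted
+ # above; with the binary mask below only voxels (0, 0, 0) and (2, 2, 2)
+ # contribute, giving 2 * (0.5 + 0.0) / (0.75 + 0.6) = 1.0 / 1.35 ~= 0.74074.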
+ data = np.zeros((3, 3, 3), dtype=int) + data[0, 0, 0] = 1 + data[2, 2, 2] = 1 + nb.Nifti1Image(data, np.eye(4)).to_filename('mask.nii.gz') + + out = FuzzyOverlap(in_ref='test1.nii.gz', in_tst='test2.nii.gz', + in_mask='mask.nii.gz').run().outputs + assert np.allclose(out.dice, 0.74074) diff --git a/nipype/algorithms/tests/test_misc.py b/nipype/algorithms/tests/test_misc.py new file mode 100644 index 0000000000..e9d5cbdb3c --- /dev/null +++ b/nipype/algorithms/tests/test_misc.py @@ -0,0 +1,48 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest +import os + +import nibabel as nb + +from nipype.algorithms import misc +from nipype.utils.filemanip import fname_presuffix +from nipype.testing.fixtures import create_analyze_pair_file_in_directory +from nipype.utils import NUMPY_MMAP +from nipype.testing import example_data + + +def test_CreateNifti(create_analyze_pair_file_in_directory): + + filelist, outdir = create_analyze_pair_file_in_directory + + create_nifti = misc.CreateNifti() + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + create_nifti.run() + + # .inputs based parameters setting + create_nifti.inputs.header_file = filelist[0] + create_nifti.inputs.data_file = fname_presuffix( + filelist[0], '', '.img', use_ext=False) + + result = create_nifti.run() + + assert os.path.exists(result.outputs.nifti_file) + assert nb.load(result.outputs.nifti_file, mmap=NUMPY_MMAP) + + +def test_CalculateMedian(create_analyze_pair_file_in_directory): + + mean = misc.CalculateMedian() + + with pytest.raises(TypeError): + mean.run() + + mean.inputs.in_files = example_data('ds003_sub-01_mc.nii.gz') + eg = mean.run() + + assert os.path.exists(eg.outputs.median_files) + assert nb.load(eg.outputs.median_files, mmap=NUMPY_MMAP) diff --git a/nipype/algorithms/tests/test_modelgen.py b/nipype/algorithms/tests/test_modelgen.py new file mode 100644 index 0000000000..824a634354 --- /dev/null +++ b/nipype/algorithms/tests/test_modelgen.py @@ -0,0 +1,263 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import division + +from copy import deepcopy +import os + +from nibabel import Nifti1Image +import numpy as np + +import pytest +import numpy.testing as npt +from nipype.interfaces.base import Bunch, TraitError +from nipype.algorithms.modelgen import (SpecifyModel, SpecifySparseModel, + SpecifySPMModel) + + +def test_modelgen1(tmpdir): + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath + Nifti1Image(np.random.rand(10, 10, 10, 200), + np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 200), + np.eye(4)).to_filename(filename2) + s = SpecifyModel() + s.inputs.input_units = 'scans' + set_output_units = lambda: setattr(s.inputs, 'output_units', 'scans') + with pytest.raises(TraitError): + set_output_units() + s.inputs.functional_runs = [filename1, filename2] + s.inputs.time_repetition = 6 + s.inputs.high_pass_filter_cutoff = 128. 
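+ # With input_units='scans' and time_repetition=6, SpecifyModel rescales
+ # the onsets below from scans to seconds, so [2, 50, 100, 180] becomes
+ # [12, 300, 600, 1080] -- the vector checked by the first onset assertion.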
+ info = [ + Bunch( + conditions=['cond1'], + onsets=[[2, 50, 100, 180]], + durations=[[1]], + amplitudes=None, + pmod=None, + regressors=None, + regressor_names=None, + tmod=None), + Bunch( + conditions=['cond1'], + onsets=[[30, 40, 100, 150]], + durations=[[1]], + amplitudes=None, + pmod=None, + regressors=None, + regressor_names=None, + tmod=None) + ] + s.inputs.subject_info = info + res = s.run() + assert len(res.outputs.session_info) == 2 + assert len(res.outputs.session_info[0]['regress']) == 0 + assert len(res.outputs.session_info[0]['cond']) == 1 + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['onset']), + np.array([12, 300, 600, 1080])) + info = [ + Bunch(conditions=['cond1'], onsets=[[2]], durations=[[1]]), + Bunch(conditions=['cond1'], onsets=[[3]], durations=[[1]]) + ] + s.inputs.subject_info = deepcopy(info) + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['duration']), + np.array([6.])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[1]['cond'][0]['duration']), + np.array([6.])) + info = [ + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2]], + durations=[[1, 1], [1]]), + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2, 4]], + durations=[[1, 1], [1, 1]]) + ] + s.inputs.subject_info = deepcopy(info) + s.inputs.input_units = 'scans' + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['duration']), + np.array([6., 6.])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][1]['duration']), + np.array([ + 6., + ])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[1]['cond'][1]['duration']), + np.array([6., 6.])) + + +def test_modelgen_spm_concat(tmpdir): + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath + Nifti1Image(np.random.rand(10, 10, 10, 30), + np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 30), + np.eye(4)).to_filename(filename2) + + # Test case when only one duration is passed, as being the same for all onsets. + s = SpecifySPMModel() + s.inputs.input_units = 'secs' + s.inputs.concatenate_runs = True + setattr(s.inputs, 'output_units', 'secs') + assert s.inputs.output_units == 'secs' + s.inputs.functional_runs = [filename1, filename2] + s.inputs.time_repetition = 6 + s.inputs.high_pass_filter_cutoff = 128. 
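+ # Each run holds 30 volumes at time_repetition=6, i.e. 180s per run; with
+ # concatenate_runs=True the second run's onsets [30, 40, 100, 150] are
+ # shifted by 180s to [210, 220, 280, 330], yielding the concatenated onset
+ # vector asserted below.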
+ info = [ + Bunch( + conditions=['cond1'], onsets=[[2, 50, 100, 170]], durations=[[1]]), + Bunch( + conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + ] + s.inputs.subject_info = deepcopy(info) + res = s.run() + assert len(res.outputs.session_info) == 1 + assert len(res.outputs.session_info[0]['regress']) == 1 + assert np.sum(res.outputs.session_info[0]['regress'][0]['val']) == 30 + assert len(res.outputs.session_info[0]['cond']) == 1 + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['onset']), + np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['duration']), + np.array([1., 1., 1., 1., 1., 1., 1., 1.])) + + # Test case of scans as output units instead of seconds + setattr(s.inputs, 'output_units', 'scans') + assert s.inputs.output_units == 'scans' + s.inputs.subject_info = deepcopy(info) + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['onset']), + np.array([2.0, 50.0, 100.0, 170.0, 210.0, 220.0, 280.0, 330.0]) / 6) + + # Test case for no concatenation with seconds as output units + s.inputs.concatenate_runs = False + s.inputs.subject_info = deepcopy(info) + s.inputs.output_units = 'secs' + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['onset']), + np.array([2.0, 50.0, 100.0, 170.0])) + + # Test case for variable number of events in separate runs, sometimes unique. + filename3 = tmpdir.join('test3.nii').strpath + Nifti1Image(np.random.rand(10, 10, 10, 30), + np.eye(4)).to_filename(filename3) + s.inputs.functional_runs = [filename1, filename2, filename3] + info = [ + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2]], + durations=[[1, 1], [1]]), + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2, 4]], + durations=[[1, 1], [1, 1]]), + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2]], + durations=[[1, 1], [1]]) + ] + s.inputs.subject_info = deepcopy(info) + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['duration']), + np.array([1., 1.])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][1]['duration']), + np.array([ + 1., + ])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[1]['cond'][1]['duration']), + np.array([1., 1.])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[2]['cond'][1]['duration']), + np.array([ + 1., + ])) + + # Test case for variable number of events in concatenated runs, sometimes unique. 
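+ # Concatenation merges the per-run events of each condition: cond2
+ # contributes 1 + 2 + 1 = 4 events across the three runs (durations
+ # [1, 1, 1, 1] below), while cond1 contributes 2 + 2 + 2 = 6.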
+ s.inputs.concatenate_runs = True + info = [ + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2]], + durations=[[1, 1], [1]]), + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2, 4]], + durations=[[1, 1], [1, 1]]), + Bunch( + conditions=['cond1', 'cond2'], + onsets=[[2, 3], [2]], + durations=[[1, 1], [1]]) + ] + s.inputs.subject_info = deepcopy(info) + res = s.run() + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][0]['duration']), + np.array([1., 1., 1., 1., 1., 1.])) + npt.assert_almost_equal( + np.array(res.outputs.session_info[0]['cond'][1]['duration']), + np.array([1., 1., 1., 1.])) + + +def test_modelgen_sparse(tmpdir): + filename1 = tmpdir.join('test1.nii').strpath + filename2 = tmpdir.join('test2.nii').strpath + Nifti1Image(np.random.rand(10, 10, 10, 50), + np.eye(4)).to_filename(filename1) + Nifti1Image(np.random.rand(10, 10, 10, 50), + np.eye(4)).to_filename(filename2) + s = SpecifySparseModel() + s.inputs.input_units = 'secs' + s.inputs.functional_runs = [filename1, filename2] + s.inputs.time_repetition = 6 + info = [ + Bunch( + conditions=['cond1'], onsets=[[0, 50, 100, 180]], durations=[[2]]), + Bunch( + conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) + ] + s.inputs.subject_info = info + s.inputs.volumes_in_cluster = 1 + s.inputs.time_acquisition = 2 + s.inputs.high_pass_filter_cutoff = np.inf + res = s.run() + assert len(res.outputs.session_info) == 2 + assert len(res.outputs.session_info[0]['regress']) == 1 + assert len(res.outputs.session_info[0]['cond']) == 0 + + s.inputs.stimuli_as_impulses = False + res = s.run() + assert res.outputs.session_info[0]['regress'][0]['val'][0] == 1.0 + + s.inputs.model_hrf = True + res = s.run() + npt.assert_almost_equal( + res.outputs.session_info[0]['regress'][0]['val'][0], + 0.016675298129743384) + assert len(res.outputs.session_info[0]['regress']) == 1 + s.inputs.use_temporal_deriv = True + res = s.run() + + assert len(res.outputs.session_info[0]['regress']) == 2 + npt.assert_almost_equal( + res.outputs.session_info[0]['regress'][0]['val'][0], + 0.016675298129743384) + npt.assert_almost_equal( + res.outputs.session_info[1]['regress'][1]['val'][5], + 0.007671459162258378) diff --git a/nipype/algorithms/tests/test_moments.py b/nipype/algorithms/tests/test_moments.py new file mode 100644 index 0000000000..fa174a79e4 --- /dev/null +++ b/nipype/algorithms/tests/test_moments.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +import numpy as np +from nipype.algorithms.misc import calc_moments + + +def test_skew(tmpdir): + data = """14.62418305 5.916396751 -1.658088086 4.71113546 1.598428608 5.612553811 -5.004056368 -4.057513911 +11.16365251 17.32688599 -3.099920667 2.630189741 2.389709914 0.379332731 -0.2899694205 -4.363591482 +2.059205599 23.90705054 0.7180462297 -1.976963652 7.487682025 -5.583986129 1.094800525 -2.319858134 +-1.907579712 22.08277347 4.595575886 -3.869054671 8.214834769 -3.442156385 2.428766374 0.7736184662 +0.6535290043 14.1320384 0.9458768261 -2.577892846 -0.8925440241 3.177128674 6.048546332 1.736059675 +3.149271524 8.106285467 -6.173280371 -0.5146958863 -11.83574747 4.066575201 9.160589786 0.1680632718 +3.089673173 8.736851925 -5.624227736 1.386441126 -12.58621755 -0.726443824 8.036414499 -0.3318169666 +2.685349599 9.968755255 2.965603277 2.634928414 -3.783441929 -1.858587372 3.238274675 2.594880211 +0.870577208 2.323455904 7.840351954 1.635436162 2.451630603 2.834494164 -1.384081764 5.840475644 +-4.421008251 -12.78755879 2.985581265 -1.609381512 
-0.1816579797 5.448215202 -2.855889998 5.041186537 +-8.502455278 -22.66799593 -3.964218147 -4.180363107 -5.061764789 2.439737668 -0.9988071581 1.437142327 +-5.355058719 -19.00567875 -4.803737548 -3.884369973 -4.977945181 -0.4758749938 1.894453988 0.003263759218 +1.29682909 -8.295173365 -1.51226274 -1.611159469 -2.5403281 -0.2155584519 2.597114132 1.16528519 +3.162947556 -3.093405654 0.4782790153 1.015061011 -2.755821487 -1.015685899 0.1402527399 0.05435017236 +0.9158883917 -6.679241736 0.9376568982 3.175011335 -2.712383777 -3.836563374 -2.270503748 -4.593165145 +0.5468675209 -11.14130502 1.420140475 3.506045445 2.777240829 -3.14187819 -0.7823285883 -6.84663074 +-0.5754863055 -9.638785593 0.2926825231 1.039079149 9.613209645 1.300380032 3.755092776 -2.30881605 +-9.12095608 -5.422145216 -3.089096046 -1.913969236 8.36828235 1.622740946 6.756285589 4.803793558 +-18.6459149 -5.677906762 -4.447399529 -1.826561667 -1.179681537 -3.51737806 6.062770694 7.743917051 +-14.12032005 -9.346953111 -0.3927872312 0.5116398162 -8.814603334 -4.191932775 3.735940996 5.926107194 +3.984986352 -7.490234063 5.101302343 0.6359344324 -8.098435707 3.372259941 1.603560776 2.787631701 +16.74369044 2.523688856 4.825375014 -2.888386026 -2.929939078 7.41176576 -0.9444665519 -0.5476924783 +13.0864062 10.44887074 -2.409155335 -6.466987193 2.038766622 -0.9844478726 -3.872608358 -3.903240663 +3.888161509 7.356308659 -9.783752602 -6.593576679 7.785360016 -11.59798121 -5.359996968 -4.646576281 +2.919034842 0.4926039084 -9.765686304 -3.169484175 13.3885185 -10.00053277 -5.284251069 -1.953467094 +7.762685816 3.138596183 -2.417670781 2.087535944 12.09072814 0.3201456619 -5.986630196 -0.393473785 +8.598656701 12.64638617 4.32929224 6.665685612 2.52013659 4.924021467 -7.729146671 -2.531538284 +4.286211902 12.70121508 4.197284784 7.586579174 -4.511459665 1.039992021 -7.200406996 -2.678018972 +-0.206805413 -1.118395095 1.251956053 4.927663964 -0.3269306726 -1.614001868 -2.858296125 3.708027659 +-3.615745533 -13.26040515 4.163662563 3.376525012 6.876574727 1.021356663 1.813515644 9.401028448 +-6.392625018 -11.19412506 11.70010341 5.557449086 3.188483207 3.033109557 3.108015432 5.00732323 +-5.697688304 -1.564055358 12.53451981 6.641295722 -9.330508253 1.60952695 1.985401431 -4.635334005 +-0.4739120366 5.308731294 3.209488234 1.907340382 -15.26443399 1.262158357 1.288838724 -6.54661201 +3.733208755 11.99608217 -4.121352088 -3.787629919 -8.977806581 3.760241115 1.048439633 -0.2497259139 +1.633682769 21.98252106 0.008457593931 -2.863301753 -1.475378656 4.854200462 -0.156717616 2.028483989 +-4.262983941 24.73198623 6.529712692 1.286325062 -1.857794734 2.962236297 -1.586154566 -3.6478191 +-7.502330557 10.60931417 2.397686502 -1.56459968 -4.721655517 2.006857078 -1.490344215 -7.044842318 +-5.198726771 -8.273929595 -7.6127574 -11.03862432 -1.592101433 3.747829535 -0.06779667515 -2.412618507 +0.7334095101 -11.76661769 -9.165804187 -14.81298889 5.36362746 4.955331255 1.673488979 2.0899358 +5.517823916 -1.529874203 -2.421802273 -6.947139589 8.366593034 3.55375893 4.03335273 -0.05524186477 +1.474077483 2.649817521 7.255506458 6.068405441 -2.220943179 -0.6343270953 1.382522916 -2.748044018 +-6.776840898 2.855345278 -3.570626044 1.654853143 -2.838161622 0.755210647 7.252510904 1.235575241 +-14.86022341 -0.8943548346 -10.36165869 -1.966680076 -3.641426564 -3.670112785 8.644955043 6.859610046 +-7.145239483 -0.1458937017 -3.867994525 -0.9484554762 -2.48227248 -8.36071796 2.539637492 5.399990929 +8.804929045 1.925551314 3.240568033 1.273961559 
2.104351411 -6.141864838 -5.255423549 -0.7896387751 +9.735755254 -1.862844212 -2.552156104 -0.3902178948 5.745817797 -1.515932976 -8.546922674 -3.440929455 +-5.837957148 -8.226266393 -13.20837554 -4.385043051 2.553090991 -4.209818986 -8.331176217 -1.707250641 +-12.64051676 -8.2399894 -12.76990779 -5.960467624 -4.294427772 -10.92374675 -8.6902905 0.3421994093 +1.17028221 -1.953361346 -2.607159313 -4.896369845 -4.519583123 -8.055510792 -9.019182555 3.36412153 +14.48433641 2.152584104 3.178007658 -3.9792054 3.873546228 5.321306118 -5.445499499 8.684509027 +8.116988393 0.4683619278 1.046001596 -3.128586059 10.0250152 12.58326776 1.447856102 10.18164703 +-4.706381289 -1.788728553 0.6563335204 -0.5831451131 5.744824049 3.988876139 5.65836796 2.189197844 +-2.76704126 -0.495980308 6.533235978 2.372759856 -2.792331174 -7.896310272 3.502571539 -8.556072249 +8.315654337 0.7043190657 11.38508989 2.565286445 -5.081739754 -6.900720718 -1.667312154 -10.59024727 +9.909297104 -2.934946689 8.968652164 -0.5610029798 -0.6957945725 3.815352939 -4.277309457 -4.346939024 +3.809478921 -8.178727502 2.78966603 -4.568498377 3.295953611 9.457549108 -2.931773943 -0.04922082646 +4.940986376 -6.906199411 -0.6726182267 -6.550149966 3.251783239 6.324220141 0.1496185048 -1.7701633 +10.55846735 1.720423345 -0.02248084003 -4.475053837 0.3943175795 3.615274407 3.17786214 -4.661015894 +5.164991215 7.975239079 2.030845129 1.259865261 -3.543706118 6.424886561 5.257164014 -5.686755714 +-7.85360929 4.585684687 2.641661508 6.399259194 -5.791994946 9.620021677 5.059618162 -5.841773643 +-7.887333445 -1.663863126 0.531225876 6.442066641 -2.580841985 8.356612294 2.609875283 -3.391732494 +7.467417207 0.7346301535 -2.719728468 2.822035284 4.54698989 4.221046784 0.791568596 3.728706407 +14.76100347 9.382305581 -3.17219641 1.381585183 7.754995237 -0.3908054543 1.355349478 9.807914939 +0.1267792801 9.818588278 0.5608772817 3.633460684 3.711951896 -5.421004876 1.162611597 7.001274262 +-19.35283277 -2.103358718 4.16130701 4.67192889 -0.8231375514 -8.81474386 -2.846417531 -1.268859264 +-20.80038431 -11.76135621 2.944594891 1.64388247 -0.1668629943 -6.707442921 -6.544901517 -3.830974298 +-5.592905106 -6.057725588 -1.233208621 -1.339964983 0.7299911265 -0.7530015377 -3.117175727 1.142381884 +7.890421323 8.119524766 -2.606602104 0.007101965698 -4.473969864 1.35750371 5.357618774 4.161238035 +9.600782899 14.52365435 0.1990637024 3.403466406 -11.59507852 -3.675154543 8.718678202 0.7825822225 +3.703846665 8.748127367 3.135332804 4.127582534 -12.38852274 -9.447080613 3.417599727 -1.915488323 +-3.011725724 -0.5381126202 3.567929983 2.184591464 -7.411651508 -9.252946446 -1.827784625 1.560496584 +-7.142629796 -5.355184696 3.289780212 1.113331632 -3.105505654 -5.606446238 0.1961208934 6.334603712 +-6.659543803 -4.245675975 3.726757782 1.953178495 -0.7484610023 -4.426403774 3.716311729 6.200735049 +-1.643440395 0.7536090906 2.509268017 2.15471156 2.374200456 -3.774138064 -0.1428981969 2.646676328 +3.686406766 4.827058909 -2.458101484 -0.39559615 5.082577298 3.167157352 -8.147321924 -0.03506891856 +4.407495284 2.5606793 -8.149493446 -4.632729429 4.938050013 14.56344531 -9.374945991 -1.3893417 +-0.1687177084 -4.106757231 -9.343602374 -7.415904922 4.749022091 18.81314153 -1.749200795 -2.02566815 +-6.507688641 -6.001538055 -6.108916568 -6.784929595 7.21051134 10.59847744 5.776257506 -0.4990570991 +-9.820082348 -0.5741078285 -4.687969138 -4.377866052 7.40862329 -0.06470407472 6.857336593 2.745243336 +-7.04365894 2.689020958 -8.804350547 -3.506610093 
0.5732906688 -1.771827007 4.332768659 3.537426733 +-0.4346222942 -2.295147419 -12.91289393 -3.95705062 -7.130741497 1.478867856 2.340197798 -0.2224791818 +2.355519667 -7.446912611 -8.580935982 -1.515500603 -6.545362285 -2.460234117 0.4822626914 -5.261252431 +-3.230857748 -4.456435972 3.105258325 4.868182005 -0.3155725672 -12.9461276 -1.81314629 -7.915543953 +-10.61694158 1.023409988 11.23695246 9.13393953 2.080132446 -15.68433051 -2.452603277 -8.067702457 +-8.952785439 0.3914623321 9.072213866 5.788054925 0.5661677477 -4.862572943 -1.253393229 -6.497656047 +1.825216246 -2.868761361 2.684946057 -1.702605515 2.524615008 6.658427102 -1.464383881 -3.333412097 +10.52499456 -1.807928838 1.602770946 -5.693835167 7.025193015 6.172728664 -3.989160551 -0.7754719889 +10.83430082 0.3010957187 5.703164372 -4.7215044 5.747620411 -0.6137370397 -5.393253651 -1.967790019 +9.084992271 -1.297359974 7.313272774 -2.919262371 -0.341939585 -0.488964096 -3.962652217 -5.129527247 +11.86896398 -0.4901633845 3.193953846 -1.811431925 -0.3604987261 6.192234507 -2.348495577 -4.159036411 +14.81736012 7.870835671 -2.04922723 0.122245812 7.807201578 8.435263453 -1.994392703 2.494961459 +10.99679669 13.62141018 -3.175917696 1.68947873 12.43613872 4.131979444 -0.8035598171 8.583091116 +3.538171963 6.008200439 0.5876902994 0.4403643142 6.183013749 2.012581919 1.090536757 8.392496526 +0.5460594103 -6.259032909 6.647104433 -1.43557129 -3.452884137 4.366160275 -0.2274303705 3.900139848 +1.772017802 -8.109091085 10.50095909 -0.1621391129 -7.608906136 2.481208401 -4.509906047 0.7763248812 +0.606115406 -2.603199426 7.692974034 2.104967053 -8.226098406 -6.837921596 -4.561655055 1.015397953 +-2.978847372 -2.385761908 -0.8339871055 0.6707971346 -9.874595181 -13.39338209 3.157380259 2.413897035 +-2.985013991 -5.160444086 -7.29279473 -2.371762765 -10.03622895 -9.34912711 10.97609581 2.654665151 +-1.068091568 -0.2479914452 -6.107351633 -0.9239821871 -5.835733231 -2.189236707 9.811317248 1.508754364 +-6.520427038 7.430392097 -1.95095948 4.15525371 -2.032963385 -2.693509918 2.091753969 0.4782648423 +-18.09299987 4.740223292 -2.838854108 6.118069011 -3.664423954 -7.91518773 -2.533067915 1.120941519 +-19.32711056 -3.231687054 -8.04776777 3.689162869 -6.952885159 -6.854774161 -1.172264884 2.581894309 +-2.203996345 -0.5339747203 -10.27858531 1.833505163 -5.406679162 1.678416611 0.871971912 1.837113402 +15.60657966 8.749980935 -7.560269196 1.70515063 0.1003191195 8.04135078 1.044572756 -1.582641946 +12.19564564 5.273436246 -4.367517279 -0.0400759142 4.431313549 7.067826794 2.741622337 -3.458851463 +-6.44120462 -9.849684434 -1.946651925 -2.183436603 6.686267514 4.016449169 6.302612811 -0.9698019507 +-13.80878408 -13.92578887 3.071419193 -0.156449455 8.551444945 4.051266929 5.541317929 1.901010931 +-1.084801367 -1.267516734 9.774222689 3.461150291 8.195043157 4.77412064 -2.223359889 0.07143463336 +11.95939854 7.195316999 11.93418631 1.472618288 3.247038347 2.656123844 -9.091445458 -4.097157466 +-2.752420619 -1.103781682 -3.382675846 -3.9326499 0.3168555978 -2.600573426 -9.409987851 -1.564842317 +-11.68718367 -12.62978052 -7.436711849 -11.05071165 -4.535693861 -4.973062537 -9.154275121 -0.8478464554 +-11.1129098 -8.014294516 -5.818564146 -6.557508409 -4.920322355 -2.444494132 -0.762850219 -1.035995467 +-0.1942650118 5.507757423 -0.6713848498 2.045539379 0.2907563314 2.654730384 5.268838031 -2.711154892 +6.638825325 9.118078409 2.220738816 5.875202986 0.6059672284 -5.305207318 -0.08004872831 -2.950039659 +12.18704972 0.6256114468 2.352153233 
8.701077613 4.804756766 -6.163162012 -1.779998967 -6.493561445 +4.442326811 -15.10908307 4.919949591 3.969210961 7.004029439 0.1398435001 -4.659976897 -3.899267451 +-7.594265524 -20.77328745 5.94521557 -2.385814065 3.224509406 8.943882025 -3.270587613 3.470325906 +-8.696673766 -12.29052026 -0.3763403003 -5.55470641 -3.51572569 12.51259902 3.753517263 8.67338497 +-0.5057854071 -2.415896554 -9.663571931 -5.714041661 -6.037933426 8.673756933 10.03557773 8.629816199 +3.622185659 0.4716627142 -10.92515308 -3.705286841 -2.776089545 2.271920902 9.251504922 5.744980887 +""" + f = tmpdir.join("filetest") + f.write(data) + skewness = calc_moments(f.strpath, 3) + assert np.allclose( + skewness, + np.array([ + -0.23418937314622, 0.2946365564954823, -0.05781002053540932, + -0.3512508282578762, -0.07035664150233077, -0.01935867699166935, + 0.00483863369427428, 0.21879460029850167 + ])) diff --git a/nipype/algorithms/tests/test_normalize_tpms.py b/nipype/algorithms/tests/test_normalize_tpms.py new file mode 100644 index 0000000000..a65cc66770 --- /dev/null +++ b/nipype/algorithms/tests/test_normalize_tpms.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from builtins import range +import os + +import pytest +from nipype.testing import example_data + +import numpy as np +import nibabel as nb +import nipype.testing as nit + +from nipype.algorithms.misc import normalize_tpms +from nipype.utils import NUMPY_MMAP + + +def test_normalize_tpms(tmpdir): + + in_mask = example_data('tpms_msk.nii.gz') + mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() + mskdata[mskdata > 0.0] = 1.0 + + mapdata = [] + in_files = [] + out_files = [] + + for i in range(3): + mapname = example_data('tpm_%02d.nii.gz' % i) + filename = tmpdir.join('modtpm_%02d.nii.gz' % i).strpath + out_files.append(tmpdir.join('normtpm_%02d.nii.gz' % i).strpath) + + im = nb.load(mapname, mmap=NUMPY_MMAP) + data = im.get_data() + mapdata.append(data.copy()) + + nb.Nifti1Image(2.0 * (data * mskdata), im.affine, + im.header).to_filename(filename) + in_files.append(filename) + + normalize_tpms(in_files, in_mask, out_files=out_files) + + sumdata = np.zeros_like(mskdata) + + for i, tstfname in enumerate(out_files): + normdata = nb.load(tstfname, mmap=NUMPY_MMAP).get_data() + sumdata += normdata + assert np.all(normdata[mskdata == 0] == 0) + assert np.allclose(normdata, mapdata[i]) + + assert np.allclose(sumdata[sumdata > 0.0], 1.0) diff --git a/nipype/algorithms/tests/test_rapidart.py b/nipype/algorithms/tests/test_rapidart.py new file mode 100644 index 0000000000..9c29648626 --- /dev/null +++ b/nipype/algorithms/tests/test_rapidart.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import division + +import numpy as np + +import numpy.testing as npt +from .. 
import rapidart as ra +from ...interfaces.base import Bunch + + +def test_ad_init(): + ad = ra.ArtifactDetect(use_differences=[True, False]) + assert ad.inputs.use_differences[0] + assert not ad.inputs.use_differences[1] + + +def test_ad_output_filenames(): + ad = ra.ArtifactDetect() + outputdir = '/tmp' + f = 'motion.nii' + (outlierfile, intensityfile, statsfile, normfile, plotfile, + displacementfile, maskfile) = ad._get_output_filenames(f, outputdir) + assert outlierfile == '/tmp/art.motion_outliers.txt' + assert intensityfile == '/tmp/global_intensity.motion.txt' + assert statsfile == '/tmp/stats.motion.txt' + assert normfile == '/tmp/norm.motion.txt' + assert plotfile == '/tmp/plot.motion.png' + assert displacementfile == '/tmp/disp.motion.nii' + assert maskfile == '/tmp/mask.motion.nii' + + +def test_ad_get_affine_matrix(): + matrix = ra._get_affine_matrix(np.array([0]), 'SPM') + npt.assert_equal(matrix, np.eye(4)) + # test translation + params = [1, 2, 3] + matrix = ra._get_affine_matrix(params, 'SPM') + out = np.eye(4) + out[0:3, 3] = params + npt.assert_equal(matrix, out) + # test rotation + params = np.array([0, 0, 0, np.pi / 2, np.pi / 2, np.pi / 2]) + matrix = ra._get_affine_matrix(params, 'SPM') + out = np.array([0, 0, 1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1]).reshape( + (4, 4)) + npt.assert_almost_equal(matrix, out) + # test scaling + params = np.array([0, 0, 0, 0, 0, 0, 1, 2, 3]) + matrix = ra._get_affine_matrix(params, 'SPM') + out = np.array([1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 0, 0, 1]).reshape( + (4, 4)) + npt.assert_equal(matrix, out) + # test shear + params = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3]) + matrix = ra._get_affine_matrix(params, 'SPM') + out = np.array([1, 1, 2, 0, 0, 1, 3, 0, 0, 0, 1, 0, 0, 0, 0, 1]).reshape( + (4, 4)) + npt.assert_equal(matrix, out) + + +def test_ad_get_norm(): + params = np.array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, np.pi / 4, np.pi / 4, np.pi / 4, 0, 0, 0, + -np.pi / 4, -np.pi / 4, -np.pi / 4 + ]).reshape((3, 6)) + norm, _ = ra._calc_norm(params, False, 'SPM') + npt.assert_almost_equal(norm, + np.array([18.86436316, 37.74610158, 31.29780829])) + norm, _ = ra._calc_norm(params, True, 'SPM') + npt.assert_almost_equal(norm, np.array([0., 143.72192614, 173.92527131])) + + +def test_sc_init(): + sc = ra.StimulusCorrelation(concatenated_design=True) + assert sc.inputs.concatenated_design + + +def test_sc_populate_inputs(): + sc = ra.StimulusCorrelation() + inputs = Bunch( + realignment_parameters=None, + intensity_values=None, + spm_mat_file=None, + concatenated_design=None) + assert set(sc.inputs.__dict__.keys()) == set(inputs.__dict__.keys()) + + +def test_sc_output_filenames(): + sc = ra.StimulusCorrelation() + outputdir = '/tmp' + f = 'motion.nii' + corrfile = sc._get_output_filenames(f, outputdir) + assert corrfile == '/tmp/qa.motion_stimcorr.txt' diff --git a/nipype/algorithms/tests/test_splitmerge.py b/nipype/algorithms/tests/test_splitmerge.py new file mode 100644 index 0000000000..f05d291028 --- /dev/null +++ b/nipype/algorithms/tests/test_splitmerge.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from nipype.testing import example_data +from nipype.utils import NUMPY_MMAP + + +def test_split_and_merge(tmpdir): + import numpy as np + import nibabel as nb + import os.path as op + import os + + from nipype.algorithms.misc import split_rois, merge_rois + + in_mask = example_data('tpms_msk.nii.gz') + dwfile = tmpdir.join('dwi.nii.gz').strpath + mskdata = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() + aff = 
nb.load(in_mask, mmap=NUMPY_MMAP).affine + + dwshape = (mskdata.shape[0], mskdata.shape[1], mskdata.shape[2], 6) + dwdata = np.random.normal(size=dwshape) + tmpdir.chdir() + nb.Nifti1Image(dwdata.astype(np.float32), aff, None).to_filename(dwfile) + + resdw, resmsk, resid = split_rois(dwfile, in_mask, roishape=(20, 20, 2)) + merged = merge_rois(resdw, resid, in_mask) + dwmerged = nb.load(merged, mmap=NUMPY_MMAP).get_data() + + dwmasked = dwdata * mskdata[:, :, :, np.newaxis] + + assert np.allclose(dwmasked, dwmerged) diff --git a/nipype/algorithms/tests/test_stats.py b/nipype/algorithms/tests/test_stats.py new file mode 100644 index 0000000000..9a4c7525b5 --- /dev/null +++ b/nipype/algorithms/tests/test_stats.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import numpy as np +import nibabel as nb +from nipype.algorithms.stats import ActivationCount +import pytest + + +def test_ActivationCount(tmpdir): + tmpdir.chdir() + in_files = ['{:d}.nii'.format(i) for i in range(3)] + for fname in in_files: + nb.Nifti1Image(np.random.normal(size=(5, 5, 5)), + np.eye(4)).to_filename(fname) + + acm = ActivationCount(in_files=in_files, threshold=1.65) + res = acm.run() + diff = nb.load(res.outputs.out_file) + pos = nb.load(res.outputs.acm_pos) + neg = nb.load(res.outputs.acm_neg) + assert np.allclose(diff.get_data(), pos.get_data() - neg.get_data()) + + +@pytest.mark.parametrize("threshold, above_thresh", [ + (1, 15.865), # above one standard deviation (one side) + (2, 2.275), # above two standard deviations (one side) + (3, 0.135) # above three standard deviations (one side) +]) +def test_ActivationCount_normaldistr(tmpdir, threshold, above_thresh): + tmpdir.chdir() + in_files = ['{:d}.nii'.format(i) for i in range(3)] + for fname in in_files: + nb.Nifti1Image(np.random.normal(size=(100, 100, 100)), + np.eye(4)).to_filename(fname) + + acm = ActivationCount(in_files=in_files, threshold=threshold) + res = acm.run() + pos = nb.load(res.outputs.acm_pos) + neg = nb.load(res.outputs.acm_neg) + assert np.isclose(pos.get_data().mean(), + above_thresh * 1.e-2, rtol=0.1, atol=1.e-4) + assert np.isclose(neg.get_data().mean(), + above_thresh * 1.e-2, rtol=0.1, atol=1.e-4) diff --git a/nipype/caching/__init__.py b/nipype/caching/__init__.py new file mode 100644 index 0000000000..1e99ed4428 --- /dev/null +++ b/nipype/caching/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from .memory import Memory diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py new file mode 100644 index 0000000000..9fcf694d4b --- /dev/null +++ b/nipype/caching/memory.py @@ -0,0 +1,300 @@ +# -*- coding: utf-8 -*- +""" +Using nipype with persistence and lazy recomputation but without explicit +name-steps pipeline: getting back scope in command-line based programming. 
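+
+A minimal usage sketch (mirroring the doctest in ``Memory.cache`` below;
+the interface and file names are illustrative)::
+
+    from nipype.caching import Memory
+    from nipype.interfaces import fsl
+
+    mem = Memory('/tmp/nipype_cache')
+    fsl_merge = mem.cache(fsl.Merge)
+    results = fsl_merge(in_files=['a.nii', 'b.nii'], dimension='t')
+    merged = results.outputs.merged_file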
+""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object, open + +import os +import hashlib +import pickle +import time +import shutil +import glob + +from ..interfaces.base import BaseInterface +from ..pipeline.engine import Node +from ..pipeline.engine.utils import modify_paths + +############################################################################### +# PipeFunc object: callable interface to nipype.interface objects + + +class PipeFunc(object): + """ Callable interface to nipype.interface objects + + Use this to wrap nipype.interface object and call them + specifying their input with keyword arguments:: + + fsl_merge = PipeFunc(fsl.Merge, base_dir='.') + out = fsl_merge(in_files=files, dimension='t') + """ + + def __init__(self, interface, base_dir, callback=None): + """ + + Parameters + =========== + interface: a nipype interface class + The interface class to wrap + base_dir: a string + The directory in which the computation will be + stored + callback: a callable + An optional callable called each time after the function + is called. + """ + if not (isinstance(interface, type) + and issubclass(interface, BaseInterface)): + raise ValueError('the interface argument should be a nipype ' + 'interface class, but %s (type %s) was passed.' % + (interface, type(interface))) + self.interface = interface + base_dir = os.path.abspath(base_dir) + if not os.path.exists(base_dir) and os.path.isdir(base_dir): + raise ValueError('base_dir should be an existing directory') + self.base_dir = base_dir + doc = '%s\n%s' % (self.interface.__doc__, + self.interface.help(returnhelp=True)) + self.__doc__ = doc + self.callback = callback + + def __call__(self, **kwargs): + kwargs = modify_paths(kwargs, relative=False) + interface = self.interface() + # Set the inputs early to get some argument checking + interface.inputs.trait_set(**kwargs) + # Make a name for our node + inputs = interface.inputs.get_hashval() + hasher = hashlib.new('md5') + hasher.update(pickle.dumps(inputs)) + dir_name = '%s-%s' % (interface.__class__.__module__.replace('.', '-'), + interface.__class__.__name__) + job_name = hasher.hexdigest() + node = Node(interface, name=job_name) + node.base_dir = os.path.join(self.base_dir, dir_name) + + cwd = os.getcwd() + try: + out = node.run() + finally: + # node.run() changes to the node directory - if something goes + # wrong before it cds back you would end up in strange places + os.chdir(cwd) + if self.callback is not None: + self.callback(dir_name, job_name) + return out + + def __repr__(self): + return '{}({}.{}), base_dir={})'.format( + self.__class__.__name__, self.interface.__module__, + self.interface.__name__, self.base_dir) + + +############################################################################### +# Memory manager: provide some tracking about what is computed when, to +# be able to flush the disk + + +def read_log(filename, run_dict=None): + if run_dict is None: + run_dict = dict() + + with open(filename, 'r') as logfile: + for line in logfile: + dir_name, job_name = line[:-1].split('/') + jobs = run_dict.get(dir_name, set()) + jobs.add(job_name) + run_dict[dir_name] = jobs + return run_dict + + +def rm_all_but(base_dir, dirs_to_keep, warn=False): + """ Remove all the sub-directories of base_dir, but those listed + + Parameters + ============ + base_dir: string + The base directory + dirs_to_keep: set + The names of the directories to keep + """ + try: + all_dirs = os.listdir(base_dir) + except OSError: + 
"Dir has been deleted" + return + all_dirs = [d for d in all_dirs if not d.startswith('log.')] + dirs_to_rm = list(dirs_to_keep.symmetric_difference(all_dirs)) + for dir_name in dirs_to_rm: + dir_name = os.path.join(base_dir, dir_name) + if os.path.exists(dir_name): + if warn: + print('removing directory: %s' % dir_name) + shutil.rmtree(dir_name) + + +class _MemoryCallback(object): + "An object to avoid closures and have everything pickle" + + def __init__(self, memory): + self.memory = memory + + def __call__(self, dir_name, job_name): + self.memory._log_name(dir_name, job_name) + + +class Memory(object): + """ Memory context to provide caching for interfaces + + Parameters + ========== + base_dir: string + The directory name of the location for the caching + + Methods + ======= + cache + Creates a cacheable function from an nipype Interface class + clear_previous_runs + Removes from the disk all the runs that where not used after + the creation time of the specific Memory instance + clear_previous_runs + Removes from the disk all the runs that where not used after + the given time + """ + + def __init__(self, base_dir): + base_dir = os.path.join(os.path.abspath(base_dir), 'nipype_mem') + if not os.path.exists(base_dir): + os.mkdir(base_dir) + elif not os.path.isdir(base_dir): + raise ValueError('base_dir should be a directory') + self.base_dir = base_dir + open(os.path.join(base_dir, 'log.current'), 'a').close() + + def cache(self, interface): + """ Returns a callable that caches the output of an interface + + Parameters + ========== + interface: nipype interface + The nipype interface class to be wrapped and cached + + Returns + ======= + pipe_func: a PipeFunc callable object + An object that can be used as a function to apply the + interface to arguments. Inputs of the interface are given + as keyword arguments, bearing the same name as the name + in the inputs specs of the interface. + + Examples + ======== + + >>> from tempfile import mkdtemp + >>> mem = Memory(mkdtemp()) + >>> from nipype.interfaces import fsl + + Here we create a callable that can be used to apply an + fsl.Merge interface to files + + >>> fsl_merge = mem.cache(fsl.Merge) + + Now we apply it to a list of files. We need to specify the + list of input files and the dimension along which the files + should be merged. + + >>> results = fsl_merge(in_files=['a.nii', 'b.nii'], + ... dimension='t') # doctest: +SKIP + + We can retrieve the resulting file from the outputs: + >>> results.outputs.merged_file # doctest: +SKIP + '...' + """ + return PipeFunc(interface, self.base_dir, _MemoryCallback(self)) + + def _log_name(self, dir_name, job_name): + """ Increment counters tracking which cached function get executed. 
+ """ + base_dir = self.base_dir + # Every counter is a file opened in append mode and closed + # immediately to avoid race conditions in parallel computing: + # file appends are atomic + with open(os.path.join(base_dir, 'log.current'), 'a') as currentlog: + currentlog.write('%s/%s\n' % (dir_name, job_name)) + + t = time.localtime() + year_dir = os.path.join(base_dir, 'log.%i' % t.tm_year) + try: + os.mkdir(year_dir) + except OSError: + "Dir exists" + month_dir = os.path.join(year_dir, '%02i' % t.tm_mon) + try: + os.mkdir(month_dir) + except OSError: + "Dir exists" + + with open(os.path.join(month_dir, '%02i.log' % t.tm_mday), + 'a') as rotatefile: + rotatefile.write('%s/%s\n' % (dir_name, job_name)) + + def clear_previous_runs(self, warn=True): + """ Remove all the cache that where not used in the latest run of + the memory object: i.e. since the corresponding Python object + was created. + + Parameters + ========== + warn: boolean, optional + If true, echoes warning messages for all directory + removed + """ + base_dir = self.base_dir + latest_runs = read_log(os.path.join(base_dir, 'log.current')) + self._clear_all_but(latest_runs, warn=warn) + + def clear_runs_since(self, day=None, month=None, year=None, warn=True): + """ Remove all the cache that where not used since the given date + + Parameters + ========== + day, month, year: integers, optional + The integers specifying the latest day (in localtime) that + a node should have been accessed to be kept. If not + given, the current date is used. + warn: boolean, optional + If true, echoes warning messages for all directory + removed + """ + t = time.localtime() + day = day if day is not None else t.tm_mday + month = month if month is not None else t.tm_mon + year = year if year is not None else t.tm_year + base_dir = self.base_dir + cut_off_file = '%s/log.%i/%02i/%02i.log' % (base_dir, year, month, day) + logs_to_flush = list() + recent_runs = dict() + for log_name in glob.glob('%s/log.*/*/*.log' % base_dir): + if log_name < cut_off_file: + logs_to_flush.append(log_name) + else: + recent_runs = read_log(log_name, recent_runs) + self._clear_all_but(recent_runs, warn=warn) + for log_name in logs_to_flush: + os.remove(log_name) + + def _clear_all_but(self, runs, warn=True): + """ Remove all the runs appart from those given to the function + input. + """ + rm_all_but(self.base_dir, set(runs.keys()), warn=warn) + for dir_name, job_names in list(runs.items()): + rm_all_but( + os.path.join(self.base_dir, dir_name), job_names, warn=warn) + + def __repr__(self): + return '{}(base_dir={})'.format(self.__class__.__name__, self.base_dir) diff --git a/nipype/caching/tests/__init__.py b/nipype/caching/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/caching/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/caching/tests/test_memory.py b/nipype/caching/tests/test_memory.py new file mode 100644 index 0000000000..3ea594f22a --- /dev/null +++ b/nipype/caching/tests/test_memory.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +""" Test the nipype interface caching mechanism +""" + +from .. import Memory +from ...pipeline.engine.tests.test_engine import EngineTestInterface + +from ... 
import config +config.set_default_config() + +nb_runs = 0 + + +class SideEffectInterface(EngineTestInterface): + def _run_interface(self, runtime): + global nb_runs + nb_runs += 1 + runtime.returncode = 0 + return runtime + + +def test_caching(tmpdir): + old_rerun = config.get('execution', 'stop_on_first_rerun') + try: + # Prevent rerun to check that evaluation is computed only once + config.set('execution', 'stop_on_first_rerun', 'true') + mem = Memory(tmpdir.strpath) + first_nb_run = nb_runs + results = mem.cache(SideEffectInterface)(input1=2, input2=1) + assert nb_runs == first_nb_run + 1 + assert results.outputs.output1 == [1, 2] + results = mem.cache(SideEffectInterface)(input1=2, input2=1) + # Check that the node hasn't been rerun + assert nb_runs == first_nb_run + 1 + assert results.outputs.output1 == [1, 2] + results = mem.cache(SideEffectInterface)(input1=1, input2=1) + # Check that the node hasn been rerun + assert nb_runs == first_nb_run + 2 + assert results.outputs.output1 == [1, 1] + finally: + config.set('execution', 'stop_on_first_rerun', old_rerun) diff --git a/nipype/conftest.py b/nipype/conftest.py new file mode 100644 index 0000000000..9e646a2a4f --- /dev/null +++ b/nipype/conftest.py @@ -0,0 +1,21 @@ +import pytest +import numpy +import os + +DATADIR = os.path.realpath( + os.path.join(os.path.dirname(__file__), 'testing/data')) + + +@pytest.fixture(autouse=True) +def add_np(doctest_namespace): + doctest_namespace['np'] = numpy + doctest_namespace['os'] = os + + doctest_namespace["datadir"] = DATADIR + + +@pytest.fixture(autouse=True) +def in_testing(request): + # This seems to be a reliable way to distinguish tests from doctests + if request.function is None: + os.chdir(DATADIR) diff --git a/nipype/external/__init__.py b/nipype/external/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/external/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/external/cloghandler.py b/nipype/external/cloghandler.py new file mode 100644 index 0000000000..5fda934c84 --- /dev/null +++ b/nipype/external/cloghandler.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- +# Copyright 2008 Lowell Alleman +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" cloghandler.py: A smart replacement for the standard RotatingFileHandler + +ConcurrentRotatingFileHandler: This class is a log handler which is a drop-in +replacement for the python standard log handler 'RotateFileHandler', the primary +difference being that this handler will continue to write to the same file if +the file cannot be rotated for some reason, whereas the RotatingFileHandler will +strictly adhere to the maximum file size. Unfortunately, if you are using the +RotatingFileHandler on Windows, you will find that once an attempted rotation +fails, all subsequent log messages are dropped. The other major advantage of +this module is that multiple processes can safely write to a single log file. 
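+
+A minimal usage sketch (the log file path and size limits here are
+illustrative)::
+
+    import logging
+    from nipype.external.cloghandler import ConcurrentRotatingFileHandler
+
+    log = logging.getLogger()
+    handler = ConcurrentRotatingFileHandler('/var/log/myapp.log',
+                                            maxBytes=512 * 1024,
+                                            backupCount=5)
+    log.addHandler(handler)
+    log.info('hello, world')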
+
+To put it another way: This module's top priority is preserving your log
+records, whereas the standard library attempts to limit disk usage, which can
+potentially drop log messages. If you are trying to determine which module to
+use, there are a number of considerations: What is most important: strict disk
+space usage or preservation of log messages? What OSes are you supporting? Can
+you afford to have processes blocked by file locks?
+
+Concurrent access is handled by using file locks, which should ensure that log
+messages are not dropped or clobbered. This means that a file lock is acquired
+and released for every log message that is written to disk. (On Windows, you
+may also run into a temporary situation where the log file must be opened and
+closed for each log message.) This can have potential performance
+implications. In my testing, performance was more than adequate, but if you
+need a high-volume or low-latency solution, I suggest you look elsewhere.
+
+This module currently only supports the 'nt' and 'posix' platforms due to the
+usage of the portalocker module. I do not have access to any other platforms
+for testing; patches are welcome.
+
+See the README file for an example usage of this module.
+
+"""
+
+from builtins import range
+
+__version__ = "$Id: cloghandler.py 6175 2009-11-02 18:40:35Z lowell $"
+__author__ = "Lowell Alleman"
+__all__ = [
+    "ConcurrentRotatingFileHandler",
+]
+
+import os
+import sys
+from random import randint
+from logging import Handler
+from logging.handlers import BaseRotatingHandler
+
+try:
+    import codecs
+except ImportError:
+    codecs = None
+
+# Question/TODO: Should we have a fallback mode if we can't load portalocker?
+# We should still be better off than with the standard RotatingFileHandler
+# class, right? We do some rename checking... that should prevent some file
+# clobbering that the builtin class allows.
+
+# sibling module that handles all the ugly platform-specific details of file
+# locking
+from .portalocker import lock, unlock, LOCK_EX, LOCK_NB, LockException
+
+# A client can set this to true to automatically convert relative paths to
+# absolute paths (which will also hide the absolute path warnings)
+FORCE_ABSOLUTE_PATH = False
+
+
+class ConcurrentRotatingFileHandler(BaseRotatingHandler):
+    """
+    Handler for logging to a set of files, which switches from one file to the
+    next when the current file reaches a certain size. Multiple processes can
+    write to the log file concurrently, but this may mean that the file will
+    exceed the given size.
+    """
+
+    def __init__(self,
+                 filename,
+                 mode='a',
+                 maxBytes=0,
+                 backupCount=0,
+                 encoding=None,
+                 debug=True,
+                 supress_abs_warn=False):
+        """
+        Open the specified file and use it as the stream for logging.
+
+        By default, the file grows indefinitely. You can specify particular
+        values of maxBytes and backupCount to allow the file to rollover at
+        a predetermined size.
+
+        Rollover occurs whenever the current log file is nearly maxBytes in
+        length. If backupCount is >= 1, the system will successively create
+        new files with the same pathname as the base file, but with extensions
+        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
+        and a base file name of "app.log", you would get "app.log",
+        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
+        written to is always "app.log" - when it gets filled up, it is closed
+        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
+        exist, then they are renamed to "app.log.2", "app.log.3" etc.
+        respectively.
+
+        If maxBytes is zero, rollover never occurs.
+
+        On Windows, it is not possible to rename a file that is currently
+        opened by another process. This means that it is not possible to
+        rotate the log files if multiple processes are using the same log
+        file. In this case, the current log file will continue to grow until
+        the rotation can be completed successfully. In order for rotation to
+        be possible, all of the other processes need to close the file first.
+        A mechanism, called "degraded" mode, has been created for this
+        scenario. In degraded mode, the log file is closed after each log
+        message is written. So once all processes have entered degraded mode,
+        the next rotate log attempt should be successful and then normal
+        logging can be resumed.
+
+        This log handler assumes that all concurrent processes logging to a
+        single file will be using only this class, and that the exact same
+        parameters are provided to each instance of this class. If, for
+        example, two different processes are using this class, but with
+        different values for 'maxBytes' or 'backupCount', then odd behavior is
+        expected. The same is true if this class is used by one application,
+        but the RotatingFileHandler is used by another.
+
+        NOTE: You should always provide 'filename' as an absolute path, since
+        this class will need to re-open the file during rotation. If your
+        application calls os.chdir() then subsequent log files could be
+        created in the wrong directory.
+        """
+        # The question of absolute paths: I'm not sure what the 'right thing'
+        # is to do here. RotatingFileHandler simply ignores this possibility.
+        # I was going to call os.path.abspath(), but that potentially limits
+        # uses. For example, on Linux (any posix system?) you can rename a
+        # directory of a running app, and the app wouldn't notice as long as
+        # it only opens new files using relative paths. But that's not a
+        # "normal" thing to do, and an app calling os.chdir() is a much more
+        # likely scenario that should be supported. For the moment, we are
+        # just going to warn the user if they provide a relative path and do
+        # some other voodoo logic that you'll just have to review for
+        # yourself.
+
+        # if the given filename contains no path, we make an absolute path
+        if not os.path.isabs(filename):
+            if FORCE_ABSOLUTE_PATH or \
+               not os.path.split(filename)[0]:
+                filename = os.path.abspath(filename)
+            elif not supress_abs_warn:
+                from warnings import warn
+                warn(
+                    "The given 'filename' should be an absolute path. If your "
+                    "application calls os.chdir(), your logs may get messed up. "
+                    "Use 'supress_abs_warn=True' to hide this message.")
+        try:
+            BaseRotatingHandler.__init__(self, filename, mode, encoding)
+        except TypeError:  # Due to a different logging release without encoding support (Python 2.4.1 and earlier?)
+            BaseRotatingHandler.__init__(self, filename, mode)
+            self.encoding = encoding
+
+        self._rotateFailed = False
+        self.maxBytes = maxBytes
+        self.backupCount = backupCount
+        # Prevent multiple extensions on the lock file (Only handles the
+        # normal "*.log" case.)
+        if filename.endswith(".log"):
+            lock_file = filename[:-4]
+        else:
+            lock_file = filename
+        self.stream_lock = open(lock_file + ".lock", "w")
+
+        # For debug mode, swap out the "_degrade()" method with a more
+        # verbose one.
+        if debug:
+            self._degrade = self._degrade_debug
+
+    def _openFile(self, mode):
+        if self.encoding:
+            self.stream = codecs.open(self.baseFilename, mode, self.encoding)
+        else:
+            self.stream = open(self.baseFilename, mode)
+
+    def acquire(self):
+        """ Acquire thread and file locks. Also re-opens the log file when
+        running in 'degraded' mode. """
+        # handle thread lock
+        Handler.acquire(self)
+        lock(self.stream_lock, LOCK_EX)
+        if self.stream.closed:
+            self._openFile(self.mode)
+
+    def release(self):
+        """ Release file and thread locks. Flush stream and take care of
+        closing stream in 'degraded' mode. """
+        try:
+            if not self.stream.closed:
+                self.stream.flush()
+                if self._rotateFailed:
+                    self.stream.close()
+        except IOError:
+            if self._rotateFailed:
+                self.stream.close()
+        finally:
+            try:
+                unlock(self.stream_lock)
+            finally:
+                # release thread lock
+                Handler.release(self)
+
+    def close(self):
+        """
+        Closes the stream.
+        """
+        if not self.stream.closed:
+            self.stream.flush()
+            self.stream.close()
+        Handler.close(self)
+
+    def flush(self):
+        """ flush(): Do nothing.
+
+        Since a flush is issued in release(), we don't do it here. To do a
+        flush here, it would be necessary to re-lock everything, and it is
+        just easier and cleaner to do it all in release(), rather than
+        requiring two lock ops per handle() call.
+
+        Doing a flush() here would also introduce a window of opportunity for
+        another process to write to the log file in between calling
+        stream.write() and stream.flush(), which seems like a bad thing. """
+        pass
+
+    def _degrade(self, degrade, msg, *args):
+        """ Set degrade mode or not. Ignore msg. """
+        self._rotateFailed = degrade
+        del msg, args  # avoid pychecker warnings
+
+    def _degrade_debug(self, degrade, msg, *args):
+        """ A more colorful version of _degrade(). (This is enabled by
+        passing "debug=True" at initialization).
+        """
+        if degrade:
+            if not self._rotateFailed:
+                sys.stderr.write("Degrade mode - ENTERING - (pid=%d) %s\n" %
                                 (os.getpid(), msg % args))
+                self._rotateFailed = True
+        else:
+            if self._rotateFailed:
+                sys.stderr.write("Degrade mode - EXITING - (pid=%d) %s\n" %
+                                 (os.getpid(), msg % args))
+                self._rotateFailed = False
+
+    def doRollover(self):
+        """
+        Do a rollover, as described in __init__().
+        """
+        if self.backupCount <= 0:
+            # Don't keep any backups, just overwrite the existing backup file
+            # Locking doesn't much matter here; since we are overwriting it
+            # anyway
+            self.stream.close()
+            self._openFile("w")
+            return
+        self.stream.close()
+        try:
+            # Attempt to rename logfile to tempname: There is a slight
+            # race-condition here, but it seems unavoidable
+            tmpname = None
+            while not tmpname or os.path.exists(tmpname):
+                tmpname = "%s.rotate.%08d" % (self.baseFilename,
+                                              randint(0, 99999999))
+            try:
+                # Do a rename test to determine if we can successfully rename
+                # the log file
+                os.rename(self.baseFilename, tmpname)
+            except (IOError, OSError):
+                exc_value = sys.exc_info()[1]
+                self._degrade(True, "rename failed. File in use? "
+                              "exception=%s", exc_value)
+                return
+
+            # Q: Is there some way to protect this code from a
+            # KeyboardInterrupt? This isn't necessarily a data loss issue,
+            # but it certainly would break the rotation process during my
+            # stress testing.
+
+            # There is currently no mechanism in place to handle the situation
+            # where one of these log files cannot be renamed. (Example, user
+            # opens "logfile.3" in notepad)
+            for i in range(self.backupCount - 1, 0, -1):
+                sfn = "%s.%d" % (self.baseFilename, i)
+                dfn = "%s.%d" % (self.baseFilename, i + 1)
+                if os.path.exists(sfn):
+                    # print "%s -> %s" % (sfn, dfn)
+                    if os.path.exists(dfn):
+                        os.remove(dfn)
+                    os.rename(sfn, dfn)
+            dfn = self.baseFilename + ".1"
+            if os.path.exists(dfn):
+                os.remove(dfn)
+            os.rename(tmpname, dfn)
+            # print "%s -> %s" % (self.baseFilename, dfn)
+            self._degrade(False, "Rotation completed")
+        finally:
+            self._openFile(self.mode)
+
+    def shouldRollover(self, record):
+        """
+        Determine if rollover should occur.
+
+        For those that are keeping track, this differs from the standard
+        library's RotatingFileHandler class: because there is no promise to
+        keep the file size under maxBytes, we ignore the length of the
+        current record.
+        """
+        del record  # avoid pychecker warnings
+        if self._shouldRollover():
+            # if some other process already did the rollover we might
+            # have checked log.1, so we reopen the stream and check again on
+            # the right log file
+            self.stream.close()
+            self._openFile(self.mode)
+            return self._shouldRollover()
+        return False
+
+    def _shouldRollover(self):
+        if self.maxBytes > 0:  # are we rolling over?
+            try:
+                self.stream.seek(
+                    0, 2)  # due to non-posix-compliant Windows feature
+            except IOError:
+                return True
+            if self.stream.tell() >= self.maxBytes:
+                return True
+            else:
+                self._degrade(False,
+                              "Rotation done or not needed at this time")
+        return False
+
+
+# Publish this class to the "logging.handlers" module so that it can be used
+# from a logging config file via logging.config.fileConfig().
+import logging.handlers
+logging.handlers.ConcurrentRotatingFileHandler = ConcurrentRotatingFileHandler
diff --git a/nipype/external/d3.js b/nipype/external/d3.js
new file mode 100644
index 0000000000..e1ddb0379e
--- /dev/null
+++ b/nipype/external/d3.js
@@ -0,0 +1,9255 @@
+!function() {
+  var d3 = {
+    version: "3.4.8"
+  };
+  if (!Date.now) Date.now = function() {
+    return +new Date();
+  };
+  var d3_arraySlice = [].slice, d3_array = function(list) {
+    return d3_arraySlice.call(list);
+  };
+  var d3_document = document, d3_documentElement = d3_document.documentElement, d3_window = window;
+  try {
+    d3_array(d3_documentElement.childNodes)[0].nodeType;
+  } catch (e) {
+    d3_array = function(list) {
+      var i = list.length, array = new Array(i);
+      while (i--) array[i] = list[i];
+      return array;
+    };
+  }
+  try {
+    d3_document.createElement("div").style.setProperty("opacity", 0, "");
+  } catch (error) {
+    var d3_element_prototype = d3_window.Element.prototype, d3_element_setAttribute = d3_element_prototype.setAttribute, d3_element_setAttributeNS = d3_element_prototype.setAttributeNS, d3_style_prototype = d3_window.CSSStyleDeclaration.prototype, d3_style_setProperty = d3_style_prototype.setProperty;
+    d3_element_prototype.setAttribute = function(name, value) {
+      d3_element_setAttribute.call(this, name, value + "");
+    };
+    d3_element_prototype.setAttributeNS = function(space, local, value) {
+      d3_element_setAttributeNS.call(this, space, local, value + "");
+    };
+    d3_style_prototype.setProperty = function(name, value, priority) {
+      d3_style_setProperty.call(this, name, value + "", priority);
+    };
+  }
+  d3.ascending = d3_ascending;
+  function d3_ascending(a, b) {
+    return a < b ? -1 : a > b ? 1 : a >= b ? 0 : NaN;
+  }
+  d3.descending = function(a, b) {
+    return b < a ? -1 : b > a ? 1 : b >= a ?
0 : NaN; + }; + d3.min = function(array, f) { + var i = -1, n = array.length, a, b; + if (arguments.length === 1) { + while (++i < n && !((a = array[i]) != null && a <= a)) a = undefined; + while (++i < n) if ((b = array[i]) != null && a > b) a = b; + } else { + while (++i < n && !((a = f.call(array, array[i], i)) != null && a <= a)) a = undefined; + while (++i < n) if ((b = f.call(array, array[i], i)) != null && a > b) a = b; + } + return a; + }; + d3.max = function(array, f) { + var i = -1, n = array.length, a, b; + if (arguments.length === 1) { + while (++i < n && !((a = array[i]) != null && a <= a)) a = undefined; + while (++i < n) if ((b = array[i]) != null && b > a) a = b; + } else { + while (++i < n && !((a = f.call(array, array[i], i)) != null && a <= a)) a = undefined; + while (++i < n) if ((b = f.call(array, array[i], i)) != null && b > a) a = b; + } + return a; + }; + d3.extent = function(array, f) { + var i = -1, n = array.length, a, b, c; + if (arguments.length === 1) { + while (++i < n && !((a = c = array[i]) != null && a <= a)) a = c = undefined; + while (++i < n) if ((b = array[i]) != null) { + if (a > b) a = b; + if (c < b) c = b; + } + } else { + while (++i < n && !((a = c = f.call(array, array[i], i)) != null && a <= a)) a = undefined; + while (++i < n) if ((b = f.call(array, array[i], i)) != null) { + if (a > b) a = b; + if (c < b) c = b; + } + } + return [ a, c ]; + }; + d3.sum = function(array, f) { + var s = 0, n = array.length, a, i = -1; + if (arguments.length === 1) { + while (++i < n) if (!isNaN(a = +array[i])) s += a; + } else { + while (++i < n) if (!isNaN(a = +f.call(array, array[i], i))) s += a; + } + return s; + }; + function d3_number(x) { + return x != null && !isNaN(x); + } + d3.mean = function(array, f) { + var s = 0, n = array.length, a, i = -1, j = n; + if (arguments.length === 1) { + while (++i < n) if (d3_number(a = array[i])) s += a; else --j; + } else { + while (++i < n) if (d3_number(a = f.call(array, array[i], i))) s += a; else --j; + } + return j ? s / j : undefined; + }; + d3.quantile = function(values, p) { + var H = (values.length - 1) * p + 1, h = Math.floor(H), v = +values[h - 1], e = H - h; + return e ? v + e * (values[h] - v) : v; + }; + d3.median = function(array, f) { + if (arguments.length > 1) array = array.map(f); + array = array.filter(d3_number); + return array.length ? d3.quantile(array.sort(d3_ascending), .5) : undefined; + }; + function d3_bisector(compare) { + return { + left: function(a, x, lo, hi) { + if (arguments.length < 3) lo = 0; + if (arguments.length < 4) hi = a.length; + while (lo < hi) { + var mid = lo + hi >>> 1; + if (compare(a[mid], x) < 0) lo = mid + 1; else hi = mid; + } + return lo; + }, + right: function(a, x, lo, hi) { + if (arguments.length < 3) lo = 0; + if (arguments.length < 4) hi = a.length; + while (lo < hi) { + var mid = lo + hi >>> 1; + if (compare(a[mid], x) > 0) hi = mid; else lo = mid + 1; + } + return lo; + } + }; + } + var d3_bisect = d3_bisector(d3_ascending); + d3.bisectLeft = d3_bisect.left; + d3.bisect = d3.bisectRight = d3_bisect.right; + d3.bisector = function(f) { + return d3_bisector(f.length === 1 ? 
function(d, x) { + return d3_ascending(f(d), x); + } : f); + }; + d3.shuffle = function(array) { + var m = array.length, t, i; + while (m) { + i = Math.random() * m-- | 0; + t = array[m], array[m] = array[i], array[i] = t; + } + return array; + }; + d3.permute = function(array, indexes) { + var i = indexes.length, permutes = new Array(i); + while (i--) permutes[i] = array[indexes[i]]; + return permutes; + }; + d3.pairs = function(array) { + var i = 0, n = array.length - 1, p0, p1 = array[0], pairs = new Array(n < 0 ? 0 : n); + while (i < n) pairs[i] = [ p0 = p1, p1 = array[++i] ]; + return pairs; + }; + d3.zip = function() { + if (!(n = arguments.length)) return []; + for (var i = -1, m = d3.min(arguments, d3_zipLength), zips = new Array(m); ++i < m; ) { + for (var j = -1, n, zip = zips[i] = new Array(n); ++j < n; ) { + zip[j] = arguments[j][i]; + } + } + return zips; + }; + function d3_zipLength(d) { + return d.length; + } + d3.transpose = function(matrix) { + return d3.zip.apply(d3, matrix); + }; + d3.keys = function(map) { + var keys = []; + for (var key in map) keys.push(key); + return keys; + }; + d3.values = function(map) { + var values = []; + for (var key in map) values.push(map[key]); + return values; + }; + d3.entries = function(map) { + var entries = []; + for (var key in map) entries.push({ + key: key, + value: map[key] + }); + return entries; + }; + d3.merge = function(arrays) { + var n = arrays.length, m, i = -1, j = 0, merged, array; + while (++i < n) j += arrays[i].length; + merged = new Array(j); + while (--n >= 0) { + array = arrays[n]; + m = array.length; + while (--m >= 0) { + merged[--j] = array[m]; + } + } + return merged; + }; + var abs = Math.abs; + d3.range = function(start, stop, step) { + if (arguments.length < 3) { + step = 1; + if (arguments.length < 2) { + stop = start; + start = 0; + } + } + if ((stop - start) / step === Infinity) throw new Error("infinite range"); + var range = [], k = d3_range_integerScale(abs(step)), i = -1, j; + start *= k, stop *= k, step *= k; + if (step < 0) while ((j = start + step * ++i) > stop) range.push(j / k); else while ((j = start + step * ++i) < stop) range.push(j / k); + return range; + }; + function d3_range_integerScale(x) { + var k = 1; + while (x * k % 1) k *= 10; + return k; + } + function d3_class(ctor, properties) { + try { + for (var key in properties) { + Object.defineProperty(ctor.prototype, key, { + value: properties[key], + enumerable: false + }); + } + } catch (e) { + ctor.prototype = properties; + } + } + d3.map = function(object) { + var map = new d3_Map(); + if (object instanceof d3_Map) object.forEach(function(key, value) { + map.set(key, value); + }); else for (var key in object) map.set(key, object[key]); + return map; + }; + function d3_Map() {} + d3_class(d3_Map, { + has: d3_map_has, + get: function(key) { + return this[d3_map_prefix + key]; + }, + set: function(key, value) { + return this[d3_map_prefix + key] = value; + }, + remove: d3_map_remove, + keys: d3_map_keys, + values: function() { + var values = []; + this.forEach(function(key, value) { + values.push(value); + }); + return values; + }, + entries: function() { + var entries = []; + this.forEach(function(key, value) { + entries.push({ + key: key, + value: value + }); + }); + return entries; + }, + size: d3_map_size, + empty: d3_map_empty, + forEach: function(f) { + for (var key in this) if (key.charCodeAt(0) === d3_map_prefixCode) f.call(this, key.substring(1), this[key]); + } + }); + var d3_map_prefix = "\x00", d3_map_prefixCode = 
d3_map_prefix.charCodeAt(0); + function d3_map_has(key) { + return d3_map_prefix + key in this; + } + function d3_map_remove(key) { + key = d3_map_prefix + key; + return key in this && delete this[key]; + } + function d3_map_keys() { + var keys = []; + this.forEach(function(key) { + keys.push(key); + }); + return keys; + } + function d3_map_size() { + var size = 0; + for (var key in this) if (key.charCodeAt(0) === d3_map_prefixCode) ++size; + return size; + } + function d3_map_empty() { + for (var key in this) if (key.charCodeAt(0) === d3_map_prefixCode) return false; + return true; + } + d3.nest = function() { + var nest = {}, keys = [], sortKeys = [], sortValues, rollup; + function map(mapType, array, depth) { + if (depth >= keys.length) return rollup ? rollup.call(nest, array) : sortValues ? array.sort(sortValues) : array; + var i = -1, n = array.length, key = keys[depth++], keyValue, object, setter, valuesByKey = new d3_Map(), values; + while (++i < n) { + if (values = valuesByKey.get(keyValue = key(object = array[i]))) { + values.push(object); + } else { + valuesByKey.set(keyValue, [ object ]); + } + } + if (mapType) { + object = mapType(); + setter = function(keyValue, values) { + object.set(keyValue, map(mapType, values, depth)); + }; + } else { + object = {}; + setter = function(keyValue, values) { + object[keyValue] = map(mapType, values, depth); + }; + } + valuesByKey.forEach(setter); + return object; + } + function entries(map, depth) { + if (depth >= keys.length) return map; + var array = [], sortKey = sortKeys[depth++]; + map.forEach(function(key, keyMap) { + array.push({ + key: key, + values: entries(keyMap, depth) + }); + }); + return sortKey ? array.sort(function(a, b) { + return sortKey(a.key, b.key); + }) : array; + } + nest.map = function(array, mapType) { + return map(mapType, array, 0); + }; + nest.entries = function(array) { + return entries(map(d3.map, array, 0), 0); + }; + nest.key = function(d) { + keys.push(d); + return nest; + }; + nest.sortKeys = function(order) { + sortKeys[keys.length - 1] = order; + return nest; + }; + nest.sortValues = function(order) { + sortValues = order; + return nest; + }; + nest.rollup = function(f) { + rollup = f; + return nest; + }; + return nest; + }; + d3.set = function(array) { + var set = new d3_Set(); + if (array) for (var i = 0, n = array.length; i < n; ++i) set.add(array[i]); + return set; + }; + function d3_Set() {} + d3_class(d3_Set, { + has: d3_map_has, + add: function(value) { + this[d3_map_prefix + value] = true; + return value; + }, + remove: function(value) { + value = d3_map_prefix + value; + return value in this && delete this[value]; + }, + values: d3_map_keys, + size: d3_map_size, + empty: d3_map_empty, + forEach: function(f) { + for (var value in this) if (value.charCodeAt(0) === d3_map_prefixCode) f.call(this, value.substring(1)); + } + }); + d3.behavior = {}; + d3.rebind = function(target, source) { + var i = 1, n = arguments.length, method; + while (++i < n) target[method = arguments[i]] = d3_rebind(target, source, source[method]); + return target; + }; + function d3_rebind(target, source, method) { + return function() { + var value = method.apply(source, arguments); + return value === source ? 
target : value; + }; + } + function d3_vendorSymbol(object, name) { + if (name in object) return name; + name = name.charAt(0).toUpperCase() + name.substring(1); + for (var i = 0, n = d3_vendorPrefixes.length; i < n; ++i) { + var prefixName = d3_vendorPrefixes[i] + name; + if (prefixName in object) return prefixName; + } + } + var d3_vendorPrefixes = [ "webkit", "ms", "moz", "Moz", "o", "O" ]; + function d3_noop() {} + d3.dispatch = function() { + var dispatch = new d3_dispatch(), i = -1, n = arguments.length; + while (++i < n) dispatch[arguments[i]] = d3_dispatch_event(dispatch); + return dispatch; + }; + function d3_dispatch() {} + d3_dispatch.prototype.on = function(type, listener) { + var i = type.indexOf("."), name = ""; + if (i >= 0) { + name = type.substring(i + 1); + type = type.substring(0, i); + } + if (type) return arguments.length < 2 ? this[type].on(name) : this[type].on(name, listener); + if (arguments.length === 2) { + if (listener == null) for (type in this) { + if (this.hasOwnProperty(type)) this[type].on(name, null); + } + return this; + } + }; + function d3_dispatch_event(dispatch) { + var listeners = [], listenerByName = new d3_Map(); + function event() { + var z = listeners, i = -1, n = z.length, l; + while (++i < n) if (l = z[i].on) l.apply(this, arguments); + return dispatch; + } + event.on = function(name, listener) { + var l = listenerByName.get(name), i; + if (arguments.length < 2) return l && l.on; + if (l) { + l.on = null; + listeners = listeners.slice(0, i = listeners.indexOf(l)).concat(listeners.slice(i + 1)); + listenerByName.remove(name); + } + if (listener) listeners.push(listenerByName.set(name, { + on: listener + })); + return dispatch; + }; + return event; + } + d3.event = null; + function d3_eventPreventDefault() { + d3.event.preventDefault(); + } + function d3_eventSource() { + var e = d3.event, s; + while (s = e.sourceEvent) e = s; + return e; + } + function d3_eventDispatch(target) { + var dispatch = new d3_dispatch(), i = 0, n = arguments.length; + while (++i < n) dispatch[arguments[i]] = d3_dispatch_event(dispatch); + dispatch.of = function(thiz, argumentz) { + return function(e1) { + try { + var e0 = e1.sourceEvent = d3.event; + e1.target = target; + d3.event = e1; + dispatch[e1.type].apply(thiz, argumentz); + } finally { + d3.event = e0; + } + }; + }; + return dispatch; + } + d3.requote = function(s) { + return s.replace(d3_requote_re, "\\$&"); + }; + var d3_requote_re = /[\\\^\$\*\+\?\|\[\]\(\)\.\{\}]/g; + var d3_subclass = {}.__proto__ ? 
function(object, prototype) { + object.__proto__ = prototype; + } : function(object, prototype) { + for (var property in prototype) object[property] = prototype[property]; + }; + function d3_selection(groups) { + d3_subclass(groups, d3_selectionPrototype); + return groups; + } + var d3_select = function(s, n) { + return n.querySelector(s); + }, d3_selectAll = function(s, n) { + return n.querySelectorAll(s); + }, d3_selectMatcher = d3_documentElement[d3_vendorSymbol(d3_documentElement, "matchesSelector")], d3_selectMatches = function(n, s) { + return d3_selectMatcher.call(n, s); + }; + if (typeof Sizzle === "function") { + d3_select = function(s, n) { + return Sizzle(s, n)[0] || null; + }; + d3_selectAll = Sizzle; + d3_selectMatches = Sizzle.matchesSelector; + } + d3.selection = function() { + return d3_selectionRoot; + }; + var d3_selectionPrototype = d3.selection.prototype = []; + d3_selectionPrototype.select = function(selector) { + var subgroups = [], subgroup, subnode, group, node; + selector = d3_selection_selector(selector); + for (var j = -1, m = this.length; ++j < m; ) { + subgroups.push(subgroup = []); + subgroup.parentNode = (group = this[j]).parentNode; + for (var i = -1, n = group.length; ++i < n; ) { + if (node = group[i]) { + subgroup.push(subnode = selector.call(node, node.__data__, i, j)); + if (subnode && "__data__" in node) subnode.__data__ = node.__data__; + } else { + subgroup.push(null); + } + } + } + return d3_selection(subgroups); + }; + function d3_selection_selector(selector) { + return typeof selector === "function" ? selector : function() { + return d3_select(selector, this); + }; + } + d3_selectionPrototype.selectAll = function(selector) { + var subgroups = [], subgroup, node; + selector = d3_selection_selectorAll(selector); + for (var j = -1, m = this.length; ++j < m; ) { + for (var group = this[j], i = -1, n = group.length; ++i < n; ) { + if (node = group[i]) { + subgroups.push(subgroup = d3_array(selector.call(node, node.__data__, i, j))); + subgroup.parentNode = node; + } + } + } + return d3_selection(subgroups); + }; + function d3_selection_selectorAll(selector) { + return typeof selector === "function" ? selector : function() { + return d3_selectAll(selector, this); + }; + } + var d3_nsPrefix = { + svg: "http://www.w3.org/2000/svg", + xhtml: "http://www.w3.org/1999/xhtml", + xlink: "http://www.w3.org/1999/xlink", + xml: "http://www.w3.org/XML/1998/namespace", + xmlns: "http://www.w3.org/2000/xmlns/" + }; + d3.ns = { + prefix: d3_nsPrefix, + qualify: function(name) { + var i = name.indexOf(":"), prefix = name; + if (i >= 0) { + prefix = name.substring(0, i); + name = name.substring(i + 1); + } + return d3_nsPrefix.hasOwnProperty(prefix) ? { + space: d3_nsPrefix[prefix], + local: name + } : name; + } + }; + d3_selectionPrototype.attr = function(name, value) { + if (arguments.length < 2) { + if (typeof name === "string") { + var node = this.node(); + name = d3.ns.qualify(name); + return name.local ? 
node.getAttributeNS(name.space, name.local) : node.getAttribute(name); + } + for (value in name) this.each(d3_selection_attr(value, name[value])); + return this; + } + return this.each(d3_selection_attr(name, value)); + }; + function d3_selection_attr(name, value) { + name = d3.ns.qualify(name); + function attrNull() { + this.removeAttribute(name); + } + function attrNullNS() { + this.removeAttributeNS(name.space, name.local); + } + function attrConstant() { + this.setAttribute(name, value); + } + function attrConstantNS() { + this.setAttributeNS(name.space, name.local, value); + } + function attrFunction() { + var x = value.apply(this, arguments); + if (x == null) this.removeAttribute(name); else this.setAttribute(name, x); + } + function attrFunctionNS() { + var x = value.apply(this, arguments); + if (x == null) this.removeAttributeNS(name.space, name.local); else this.setAttributeNS(name.space, name.local, x); + } + return value == null ? name.local ? attrNullNS : attrNull : typeof value === "function" ? name.local ? attrFunctionNS : attrFunction : name.local ? attrConstantNS : attrConstant; + } + function d3_collapse(s) { + return s.trim().replace(/\s+/g, " "); + } + d3_selectionPrototype.classed = function(name, value) { + if (arguments.length < 2) { + if (typeof name === "string") { + var node = this.node(), n = (name = d3_selection_classes(name)).length, i = -1; + if (value = node.classList) { + while (++i < n) if (!value.contains(name[i])) return false; + } else { + value = node.getAttribute("class"); + while (++i < n) if (!d3_selection_classedRe(name[i]).test(value)) return false; + } + return true; + } + for (value in name) this.each(d3_selection_classed(value, name[value])); + return this; + } + return this.each(d3_selection_classed(name, value)); + }; + function d3_selection_classedRe(name) { + return new RegExp("(?:^|\\s+)" + d3.requote(name) + "(?:\\s+|$)", "g"); + } + function d3_selection_classes(name) { + return name.trim().split(/^|\s+/); + } + function d3_selection_classed(name, value) { + name = d3_selection_classes(name).map(d3_selection_classedName); + var n = name.length; + function classedConstant() { + var i = -1; + while (++i < n) name[i](this, value); + } + function classedFunction() { + var i = -1, x = value.apply(this, arguments); + while (++i < n) name[i](this, x); + } + return typeof value === "function" ? classedFunction : classedConstant; + } + function d3_selection_classedName(name) { + var re = d3_selection_classedRe(name); + return function(node, value) { + if (c = node.classList) return value ? 
c.add(name) : c.remove(name); + var c = node.getAttribute("class") || ""; + if (value) { + re.lastIndex = 0; + if (!re.test(c)) node.setAttribute("class", d3_collapse(c + " " + name)); + } else { + node.setAttribute("class", d3_collapse(c.replace(re, " "))); + } + }; + } + d3_selectionPrototype.style = function(name, value, priority) { + var n = arguments.length; + if (n < 3) { + if (typeof name !== "string") { + if (n < 2) value = ""; + for (priority in name) this.each(d3_selection_style(priority, name[priority], value)); + return this; + } + if (n < 2) return d3_window.getComputedStyle(this.node(), null).getPropertyValue(name); + priority = ""; + } + return this.each(d3_selection_style(name, value, priority)); + }; + function d3_selection_style(name, value, priority) { + function styleNull() { + this.style.removeProperty(name); + } + function styleConstant() { + this.style.setProperty(name, value, priority); + } + function styleFunction() { + var x = value.apply(this, arguments); + if (x == null) this.style.removeProperty(name); else this.style.setProperty(name, x, priority); + } + return value == null ? styleNull : typeof value === "function" ? styleFunction : styleConstant; + } + d3_selectionPrototype.property = function(name, value) { + if (arguments.length < 2) { + if (typeof name === "string") return this.node()[name]; + for (value in name) this.each(d3_selection_property(value, name[value])); + return this; + } + return this.each(d3_selection_property(name, value)); + }; + function d3_selection_property(name, value) { + function propertyNull() { + delete this[name]; + } + function propertyConstant() { + this[name] = value; + } + function propertyFunction() { + var x = value.apply(this, arguments); + if (x == null) delete this[name]; else this[name] = x; + } + return value == null ? propertyNull : typeof value === "function" ? propertyFunction : propertyConstant; + } + d3_selectionPrototype.text = function(value) { + return arguments.length ? this.each(typeof value === "function" ? function() { + var v = value.apply(this, arguments); + this.textContent = v == null ? "" : v; + } : value == null ? function() { + this.textContent = ""; + } : function() { + this.textContent = value; + }) : this.node().textContent; + }; + d3_selectionPrototype.html = function(value) { + return arguments.length ? this.each(typeof value === "function" ? function() { + var v = value.apply(this, arguments); + this.innerHTML = v == null ? "" : v; + } : value == null ? function() { + this.innerHTML = ""; + } : function() { + this.innerHTML = value; + }) : this.node().innerHTML; + }; + d3_selectionPrototype.append = function(name) { + name = d3_selection_creator(name); + return this.select(function() { + return this.appendChild(name.apply(this, arguments)); + }); + }; + function d3_selection_creator(name) { + return typeof name === "function" ? name : (name = d3.ns.qualify(name)).local ? 
function() { + return this.ownerDocument.createElementNS(name.space, name.local); + } : function() { + return this.ownerDocument.createElementNS(this.namespaceURI, name); + }; + } + d3_selectionPrototype.insert = function(name, before) { + name = d3_selection_creator(name); + before = d3_selection_selector(before); + return this.select(function() { + return this.insertBefore(name.apply(this, arguments), before.apply(this, arguments) || null); + }); + }; + d3_selectionPrototype.remove = function() { + return this.each(function() { + var parent = this.parentNode; + if (parent) parent.removeChild(this); + }); + }; + d3_selectionPrototype.data = function(value, key) { + var i = -1, n = this.length, group, node; + if (!arguments.length) { + value = new Array(n = (group = this[0]).length); + while (++i < n) { + if (node = group[i]) { + value[i] = node.__data__; + } + } + return value; + } + function bind(group, groupData) { + var i, n = group.length, m = groupData.length, n0 = Math.min(n, m), updateNodes = new Array(m), enterNodes = new Array(m), exitNodes = new Array(n), node, nodeData; + if (key) { + var nodeByKeyValue = new d3_Map(), dataByKeyValue = new d3_Map(), keyValues = [], keyValue; + for (i = -1; ++i < n; ) { + keyValue = key.call(node = group[i], node.__data__, i); + if (nodeByKeyValue.has(keyValue)) { + exitNodes[i] = node; + } else { + nodeByKeyValue.set(keyValue, node); + } + keyValues.push(keyValue); + } + for (i = -1; ++i < m; ) { + keyValue = key.call(groupData, nodeData = groupData[i], i); + if (node = nodeByKeyValue.get(keyValue)) { + updateNodes[i] = node; + node.__data__ = nodeData; + } else if (!dataByKeyValue.has(keyValue)) { + enterNodes[i] = d3_selection_dataNode(nodeData); + } + dataByKeyValue.set(keyValue, nodeData); + nodeByKeyValue.remove(keyValue); + } + for (i = -1; ++i < n; ) { + if (nodeByKeyValue.has(keyValues[i])) { + exitNodes[i] = group[i]; + } + } + } else { + for (i = -1; ++i < n0; ) { + node = group[i]; + nodeData = groupData[i]; + if (node) { + node.__data__ = nodeData; + updateNodes[i] = node; + } else { + enterNodes[i] = d3_selection_dataNode(nodeData); + } + } + for (;i < m; ++i) { + enterNodes[i] = d3_selection_dataNode(groupData[i]); + } + for (;i < n; ++i) { + exitNodes[i] = group[i]; + } + } + enterNodes.update = updateNodes; + enterNodes.parentNode = updateNodes.parentNode = exitNodes.parentNode = group.parentNode; + enter.push(enterNodes); + update.push(updateNodes); + exit.push(exitNodes); + } + var enter = d3_selection_enter([]), update = d3_selection([]), exit = d3_selection([]); + if (typeof value === "function") { + while (++i < n) { + bind(group = this[i], value.call(group, group.parentNode.__data__, i)); + } + } else { + while (++i < n) { + bind(group = this[i], value); + } + } + update.enter = function() { + return enter; + }; + update.exit = function() { + return exit; + }; + return update; + }; + function d3_selection_dataNode(data) { + return { + __data__: data + }; + } + d3_selectionPrototype.datum = function(value) { + return arguments.length ? 
φ0 = φ; + if (φ > φ1) φ1 = φ; + } + function linePoint(λ, φ) { + var p = d3_geo_cartesian([ λ * d3_radians, φ * d3_radians ]); + if (p0) { + var normal = d3_geo_cartesianCross(p0, p), equatorial = [ normal[1], -normal[0], 0 ], inflection = d3_geo_cartesianCross(equatorial, normal); + d3_geo_cartesianNormalize(inflection); + inflection = d3_geo_spherical(inflection); + var dλ = λ - λ_, s = dλ > 0 ? 1 : -1, λi = inflection[0] * d3_degrees * s, antimeridian = abs(dλ) > 180; + if (antimeridian ^ (s * λ_ < λi && λi < s * λ)) { + var φi = inflection[1] * d3_degrees; + if (φi > φ1) φ1 = φi; + } else if (λi = (λi + 360) % 360 - 180, antimeridian ^ (s * λ_ < λi && λi < s * λ)) { + var φi = -inflection[1] * d3_degrees; + if (φi < φ0) φ0 = φi; + } else { + if (φ < φ0) φ0 = φ; + if (φ > φ1) φ1 = φ; + } + if (antimeridian) { + if (λ < λ_) { + if (angle(λ0, λ) > angle(λ0, λ1)) λ1 = λ; + } else { + if (angle(λ, λ1) > angle(λ0, λ1)) λ0 = λ; + } + } else { + if (λ1 >= λ0) { + if (λ < λ0) λ0 = λ; + if (λ > λ1) λ1 = λ; + } else { + if (λ > λ_) { + if (angle(λ0, λ) > angle(λ0, λ1)) λ1 = λ; + } else { + if (angle(λ, λ1) > angle(λ0, λ1)) λ0 = λ; + } + } + } + } else { + point(λ, φ); + } + p0 = p, λ_ = λ; + } + function lineStart() { + bound.point = linePoint; + } + function lineEnd() { + range[0] = λ0, range[1] = λ1; + bound.point = point; + p0 = null; + } + function ringPoint(λ, φ) { + if (p0) { + var dλ = λ - λ_; + dλSum += abs(dλ) > 180 ? dλ + (dλ > 0 ? 360 : -360) : dλ; + } else λ__ = λ, φ__ = φ; + d3_geo_area.point(λ, φ); + linePoint(λ, φ); + } + function ringStart() { + d3_geo_area.lineStart(); + } + function ringEnd() { + ringPoint(λ__, φ__); + d3_geo_area.lineEnd(); + if (abs(dλSum) > ε) λ0 = -(λ1 = 180); + range[0] = λ0, range[1] = λ1; + p0 = null; + } + function angle(λ0, λ1) { + return (λ1 -= λ0) < 0 ? λ1 + 360 : λ1; + } + function compareRanges(a, b) { + return a[0] - b[0]; + } + function withinRange(x, range) { + return range[0] <= range[1] ? range[0] <= x && x <= range[1] : x < range[0] || range[1] < x; + } + return function(feature) { + φ1 = λ1 = -(λ0 = φ0 = Infinity); + ranges = []; + d3.geo.stream(feature, bound); + var n = ranges.length; + if (n) { + ranges.sort(compareRanges); + for (var i = 1, a = ranges[0], b, merged = [ a ]; i < n; ++i) { + b = ranges[i]; + if (withinRange(b[0], a) || withinRange(b[1], a)) { + if (angle(a[0], b[1]) > angle(a[0], a[1])) a[1] = b[1]; + if (angle(b[0], a[1]) > angle(a[0], a[1])) a[0] = b[0]; + } else { + merged.push(a = b); + } + } + var best = -Infinity, dλ; + for (var n = merged.length - 1, i = 0, a = merged[n], b; i <= n; a = b, ++i) { + b = merged[i]; + if ((dλ = angle(a[1], b[0])) > best) best = dλ, λ0 = b[0], λ1 = a[1]; + } + } + ranges = range = null; + return λ0 === Infinity || φ0 === Infinity ? 
[ [ NaN, NaN ], [ NaN, NaN ] ] : [ [ λ0, φ0 ], [ λ1, φ1 ] ]; + }; + }(); + d3.geo.centroid = function(object) { + d3_geo_centroidW0 = d3_geo_centroidW1 = d3_geo_centroidX0 = d3_geo_centroidY0 = d3_geo_centroidZ0 = d3_geo_centroidX1 = d3_geo_centroidY1 = d3_geo_centroidZ1 = d3_geo_centroidX2 = d3_geo_centroidY2 = d3_geo_centroidZ2 = 0; + d3.geo.stream(object, d3_geo_centroid); + var x = d3_geo_centroidX2, y = d3_geo_centroidY2, z = d3_geo_centroidZ2, m = x * x + y * y + z * z; + if (m < ε2) { + x = d3_geo_centroidX1, y = d3_geo_centroidY1, z = d3_geo_centroidZ1; + if (d3_geo_centroidW1 < ε) x = d3_geo_centroidX0, y = d3_geo_centroidY0, z = d3_geo_centroidZ0; + m = x * x + y * y + z * z; + if (m < ε2) return [ NaN, NaN ]; + } + return [ Math.atan2(y, x) * d3_degrees, d3_asin(z / Math.sqrt(m)) * d3_degrees ]; + }; + var d3_geo_centroidW0, d3_geo_centroidW1, d3_geo_centroidX0, d3_geo_centroidY0, d3_geo_centroidZ0, d3_geo_centroidX1, d3_geo_centroidY1, d3_geo_centroidZ1, d3_geo_centroidX2, d3_geo_centroidY2, d3_geo_centroidZ2; + var d3_geo_centroid = { + sphere: d3_noop, + point: d3_geo_centroidPoint, + lineStart: d3_geo_centroidLineStart, + lineEnd: d3_geo_centroidLineEnd, + polygonStart: function() { + d3_geo_centroid.lineStart = d3_geo_centroidRingStart; + }, + polygonEnd: function() { + d3_geo_centroid.lineStart = d3_geo_centroidLineStart; + } + }; + function d3_geo_centroidPoint(λ, φ) { + λ *= d3_radians; + var cosφ = Math.cos(φ *= d3_radians); + d3_geo_centroidPointXYZ(cosφ * Math.cos(λ), cosφ * Math.sin(λ), Math.sin(φ)); + } + function d3_geo_centroidPointXYZ(x, y, z) { + ++d3_geo_centroidW0; + d3_geo_centroidX0 += (x - d3_geo_centroidX0) / d3_geo_centroidW0; + d3_geo_centroidY0 += (y - d3_geo_centroidY0) / d3_geo_centroidW0; + d3_geo_centroidZ0 += (z - d3_geo_centroidZ0) / d3_geo_centroidW0; + } + function d3_geo_centroidLineStart() { + var x0, y0, z0; + d3_geo_centroid.point = function(λ, φ) { + λ *= d3_radians; + var cosφ = Math.cos(φ *= d3_radians); + x0 = cosφ * Math.cos(λ); + y0 = cosφ * Math.sin(λ); + z0 = Math.sin(φ); + d3_geo_centroid.point = nextPoint; + d3_geo_centroidPointXYZ(x0, y0, z0); + }; + function nextPoint(λ, φ) { + λ *= d3_radians; + var cosφ = Math.cos(φ *= d3_radians), x = cosφ * Math.cos(λ), y = cosφ * Math.sin(λ), z = Math.sin(φ), w = Math.atan2(Math.sqrt((w = y0 * z - z0 * y) * w + (w = z0 * x - x0 * z) * w + (w = x0 * y - y0 * x) * w), x0 * x + y0 * y + z0 * z); + d3_geo_centroidW1 += w; + d3_geo_centroidX1 += w * (x0 + (x0 = x)); + d3_geo_centroidY1 += w * (y0 + (y0 = y)); + d3_geo_centroidZ1 += w * (z0 + (z0 = z)); + d3_geo_centroidPointXYZ(x0, y0, z0); + } + } + function d3_geo_centroidLineEnd() { + d3_geo_centroid.point = d3_geo_centroidPoint; + } + function d3_geo_centroidRingStart() { + var λ00, φ00, x0, y0, z0; + d3_geo_centroid.point = function(λ, φ) { + λ00 = λ, φ00 = φ; + d3_geo_centroid.point = nextPoint; + λ *= d3_radians; + var cosφ = Math.cos(φ *= d3_radians); + x0 = cosφ * Math.cos(λ); + y0 = cosφ * Math.sin(λ); + z0 = Math.sin(φ); + d3_geo_centroidPointXYZ(x0, y0, z0); + }; + d3_geo_centroid.lineEnd = function() { + nextPoint(λ00, φ00); + d3_geo_centroid.lineEnd = d3_geo_centroidLineEnd; + d3_geo_centroid.point = d3_geo_centroidPoint; + }; + function nextPoint(λ, φ) { + λ *= d3_radians; + var cosφ = Math.cos(φ *= d3_radians), x = cosφ * Math.cos(λ), y = cosφ * Math.sin(λ), z = Math.sin(φ), cx = y0 * z - z0 * y, cy = z0 * x - x0 * z, cz = x0 * y - y0 * x, m = Math.sqrt(cx * cx + cy * cy + cz * cz), u = x0 * x + y0 * y + z0 * z, v = m && 
-d3_acos(u) / m, w = Math.atan2(m, u); + d3_geo_centroidX2 += v * cx; + d3_geo_centroidY2 += v * cy; + d3_geo_centroidZ2 += v * cz; + d3_geo_centroidW1 += w; + d3_geo_centroidX1 += w * (x0 + (x0 = x)); + d3_geo_centroidY1 += w * (y0 + (y0 = y)); + d3_geo_centroidZ1 += w * (z0 + (z0 = z)); + d3_geo_centroidPointXYZ(x0, y0, z0); + } + } + function d3_true() { + return true; + } + function d3_geo_clipPolygon(segments, compare, clipStartInside, interpolate, listener) { + var subject = [], clip = []; + segments.forEach(function(segment) { + if ((n = segment.length - 1) <= 0) return; + var n, p0 = segment[0], p1 = segment[n]; + if (d3_geo_sphericalEqual(p0, p1)) { + listener.lineStart(); + for (var i = 0; i < n; ++i) listener.point((p0 = segment[i])[0], p0[1]); + listener.lineEnd(); + return; + } + var a = new d3_geo_clipPolygonIntersection(p0, segment, null, true), b = new d3_geo_clipPolygonIntersection(p0, null, a, false); + a.o = b; + subject.push(a); + clip.push(b); + a = new d3_geo_clipPolygonIntersection(p1, segment, null, false); + b = new d3_geo_clipPolygonIntersection(p1, null, a, true); + a.o = b; + subject.push(a); + clip.push(b); + }); + clip.sort(compare); + d3_geo_clipPolygonLinkCircular(subject); + d3_geo_clipPolygonLinkCircular(clip); + if (!subject.length) return; + for (var i = 0, entry = clipStartInside, n = clip.length; i < n; ++i) { + clip[i].e = entry = !entry; + } + var start = subject[0], points, point; + while (1) { + var current = start, isSubject = true; + while (current.v) if ((current = current.n) === start) return; + points = current.z; + listener.lineStart(); + do { + current.v = current.o.v = true; + if (current.e) { + if (isSubject) { + for (var i = 0, n = points.length; i < n; ++i) listener.point((point = points[i])[0], point[1]); + } else { + interpolate(current.x, current.n.x, 1, listener); + } + current = current.n; + } else { + if (isSubject) { + points = current.p.z; + for (var i = points.length - 1; i >= 0; --i) listener.point((point = points[i])[0], point[1]); + } else { + interpolate(current.x, current.p.x, -1, listener); + } + current = current.p; + } + current = current.o; + points = current.z; + isSubject = !isSubject; + } while (!current.v); + listener.lineEnd(); + } + } + function d3_geo_clipPolygonLinkCircular(array) { + if (!(n = array.length)) return; + var n, i = 0, a = array[0], b; + while (++i < n) { + a.n = b = array[i]; + b.p = a; + a = b; + } + a.n = b = array[0]; + b.p = a; + } + function d3_geo_clipPolygonIntersection(point, points, other, entry) { + this.x = point; + this.z = points; + this.o = other; + this.e = entry; + this.v = false; + this.n = this.p = null; + } + function d3_geo_clip(pointVisible, clipLine, interpolate, clipStart) { + return function(rotate, listener) { + var line = clipLine(listener), rotatedClipStart = rotate.invert(clipStart[0], clipStart[1]); + var clip = { + point: point, + lineStart: lineStart, + lineEnd: lineEnd, + polygonStart: function() { + clip.point = pointRing; + clip.lineStart = ringStart; + clip.lineEnd = ringEnd; + segments = []; + polygon = []; + }, + polygonEnd: function() { + clip.point = point; + clip.lineStart = lineStart; + clip.lineEnd = lineEnd; + segments = d3.merge(segments); + var clipStartInside = d3_geo_pointInPolygon(rotatedClipStart, polygon); + if (segments.length) { + if (!polygonStarted) listener.polygonStart(), polygonStarted = true; + d3_geo_clipPolygon(segments, d3_geo_clipSort, clipStartInside, interpolate, listener); + } else if (clipStartInside) { + if (!polygonStarted) 
listener.polygonStart(), polygonStarted = true; + listener.lineStart(); + interpolate(null, null, 1, listener); + listener.lineEnd(); + } + if (polygonStarted) listener.polygonEnd(), polygonStarted = false; + segments = polygon = null; + }, + sphere: function() { + listener.polygonStart(); + listener.lineStart(); + interpolate(null, null, 1, listener); + listener.lineEnd(); + listener.polygonEnd(); + } + }; + function point(λ, φ) { + var point = rotate(λ, φ); + if (pointVisible(λ = point[0], φ = point[1])) listener.point(λ, φ); + } + function pointLine(λ, φ) { + var point = rotate(λ, φ); + line.point(point[0], point[1]); + } + function lineStart() { + clip.point = pointLine; + line.lineStart(); + } + function lineEnd() { + clip.point = point; + line.lineEnd(); + } + var segments; + var buffer = d3_geo_clipBufferListener(), ringListener = clipLine(buffer), polygonStarted = false, polygon, ring; + function pointRing(λ, φ) { + ring.push([ λ, φ ]); + var point = rotate(λ, φ); + ringListener.point(point[0], point[1]); + } + function ringStart() { + ringListener.lineStart(); + ring = []; + } + function ringEnd() { + pointRing(ring[0][0], ring[0][1]); + ringListener.lineEnd(); + var clean = ringListener.clean(), ringSegments = buffer.buffer(), segment, n = ringSegments.length; + ring.pop(); + polygon.push(ring); + ring = null; + if (!n) return; + if (clean & 1) { + segment = ringSegments[0]; + var n = segment.length - 1, i = -1, point; + if (n > 0) { + if (!polygonStarted) listener.polygonStart(), polygonStarted = true; + listener.lineStart(); + while (++i < n) listener.point((point = segment[i])[0], point[1]); + listener.lineEnd(); + } + return; + } + if (n > 1 && clean & 2) ringSegments.push(ringSegments.pop().concat(ringSegments.shift())); + segments.push(ringSegments.filter(d3_geo_clipSegmentLength1)); + } + return clip; + }; + } + function d3_geo_clipSegmentLength1(segment) { + return segment.length > 1; + } + function d3_geo_clipBufferListener() { + var lines = [], line; + return { + lineStart: function() { + lines.push(line = []); + }, + point: function(λ, φ) { + line.push([ λ, φ ]); + }, + lineEnd: d3_noop, + buffer: function() { + var buffer = lines; + lines = []; + line = null; + return buffer; + }, + rejoin: function() { + if (lines.length > 1) lines.push(lines.pop().concat(lines.shift())); + } + }; + } + function d3_geo_clipSort(a, b) { + return ((a = a.x)[0] < 0 ? a[1] - halfπ - ε : halfπ - a[1]) - ((b = b.x)[0] < 0 ? b[1] - halfπ - ε : halfπ - b[1]); + } + function d3_geo_pointInPolygon(point, polygon) { + var meridian = point[0], parallel = point[1], meridianNormal = [ Math.sin(meridian), -Math.cos(meridian), 0 ], polarAngle = 0, winding = 0; + d3_geo_areaRingSum.reset(); + for (var i = 0, n = polygon.length; i < n; ++i) { + var ring = polygon[i], m = ring.length; + if (!m) continue; + var point0 = ring[0], λ0 = point0[0], φ0 = point0[1] / 2 + π / 4, sinφ0 = Math.sin(φ0), cosφ0 = Math.cos(φ0), j = 1; + while (true) { + if (j === m) j = 0; + point = ring[j]; + var λ = point[0], φ = point[1] / 2 + π / 4, sinφ = Math.sin(φ), cosφ = Math.cos(φ), dλ = λ - λ0, sdλ = dλ >= 0 ? 1 : -1, adλ = sdλ * dλ, antimeridian = adλ > π, k = sinφ0 * sinφ; + d3_geo_areaRingSum.add(Math.atan2(k * sdλ * Math.sin(adλ), cosφ0 * cosφ + k * Math.cos(adλ))); + polarAngle += antimeridian ? 
dλ + sdλ * τ : dλ; + if (antimeridian ^ λ0 >= meridian ^ λ >= meridian) { + var arc = d3_geo_cartesianCross(d3_geo_cartesian(point0), d3_geo_cartesian(point)); + d3_geo_cartesianNormalize(arc); + var intersection = d3_geo_cartesianCross(meridianNormal, arc); + d3_geo_cartesianNormalize(intersection); + var φarc = (antimeridian ^ dλ >= 0 ? -1 : 1) * d3_asin(intersection[2]); + if (parallel > φarc || parallel === φarc && (arc[0] || arc[1])) { + winding += antimeridian ^ dλ >= 0 ? 1 : -1; + } + } + if (!j++) break; + λ0 = λ, sinφ0 = sinφ, cosφ0 = cosφ, point0 = point; + } + } + return (polarAngle < -ε || polarAngle < ε && d3_geo_areaRingSum < 0) ^ winding & 1; + } + var d3_geo_clipAntimeridian = d3_geo_clip(d3_true, d3_geo_clipAntimeridianLine, d3_geo_clipAntimeridianInterpolate, [ -π, -π / 2 ]); + function d3_geo_clipAntimeridianLine(listener) { + var λ0 = NaN, φ0 = NaN, sλ0 = NaN, clean; + return { + lineStart: function() { + listener.lineStart(); + clean = 1; + }, + point: function(λ1, φ1) { + var sλ1 = λ1 > 0 ? π : -π, dλ = abs(λ1 - λ0); + if (abs(dλ - π) < ε) { + listener.point(λ0, φ0 = (φ0 + φ1) / 2 > 0 ? halfπ : -halfπ); + listener.point(sλ0, φ0); + listener.lineEnd(); + listener.lineStart(); + listener.point(sλ1, φ0); + listener.point(λ1, φ0); + clean = 0; + } else if (sλ0 !== sλ1 && dλ >= π) { + if (abs(λ0 - sλ0) < ε) λ0 -= sλ0 * ε; + if (abs(λ1 - sλ1) < ε) λ1 -= sλ1 * ε; + φ0 = d3_geo_clipAntimeridianIntersect(λ0, φ0, λ1, φ1); + listener.point(sλ0, φ0); + listener.lineEnd(); + listener.lineStart(); + listener.point(sλ1, φ0); + clean = 0; + } + listener.point(λ0 = λ1, φ0 = φ1); + sλ0 = sλ1; + }, + lineEnd: function() { + listener.lineEnd(); + λ0 = φ0 = NaN; + }, + clean: function() { + return 2 - clean; + } + }; + } + function d3_geo_clipAntimeridianIntersect(λ0, φ0, λ1, φ1) { + var cosφ0, cosφ1, sinλ0_λ1 = Math.sin(λ0 - λ1); + return abs(sinλ0_λ1) > ε ? Math.atan((Math.sin(φ0) * (cosφ1 = Math.cos(φ1)) * Math.sin(λ1) - Math.sin(φ1) * (cosφ0 = Math.cos(φ0)) * Math.sin(λ0)) / (cosφ0 * cosφ1 * sinλ0_λ1)) : (φ0 + φ1) / 2; + } + function d3_geo_clipAntimeridianInterpolate(from, to, direction, listener) { + var φ; + if (from == null) { + φ = direction * halfπ; + listener.point(-π, φ); + listener.point(0, φ); + listener.point(π, φ); + listener.point(π, 0); + listener.point(π, -φ); + listener.point(0, -φ); + listener.point(-π, -φ); + listener.point(-π, 0); + listener.point(-π, φ); + } else if (abs(from[0] - to[0]) > ε) { + var s = from[0] < to[0] ? π : -π; + φ = direction * s / 2; + listener.point(-s, φ); + listener.point(0, φ); + listener.point(s, φ); + } else { + listener.point(to[0], to[1]); + } + } + function d3_geo_clipCircle(radius) { + var cr = Math.cos(radius), smallRadius = cr > 0, notHemisphere = abs(cr) > ε, interpolate = d3_geo_circleInterpolate(radius, 6 * d3_radians); + return d3_geo_clip(visible, clipLine, interpolate, smallRadius ? [ 0, -radius ] : [ -π, radius - π ]); + function visible(λ, φ) { + return Math.cos(λ) * Math.cos(φ) > cr; + } + function clipLine(listener) { + var point0, c0, v0, v00, clean; + return { + lineStart: function() { + v00 = v0 = false; + clean = 1; + }, + point: function(λ, φ) { + var point1 = [ λ, φ ], point2, v = visible(λ, φ), c = smallRadius ? v ? 0 : code(λ, φ) : v ? code(λ + (λ < 0 ? 
π : -π), φ) : 0; + if (!point0 && (v00 = v0 = v)) listener.lineStart(); + if (v !== v0) { + point2 = intersect(point0, point1); + if (d3_geo_sphericalEqual(point0, point2) || d3_geo_sphericalEqual(point1, point2)) { + point1[0] += ε; + point1[1] += ε; + v = visible(point1[0], point1[1]); + } + } + if (v !== v0) { + clean = 0; + if (v) { + listener.lineStart(); + point2 = intersect(point1, point0); + listener.point(point2[0], point2[1]); + } else { + point2 = intersect(point0, point1); + listener.point(point2[0], point2[1]); + listener.lineEnd(); + } + point0 = point2; + } else if (notHemisphere && point0 && smallRadius ^ v) { + var t; + if (!(c & c0) && (t = intersect(point1, point0, true))) { + clean = 0; + if (smallRadius) { + listener.lineStart(); + listener.point(t[0][0], t[0][1]); + listener.point(t[1][0], t[1][1]); + listener.lineEnd(); + } else { + listener.point(t[1][0], t[1][1]); + listener.lineEnd(); + listener.lineStart(); + listener.point(t[0][0], t[0][1]); + } + } + } + if (v && (!point0 || !d3_geo_sphericalEqual(point0, point1))) { + listener.point(point1[0], point1[1]); + } + point0 = point1, v0 = v, c0 = c; + }, + lineEnd: function() { + if (v0) listener.lineEnd(); + point0 = null; + }, + clean: function() { + return clean | (v00 && v0) << 1; + } + }; + } + function intersect(a, b, two) { + var pa = d3_geo_cartesian(a), pb = d3_geo_cartesian(b); + var n1 = [ 1, 0, 0 ], n2 = d3_geo_cartesianCross(pa, pb), n2n2 = d3_geo_cartesianDot(n2, n2), n1n2 = n2[0], determinant = n2n2 - n1n2 * n1n2; + if (!determinant) return !two && a; + var c1 = cr * n2n2 / determinant, c2 = -cr * n1n2 / determinant, n1xn2 = d3_geo_cartesianCross(n1, n2), A = d3_geo_cartesianScale(n1, c1), B = d3_geo_cartesianScale(n2, c2); + d3_geo_cartesianAdd(A, B); + var u = n1xn2, w = d3_geo_cartesianDot(A, u), uu = d3_geo_cartesianDot(u, u), t2 = w * w - uu * (d3_geo_cartesianDot(A, A) - 1); + if (t2 < 0) return; + var t = Math.sqrt(t2), q = d3_geo_cartesianScale(u, (-w - t) / uu); + d3_geo_cartesianAdd(q, A); + q = d3_geo_spherical(q); + if (!two) return q; + var λ0 = a[0], λ1 = b[0], φ0 = a[1], φ1 = b[1], z; + if (λ1 < λ0) z = λ0, λ0 = λ1, λ1 = z; + var δλ = λ1 - λ0, polar = abs(δλ - π) < ε, meridian = polar || δλ < ε; + if (!polar && φ1 < φ0) z = φ0, φ0 = φ1, φ1 = z; + if (meridian ? polar ? φ0 + φ1 > 0 ^ q[1] < (abs(q[0] - λ0) < ε ? φ0 : φ1) : φ0 <= q[1] && q[1] <= φ1 : δλ > π ^ (λ0 <= q[0] && q[0] <= λ1)) { + var q1 = d3_geo_cartesianScale(u, (-w + t) / uu); + d3_geo_cartesianAdd(q1, A); + return [ q, d3_geo_spherical(q1) ]; + } + } + function code(λ, φ) { + var r = smallRadius ? 
radius : π - radius, code = 0; + if (λ < -r) code |= 1; else if (λ > r) code |= 2; + if (φ < -r) code |= 4; else if (φ > r) code |= 8; + return code; + } + } + function d3_geom_clipLine(x0, y0, x1, y1) { + return function(line) { + var a = line.a, b = line.b, ax = a.x, ay = a.y, bx = b.x, by = b.y, t0 = 0, t1 = 1, dx = bx - ax, dy = by - ay, r; + r = x0 - ax; + if (!dx && r > 0) return; + r /= dx; + if (dx < 0) { + if (r < t0) return; + if (r < t1) t1 = r; + } else if (dx > 0) { + if (r > t1) return; + if (r > t0) t0 = r; + } + r = x1 - ax; + if (!dx && r < 0) return; + r /= dx; + if (dx < 0) { + if (r > t1) return; + if (r > t0) t0 = r; + } else if (dx > 0) { + if (r < t0) return; + if (r < t1) t1 = r; + } + r = y0 - ay; + if (!dy && r > 0) return; + r /= dy; + if (dy < 0) { + if (r < t0) return; + if (r < t1) t1 = r; + } else if (dy > 0) { + if (r > t1) return; + if (r > t0) t0 = r; + } + r = y1 - ay; + if (!dy && r < 0) return; + r /= dy; + if (dy < 0) { + if (r > t1) return; + if (r > t0) t0 = r; + } else if (dy > 0) { + if (r < t0) return; + if (r < t1) t1 = r; + } + if (t0 > 0) line.a = { + x: ax + t0 * dx, + y: ay + t0 * dy + }; + if (t1 < 1) line.b = { + x: ax + t1 * dx, + y: ay + t1 * dy + }; + return line; + }; + } + var d3_geo_clipExtentMAX = 1e9; + d3.geo.clipExtent = function() { + var x0, y0, x1, y1, stream, clip, clipExtent = { + stream: function(output) { + if (stream) stream.valid = false; + stream = clip(output); + stream.valid = true; + return stream; + }, + extent: function(_) { + if (!arguments.length) return [ [ x0, y0 ], [ x1, y1 ] ]; + clip = d3_geo_clipExtent(x0 = +_[0][0], y0 = +_[0][1], x1 = +_[1][0], y1 = +_[1][1]); + if (stream) stream.valid = false, stream = null; + return clipExtent; + } + }; + return clipExtent.extent([ [ 0, 0 ], [ 960, 500 ] ]); + }; + function d3_geo_clipExtent(x0, y0, x1, y1) { + return function(listener) { + var listener_ = listener, bufferListener = d3_geo_clipBufferListener(), clipLine = d3_geom_clipLine(x0, y0, x1, y1), segments, polygon, ring; + var clip = { + point: point, + lineStart: lineStart, + lineEnd: lineEnd, + polygonStart: function() { + listener = bufferListener; + segments = []; + polygon = []; + clean = true; + }, + polygonEnd: function() { + listener = listener_; + segments = d3.merge(segments); + var clipStartInside = insidePolygon([ x0, y1 ]), inside = clean && clipStartInside, visible = segments.length; + if (inside || visible) { + listener.polygonStart(); + if (inside) { + listener.lineStart(); + interpolate(null, null, 1, listener); + listener.lineEnd(); + } + if (visible) { + d3_geo_clipPolygon(segments, compare, clipStartInside, interpolate, listener); + } + listener.polygonEnd(); + } + segments = polygon = ring = null; + } + }; + function insidePolygon(p) { + var wn = 0, n = polygon.length, y = p[1]; + for (var i = 0; i < n; ++i) { + for (var j = 1, v = polygon[i], m = v.length, a = v[0], b; j < m; ++j) { + b = v[j]; + if (a[1] <= y) { + if (b[1] > y && d3_cross2d(a, b, p) > 0) ++wn; + } else { + if (b[1] <= y && d3_cross2d(a, b, p) < 0) --wn; + } + a = b; + } + } + return wn !== 0; + } + function interpolate(from, to, direction, listener) { + var a = 0, a1 = 0; + if (from == null || (a = corner(from, direction)) !== (a1 = corner(to, direction)) || comparePoints(from, to) < 0 ^ direction > 0) { + do { + listener.point(a === 0 || a === 3 ? x0 : x1, a > 1 ? 
y1 : y0); + } while ((a = (a + direction + 4) % 4) !== a1); + } else { + listener.point(to[0], to[1]); + } + } + function pointVisible(x, y) { + return x0 <= x && x <= x1 && y0 <= y && y <= y1; + } + function point(x, y) { + if (pointVisible(x, y)) listener.point(x, y); + } + var x__, y__, v__, x_, y_, v_, first, clean; + function lineStart() { + clip.point = linePoint; + if (polygon) polygon.push(ring = []); + first = true; + v_ = false; + x_ = y_ = NaN; + } + function lineEnd() { + if (segments) { + linePoint(x__, y__); + if (v__ && v_) bufferListener.rejoin(); + segments.push(bufferListener.buffer()); + } + clip.point = point; + if (v_) listener.lineEnd(); + } + function linePoint(x, y) { + x = Math.max(-d3_geo_clipExtentMAX, Math.min(d3_geo_clipExtentMAX, x)); + y = Math.max(-d3_geo_clipExtentMAX, Math.min(d3_geo_clipExtentMAX, y)); + var v = pointVisible(x, y); + if (polygon) ring.push([ x, y ]); + if (first) { + x__ = x, y__ = y, v__ = v; + first = false; + if (v) { + listener.lineStart(); + listener.point(x, y); + } + } else { + if (v && v_) listener.point(x, y); else { + var l = { + a: { + x: x_, + y: y_ + }, + b: { + x: x, + y: y + } + }; + if (clipLine(l)) { + if (!v_) { + listener.lineStart(); + listener.point(l.a.x, l.a.y); + } + listener.point(l.b.x, l.b.y); + if (!v) listener.lineEnd(); + clean = false; + } else if (v) { + listener.lineStart(); + listener.point(x, y); + clean = false; + } + } + } + x_ = x, y_ = y, v_ = v; + } + return clip; + }; + function corner(p, direction) { + return abs(p[0] - x0) < ε ? direction > 0 ? 0 : 3 : abs(p[0] - x1) < ε ? direction > 0 ? 2 : 1 : abs(p[1] - y0) < ε ? direction > 0 ? 1 : 0 : direction > 0 ? 3 : 2; + } + function compare(a, b) { + return comparePoints(a.x, b.x); + } + function comparePoints(a, b) { + var ca = corner(a, 1), cb = corner(b, 1); + return ca !== cb ? ca - cb : ca === 0 ? b[1] - a[1] : ca === 1 ? a[0] - b[0] : ca === 2 ? 
a[1] - b[1] : b[0] - a[0]; + } + } + function d3_geo_compose(a, b) { + function compose(x, y) { + return x = a(x, y), b(x[0], x[1]); + } + if (a.invert && b.invert) compose.invert = function(x, y) { + return x = b.invert(x, y), x && a.invert(x[0], x[1]); + }; + return compose; + } + function d3_geo_conic(projectAt) { + var φ0 = 0, φ1 = π / 3, m = d3_geo_projectionMutator(projectAt), p = m(φ0, φ1); + p.parallels = function(_) { + if (!arguments.length) return [ φ0 / π * 180, φ1 / π * 180 ]; + return m(φ0 = _[0] * π / 180, φ1 = _[1] * π / 180); + }; + return p; + } + function d3_geo_conicEqualArea(φ0, φ1) { + var sinφ0 = Math.sin(φ0), n = (sinφ0 + Math.sin(φ1)) / 2, C = 1 + sinφ0 * (2 * n - sinφ0), ρ0 = Math.sqrt(C) / n; + function forward(λ, φ) { + var ρ = Math.sqrt(C - 2 * n * Math.sin(φ)) / n; + return [ ρ * Math.sin(λ *= n), ρ0 - ρ * Math.cos(λ) ]; + } + forward.invert = function(x, y) { + var ρ0_y = ρ0 - y; + return [ Math.atan2(x, ρ0_y) / n, d3_asin((C - (x * x + ρ0_y * ρ0_y) * n * n) / (2 * n)) ]; + }; + return forward; + } + (d3.geo.conicEqualArea = function() { + return d3_geo_conic(d3_geo_conicEqualArea); + }).raw = d3_geo_conicEqualArea; + d3.geo.albers = function() { + return d3.geo.conicEqualArea().rotate([ 96, 0 ]).center([ -.6, 38.7 ]).parallels([ 29.5, 45.5 ]).scale(1070); + }; + d3.geo.albersUsa = function() { + var lower48 = d3.geo.albers(); + var alaska = d3.geo.conicEqualArea().rotate([ 154, 0 ]).center([ -2, 58.5 ]).parallels([ 55, 65 ]); + var hawaii = d3.geo.conicEqualArea().rotate([ 157, 0 ]).center([ -3, 19.9 ]).parallels([ 8, 18 ]); + var point, pointStream = { + point: function(x, y) { + point = [ x, y ]; + } + }, lower48Point, alaskaPoint, hawaiiPoint; + function albersUsa(coordinates) { + var x = coordinates[0], y = coordinates[1]; + point = null; + (lower48Point(x, y), point) || (alaskaPoint(x, y), point) || hawaiiPoint(x, y); + return point; + } + albersUsa.invert = function(coordinates) { + var k = lower48.scale(), t = lower48.translate(), x = (coordinates[0] - t[0]) / k, y = (coordinates[1] - t[1]) / k; + return (y >= .12 && y < .234 && x >= -.425 && x < -.214 ? alaska : y >= .166 && y < .234 && x >= -.214 && x < -.115 ? 
hawaii : lower48).invert(coordinates); + }; + albersUsa.stream = function(stream) { + var lower48Stream = lower48.stream(stream), alaskaStream = alaska.stream(stream), hawaiiStream = hawaii.stream(stream); + return { + point: function(x, y) { + lower48Stream.point(x, y); + alaskaStream.point(x, y); + hawaiiStream.point(x, y); + }, + sphere: function() { + lower48Stream.sphere(); + alaskaStream.sphere(); + hawaiiStream.sphere(); + }, + lineStart: function() { + lower48Stream.lineStart(); + alaskaStream.lineStart(); + hawaiiStream.lineStart(); + }, + lineEnd: function() { + lower48Stream.lineEnd(); + alaskaStream.lineEnd(); + hawaiiStream.lineEnd(); + }, + polygonStart: function() { + lower48Stream.polygonStart(); + alaskaStream.polygonStart(); + hawaiiStream.polygonStart(); + }, + polygonEnd: function() { + lower48Stream.polygonEnd(); + alaskaStream.polygonEnd(); + hawaiiStream.polygonEnd(); + } + }; + }; + albersUsa.precision = function(_) { + if (!arguments.length) return lower48.precision(); + lower48.precision(_); + alaska.precision(_); + hawaii.precision(_); + return albersUsa; + }; + albersUsa.scale = function(_) { + if (!arguments.length) return lower48.scale(); + lower48.scale(_); + alaska.scale(_ * .35); + hawaii.scale(_); + return albersUsa.translate(lower48.translate()); + }; + albersUsa.translate = function(_) { + if (!arguments.length) return lower48.translate(); + var k = lower48.scale(), x = +_[0], y = +_[1]; + lower48Point = lower48.translate(_).clipExtent([ [ x - .455 * k, y - .238 * k ], [ x + .455 * k, y + .238 * k ] ]).stream(pointStream).point; + alaskaPoint = alaska.translate([ x - .307 * k, y + .201 * k ]).clipExtent([ [ x - .425 * k + ε, y + .12 * k + ε ], [ x - .214 * k - ε, y + .234 * k - ε ] ]).stream(pointStream).point; + hawaiiPoint = hawaii.translate([ x - .205 * k, y + .212 * k ]).clipExtent([ [ x - .214 * k + ε, y + .166 * k + ε ], [ x - .115 * k - ε, y + .234 * k - ε ] ]).stream(pointStream).point; + return albersUsa; + }; + return albersUsa.scale(1070); + }; + var d3_geo_pathAreaSum, d3_geo_pathAreaPolygon, d3_geo_pathArea = { + point: d3_noop, + lineStart: d3_noop, + lineEnd: d3_noop, + polygonStart: function() { + d3_geo_pathAreaPolygon = 0; + d3_geo_pathArea.lineStart = d3_geo_pathAreaRingStart; + }, + polygonEnd: function() { + d3_geo_pathArea.lineStart = d3_geo_pathArea.lineEnd = d3_geo_pathArea.point = d3_noop; + d3_geo_pathAreaSum += abs(d3_geo_pathAreaPolygon / 2); + } + }; + function d3_geo_pathAreaRingStart() { + var x00, y00, x0, y0; + d3_geo_pathArea.point = function(x, y) { + d3_geo_pathArea.point = nextPoint; + x00 = x0 = x, y00 = y0 = y; + }; + function nextPoint(x, y) { + d3_geo_pathAreaPolygon += y0 * x - x0 * y; + x0 = x, y0 = y; + } + d3_geo_pathArea.lineEnd = function() { + nextPoint(x00, y00); + }; + } + var d3_geo_pathBoundsX0, d3_geo_pathBoundsY0, d3_geo_pathBoundsX1, d3_geo_pathBoundsY1; + var d3_geo_pathBounds = { + point: d3_geo_pathBoundsPoint, + lineStart: d3_noop, + lineEnd: d3_noop, + polygonStart: d3_noop, + polygonEnd: d3_noop + }; + function d3_geo_pathBoundsPoint(x, y) { + if (x < d3_geo_pathBoundsX0) d3_geo_pathBoundsX0 = x; + if (x > d3_geo_pathBoundsX1) d3_geo_pathBoundsX1 = x; + if (y < d3_geo_pathBoundsY0) d3_geo_pathBoundsY0 = y; + if (y > d3_geo_pathBoundsY1) d3_geo_pathBoundsY1 = y; + } + function d3_geo_pathBuffer() { + var pointCircle = d3_geo_pathBufferCircle(4.5), buffer = []; + var stream = { + point: point, + lineStart: function() { + stream.point = pointLineStart; + }, + lineEnd: lineEnd, + polygonStart: 
function() { + stream.lineEnd = lineEndPolygon; + }, + polygonEnd: function() { + stream.lineEnd = lineEnd; + stream.point = point; + }, + pointRadius: function(_) { + pointCircle = d3_geo_pathBufferCircle(_); + return stream; + }, + result: function() { + if (buffer.length) { + var result = buffer.join(""); + buffer = []; + return result; + } + } + }; + function point(x, y) { + buffer.push("M", x, ",", y, pointCircle); + } + function pointLineStart(x, y) { + buffer.push("M", x, ",", y); + stream.point = pointLine; + } + function pointLine(x, y) { + buffer.push("L", x, ",", y); + } + function lineEnd() { + stream.point = point; + } + function lineEndPolygon() { + buffer.push("Z"); + } + return stream; + } + function d3_geo_pathBufferCircle(radius) { + return "m0," + radius + "a" + radius + "," + radius + " 0 1,1 0," + -2 * radius + "a" + radius + "," + radius + " 0 1,1 0," + 2 * radius + "z"; + } + var d3_geo_pathCentroid = { + point: d3_geo_pathCentroidPoint, + lineStart: d3_geo_pathCentroidLineStart, + lineEnd: d3_geo_pathCentroidLineEnd, + polygonStart: function() { + d3_geo_pathCentroid.lineStart = d3_geo_pathCentroidRingStart; + }, + polygonEnd: function() { + d3_geo_pathCentroid.point = d3_geo_pathCentroidPoint; + d3_geo_pathCentroid.lineStart = d3_geo_pathCentroidLineStart; + d3_geo_pathCentroid.lineEnd = d3_geo_pathCentroidLineEnd; + } + }; + function d3_geo_pathCentroidPoint(x, y) { + d3_geo_centroidX0 += x; + d3_geo_centroidY0 += y; + ++d3_geo_centroidZ0; + } + function d3_geo_pathCentroidLineStart() { + var x0, y0; + d3_geo_pathCentroid.point = function(x, y) { + d3_geo_pathCentroid.point = nextPoint; + d3_geo_pathCentroidPoint(x0 = x, y0 = y); + }; + function nextPoint(x, y) { + var dx = x - x0, dy = y - y0, z = Math.sqrt(dx * dx + dy * dy); + d3_geo_centroidX1 += z * (x0 + x) / 2; + d3_geo_centroidY1 += z * (y0 + y) / 2; + d3_geo_centroidZ1 += z; + d3_geo_pathCentroidPoint(x0 = x, y0 = y); + } + } + function d3_geo_pathCentroidLineEnd() { + d3_geo_pathCentroid.point = d3_geo_pathCentroidPoint; + } + function d3_geo_pathCentroidRingStart() { + var x00, y00, x0, y0; + d3_geo_pathCentroid.point = function(x, y) { + d3_geo_pathCentroid.point = nextPoint; + d3_geo_pathCentroidPoint(x00 = x0 = x, y00 = y0 = y); + }; + function nextPoint(x, y) { + var dx = x - x0, dy = y - y0, z = Math.sqrt(dx * dx + dy * dy); + d3_geo_centroidX1 += z * (x0 + x) / 2; + d3_geo_centroidY1 += z * (y0 + y) / 2; + d3_geo_centroidZ1 += z; + z = y0 * x - x0 * y; + d3_geo_centroidX2 += z * (x0 + x); + d3_geo_centroidY2 += z * (y0 + y); + d3_geo_centroidZ2 += z * 3; + d3_geo_pathCentroidPoint(x0 = x, y0 = y); + } + d3_geo_pathCentroid.lineEnd = function() { + nextPoint(x00, y00); + }; + } + function d3_geo_pathContext(context) { + var pointRadius = 4.5; + var stream = { + point: point, + lineStart: function() { + stream.point = pointLineStart; + }, + lineEnd: lineEnd, + polygonStart: function() { + stream.lineEnd = lineEndPolygon; + }, + polygonEnd: function() { + stream.lineEnd = lineEnd; + stream.point = point; + }, + pointRadius: function(_) { + pointRadius = _; + return stream; + }, + result: d3_noop + }; + function point(x, y) { + context.moveTo(x, y); + context.arc(x, y, pointRadius, 0, τ); + } + function pointLineStart(x, y) { + context.moveTo(x, y); + stream.point = pointLine; + } + function pointLine(x, y) { + context.lineTo(x, y); + } + function lineEnd() { + stream.point = point; + } + function lineEndPolygon() { + context.closePath(); + } + return stream; + } + function d3_geo_resample(project) { 
+ var δ2 = .5, cosMinDistance = Math.cos(30 * d3_radians), maxDepth = 16; + function resample(stream) { + return (maxDepth ? resampleRecursive : resampleNone)(stream); + } + function resampleNone(stream) { + return d3_geo_transformPoint(stream, function(x, y) { + x = project(x, y); + stream.point(x[0], x[1]); + }); + } + function resampleRecursive(stream) { + var λ00, φ00, x00, y00, a00, b00, c00, λ0, x0, y0, a0, b0, c0; + var resample = { + point: point, + lineStart: lineStart, + lineEnd: lineEnd, + polygonStart: function() { + stream.polygonStart(); + resample.lineStart = ringStart; + }, + polygonEnd: function() { + stream.polygonEnd(); + resample.lineStart = lineStart; + } + }; + function point(x, y) { + x = project(x, y); + stream.point(x[0], x[1]); + } + function lineStart() { + x0 = NaN; + resample.point = linePoint; + stream.lineStart(); + } + function linePoint(λ, φ) { + var c = d3_geo_cartesian([ λ, φ ]), p = project(λ, φ); + resampleLineTo(x0, y0, λ0, a0, b0, c0, x0 = p[0], y0 = p[1], λ0 = λ, a0 = c[0], b0 = c[1], c0 = c[2], maxDepth, stream); + stream.point(x0, y0); + } + function lineEnd() { + resample.point = point; + stream.lineEnd(); + } + function ringStart() { + lineStart(); + resample.point = ringPoint; + resample.lineEnd = ringEnd; + } + function ringPoint(λ, φ) { + linePoint(λ00 = λ, φ00 = φ), x00 = x0, y00 = y0, a00 = a0, b00 = b0, c00 = c0; + resample.point = linePoint; + } + function ringEnd() { + resampleLineTo(x0, y0, λ0, a0, b0, c0, x00, y00, λ00, a00, b00, c00, maxDepth, stream); + resample.lineEnd = lineEnd; + lineEnd(); + } + return resample; + } + function resampleLineTo(x0, y0, λ0, a0, b0, c0, x1, y1, λ1, a1, b1, c1, depth, stream) { + var dx = x1 - x0, dy = y1 - y0, d2 = dx * dx + dy * dy; + if (d2 > 4 * δ2 && depth--) { + var a = a0 + a1, b = b0 + b1, c = c0 + c1, m = Math.sqrt(a * a + b * b + c * c), φ2 = Math.asin(c /= m), λ2 = abs(abs(c) - 1) < ε || abs(λ0 - λ1) < ε ? (λ0 + λ1) / 2 : Math.atan2(b, a), p = project(λ2, φ2), x2 = p[0], y2 = p[1], dx2 = x2 - x0, dy2 = y2 - y0, dz = dy * dx2 - dx * dy2; + if (dz * dz / d2 > δ2 || abs((dx * dx2 + dy * dy2) / d2 - .5) > .3 || a0 * a1 + b0 * b1 + c0 * c1 < cosMinDistance) { + resampleLineTo(x0, y0, λ0, a0, b0, c0, x2, y2, λ2, a /= m, b /= m, c, depth, stream); + stream.point(x2, y2); + resampleLineTo(x2, y2, λ2, a, b, c, x1, y1, λ1, a1, b1, c1, depth, stream); + } + } + } + resample.precision = function(_) { + if (!arguments.length) return Math.sqrt(δ2); + maxDepth = (δ2 = _ * _) > 0 && 16; + return resample; + }; + return resample; + } + d3.geo.path = function() { + var pointRadius = 4.5, projection, context, projectStream, contextStream, cacheStream; + function path(object) { + if (object) { + if (typeof pointRadius === "function") contextStream.pointRadius(+pointRadius.apply(this, arguments)); + if (!cacheStream || !cacheStream.valid) cacheStream = projectStream(contextStream); + d3.geo.stream(object, cacheStream); + } + return contextStream.result(); + } + path.area = function(object) { + d3_geo_pathAreaSum = 0; + d3.geo.stream(object, projectStream(d3_geo_pathArea)); + return d3_geo_pathAreaSum; + }; + path.centroid = function(object) { + d3_geo_centroidX0 = d3_geo_centroidY0 = d3_geo_centroidZ0 = d3_geo_centroidX1 = d3_geo_centroidY1 = d3_geo_centroidZ1 = d3_geo_centroidX2 = d3_geo_centroidY2 = d3_geo_centroidZ2 = 0; + d3.geo.stream(object, projectStream(d3_geo_pathCentroid)); + return d3_geo_centroidZ2 ? [ d3_geo_centroidX2 / d3_geo_centroidZ2, d3_geo_centroidY2 / d3_geo_centroidZ2 ] : d3_geo_centroidZ1 ? 
[ d3_geo_centroidX1 / d3_geo_centroidZ1, d3_geo_centroidY1 / d3_geo_centroidZ1 ] : d3_geo_centroidZ0 ? [ d3_geo_centroidX0 / d3_geo_centroidZ0, d3_geo_centroidY0 / d3_geo_centroidZ0 ] : [ NaN, NaN ]; + }; + path.bounds = function(object) { + d3_geo_pathBoundsX1 = d3_geo_pathBoundsY1 = -(d3_geo_pathBoundsX0 = d3_geo_pathBoundsY0 = Infinity); + d3.geo.stream(object, projectStream(d3_geo_pathBounds)); + return [ [ d3_geo_pathBoundsX0, d3_geo_pathBoundsY0 ], [ d3_geo_pathBoundsX1, d3_geo_pathBoundsY1 ] ]; + }; + path.projection = function(_) { + if (!arguments.length) return projection; + projectStream = (projection = _) ? _.stream || d3_geo_pathProjectStream(_) : d3_identity; + return reset(); + }; + path.context = function(_) { + if (!arguments.length) return context; + contextStream = (context = _) == null ? new d3_geo_pathBuffer() : new d3_geo_pathContext(_); + if (typeof pointRadius !== "function") contextStream.pointRadius(pointRadius); + return reset(); + }; + path.pointRadius = function(_) { + if (!arguments.length) return pointRadius; + pointRadius = typeof _ === "function" ? _ : (contextStream.pointRadius(+_), +_); + return path; + }; + function reset() { + cacheStream = null; + return path; + } + return path.projection(d3.geo.albersUsa()).context(null); + }; + function d3_geo_pathProjectStream(project) { + var resample = d3_geo_resample(function(x, y) { + return project([ x * d3_degrees, y * d3_degrees ]); + }); + return function(stream) { + return d3_geo_projectionRadians(resample(stream)); + }; + } + d3.geo.transform = function(methods) { + return { + stream: function(stream) { + var transform = new d3_geo_transform(stream); + for (var k in methods) transform[k] = methods[k]; + return transform; + } + }; + }; + function d3_geo_transform(stream) { + this.stream = stream; + } + d3_geo_transform.prototype = { + point: function(x, y) { + this.stream.point(x, y); + }, + sphere: function() { + this.stream.sphere(); + }, + lineStart: function() { + this.stream.lineStart(); + }, + lineEnd: function() { + this.stream.lineEnd(); + }, + polygonStart: function() { + this.stream.polygonStart(); + }, + polygonEnd: function() { + this.stream.polygonEnd(); + } + }; + function d3_geo_transformPoint(stream, point) { + return { + point: point, + sphere: function() { + stream.sphere(); + }, + lineStart: function() { + stream.lineStart(); + }, + lineEnd: function() { + stream.lineEnd(); + }, + polygonStart: function() { + stream.polygonStart(); + }, + polygonEnd: function() { + stream.polygonEnd(); + } + }; + } + d3.geo.projection = d3_geo_projection; + d3.geo.projectionMutator = d3_geo_projectionMutator; + function d3_geo_projection(project) { + return d3_geo_projectionMutator(function() { + return project; + })(); + } + function d3_geo_projectionMutator(projectAt) { + var project, rotate, projectRotate, projectResample = d3_geo_resample(function(x, y) { + x = project(x, y); + return [ x[0] * k + δx, δy - x[1] * k ]; + }), k = 150, x = 480, y = 250, λ = 0, φ = 0, δλ = 0, δφ = 0, δγ = 0, δx, δy, preclip = d3_geo_clipAntimeridian, postclip = d3_identity, clipAngle = null, clipExtent = null, stream; + function projection(point) { + point = projectRotate(point[0] * d3_radians, point[1] * d3_radians); + return [ point[0] * k + δx, δy - point[1] * k ]; + } + function invert(point) { + point = projectRotate.invert((point[0] - δx) / k, (δy - point[1]) / k); + return point && [ point[0] * d3_degrees, point[1] * d3_degrees ]; + } + projection.stream = function(output) { + if (stream) stream.valid = false; + 
stream = d3_geo_projectionRadians(preclip(rotate, projectResample(postclip(output)))); + stream.valid = true; + return stream; + }; + projection.clipAngle = function(_) { + if (!arguments.length) return clipAngle; + preclip = _ == null ? (clipAngle = _, d3_geo_clipAntimeridian) : d3_geo_clipCircle((clipAngle = +_) * d3_radians); + return invalidate(); + }; + projection.clipExtent = function(_) { + if (!arguments.length) return clipExtent; + clipExtent = _; + postclip = _ ? d3_geo_clipExtent(_[0][0], _[0][1], _[1][0], _[1][1]) : d3_identity; + return invalidate(); + }; + projection.scale = function(_) { + if (!arguments.length) return k; + k = +_; + return reset(); + }; + projection.translate = function(_) { + if (!arguments.length) return [ x, y ]; + x = +_[0]; + y = +_[1]; + return reset(); + }; + projection.center = function(_) { + if (!arguments.length) return [ λ * d3_degrees, φ * d3_degrees ]; + λ = _[0] % 360 * d3_radians; + φ = _[1] % 360 * d3_radians; + return reset(); + }; + projection.rotate = function(_) { + if (!arguments.length) return [ δλ * d3_degrees, δφ * d3_degrees, δγ * d3_degrees ]; + δλ = _[0] % 360 * d3_radians; + δφ = _[1] % 360 * d3_radians; + δγ = _.length > 2 ? _[2] % 360 * d3_radians : 0; + return reset(); + }; + d3.rebind(projection, projectResample, "precision"); + function reset() { + projectRotate = d3_geo_compose(rotate = d3_geo_rotation(δλ, δφ, δγ), project); + var center = project(λ, φ); + δx = x - center[0] * k; + δy = y + center[1] * k; + return invalidate(); + } + function invalidate() { + if (stream) stream.valid = false, stream = null; + return projection; + } + return function() { + project = projectAt.apply(this, arguments); + projection.invert = project.invert && invert; + return reset(); + }; + } + function d3_geo_projectionRadians(stream) { + return d3_geo_transformPoint(stream, function(x, y) { + stream.point(x * d3_radians, y * d3_radians); + }); + } + function d3_geo_equirectangular(λ, φ) { + return [ λ, φ ]; + } + (d3.geo.equirectangular = function() { + return d3_geo_projection(d3_geo_equirectangular); + }).raw = d3_geo_equirectangular.invert = d3_geo_equirectangular; + d3.geo.rotation = function(rotate) { + rotate = d3_geo_rotation(rotate[0] % 360 * d3_radians, rotate[1] * d3_radians, rotate.length > 2 ? rotate[2] * d3_radians : 0); + function forward(coordinates) { + coordinates = rotate(coordinates[0] * d3_radians, coordinates[1] * d3_radians); + return coordinates[0] *= d3_degrees, coordinates[1] *= d3_degrees, coordinates; + } + forward.invert = function(coordinates) { + coordinates = rotate.invert(coordinates[0] * d3_radians, coordinates[1] * d3_radians); + return coordinates[0] *= d3_degrees, coordinates[1] *= d3_degrees, coordinates; + }; + return forward; + }; + function d3_geo_identityRotation(λ, φ) { + return [ λ > π ? λ - τ : λ < -π ? λ + τ : λ, φ ]; + } + d3_geo_identityRotation.invert = d3_geo_equirectangular; + function d3_geo_rotation(δλ, δφ, δγ) { + return δλ ? δφ || δγ ? d3_geo_compose(d3_geo_rotationλ(δλ), d3_geo_rotationφγ(δφ, δγ)) : d3_geo_rotationλ(δλ) : δφ || δγ ? d3_geo_rotationφγ(δφ, δγ) : d3_geo_identityRotation; + } + function d3_geo_forwardRotationλ(δλ) { + return function(λ, φ) { + return λ += δλ, [ λ > π ? λ - τ : λ < -π ? 
λ + τ : λ, φ ]; + }; + } + function d3_geo_rotationλ(δλ) { + var rotation = d3_geo_forwardRotationλ(δλ); + rotation.invert = d3_geo_forwardRotationλ(-δλ); + return rotation; + } + function d3_geo_rotationφγ(δφ, δγ) { + var cosδφ = Math.cos(δφ), sinδφ = Math.sin(δφ), cosδγ = Math.cos(δγ), sinδγ = Math.sin(δγ); + function rotation(λ, φ) { + var cosφ = Math.cos(φ), x = Math.cos(λ) * cosφ, y = Math.sin(λ) * cosφ, z = Math.sin(φ), k = z * cosδφ + x * sinδφ; + return [ Math.atan2(y * cosδγ - k * sinδγ, x * cosδφ - z * sinδφ), d3_asin(k * cosδγ + y * sinδγ) ]; + } + rotation.invert = function(λ, φ) { + var cosφ = Math.cos(φ), x = Math.cos(λ) * cosφ, y = Math.sin(λ) * cosφ, z = Math.sin(φ), k = z * cosδγ - y * sinδγ; + return [ Math.atan2(y * cosδγ + z * sinδγ, x * cosδφ + k * sinδφ), d3_asin(k * cosδφ - x * sinδφ) ]; + }; + return rotation; + } + d3.geo.circle = function() { + var origin = [ 0, 0 ], angle, precision = 6, interpolate; + function circle() { + var center = typeof origin === "function" ? origin.apply(this, arguments) : origin, rotate = d3_geo_rotation(-center[0] * d3_radians, -center[1] * d3_radians, 0).invert, ring = []; + interpolate(null, null, 1, { + point: function(x, y) { + ring.push(x = rotate(x, y)); + x[0] *= d3_degrees, x[1] *= d3_degrees; + } + }); + return { + type: "Polygon", + coordinates: [ ring ] + }; + } + circle.origin = function(x) { + if (!arguments.length) return origin; + origin = x; + return circle; + }; + circle.angle = function(x) { + if (!arguments.length) return angle; + interpolate = d3_geo_circleInterpolate((angle = +x) * d3_radians, precision * d3_radians); + return circle; + }; + circle.precision = function(_) { + if (!arguments.length) return precision; + interpolate = d3_geo_circleInterpolate(angle * d3_radians, (precision = +_) * d3_radians); + return circle; + }; + return circle.angle(90); + }; + function d3_geo_circleInterpolate(radius, precision) { + var cr = Math.cos(radius), sr = Math.sin(radius); + return function(from, to, direction, listener) { + var step = direction * precision; + if (from != null) { + from = d3_geo_circleAngle(cr, from); + to = d3_geo_circleAngle(cr, to); + if (direction > 0 ? from < to : from > to) from += direction * τ; + } else { + from = radius + direction * τ; + to = radius - .5 * step; + } + for (var point, t = from; direction > 0 ? t > to : t < to; t -= step) { + listener.point((point = d3_geo_spherical([ cr, -sr * Math.cos(t), -sr * Math.sin(t) ]))[0], point[1]); + } + }; + } + function d3_geo_circleAngle(cr, point) { + var a = d3_geo_cartesian(point); + a[0] -= cr; + d3_geo_cartesianNormalize(a); + var angle = d3_acos(-a[1]); + return ((-a[2] < 0 ? 
-angle : angle) + 2 * Math.PI - ε) % (2 * Math.PI); + } + d3.geo.distance = function(a, b) { + var Δλ = (b[0] - a[0]) * d3_radians, φ0 = a[1] * d3_radians, φ1 = b[1] * d3_radians, sinΔλ = Math.sin(Δλ), cosΔλ = Math.cos(Δλ), sinφ0 = Math.sin(φ0), cosφ0 = Math.cos(φ0), sinφ1 = Math.sin(φ1), cosφ1 = Math.cos(φ1), t; + return Math.atan2(Math.sqrt((t = cosφ1 * sinΔλ) * t + (t = cosφ0 * sinφ1 - sinφ0 * cosφ1 * cosΔλ) * t), sinφ0 * sinφ1 + cosφ0 * cosφ1 * cosΔλ); + }; + d3.geo.graticule = function() { + var x1, x0, X1, X0, y1, y0, Y1, Y0, dx = 10, dy = dx, DX = 90, DY = 360, x, y, X, Y, precision = 2.5; + function graticule() { + return { + type: "MultiLineString", + coordinates: lines() + }; + } + function lines() { + return d3.range(Math.ceil(X0 / DX) * DX, X1, DX).map(X).concat(d3.range(Math.ceil(Y0 / DY) * DY, Y1, DY).map(Y)).concat(d3.range(Math.ceil(x0 / dx) * dx, x1, dx).filter(function(x) { + return abs(x % DX) > ε; + }).map(x)).concat(d3.range(Math.ceil(y0 / dy) * dy, y1, dy).filter(function(y) { + return abs(y % DY) > ε; + }).map(y)); + } + graticule.lines = function() { + return lines().map(function(coordinates) { + return { + type: "LineString", + coordinates: coordinates + }; + }); + }; + graticule.outline = function() { + return { + type: "Polygon", + coordinates: [ X(X0).concat(Y(Y1).slice(1), X(X1).reverse().slice(1), Y(Y0).reverse().slice(1)) ] + }; + }; + graticule.extent = function(_) { + if (!arguments.length) return graticule.minorExtent(); + return graticule.majorExtent(_).minorExtent(_); + }; + graticule.majorExtent = function(_) { + if (!arguments.length) return [ [ X0, Y0 ], [ X1, Y1 ] ]; + X0 = +_[0][0], X1 = +_[1][0]; + Y0 = +_[0][1], Y1 = +_[1][1]; + if (X0 > X1) _ = X0, X0 = X1, X1 = _; + if (Y0 > Y1) _ = Y0, Y0 = Y1, Y1 = _; + return graticule.precision(precision); + }; + graticule.minorExtent = function(_) { + if (!arguments.length) return [ [ x0, y0 ], [ x1, y1 ] ]; + x0 = +_[0][0], x1 = +_[1][0]; + y0 = +_[0][1], y1 = +_[1][1]; + if (x0 > x1) _ = x0, x0 = x1, x1 = _; + if (y0 > y1) _ = y0, y0 = y1, y1 = _; + return graticule.precision(precision); + }; + graticule.step = function(_) { + if (!arguments.length) return graticule.minorStep(); + return graticule.majorStep(_).minorStep(_); + }; + graticule.majorStep = function(_) { + if (!arguments.length) return [ DX, DY ]; + DX = +_[0], DY = +_[1]; + return graticule; + }; + graticule.minorStep = function(_) { + if (!arguments.length) return [ dx, dy ]; + dx = +_[0], dy = +_[1]; + return graticule; + }; + graticule.precision = function(_) { + if (!arguments.length) return precision; + precision = +_; + x = d3_geo_graticuleX(y0, y1, 90); + y = d3_geo_graticuleY(x0, x1, precision); + X = d3_geo_graticuleX(Y0, Y1, 90); + Y = d3_geo_graticuleY(X0, X1, precision); + return graticule; + }; + return graticule.majorExtent([ [ -180, -90 + ε ], [ 180, 90 - ε ] ]).minorExtent([ [ -180, -80 - ε ], [ 180, 80 + ε ] ]); + }; + function d3_geo_graticuleX(y0, y1, dy) { + var y = d3.range(y0, y1 - ε, dy).concat(y1); + return function(x) { + return y.map(function(y) { + return [ x, y ]; + }); + }; + } + function d3_geo_graticuleY(x0, x1, dx) { + var x = d3.range(x0, x1 - ε, dx).concat(x1); + return function(y) { + return x.map(function(x) { + return [ x, y ]; + }); + }; + } + function d3_source(d) { + return d.source; + } + function d3_target(d) { + return d.target; + } + d3.geo.greatArc = function() { + var source = d3_source, source_, target = d3_target, target_; + function greatArc() { + return { + type: "LineString", + coordinates: [ 
source_ || source.apply(this, arguments), target_ || target.apply(this, arguments) ] + }; + } + greatArc.distance = function() { + return d3.geo.distance(source_ || source.apply(this, arguments), target_ || target.apply(this, arguments)); + }; + greatArc.source = function(_) { + if (!arguments.length) return source; + source = _, source_ = typeof _ === "function" ? null : _; + return greatArc; + }; + greatArc.target = function(_) { + if (!arguments.length) return target; + target = _, target_ = typeof _ === "function" ? null : _; + return greatArc; + }; + greatArc.precision = function() { + return arguments.length ? greatArc : 0; + }; + return greatArc; + }; + d3.geo.interpolate = function(source, target) { + return d3_geo_interpolate(source[0] * d3_radians, source[1] * d3_radians, target[0] * d3_radians, target[1] * d3_radians); + }; + function d3_geo_interpolate(x0, y0, x1, y1) { + var cy0 = Math.cos(y0), sy0 = Math.sin(y0), cy1 = Math.cos(y1), sy1 = Math.sin(y1), kx0 = cy0 * Math.cos(x0), ky0 = cy0 * Math.sin(x0), kx1 = cy1 * Math.cos(x1), ky1 = cy1 * Math.sin(x1), d = 2 * Math.asin(Math.sqrt(d3_haversin(y1 - y0) + cy0 * cy1 * d3_haversin(x1 - x0))), k = 1 / Math.sin(d); + var interpolate = d ? function(t) { + var B = Math.sin(t *= d) * k, A = Math.sin(d - t) * k, x = A * kx0 + B * kx1, y = A * ky0 + B * ky1, z = A * sy0 + B * sy1; + return [ Math.atan2(y, x) * d3_degrees, Math.atan2(z, Math.sqrt(x * x + y * y)) * d3_degrees ]; + } : function() { + return [ x0 * d3_degrees, y0 * d3_degrees ]; + }; + interpolate.distance = d; + return interpolate; + } + d3.geo.length = function(object) { + d3_geo_lengthSum = 0; + d3.geo.stream(object, d3_geo_length); + return d3_geo_lengthSum; + }; + var d3_geo_lengthSum; + var d3_geo_length = { + sphere: d3_noop, + point: d3_noop, + lineStart: d3_geo_lengthLineStart, + lineEnd: d3_noop, + polygonStart: d3_noop, + polygonEnd: d3_noop + }; + function d3_geo_lengthLineStart() { + var λ0, sinφ0, cosφ0; + d3_geo_length.point = function(λ, φ) { + λ0 = λ * d3_radians, sinφ0 = Math.sin(φ *= d3_radians), cosφ0 = Math.cos(φ); + d3_geo_length.point = nextPoint; + }; + d3_geo_length.lineEnd = function() { + d3_geo_length.point = d3_geo_length.lineEnd = d3_noop; + }; + function nextPoint(λ, φ) { + var sinφ = Math.sin(φ *= d3_radians), cosφ = Math.cos(φ), t = abs((λ *= d3_radians) - λ0), cosΔλ = Math.cos(t); + d3_geo_lengthSum += Math.atan2(Math.sqrt((t = cosφ * Math.sin(t)) * t + (t = cosφ0 * sinφ - sinφ0 * cosφ * cosΔλ) * t), sinφ0 * sinφ + cosφ0 * cosφ * cosΔλ); + λ0 = λ, sinφ0 = sinφ, cosφ0 = cosφ; + } + } + function d3_geo_azimuthal(scale, angle) { + function azimuthal(λ, φ) { + var cosλ = Math.cos(λ), cosφ = Math.cos(φ), k = scale(cosλ * cosφ); + return [ k * cosφ * Math.sin(λ), k * Math.sin(φ) ]; + } + azimuthal.invert = function(x, y) { + var ρ = Math.sqrt(x * x + y * y), c = angle(ρ), sinc = Math.sin(c), cosc = Math.cos(c); + return [ Math.atan2(x * sinc, ρ * cosc), Math.asin(ρ && y * sinc / ρ) ]; + }; + return azimuthal; + } + var d3_geo_azimuthalEqualArea = d3_geo_azimuthal(function(cosλcosφ) { + return Math.sqrt(2 / (1 + cosλcosφ)); + }, function(ρ) { + return 2 * Math.asin(ρ / 2); + }); + (d3.geo.azimuthalEqualArea = function() { + return d3_geo_projection(d3_geo_azimuthalEqualArea); + }).raw = d3_geo_azimuthalEqualArea; + var d3_geo_azimuthalEquidistant = d3_geo_azimuthal(function(cosλcosφ) { + var c = Math.acos(cosλcosφ); + return c && c / Math.sin(c); + }, d3_identity); + (d3.geo.azimuthalEquidistant = function() { + return 
d3_geo_projection(d3_geo_azimuthalEquidistant); + }).raw = d3_geo_azimuthalEquidistant; + function d3_geo_conicConformal(φ0, φ1) { + var cosφ0 = Math.cos(φ0), t = function(φ) { + return Math.tan(π / 4 + φ / 2); + }, n = φ0 === φ1 ? Math.sin(φ0) : Math.log(cosφ0 / Math.cos(φ1)) / Math.log(t(φ1) / t(φ0)), F = cosφ0 * Math.pow(t(φ0), n) / n; + if (!n) return d3_geo_mercator; + function forward(λ, φ) { + if (F > 0) { + if (φ < -halfπ + ε) φ = -halfπ + ε; + } else { + if (φ > halfπ - ε) φ = halfπ - ε; + } + var ρ = F / Math.pow(t(φ), n); + return [ ρ * Math.sin(n * λ), F - ρ * Math.cos(n * λ) ]; + } + forward.invert = function(x, y) { + var ρ0_y = F - y, ρ = d3_sgn(n) * Math.sqrt(x * x + ρ0_y * ρ0_y); + return [ Math.atan2(x, ρ0_y) / n, 2 * Math.atan(Math.pow(F / ρ, 1 / n)) - halfπ ]; + }; + return forward; + } + (d3.geo.conicConformal = function() { + return d3_geo_conic(d3_geo_conicConformal); + }).raw = d3_geo_conicConformal; + function d3_geo_conicEquidistant(φ0, φ1) { + var cosφ0 = Math.cos(φ0), n = φ0 === φ1 ? Math.sin(φ0) : (cosφ0 - Math.cos(φ1)) / (φ1 - φ0), G = cosφ0 / n + φ0; + if (abs(n) < ε) return d3_geo_equirectangular; + function forward(λ, φ) { + var ρ = G - φ; + return [ ρ * Math.sin(n * λ), G - ρ * Math.cos(n * λ) ]; + } + forward.invert = function(x, y) { + var ρ0_y = G - y; + return [ Math.atan2(x, ρ0_y) / n, G - d3_sgn(n) * Math.sqrt(x * x + ρ0_y * ρ0_y) ]; + }; + return forward; + } + (d3.geo.conicEquidistant = function() { + return d3_geo_conic(d3_geo_conicEquidistant); + }).raw = d3_geo_conicEquidistant; + var d3_geo_gnomonic = d3_geo_azimuthal(function(cosλcosφ) { + return 1 / cosλcosφ; + }, Math.atan); + (d3.geo.gnomonic = function() { + return d3_geo_projection(d3_geo_gnomonic); + }).raw = d3_geo_gnomonic; + function d3_geo_mercator(λ, φ) { + return [ λ, Math.log(Math.tan(π / 4 + φ / 2)) ]; + } + d3_geo_mercator.invert = function(x, y) { + return [ x, 2 * Math.atan(Math.exp(y)) - halfπ ]; + }; + function d3_geo_mercatorProjection(project) { + var m = d3_geo_projection(project), scale = m.scale, translate = m.translate, clipExtent = m.clipExtent, clipAuto; + m.scale = function() { + var v = scale.apply(m, arguments); + return v === m ? clipAuto ? m.clipExtent(null) : m : v; + }; + m.translate = function() { + var v = translate.apply(m, arguments); + return v === m ? clipAuto ? 
m.clipExtent(null) : m : v; + }; + m.clipExtent = function(_) { + var v = clipExtent.apply(m, arguments); + if (v === m) { + if (clipAuto = _ == null) { + var k = π * scale(), t = translate(); + clipExtent([ [ t[0] - k, t[1] - k ], [ t[0] + k, t[1] + k ] ]); + } + } else if (clipAuto) { + v = null; + } + return v; + }; + return m.clipExtent(null); + } + (d3.geo.mercator = function() { + return d3_geo_mercatorProjection(d3_geo_mercator); + }).raw = d3_geo_mercator; + var d3_geo_orthographic = d3_geo_azimuthal(function() { + return 1; + }, Math.asin); + (d3.geo.orthographic = function() { + return d3_geo_projection(d3_geo_orthographic); + }).raw = d3_geo_orthographic; + var d3_geo_stereographic = d3_geo_azimuthal(function(cosλcosφ) { + return 1 / (1 + cosλcosφ); + }, function(ρ) { + return 2 * Math.atan(ρ); + }); + (d3.geo.stereographic = function() { + return d3_geo_projection(d3_geo_stereographic); + }).raw = d3_geo_stereographic; + function d3_geo_transverseMercator(λ, φ) { + return [ Math.log(Math.tan(π / 4 + φ / 2)), -λ ]; + } + d3_geo_transverseMercator.invert = function(x, y) { + return [ -y, 2 * Math.atan(Math.exp(x)) - halfπ ]; + }; + (d3.geo.transverseMercator = function() { + var projection = d3_geo_mercatorProjection(d3_geo_transverseMercator), center = projection.center, rotate = projection.rotate; + projection.center = function(_) { + return _ ? center([ -_[1], _[0] ]) : (_ = center(), [ -_[1], _[0] ]); + }; + projection.rotate = function(_) { + return _ ? rotate([ _[0], _[1], _.length > 2 ? _[2] + 90 : 90 ]) : (_ = rotate(), + [ _[0], _[1], _[2] - 90 ]); + }; + return projection.rotate([ 0, 0 ]); + }).raw = d3_geo_transverseMercator; + d3.geom = {}; + function d3_geom_pointX(d) { + return d[0]; + } + function d3_geom_pointY(d) { + return d[1]; + } + d3.geom.hull = function(vertices) { + var x = d3_geom_pointX, y = d3_geom_pointY; + if (arguments.length) return hull(vertices); + function hull(data) { + if (data.length < 3) return []; + var fx = d3_functor(x), fy = d3_functor(y), i, n = data.length, points = [], flippedPoints = []; + for (i = 0; i < n; i++) { + points.push([ +fx.call(this, data[i], i), +fy.call(this, data[i], i), i ]); + } + points.sort(d3_geom_hullOrder); + for (i = 0; i < n; i++) flippedPoints.push([ points[i][0], -points[i][1] ]); + var upper = d3_geom_hullUpper(points), lower = d3_geom_hullUpper(flippedPoints); + var skipLeft = lower[0] === upper[0], skipRight = lower[lower.length - 1] === upper[upper.length - 1], polygon = []; + for (i = upper.length - 1; i >= 0; --i) polygon.push(data[points[upper[i]][2]]); + for (i = +skipLeft; i < lower.length - skipRight; ++i) polygon.push(data[points[lower[i]][2]]); + return polygon; + } + hull.x = function(_) { + return arguments.length ? (x = _, hull) : x; + }; + hull.y = function(_) { + return arguments.length ? 
(y = _, hull) : y; + }; + return hull; + }; + function d3_geom_hullUpper(points) { + var n = points.length, hull = [ 0, 1 ], hs = 2; + for (var i = 2; i < n; i++) { + while (hs > 1 && d3_cross2d(points[hull[hs - 2]], points[hull[hs - 1]], points[i]) <= 0) --hs; + hull[hs++] = i; + } + return hull.slice(0, hs); + } + function d3_geom_hullOrder(a, b) { + return a[0] - b[0] || a[1] - b[1]; + } + d3.geom.polygon = function(coordinates) { + d3_subclass(coordinates, d3_geom_polygonPrototype); + return coordinates; + }; + var d3_geom_polygonPrototype = d3.geom.polygon.prototype = []; + d3_geom_polygonPrototype.area = function() { + var i = -1, n = this.length, a, b = this[n - 1], area = 0; + while (++i < n) { + a = b; + b = this[i]; + area += a[1] * b[0] - a[0] * b[1]; + } + return area * .5; + }; + d3_geom_polygonPrototype.centroid = function(k) { + var i = -1, n = this.length, x = 0, y = 0, a, b = this[n - 1], c; + if (!arguments.length) k = -1 / (6 * this.area()); + while (++i < n) { + a = b; + b = this[i]; + c = a[0] * b[1] - b[0] * a[1]; + x += (a[0] + b[0]) * c; + y += (a[1] + b[1]) * c; + } + return [ x * k, y * k ]; + }; + d3_geom_polygonPrototype.clip = function(subject) { + var input, closed = d3_geom_polygonClosed(subject), i = -1, n = this.length - d3_geom_polygonClosed(this), j, m, a = this[n - 1], b, c, d; + while (++i < n) { + input = subject.slice(); + subject.length = 0; + b = this[i]; + c = input[(m = input.length - closed) - 1]; + j = -1; + while (++j < m) { + d = input[j]; + if (d3_geom_polygonInside(d, a, b)) { + if (!d3_geom_polygonInside(c, a, b)) { + subject.push(d3_geom_polygonIntersect(c, d, a, b)); + } + subject.push(d); + } else if (d3_geom_polygonInside(c, a, b)) { + subject.push(d3_geom_polygonIntersect(c, d, a, b)); + } + c = d; + } + if (closed) subject.push(subject[0]); + a = b; + } + return subject; + }; + function d3_geom_polygonInside(p, a, b) { + return (b[0] - a[0]) * (p[1] - a[1]) < (b[1] - a[1]) * (p[0] - a[0]); + } + function d3_geom_polygonIntersect(c, d, a, b) { + var x1 = c[0], x3 = a[0], x21 = d[0] - x1, x43 = b[0] - x3, y1 = c[1], y3 = a[1], y21 = d[1] - y1, y43 = b[1] - y3, ua = (x43 * (y1 - y3) - y43 * (x1 - x3)) / (y43 * x21 - x43 * y21); + return [ x1 + ua * x21, y1 + ua * y21 ]; + } + function d3_geom_polygonClosed(coordinates) { + var a = coordinates[0], b = coordinates[coordinates.length - 1]; + return !(a[0] - b[0] || a[1] - b[1]); + } + var d3_geom_voronoiEdges, d3_geom_voronoiCells, d3_geom_voronoiBeaches, d3_geom_voronoiBeachPool = [], d3_geom_voronoiFirstCircle, d3_geom_voronoiCircles, d3_geom_voronoiCirclePool = []; + function d3_geom_voronoiBeach() { + d3_geom_voronoiRedBlackNode(this); + this.edge = this.site = this.circle = null; + } + function d3_geom_voronoiCreateBeach(site) { + var beach = d3_geom_voronoiBeachPool.pop() || new d3_geom_voronoiBeach(); + beach.site = site; + return beach; + } + function d3_geom_voronoiDetachBeach(beach) { + d3_geom_voronoiDetachCircle(beach); + d3_geom_voronoiBeaches.remove(beach); + d3_geom_voronoiBeachPool.push(beach); + d3_geom_voronoiRedBlackNode(beach); + } + function d3_geom_voronoiRemoveBeach(beach) { + var circle = beach.circle, x = circle.x, y = circle.cy, vertex = { + x: x, + y: y + }, previous = beach.P, next = beach.N, disappearing = [ beach ]; + d3_geom_voronoiDetachBeach(beach); + var lArc = previous; + while (lArc.circle && abs(x - lArc.circle.x) < ε && abs(y - lArc.circle.cy) < ε) { + previous = lArc.P; + disappearing.unshift(lArc); + d3_geom_voronoiDetachBeach(lArc); + lArc = 
previous; + } + disappearing.unshift(lArc); + d3_geom_voronoiDetachCircle(lArc); + var rArc = next; + while (rArc.circle && abs(x - rArc.circle.x) < ε && abs(y - rArc.circle.cy) < ε) { + next = rArc.N; + disappearing.push(rArc); + d3_geom_voronoiDetachBeach(rArc); + rArc = next; + } + disappearing.push(rArc); + d3_geom_voronoiDetachCircle(rArc); + var nArcs = disappearing.length, iArc; + for (iArc = 1; iArc < nArcs; ++iArc) { + rArc = disappearing[iArc]; + lArc = disappearing[iArc - 1]; + d3_geom_voronoiSetEdgeEnd(rArc.edge, lArc.site, rArc.site, vertex); + } + lArc = disappearing[0]; + rArc = disappearing[nArcs - 1]; + rArc.edge = d3_geom_voronoiCreateEdge(lArc.site, rArc.site, null, vertex); + d3_geom_voronoiAttachCircle(lArc); + d3_geom_voronoiAttachCircle(rArc); + } + function d3_geom_voronoiAddBeach(site) { + var x = site.x, directrix = site.y, lArc, rArc, dxl, dxr, node = d3_geom_voronoiBeaches._; + while (node) { + dxl = d3_geom_voronoiLeftBreakPoint(node, directrix) - x; + if (dxl > ε) node = node.L; else { + dxr = x - d3_geom_voronoiRightBreakPoint(node, directrix); + if (dxr > ε) { + if (!node.R) { + lArc = node; + break; + } + node = node.R; + } else { + if (dxl > -ε) { + lArc = node.P; + rArc = node; + } else if (dxr > -ε) { + lArc = node; + rArc = node.N; + } else { + lArc = rArc = node; + } + break; + } + } + } + var newArc = d3_geom_voronoiCreateBeach(site); + d3_geom_voronoiBeaches.insert(lArc, newArc); + if (!lArc && !rArc) return; + if (lArc === rArc) { + d3_geom_voronoiDetachCircle(lArc); + rArc = d3_geom_voronoiCreateBeach(lArc.site); + d3_geom_voronoiBeaches.insert(newArc, rArc); + newArc.edge = rArc.edge = d3_geom_voronoiCreateEdge(lArc.site, newArc.site); + d3_geom_voronoiAttachCircle(lArc); + d3_geom_voronoiAttachCircle(rArc); + return; + } + if (!rArc) { + newArc.edge = d3_geom_voronoiCreateEdge(lArc.site, newArc.site); + return; + } + d3_geom_voronoiDetachCircle(lArc); + d3_geom_voronoiDetachCircle(rArc); + var lSite = lArc.site, ax = lSite.x, ay = lSite.y, bx = site.x - ax, by = site.y - ay, rSite = rArc.site, cx = rSite.x - ax, cy = rSite.y - ay, d = 2 * (bx * cy - by * cx), hb = bx * bx + by * by, hc = cx * cx + cy * cy, vertex = { + x: (cy * hb - by * hc) / d + ax, + y: (bx * hc - cx * hb) / d + ay + }; + d3_geom_voronoiSetEdgeEnd(rArc.edge, lSite, rSite, vertex); + newArc.edge = d3_geom_voronoiCreateEdge(lSite, site, null, vertex); + rArc.edge = d3_geom_voronoiCreateEdge(site, rSite, null, vertex); + d3_geom_voronoiAttachCircle(lArc); + d3_geom_voronoiAttachCircle(rArc); + } + function d3_geom_voronoiLeftBreakPoint(arc, directrix) { + var site = arc.site, rfocx = site.x, rfocy = site.y, pby2 = rfocy - directrix; + if (!pby2) return rfocx; + var lArc = arc.P; + if (!lArc) return -Infinity; + site = lArc.site; + var lfocx = site.x, lfocy = site.y, plby2 = lfocy - directrix; + if (!plby2) return lfocx; + var hl = lfocx - rfocx, aby2 = 1 / pby2 - 1 / plby2, b = hl / plby2; + if (aby2) return (-b + Math.sqrt(b * b - 2 * aby2 * (hl * hl / (-2 * plby2) - lfocy + plby2 / 2 + rfocy - pby2 / 2))) / aby2 + rfocx; + return (rfocx + lfocx) / 2; + } + function d3_geom_voronoiRightBreakPoint(arc, directrix) { + var rArc = arc.N; + if (rArc) return d3_geom_voronoiLeftBreakPoint(rArc, directrix); + var site = arc.site; + return site.y === directrix ? 
site.x : Infinity; + } + function d3_geom_voronoiCell(site) { + this.site = site; + this.edges = []; + } + d3_geom_voronoiCell.prototype.prepare = function() { + var halfEdges = this.edges, iHalfEdge = halfEdges.length, edge; + while (iHalfEdge--) { + edge = halfEdges[iHalfEdge].edge; + if (!edge.b || !edge.a) halfEdges.splice(iHalfEdge, 1); + } + halfEdges.sort(d3_geom_voronoiHalfEdgeOrder); + return halfEdges.length; + }; + function d3_geom_voronoiCloseCells(extent) { + var x0 = extent[0][0], x1 = extent[1][0], y0 = extent[0][1], y1 = extent[1][1], x2, y2, x3, y3, cells = d3_geom_voronoiCells, iCell = cells.length, cell, iHalfEdge, halfEdges, nHalfEdges, start, end; + while (iCell--) { + cell = cells[iCell]; + if (!cell || !cell.prepare()) continue; + halfEdges = cell.edges; + nHalfEdges = halfEdges.length; + iHalfEdge = 0; + while (iHalfEdge < nHalfEdges) { + end = halfEdges[iHalfEdge].end(), x3 = end.x, y3 = end.y; + start = halfEdges[++iHalfEdge % nHalfEdges].start(), x2 = start.x, y2 = start.y; + if (abs(x3 - x2) > ε || abs(y3 - y2) > ε) { + halfEdges.splice(iHalfEdge, 0, new d3_geom_voronoiHalfEdge(d3_geom_voronoiCreateBorderEdge(cell.site, end, abs(x3 - x0) < ε && y1 - y3 > ε ? { + x: x0, + y: abs(x2 - x0) < ε ? y2 : y1 + } : abs(y3 - y1) < ε && x1 - x3 > ε ? { + x: abs(y2 - y1) < ε ? x2 : x1, + y: y1 + } : abs(x3 - x1) < ε && y3 - y0 > ε ? { + x: x1, + y: abs(x2 - x1) < ε ? y2 : y0 + } : abs(y3 - y0) < ε && x3 - x0 > ε ? { + x: abs(y2 - y0) < ε ? x2 : x0, + y: y0 + } : null), cell.site, null)); + ++nHalfEdges; + } + } + } + } + function d3_geom_voronoiHalfEdgeOrder(a, b) { + return b.angle - a.angle; + } + function d3_geom_voronoiCircle() { + d3_geom_voronoiRedBlackNode(this); + this.x = this.y = this.arc = this.site = this.cy = null; + } + function d3_geom_voronoiAttachCircle(arc) { + var lArc = arc.P, rArc = arc.N; + if (!lArc || !rArc) return; + var lSite = lArc.site, cSite = arc.site, rSite = rArc.site; + if (lSite === rSite) return; + var bx = cSite.x, by = cSite.y, ax = lSite.x - bx, ay = lSite.y - by, cx = rSite.x - bx, cy = rSite.y - by; + var d = 2 * (ax * cy - ay * cx); + if (d >= -ε2) return; + var ha = ax * ax + ay * ay, hc = cx * cx + cy * cy, x = (cy * ha - ay * hc) / d, y = (ax * hc - cx * ha) / d, cy = y + by; + var circle = d3_geom_voronoiCirclePool.pop() || new d3_geom_voronoiCircle(); + circle.arc = arc; + circle.site = cSite; + circle.x = x + bx; + circle.y = cy + Math.sqrt(x * x + y * y); + circle.cy = cy; + arc.circle = circle; + var before = null, node = d3_geom_voronoiCircles._; + while (node) { + if (circle.y < node.y || circle.y === node.y && circle.x <= node.x) { + if (node.L) node = node.L; else { + before = node.P; + break; + } + } else { + if (node.R) node = node.R; else { + before = node; + break; + } + } + } + d3_geom_voronoiCircles.insert(before, circle); + if (!before) d3_geom_voronoiFirstCircle = circle; + } + function d3_geom_voronoiDetachCircle(arc) { + var circle = arc.circle; + if (circle) { + if (!circle.P) d3_geom_voronoiFirstCircle = circle.N; + d3_geom_voronoiCircles.remove(circle); + d3_geom_voronoiCirclePool.push(circle); + d3_geom_voronoiRedBlackNode(circle); + arc.circle = null; + } + } + function d3_geom_voronoiClipEdges(extent) { + var edges = d3_geom_voronoiEdges, clip = d3_geom_clipLine(extent[0][0], extent[0][1], extent[1][0], extent[1][1]), i = edges.length, e; + while (i--) { + e = edges[i]; + if (!d3_geom_voronoiConnectEdge(e, extent) || !clip(e) || abs(e.a.x - e.b.x) < ε && abs(e.a.y - e.b.y) < ε) { + e.a = e.b = null; + 
edges.splice(i, 1); + } + } + } + function d3_geom_voronoiConnectEdge(edge, extent) { + var vb = edge.b; + if (vb) return true; + var va = edge.a, x0 = extent[0][0], x1 = extent[1][0], y0 = extent[0][1], y1 = extent[1][1], lSite = edge.l, rSite = edge.r, lx = lSite.x, ly = lSite.y, rx = rSite.x, ry = rSite.y, fx = (lx + rx) / 2, fy = (ly + ry) / 2, fm, fb; + if (ry === ly) { + if (fx < x0 || fx >= x1) return; + if (lx > rx) { + if (!va) va = { + x: fx, + y: y0 + }; else if (va.y >= y1) return; + vb = { + x: fx, + y: y1 + }; + } else { + if (!va) va = { + x: fx, + y: y1 + }; else if (va.y < y0) return; + vb = { + x: fx, + y: y0 + }; + } + } else { + fm = (lx - rx) / (ry - ly); + fb = fy - fm * fx; + if (fm < -1 || fm > 1) { + if (lx > rx) { + if (!va) va = { + x: (y0 - fb) / fm, + y: y0 + }; else if (va.y >= y1) return; + vb = { + x: (y1 - fb) / fm, + y: y1 + }; + } else { + if (!va) va = { + x: (y1 - fb) / fm, + y: y1 + }; else if (va.y < y0) return; + vb = { + x: (y0 - fb) / fm, + y: y0 + }; + } + } else { + if (ly < ry) { + if (!va) va = { + x: x0, + y: fm * x0 + fb + }; else if (va.x >= x1) return; + vb = { + x: x1, + y: fm * x1 + fb + }; + } else { + if (!va) va = { + x: x1, + y: fm * x1 + fb + }; else if (va.x < x0) return; + vb = { + x: x0, + y: fm * x0 + fb + }; + } + } + } + edge.a = va; + edge.b = vb; + return true; + } + function d3_geom_voronoiEdge(lSite, rSite) { + this.l = lSite; + this.r = rSite; + this.a = this.b = null; + } + function d3_geom_voronoiCreateEdge(lSite, rSite, va, vb) { + var edge = new d3_geom_voronoiEdge(lSite, rSite); + d3_geom_voronoiEdges.push(edge); + if (va) d3_geom_voronoiSetEdgeEnd(edge, lSite, rSite, va); + if (vb) d3_geom_voronoiSetEdgeEnd(edge, rSite, lSite, vb); + d3_geom_voronoiCells[lSite.i].edges.push(new d3_geom_voronoiHalfEdge(edge, lSite, rSite)); + d3_geom_voronoiCells[rSite.i].edges.push(new d3_geom_voronoiHalfEdge(edge, rSite, lSite)); + return edge; + } + function d3_geom_voronoiCreateBorderEdge(lSite, va, vb) { + var edge = new d3_geom_voronoiEdge(lSite, null); + edge.a = va; + edge.b = vb; + d3_geom_voronoiEdges.push(edge); + return edge; + } + function d3_geom_voronoiSetEdgeEnd(edge, lSite, rSite, vertex) { + if (!edge.a && !edge.b) { + edge.a = vertex; + edge.l = lSite; + edge.r = rSite; + } else if (edge.l === rSite) { + edge.b = vertex; + } else { + edge.a = vertex; + } + } + function d3_geom_voronoiHalfEdge(edge, lSite, rSite) { + var va = edge.a, vb = edge.b; + this.edge = edge; + this.site = lSite; + this.angle = rSite ? Math.atan2(rSite.y - lSite.y, rSite.x - lSite.x) : edge.l === lSite ? Math.atan2(vb.x - va.x, va.y - vb.y) : Math.atan2(va.x - vb.x, vb.y - va.y); + } + d3_geom_voronoiHalfEdge.prototype = { + start: function() { + return this.edge.l === this.site ? this.edge.a : this.edge.b; + }, + end: function() { + return this.edge.l === this.site ? 
this.edge.b : this.edge.a; + } + }; + function d3_geom_voronoiRedBlackTree() { + this._ = null; + } + function d3_geom_voronoiRedBlackNode(node) { + node.U = node.C = node.L = node.R = node.P = node.N = null; + } + d3_geom_voronoiRedBlackTree.prototype = { + insert: function(after, node) { + var parent, grandpa, uncle; + if (after) { + node.P = after; + node.N = after.N; + if (after.N) after.N.P = node; + after.N = node; + if (after.R) { + after = after.R; + while (after.L) after = after.L; + after.L = node; + } else { + after.R = node; + } + parent = after; + } else if (this._) { + after = d3_geom_voronoiRedBlackFirst(this._); + node.P = null; + node.N = after; + after.P = after.L = node; + parent = after; + } else { + node.P = node.N = null; + this._ = node; + parent = null; + } + node.L = node.R = null; + node.U = parent; + node.C = true; + after = node; + while (parent && parent.C) { + grandpa = parent.U; + if (parent === grandpa.L) { + uncle = grandpa.R; + if (uncle && uncle.C) { + parent.C = uncle.C = false; + grandpa.C = true; + after = grandpa; + } else { + if (after === parent.R) { + d3_geom_voronoiRedBlackRotateLeft(this, parent); + after = parent; + parent = after.U; + } + parent.C = false; + grandpa.C = true; + d3_geom_voronoiRedBlackRotateRight(this, grandpa); + } + } else { + uncle = grandpa.L; + if (uncle && uncle.C) { + parent.C = uncle.C = false; + grandpa.C = true; + after = grandpa; + } else { + if (after === parent.L) { + d3_geom_voronoiRedBlackRotateRight(this, parent); + after = parent; + parent = after.U; + } + parent.C = false; + grandpa.C = true; + d3_geom_voronoiRedBlackRotateLeft(this, grandpa); + } + } + parent = after.U; + } + this._.C = false; + }, + remove: function(node) { + if (node.N) node.N.P = node.P; + if (node.P) node.P.N = node.N; + node.N = node.P = null; + var parent = node.U, sibling, left = node.L, right = node.R, next, red; + if (!left) next = right; else if (!right) next = left; else next = d3_geom_voronoiRedBlackFirst(right); + if (parent) { + if (parent.L === node) parent.L = next; else parent.R = next; + } else { + this._ = next; + } + if (left && right) { + red = next.C; + next.C = node.C; + next.L = left; + left.U = next; + if (next !== right) { + parent = next.U; + next.U = node.U; + node = next.R; + parent.L = node; + next.R = right; + right.U = next; + } else { + next.U = parent; + parent = next; + node = next.R; + } + } else { + red = node.C; + node = next; + } + if (node) node.U = parent; + if (red) return; + if (node && node.C) { + node.C = false; + return; + } + do { + if (node === this._) break; + if (node === parent.L) { + sibling = parent.R; + if (sibling.C) { + sibling.C = false; + parent.C = true; + d3_geom_voronoiRedBlackRotateLeft(this, parent); + sibling = parent.R; + } + if (sibling.L && sibling.L.C || sibling.R && sibling.R.C) { + if (!sibling.R || !sibling.R.C) { + sibling.L.C = false; + sibling.C = true; + d3_geom_voronoiRedBlackRotateRight(this, sibling); + sibling = parent.R; + } + sibling.C = parent.C; + parent.C = sibling.R.C = false; + d3_geom_voronoiRedBlackRotateLeft(this, parent); + node = this._; + break; + } + } else { + sibling = parent.L; + if (sibling.C) { + sibling.C = false; + parent.C = true; + d3_geom_voronoiRedBlackRotateRight(this, parent); + sibling = parent.L; + } + if (sibling.L && sibling.L.C || sibling.R && sibling.R.C) { + if (!sibling.L || !sibling.L.C) { + sibling.R.C = false; + sibling.C = true; + d3_geom_voronoiRedBlackRotateLeft(this, sibling); + sibling = parent.L; + } + sibling.C = 
parent.C; + parent.C = sibling.L.C = false; + d3_geom_voronoiRedBlackRotateRight(this, parent); + node = this._; + break; + } + } + sibling.C = true; + node = parent; + parent = parent.U; + } while (!node.C); + if (node) node.C = false; + } + }; + function d3_geom_voronoiRedBlackRotateLeft(tree, node) { + var p = node, q = node.R, parent = p.U; + if (parent) { + if (parent.L === p) parent.L = q; else parent.R = q; + } else { + tree._ = q; + } + q.U = parent; + p.U = q; + p.R = q.L; + if (p.R) p.R.U = p; + q.L = p; + } + function d3_geom_voronoiRedBlackRotateRight(tree, node) { + var p = node, q = node.L, parent = p.U; + if (parent) { + if (parent.L === p) parent.L = q; else parent.R = q; + } else { + tree._ = q; + } + q.U = parent; + p.U = q; + p.L = q.R; + if (p.L) p.L.U = p; + q.R = p; + } + function d3_geom_voronoiRedBlackFirst(node) { + while (node.L) node = node.L; + return node; + } + function d3_geom_voronoi(sites, bbox) { + var site = sites.sort(d3_geom_voronoiVertexOrder).pop(), x0, y0, circle; + d3_geom_voronoiEdges = []; + d3_geom_voronoiCells = new Array(sites.length); + d3_geom_voronoiBeaches = new d3_geom_voronoiRedBlackTree(); + d3_geom_voronoiCircles = new d3_geom_voronoiRedBlackTree(); + while (true) { + circle = d3_geom_voronoiFirstCircle; + if (site && (!circle || site.y < circle.y || site.y === circle.y && site.x < circle.x)) { + if (site.x !== x0 || site.y !== y0) { + d3_geom_voronoiCells[site.i] = new d3_geom_voronoiCell(site); + d3_geom_voronoiAddBeach(site); + x0 = site.x, y0 = site.y; + } + site = sites.pop(); + } else if (circle) { + d3_geom_voronoiRemoveBeach(circle.arc); + } else { + break; + } + } + if (bbox) d3_geom_voronoiClipEdges(bbox), d3_geom_voronoiCloseCells(bbox); + var diagram = { + cells: d3_geom_voronoiCells, + edges: d3_geom_voronoiEdges + }; + d3_geom_voronoiBeaches = d3_geom_voronoiCircles = d3_geom_voronoiEdges = d3_geom_voronoiCells = null; + return diagram; + } + function d3_geom_voronoiVertexOrder(a, b) { + return b.y - a.y || b.x - a.x; + } + d3.geom.voronoi = function(points) { + var x = d3_geom_pointX, y = d3_geom_pointY, fx = x, fy = y, clipExtent = d3_geom_voronoiClipExtent; + if (points) return voronoi(points); + function voronoi(data) { + var polygons = new Array(data.length), x0 = clipExtent[0][0], y0 = clipExtent[0][1], x1 = clipExtent[1][0], y1 = clipExtent[1][1]; + d3_geom_voronoi(sites(data), clipExtent).cells.forEach(function(cell, i) { + var edges = cell.edges, site = cell.site, polygon = polygons[i] = edges.length ? edges.map(function(e) { + var s = e.start(); + return [ s.x, s.y ]; + }) : site.x >= x0 && site.x <= x1 && site.y >= y0 && site.y <= y1 ? [ [ x0, y1 ], [ x1, y1 ], [ x1, y0 ], [ x0, y0 ] ] : []; + polygon.point = data[i]; + }); + return polygons; + } + function sites(data) { + return data.map(function(d, i) { + return { + x: Math.round(fx(d, i) / ε) * ε, + y: Math.round(fy(d, i) / ε) * ε, + i: i + }; + }); + } + voronoi.links = function(data) { + return d3_geom_voronoi(sites(data)).edges.filter(function(edge) { + return edge.l && edge.r; + }).map(function(edge) { + return { + source: data[edge.l.i], + target: data[edge.r.i] + }; + }); + }; + voronoi.triangles = function(data) { + var triangles = []; + d3_geom_voronoi(sites(data)).cells.forEach(function(cell, i) { + var site = cell.site, edges = cell.edges.sort(d3_geom_voronoiHalfEdgeOrder), j = -1, m = edges.length, e0, s0, e1 = edges[m - 1].edge, s1 = e1.l === site ? e1.r : e1.l; + while (++j < m) { + e0 = e1; + s0 = s1; + e1 = edges[j].edge; + s1 = e1.l === site ? 
e1.r : e1.l; + if (i < s0.i && i < s1.i && d3_geom_voronoiTriangleArea(site, s0, s1) < 0) { + triangles.push([ data[i], data[s0.i], data[s1.i] ]); + } + } + }); + return triangles; + }; + voronoi.x = function(_) { + return arguments.length ? (fx = d3_functor(x = _), voronoi) : x; + }; + voronoi.y = function(_) { + return arguments.length ? (fy = d3_functor(y = _), voronoi) : y; + }; + voronoi.clipExtent = function(_) { + if (!arguments.length) return clipExtent === d3_geom_voronoiClipExtent ? null : clipExtent; + clipExtent = _ == null ? d3_geom_voronoiClipExtent : _; + return voronoi; + }; + voronoi.size = function(_) { + if (!arguments.length) return clipExtent === d3_geom_voronoiClipExtent ? null : clipExtent && clipExtent[1]; + return voronoi.clipExtent(_ && [ [ 0, 0 ], _ ]); + }; + return voronoi; + }; + var d3_geom_voronoiClipExtent = [ [ -1e6, -1e6 ], [ 1e6, 1e6 ] ]; + function d3_geom_voronoiTriangleArea(a, b, c) { + return (a.x - c.x) * (b.y - a.y) - (a.x - b.x) * (c.y - a.y); + } + d3.geom.delaunay = function(vertices) { + return d3.geom.voronoi().triangles(vertices); + }; + d3.geom.quadtree = function(points, x1, y1, x2, y2) { + var x = d3_geom_pointX, y = d3_geom_pointY, compat; + if (compat = arguments.length) { + x = d3_geom_quadtreeCompatX; + y = d3_geom_quadtreeCompatY; + if (compat === 3) { + y2 = y1; + x2 = x1; + y1 = x1 = 0; + } + return quadtree(points); + } + function quadtree(data) { + var d, fx = d3_functor(x), fy = d3_functor(y), xs, ys, i, n, x1_, y1_, x2_, y2_; + if (x1 != null) { + x1_ = x1, y1_ = y1, x2_ = x2, y2_ = y2; + } else { + x2_ = y2_ = -(x1_ = y1_ = Infinity); + xs = [], ys = []; + n = data.length; + if (compat) for (i = 0; i < n; ++i) { + d = data[i]; + if (d.x < x1_) x1_ = d.x; + if (d.y < y1_) y1_ = d.y; + if (d.x > x2_) x2_ = d.x; + if (d.y > y2_) y2_ = d.y; + xs.push(d.x); + ys.push(d.y); + } else for (i = 0; i < n; ++i) { + var x_ = +fx(d = data[i], i), y_ = +fy(d, i); + if (x_ < x1_) x1_ = x_; + if (y_ < y1_) y1_ = y_; + if (x_ > x2_) x2_ = x_; + if (y_ > y2_) y2_ = y_; + xs.push(x_); + ys.push(y_); + } + } + var dx = x2_ - x1_, dy = y2_ - y1_; + if (dx > dy) y2_ = y1_ + dx; else x2_ = x1_ + dy; + function insert(n, d, x, y, x1, y1, x2, y2) { + if (isNaN(x) || isNaN(y)) return; + if (n.leaf) { + var nx = n.x, ny = n.y; + if (nx != null) { + if (abs(nx - x) + abs(ny - y) < .01) { + insertChild(n, d, x, y, x1, y1, x2, y2); + } else { + var nPoint = n.point; + n.x = n.y = n.point = null; + insertChild(n, nPoint, nx, ny, x1, y1, x2, y2); + insertChild(n, d, x, y, x1, y1, x2, y2); + } + } else { + n.x = x, n.y = y, n.point = d; + } + } else { + insertChild(n, d, x, y, x1, y1, x2, y2); + } + } + function insertChild(n, d, x, y, x1, y1, x2, y2) { + var sx = (x1 + x2) * .5, sy = (y1 + y2) * .5, right = x >= sx, bottom = y >= sy, i = (bottom << 1) + right; + n.leaf = false; + n = n.nodes[i] || (n.nodes[i] = d3_geom_quadtreeNode()); + if (right) x1 = sx; else x2 = sx; + if (bottom) y1 = sy; else y2 = sy; + insert(n, d, x, y, x1, y1, x2, y2); + } + var root = d3_geom_quadtreeNode(); + root.add = function(d) { + insert(root, d, +fx(d, ++i), +fy(d, i), x1_, y1_, x2_, y2_); + }; + root.visit = function(f) { + d3_geom_quadtreeVisit(f, root, x1_, y1_, x2_, y2_); + }; + i = -1; + if (x1 == null) { + while (++i < n) { + insert(root, data[i], xs[i], ys[i], x1_, y1_, x2_, y2_); + } + --i; + } else data.forEach(root.add); + xs = ys = data = d = null; + return root; + } + quadtree.x = function(_) { + return arguments.length ? 
(x = _, quadtree) : x; + }; + quadtree.y = function(_) { + return arguments.length ? (y = _, quadtree) : y; + }; + quadtree.extent = function(_) { + if (!arguments.length) return x1 == null ? null : [ [ x1, y1 ], [ x2, y2 ] ]; + if (_ == null) x1 = y1 = x2 = y2 = null; else x1 = +_[0][0], y1 = +_[0][1], x2 = +_[1][0], + y2 = +_[1][1]; + return quadtree; + }; + quadtree.size = function(_) { + if (!arguments.length) return x1 == null ? null : [ x2 - x1, y2 - y1 ]; + if (_ == null) x1 = y1 = x2 = y2 = null; else x1 = y1 = 0, x2 = +_[0], y2 = +_[1]; + return quadtree; + }; + return quadtree; + }; + function d3_geom_quadtreeCompatX(d) { + return d.x; + } + function d3_geom_quadtreeCompatY(d) { + return d.y; + } + function d3_geom_quadtreeNode() { + return { + leaf: true, + nodes: [], + point: null, + x: null, + y: null + }; + } + function d3_geom_quadtreeVisit(f, node, x1, y1, x2, y2) { + if (!f(node, x1, y1, x2, y2)) { + var sx = (x1 + x2) * .5, sy = (y1 + y2) * .5, children = node.nodes; + if (children[0]) d3_geom_quadtreeVisit(f, children[0], x1, y1, sx, sy); + if (children[1]) d3_geom_quadtreeVisit(f, children[1], sx, y1, x2, sy); + if (children[2]) d3_geom_quadtreeVisit(f, children[2], x1, sy, sx, y2); + if (children[3]) d3_geom_quadtreeVisit(f, children[3], sx, sy, x2, y2); + } + } + d3.interpolateRgb = d3_interpolateRgb; + function d3_interpolateRgb(a, b) { + a = d3.rgb(a); + b = d3.rgb(b); + var ar = a.r, ag = a.g, ab = a.b, br = b.r - ar, bg = b.g - ag, bb = b.b - ab; + return function(t) { + return "#" + d3_rgb_hex(Math.round(ar + br * t)) + d3_rgb_hex(Math.round(ag + bg * t)) + d3_rgb_hex(Math.round(ab + bb * t)); + }; + } + d3.interpolateObject = d3_interpolateObject; + function d3_interpolateObject(a, b) { + var i = {}, c = {}, k; + for (k in a) { + if (k in b) { + i[k] = d3_interpolate(a[k], b[k]); + } else { + c[k] = a[k]; + } + } + for (k in b) { + if (!(k in a)) { + c[k] = b[k]; + } + } + return function(t) { + for (k in i) c[k] = i[k](t); + return c; + }; + } + d3.interpolateNumber = d3_interpolateNumber; + function d3_interpolateNumber(a, b) { + b -= a = +a; + return function(t) { + return a + b * t; + }; + } + d3.interpolateString = d3_interpolateString; + function d3_interpolateString(a, b) { + var bi = d3_interpolate_numberA.lastIndex = d3_interpolate_numberB.lastIndex = 0, am, bm, bs, i = -1, s = [], q = []; + a = a + "", b = b + ""; + while ((am = d3_interpolate_numberA.exec(a)) && (bm = d3_interpolate_numberB.exec(b))) { + if ((bs = bm.index) > bi) { + bs = b.substring(bi, bs); + if (s[i]) s[i] += bs; else s[++i] = bs; + } + if ((am = am[0]) === (bm = bm[0])) { + if (s[i]) s[i] += bm; else s[++i] = bm; + } else { + s[++i] = null; + q.push({ + i: i, + x: d3_interpolateNumber(am, bm) + }); + } + bi = d3_interpolate_numberB.lastIndex; + } + if (bi < b.length) { + bs = b.substring(bi); + if (s[i]) s[i] += bs; else s[++i] = bs; + } + return s.length < 2 ? q[0] ? (b = q[0].x, function(t) { + return b(t) + ""; + }) : function() { + return b; + } : (b = q.length, function(t) { + for (var i = 0, o; i < b; ++i) s[(o = q[i]).i] = o.x(t); + return s.join(""); + }); + } + var d3_interpolate_numberA = /[-+]?(?:\d+\.?\d*|\.?\d+)(?:[eE][-+]?\d+)?/g, d3_interpolate_numberB = new RegExp(d3_interpolate_numberA.source, "g"); + d3.interpolate = d3_interpolate; + function d3_interpolate(a, b) { + var i = d3.interpolators.length, f; + while (--i >= 0 && !(f = d3.interpolators[i](a, b))) ; + return f; + } + d3.interpolators = [ function(a, b) { + var t = typeof b; + return (t === "string" ? 
d3_rgb_names.has(b) || /^(#|rgb\(|hsl\()/.test(b) ? d3_interpolateRgb : d3_interpolateString : b instanceof d3_Color ? d3_interpolateRgb : Array.isArray(b) ? d3_interpolateArray : t === "object" && isNaN(b) ? d3_interpolateObject : d3_interpolateNumber)(a, b); + } ]; + d3.interpolateArray = d3_interpolateArray; + function d3_interpolateArray(a, b) { + var x = [], c = [], na = a.length, nb = b.length, n0 = Math.min(a.length, b.length), i; + for (i = 0; i < n0; ++i) x.push(d3_interpolate(a[i], b[i])); + for (;i < na; ++i) c[i] = a[i]; + for (;i < nb; ++i) c[i] = b[i]; + return function(t) { + for (i = 0; i < n0; ++i) c[i] = x[i](t); + return c; + }; + } + var d3_ease_default = function() { + return d3_identity; + }; + var d3_ease = d3.map({ + linear: d3_ease_default, + poly: d3_ease_poly, + quad: function() { + return d3_ease_quad; + }, + cubic: function() { + return d3_ease_cubic; + }, + sin: function() { + return d3_ease_sin; + }, + exp: function() { + return d3_ease_exp; + }, + circle: function() { + return d3_ease_circle; + }, + elastic: d3_ease_elastic, + back: d3_ease_back, + bounce: function() { + return d3_ease_bounce; + } + }); + var d3_ease_mode = d3.map({ + "in": d3_identity, + out: d3_ease_reverse, + "in-out": d3_ease_reflect, + "out-in": function(f) { + return d3_ease_reflect(d3_ease_reverse(f)); + } + }); + d3.ease = function(name) { + var i = name.indexOf("-"), t = i >= 0 ? name.substring(0, i) : name, m = i >= 0 ? name.substring(i + 1) : "in"; + t = d3_ease.get(t) || d3_ease_default; + m = d3_ease_mode.get(m) || d3_identity; + return d3_ease_clamp(m(t.apply(null, d3_arraySlice.call(arguments, 1)))); + }; + function d3_ease_clamp(f) { + return function(t) { + return t <= 0 ? 0 : t >= 1 ? 1 : f(t); + }; + } + function d3_ease_reverse(f) { + return function(t) { + return 1 - f(1 - t); + }; + } + function d3_ease_reflect(f) { + return function(t) { + return .5 * (t < .5 ? f(2 * t) : 2 - f(2 - 2 * t)); + }; + } + function d3_ease_quad(t) { + return t * t; + } + function d3_ease_cubic(t) { + return t * t * t; + } + function d3_ease_cubicInOut(t) { + if (t <= 0) return 0; + if (t >= 1) return 1; + var t2 = t * t, t3 = t2 * t; + return 4 * (t < .5 ? t3 : 3 * (t - t2) + t3 - .75); + } + function d3_ease_poly(e) { + return function(t) { + return Math.pow(t, e); + }; + } + function d3_ease_sin(t) { + return 1 - Math.cos(t * halfπ); + } + function d3_ease_exp(t) { + return Math.pow(2, 10 * (t - 1)); + } + function d3_ease_circle(t) { + return 1 - Math.sqrt(1 - t * t); + } + function d3_ease_elastic(a, p) { + var s; + if (arguments.length < 2) p = .45; + if (arguments.length) s = p / τ * Math.asin(1 / a); else a = 1, s = p / 4; + return function(t) { + return 1 + a * Math.pow(2, -10 * t) * Math.sin((t - s) * τ / p); + }; + } + function d3_ease_back(s) { + if (!s) s = 1.70158; + return function(t) { + return t * t * ((s + 1) * t - s); + }; + } + function d3_ease_bounce(t) { + return t < 1 / 2.75 ? 7.5625 * t * t : t < 2 / 2.75 ? 7.5625 * (t -= 1.5 / 2.75) * t + .75 : t < 2.5 / 2.75 ? 7.5625 * (t -= 2.25 / 2.75) * t + .9375 : 7.5625 * (t -= 2.625 / 2.75) * t + .984375; + } + d3.interpolateHcl = d3_interpolateHcl; + function d3_interpolateHcl(a, b) { + a = d3.hcl(a); + b = d3.hcl(b); + var ah = a.h, ac = a.c, al = a.l, bh = b.h - ah, bc = b.c - ac, bl = b.l - al; + if (isNaN(bc)) bc = 0, ac = isNaN(ac) ? b.c : ac; + if (isNaN(bh)) bh = 0, ah = isNaN(ah) ? 
b.h : ah; else if (bh > 180) bh -= 360; else if (bh < -180) bh += 360; + return function(t) { + return d3_hcl_lab(ah + bh * t, ac + bc * t, al + bl * t) + ""; + }; + } + d3.interpolateHsl = d3_interpolateHsl; + function d3_interpolateHsl(a, b) { + a = d3.hsl(a); + b = d3.hsl(b); + var ah = a.h, as = a.s, al = a.l, bh = b.h - ah, bs = b.s - as, bl = b.l - al; + if (isNaN(bs)) bs = 0, as = isNaN(as) ? b.s : as; + if (isNaN(bh)) bh = 0, ah = isNaN(ah) ? b.h : ah; else if (bh > 180) bh -= 360; else if (bh < -180) bh += 360; + return function(t) { + return d3_hsl_rgb(ah + bh * t, as + bs * t, al + bl * t) + ""; + }; + } + d3.interpolateLab = d3_interpolateLab; + function d3_interpolateLab(a, b) { + a = d3.lab(a); + b = d3.lab(b); + var al = a.l, aa = a.a, ab = a.b, bl = b.l - al, ba = b.a - aa, bb = b.b - ab; + return function(t) { + return d3_lab_rgb(al + bl * t, aa + ba * t, ab + bb * t) + ""; + }; + } + d3.interpolateRound = d3_interpolateRound; + function d3_interpolateRound(a, b) { + b -= a; + return function(t) { + return Math.round(a + b * t); + }; + } + d3.transform = function(string) { + var g = d3_document.createElementNS(d3.ns.prefix.svg, "g"); + return (d3.transform = function(string) { + if (string != null) { + g.setAttribute("transform", string); + var t = g.transform.baseVal.consolidate(); + } + return new d3_transform(t ? t.matrix : d3_transformIdentity); + })(string); + }; + function d3_transform(m) { + var r0 = [ m.a, m.b ], r1 = [ m.c, m.d ], kx = d3_transformNormalize(r0), kz = d3_transformDot(r0, r1), ky = d3_transformNormalize(d3_transformCombine(r1, r0, -kz)) || 0; + if (r0[0] * r1[1] < r1[0] * r0[1]) { + r0[0] *= -1; + r0[1] *= -1; + kx *= -1; + kz *= -1; + } + this.rotate = (kx ? Math.atan2(r0[1], r0[0]) : Math.atan2(-r1[0], r1[1])) * d3_degrees; + this.translate = [ m.e, m.f ]; + this.scale = [ kx, ky ]; + this.skew = ky ? 
Math.atan2(kz, ky) * d3_degrees : 0; + } + d3_transform.prototype.toString = function() { + return "translate(" + this.translate + ")rotate(" + this.rotate + ")skewX(" + this.skew + ")scale(" + this.scale + ")"; + }; + function d3_transformDot(a, b) { + return a[0] * b[0] + a[1] * b[1]; + } + function d3_transformNormalize(a) { + var k = Math.sqrt(d3_transformDot(a, a)); + if (k) { + a[0] /= k; + a[1] /= k; + } + return k; + } + function d3_transformCombine(a, b, k) { + a[0] += k * b[0]; + a[1] += k * b[1]; + return a; + } + var d3_transformIdentity = { + a: 1, + b: 0, + c: 0, + d: 1, + e: 0, + f: 0 + }; + d3.interpolateTransform = d3_interpolateTransform; + function d3_interpolateTransform(a, b) { + var s = [], q = [], n, A = d3.transform(a), B = d3.transform(b), ta = A.translate, tb = B.translate, ra = A.rotate, rb = B.rotate, wa = A.skew, wb = B.skew, ka = A.scale, kb = B.scale; + if (ta[0] != tb[0] || ta[1] != tb[1]) { + s.push("translate(", null, ",", null, ")"); + q.push({ + i: 1, + x: d3_interpolateNumber(ta[0], tb[0]) + }, { + i: 3, + x: d3_interpolateNumber(ta[1], tb[1]) + }); + } else if (tb[0] || tb[1]) { + s.push("translate(" + tb + ")"); + } else { + s.push(""); + } + if (ra != rb) { + if (ra - rb > 180) rb += 360; else if (rb - ra > 180) ra += 360; + q.push({ + i: s.push(s.pop() + "rotate(", null, ")") - 2, + x: d3_interpolateNumber(ra, rb) + }); + } else if (rb) { + s.push(s.pop() + "rotate(" + rb + ")"); + } + if (wa != wb) { + q.push({ + i: s.push(s.pop() + "skewX(", null, ")") - 2, + x: d3_interpolateNumber(wa, wb) + }); + } else if (wb) { + s.push(s.pop() + "skewX(" + wb + ")"); + } + if (ka[0] != kb[0] || ka[1] != kb[1]) { + n = s.push(s.pop() + "scale(", null, ",", null, ")"); + q.push({ + i: n - 4, + x: d3_interpolateNumber(ka[0], kb[0]) + }, { + i: n - 2, + x: d3_interpolateNumber(ka[1], kb[1]) + }); + } else if (kb[0] != 1 || kb[1] != 1) { + s.push(s.pop() + "scale(" + kb + ")"); + } + n = q.length; + return function(t) { + var i = -1, o; + while (++i < n) s[(o = q[i]).i] = o.x(t); + return s.join(""); + }; + } + function d3_uninterpolateNumber(a, b) { + b = b - (a = +a) ? 1 / (b - a) : 0; + return function(x) { + return (x - a) * b; + }; + } + function d3_uninterpolateClamp(a, b) { + b = b - (a = +a) ? 
1 / (b - a) : 0; + return function(x) { + return Math.max(0, Math.min(1, (x - a) * b)); + }; + } + d3.layout = {}; + d3.layout.bundle = function() { + return function(links) { + var paths = [], i = -1, n = links.length; + while (++i < n) paths.push(d3_layout_bundlePath(links[i])); + return paths; + }; + }; + function d3_layout_bundlePath(link) { + var start = link.source, end = link.target, lca = d3_layout_bundleLeastCommonAncestor(start, end), points = [ start ]; + while (start !== lca) { + start = start.parent; + points.push(start); + } + var k = points.length; + while (end !== lca) { + points.splice(k, 0, end); + end = end.parent; + } + return points; + } + function d3_layout_bundleAncestors(node) { + var ancestors = [], parent = node.parent; + while (parent != null) { + ancestors.push(node); + node = parent; + parent = parent.parent; + } + ancestors.push(node); + return ancestors; + } + function d3_layout_bundleLeastCommonAncestor(a, b) { + if (a === b) return a; + var aNodes = d3_layout_bundleAncestors(a), bNodes = d3_layout_bundleAncestors(b), aNode = aNodes.pop(), bNode = bNodes.pop(), sharedNode = null; + while (aNode === bNode) { + sharedNode = aNode; + aNode = aNodes.pop(); + bNode = bNodes.pop(); + } + return sharedNode; + } + d3.layout.chord = function() { + var chord = {}, chords, groups, matrix, n, padding = 0, sortGroups, sortSubgroups, sortChords; + function relayout() { + var subgroups = {}, groupSums = [], groupIndex = d3.range(n), subgroupIndex = [], k, x, x0, i, j; + chords = []; + groups = []; + k = 0, i = -1; + while (++i < n) { + x = 0, j = -1; + while (++j < n) { + x += matrix[i][j]; + } + groupSums.push(x); + subgroupIndex.push(d3.range(n)); + k += x; + } + if (sortGroups) { + groupIndex.sort(function(a, b) { + return sortGroups(groupSums[a], groupSums[b]); + }); + } + if (sortSubgroups) { + subgroupIndex.forEach(function(d, i) { + d.sort(function(a, b) { + return sortSubgroups(matrix[i][a], matrix[i][b]); + }); + }); + } + k = (τ - padding * n) / k; + x = 0, i = -1; + while (++i < n) { + x0 = x, j = -1; + while (++j < n) { + var di = groupIndex[i], dj = subgroupIndex[di][j], v = matrix[di][dj], a0 = x, a1 = x += v * k; + subgroups[di + "-" + dj] = { + index: di, + subindex: dj, + startAngle: a0, + endAngle: a1, + value: v + }; + } + groups[di] = { + index: di, + startAngle: x0, + endAngle: x, + value: (x - x0) / k + }; + x += padding; + } + i = -1; + while (++i < n) { + j = i - 1; + while (++j < n) { + var source = subgroups[i + "-" + j], target = subgroups[j + "-" + i]; + if (source.value || target.value) { + chords.push(source.value < target.value ? 
{ + source: target, + target: source + } : { + source: source, + target: target + }); + } + } + } + if (sortChords) resort(); + } + function resort() { + chords.sort(function(a, b) { + return sortChords((a.source.value + a.target.value) / 2, (b.source.value + b.target.value) / 2); + }); + } + chord.matrix = function(x) { + if (!arguments.length) return matrix; + n = (matrix = x) && matrix.length; + chords = groups = null; + return chord; + }; + chord.padding = function(x) { + if (!arguments.length) return padding; + padding = x; + chords = groups = null; + return chord; + }; + chord.sortGroups = function(x) { + if (!arguments.length) return sortGroups; + sortGroups = x; + chords = groups = null; + return chord; + }; + chord.sortSubgroups = function(x) { + if (!arguments.length) return sortSubgroups; + sortSubgroups = x; + chords = null; + return chord; + }; + chord.sortChords = function(x) { + if (!arguments.length) return sortChords; + sortChords = x; + if (chords) resort(); + return chord; + }; + chord.chords = function() { + if (!chords) relayout(); + return chords; + }; + chord.groups = function() { + if (!groups) relayout(); + return groups; + }; + return chord; + }; + d3.layout.force = function() { + var force = {}, event = d3.dispatch("start", "tick", "end"), size = [ 1, 1 ], drag, alpha, friction = .9, linkDistance = d3_layout_forceLinkDistance, linkStrength = d3_layout_forceLinkStrength, charge = -30, chargeDistance2 = d3_layout_forceChargeDistance2, gravity = .1, theta2 = .64, nodes = [], links = [], distances, strengths, charges; + function repulse(node) { + return function(quad, x1, _, x2) { + if (quad.point !== node) { + var dx = quad.cx - node.x, dy = quad.cy - node.y, dw = x2 - x1, dn = dx * dx + dy * dy; + if (dw * dw / theta2 < dn) { + if (dn < chargeDistance2) { + var k = quad.charge / dn; + node.px -= dx * k; + node.py -= dy * k; + } + return true; + } + if (quad.point && dn && dn < chargeDistance2) { + var k = quad.pointCharge / dn; + node.px -= dx * k; + node.py -= dy * k; + } + } + return !quad.charge; + }; + } + force.tick = function() { + if ((alpha *= .99) < .005) { + event.end({ + type: "end", + alpha: alpha = 0 + }); + return true; + } + var n = nodes.length, m = links.length, q, i, o, s, t, l, k, x, y; + for (i = 0; i < m; ++i) { + o = links[i]; + s = o.source; + t = o.target; + x = t.x - s.x; + y = t.y - s.y; + if (l = x * x + y * y) { + l = alpha * strengths[i] * ((l = Math.sqrt(l)) - distances[i]) / l; + x *= l; + y *= l; + t.x -= x * (k = s.weight / (t.weight + s.weight)); + t.y -= y * k; + s.x += x * (k = 1 - k); + s.y += y * k; + } + } + if (k = alpha * gravity) { + x = size[0] / 2; + y = size[1] / 2; + i = -1; + if (k) while (++i < n) { + o = nodes[i]; + o.x += (x - o.x) * k; + o.y += (y - o.y) * k; + } + } + if (charge) { + d3_layout_forceAccumulate(q = d3.geom.quadtree(nodes), alpha, charges); + i = -1; + while (++i < n) { + if (!(o = nodes[i]).fixed) { + q.visit(repulse(o)); + } + } + } + i = -1; + while (++i < n) { + o = nodes[i]; + if (o.fixed) { + o.x = o.px; + o.y = o.py; + } else { + o.x -= (o.px - (o.px = o.x)) * friction; + o.y -= (o.py - (o.py = o.y)) * friction; + } + } + event.tick({ + type: "tick", + alpha: alpha + }); + }; + force.nodes = function(x) { + if (!arguments.length) return nodes; + nodes = x; + return force; + }; + force.links = function(x) { + if (!arguments.length) return links; + links = x; + return force; + }; + force.size = function(x) { + if (!arguments.length) return size; + size = x; + return force; + }; + 
force.linkDistance = function(x) { + if (!arguments.length) return linkDistance; + linkDistance = typeof x === "function" ? x : +x; + return force; + }; + force.distance = force.linkDistance; + force.linkStrength = function(x) { + if (!arguments.length) return linkStrength; + linkStrength = typeof x === "function" ? x : +x; + return force; + }; + force.friction = function(x) { + if (!arguments.length) return friction; + friction = +x; + return force; + }; + force.charge = function(x) { + if (!arguments.length) return charge; + charge = typeof x === "function" ? x : +x; + return force; + }; + force.chargeDistance = function(x) { + if (!arguments.length) return Math.sqrt(chargeDistance2); + chargeDistance2 = x * x; + return force; + }; + force.gravity = function(x) { + if (!arguments.length) return gravity; + gravity = +x; + return force; + }; + force.theta = function(x) { + if (!arguments.length) return Math.sqrt(theta2); + theta2 = x * x; + return force; + }; + force.alpha = function(x) { + if (!arguments.length) return alpha; + x = +x; + if (alpha) { + if (x > 0) alpha = x; else alpha = 0; + } else if (x > 0) { + event.start({ + type: "start", + alpha: alpha = x + }); + d3.timer(force.tick); + } + return force; + }; + force.start = function() { + var i, n = nodes.length, m = links.length, w = size[0], h = size[1], neighbors, o; + for (i = 0; i < n; ++i) { + (o = nodes[i]).index = i; + o.weight = 0; + } + for (i = 0; i < m; ++i) { + o = links[i]; + if (typeof o.source == "number") o.source = nodes[o.source]; + if (typeof o.target == "number") o.target = nodes[o.target]; + ++o.source.weight; + ++o.target.weight; + } + for (i = 0; i < n; ++i) { + o = nodes[i]; + if (isNaN(o.x)) o.x = position("x", w); + if (isNaN(o.y)) o.y = position("y", h); + if (isNaN(o.px)) o.px = o.x; + if (isNaN(o.py)) o.py = o.y; + } + distances = []; + if (typeof linkDistance === "function") for (i = 0; i < m; ++i) distances[i] = +linkDistance.call(this, links[i], i); else for (i = 0; i < m; ++i) distances[i] = linkDistance; + strengths = []; + if (typeof linkStrength === "function") for (i = 0; i < m; ++i) strengths[i] = +linkStrength.call(this, links[i], i); else for (i = 0; i < m; ++i) strengths[i] = linkStrength; + charges = []; + if (typeof charge === "function") for (i = 0; i < n; ++i) charges[i] = +charge.call(this, nodes[i], i); else for (i = 0; i < n; ++i) charges[i] = charge; + function position(dimension, size) { + if (!neighbors) { + neighbors = new Array(n); + for (j = 0; j < n; ++j) { + neighbors[j] = []; + } + for (j = 0; j < m; ++j) { + var o = links[j]; + neighbors[o.source.index].push(o.target); + neighbors[o.target.index].push(o.source); + } + } + var candidates = neighbors[i], j = -1, m = candidates.length, x; + while (++j < m) if (!isNaN(x = candidates[j][dimension])) return x; + return Math.random() * size; + } + return force.resume(); + }; + force.resume = function() { + return force.alpha(.1); + }; + force.stop = function() { + return force.alpha(0); + }; + force.drag = function() { + if (!drag) drag = d3.behavior.drag().origin(d3_identity).on("dragstart.force", d3_layout_forceDragstart).on("drag.force", dragmove).on("dragend.force", d3_layout_forceDragend); + if (!arguments.length) return drag; + this.on("mouseover.force", d3_layout_forceMouseover).on("mouseout.force", d3_layout_forceMouseout).call(drag); + }; + function dragmove(d) { + d.px = d3.event.x, d.py = d3.event.y; + force.resume(); + } + return d3.rebind(force, event, "on"); + }; + function d3_layout_forceDragstart(d) { + d.fixed |= 
2; + } + function d3_layout_forceDragend(d) { + d.fixed &= ~6; + } + function d3_layout_forceMouseover(d) { + d.fixed |= 4; + d.px = d.x, d.py = d.y; + } + function d3_layout_forceMouseout(d) { + d.fixed &= ~4; + } + function d3_layout_forceAccumulate(quad, alpha, charges) { + var cx = 0, cy = 0; + quad.charge = 0; + if (!quad.leaf) { + var nodes = quad.nodes, n = nodes.length, i = -1, c; + while (++i < n) { + c = nodes[i]; + if (c == null) continue; + d3_layout_forceAccumulate(c, alpha, charges); + quad.charge += c.charge; + cx += c.charge * c.cx; + cy += c.charge * c.cy; + } + } + if (quad.point) { + if (!quad.leaf) { + quad.point.x += Math.random() - .5; + quad.point.y += Math.random() - .5; + } + var k = alpha * charges[quad.point.index]; + quad.charge += quad.pointCharge = k; + cx += k * quad.point.x; + cy += k * quad.point.y; + } + quad.cx = cx / quad.charge; + quad.cy = cy / quad.charge; + } + var d3_layout_forceLinkDistance = 20, d3_layout_forceLinkStrength = 1, d3_layout_forceChargeDistance2 = Infinity; + d3.layout.hierarchy = function() { + var sort = d3_layout_hierarchySort, children = d3_layout_hierarchyChildren, value = d3_layout_hierarchyValue; + function hierarchy(root) { + var stack = [ root ], nodes = [], node; + root.depth = 0; + while ((node = stack.pop()) != null) { + nodes.push(node); + if ((childs = children.call(hierarchy, node, node.depth)) && (n = childs.length)) { + var n, childs, child; + while (--n >= 0) { + stack.push(child = childs[n]); + child.parent = node; + child.depth = node.depth + 1; + } + if (value) node.value = 0; + node.children = childs; + } else { + if (value) node.value = +value.call(hierarchy, node, node.depth) || 0; + delete node.children; + } + } + d3_layout_hierarchyVisitAfter(root, function(node) { + var childs, parent; + if (sort && (childs = node.children)) childs.sort(sort); + if (value && (parent = node.parent)) parent.value += node.value; + }); + return nodes; + } + hierarchy.sort = function(x) { + if (!arguments.length) return sort; + sort = x; + return hierarchy; + }; + hierarchy.children = function(x) { + if (!arguments.length) return children; + children = x; + return hierarchy; + }; + hierarchy.value = function(x) { + if (!arguments.length) return value; + value = x; + return hierarchy; + }; + hierarchy.revalue = function(root) { + if (value) { + d3_layout_hierarchyVisitBefore(root, function(node) { + if (node.children) node.value = 0; + }); + d3_layout_hierarchyVisitAfter(root, function(node) { + var parent; + if (!node.children) node.value = +value.call(hierarchy, node, node.depth) || 0; + if (parent = node.parent) parent.value += node.value; + }); + } + return root; + }; + return hierarchy; + }; + function d3_layout_hierarchyRebind(object, hierarchy) { + d3.rebind(object, hierarchy, "sort", "children", "value"); + object.nodes = object; + object.links = d3_layout_hierarchyLinks; + return object; + } + function d3_layout_hierarchyVisitBefore(node, callback) { + var nodes = [ node ]; + while ((node = nodes.pop()) != null) { + callback(node); + if ((children = node.children) && (n = children.length)) { + var n, children; + while (--n >= 0) nodes.push(children[n]); + } + } + } + function d3_layout_hierarchyVisitAfter(node, callback) { + var nodes = [ node ], nodes2 = []; + while ((node = nodes.pop()) != null) { + nodes2.push(node); + if ((children = node.children) && (n = children.length)) { + var i = -1, n, children; + while (++i < n) nodes.push(children[i]); + } + } + while ((node = nodes2.pop()) != null) { + callback(node); + } + 
} + function d3_layout_hierarchyChildren(d) { + return d.children; + } + function d3_layout_hierarchyValue(d) { + return d.value; + } + function d3_layout_hierarchySort(a, b) { + return b.value - a.value; + } + function d3_layout_hierarchyLinks(nodes) { + return d3.merge(nodes.map(function(parent) { + return (parent.children || []).map(function(child) { + return { + source: parent, + target: child + }; + }); + })); + } + d3.layout.partition = function() { + var hierarchy = d3.layout.hierarchy(), size = [ 1, 1 ]; + function position(node, x, dx, dy) { + var children = node.children; + node.x = x; + node.y = node.depth * dy; + node.dx = dx; + node.dy = dy; + if (children && (n = children.length)) { + var i = -1, n, c, d; + dx = node.value ? dx / node.value : 0; + while (++i < n) { + position(c = children[i], x, d = c.value * dx, dy); + x += d; + } + } + } + function depth(node) { + var children = node.children, d = 0; + if (children && (n = children.length)) { + var i = -1, n; + while (++i < n) d = Math.max(d, depth(children[i])); + } + return 1 + d; + } + function partition(d, i) { + var nodes = hierarchy.call(this, d, i); + position(nodes[0], 0, size[0], size[1] / depth(nodes[0])); + return nodes; + } + partition.size = function(x) { + if (!arguments.length) return size; + size = x; + return partition; + }; + return d3_layout_hierarchyRebind(partition, hierarchy); + }; + d3.layout.pie = function() { + var value = Number, sort = d3_layout_pieSortByValue, startAngle = 0, endAngle = τ; + function pie(data) { + var values = data.map(function(d, i) { + return +value.call(pie, d, i); + }); + var a = +(typeof startAngle === "function" ? startAngle.apply(this, arguments) : startAngle); + var k = ((typeof endAngle === "function" ? endAngle.apply(this, arguments) : endAngle) - a) / d3.sum(values); + var index = d3.range(data.length); + if (sort != null) index.sort(sort === d3_layout_pieSortByValue ? 
function(i, j) { + return values[j] - values[i]; + } : function(i, j) { + return sort(data[i], data[j]); + }); + var arcs = []; + index.forEach(function(i) { + var d; + arcs[i] = { + data: data[i], + value: d = values[i], + startAngle: a, + endAngle: a += d * k + }; + }); + return arcs; + } + pie.value = function(x) { + if (!arguments.length) return value; + value = x; + return pie; + }; + pie.sort = function(x) { + if (!arguments.length) return sort; + sort = x; + return pie; + }; + pie.startAngle = function(x) { + if (!arguments.length) return startAngle; + startAngle = x; + return pie; + }; + pie.endAngle = function(x) { + if (!arguments.length) return endAngle; + endAngle = x; + return pie; + }; + return pie; + }; + var d3_layout_pieSortByValue = {}; + d3.layout.stack = function() { + var values = d3_identity, order = d3_layout_stackOrderDefault, offset = d3_layout_stackOffsetZero, out = d3_layout_stackOut, x = d3_layout_stackX, y = d3_layout_stackY; + function stack(data, index) { + var series = data.map(function(d, i) { + return values.call(stack, d, i); + }); + var points = series.map(function(d) { + return d.map(function(v, i) { + return [ x.call(stack, v, i), y.call(stack, v, i) ]; + }); + }); + var orders = order.call(stack, points, index); + series = d3.permute(series, orders); + points = d3.permute(points, orders); + var offsets = offset.call(stack, points, index); + var n = series.length, m = series[0].length, i, j, o; + for (j = 0; j < m; ++j) { + out.call(stack, series[0][j], o = offsets[j], points[0][j][1]); + for (i = 1; i < n; ++i) { + out.call(stack, series[i][j], o += points[i - 1][j][1], points[i][j][1]); + } + } + return data; + } + stack.values = function(x) { + if (!arguments.length) return values; + values = x; + return stack; + }; + stack.order = function(x) { + if (!arguments.length) return order; + order = typeof x === "function" ? x : d3_layout_stackOrders.get(x) || d3_layout_stackOrderDefault; + return stack; + }; + stack.offset = function(x) { + if (!arguments.length) return offset; + offset = typeof x === "function" ? 
x : d3_layout_stackOffsets.get(x) || d3_layout_stackOffsetZero; + return stack; + }; + stack.x = function(z) { + if (!arguments.length) return x; + x = z; + return stack; + }; + stack.y = function(z) { + if (!arguments.length) return y; + y = z; + return stack; + }; + stack.out = function(z) { + if (!arguments.length) return out; + out = z; + return stack; + }; + return stack; + }; + function d3_layout_stackX(d) { + return d.x; + } + function d3_layout_stackY(d) { + return d.y; + } + function d3_layout_stackOut(d, y0, y) { + d.y0 = y0; + d.y = y; + } + var d3_layout_stackOrders = d3.map({ + "inside-out": function(data) { + var n = data.length, i, j, max = data.map(d3_layout_stackMaxIndex), sums = data.map(d3_layout_stackReduceSum), index = d3.range(n).sort(function(a, b) { + return max[a] - max[b]; + }), top = 0, bottom = 0, tops = [], bottoms = []; + for (i = 0; i < n; ++i) { + j = index[i]; + if (top < bottom) { + top += sums[j]; + tops.push(j); + } else { + bottom += sums[j]; + bottoms.push(j); + } + } + return bottoms.reverse().concat(tops); + }, + reverse: function(data) { + return d3.range(data.length).reverse(); + }, + "default": d3_layout_stackOrderDefault + }); + var d3_layout_stackOffsets = d3.map({ + silhouette: function(data) { + var n = data.length, m = data[0].length, sums = [], max = 0, i, j, o, y0 = []; + for (j = 0; j < m; ++j) { + for (i = 0, o = 0; i < n; i++) o += data[i][j][1]; + if (o > max) max = o; + sums.push(o); + } + for (j = 0; j < m; ++j) { + y0[j] = (max - sums[j]) / 2; + } + return y0; + }, + wiggle: function(data) { + var n = data.length, x = data[0], m = x.length, i, j, k, s1, s2, s3, dx, o, o0, y0 = []; + y0[0] = o = o0 = 0; + for (j = 1; j < m; ++j) { + for (i = 0, s1 = 0; i < n; ++i) s1 += data[i][j][1]; + for (i = 0, s2 = 0, dx = x[j][0] - x[j - 1][0]; i < n; ++i) { + for (k = 0, s3 = (data[i][j][1] - data[i][j - 1][1]) / (2 * dx); k < i; ++k) { + s3 += (data[k][j][1] - data[k][j - 1][1]) / dx; + } + s2 += s3 * data[i][j][1]; + } + y0[j] = o -= s1 ? s2 / s1 * dx : 0; + if (o < o0) o0 = o; + } + for (j = 0; j < m; ++j) y0[j] -= o0; + return y0; + }, + expand: function(data) { + var n = data.length, m = data[0].length, k = 1 / n, i, j, o, y0 = []; + for (j = 0; j < m; ++j) { + for (i = 0, o = 0; i < n; i++) o += data[i][j][1]; + if (o) for (i = 0; i < n; i++) data[i][j][1] /= o; else for (i = 0; i < n; i++) data[i][j][1] = k; + } + for (j = 0; j < m; ++j) y0[j] = 0; + return y0; + }, + zero: d3_layout_stackOffsetZero + }); + function d3_layout_stackOrderDefault(data) { + return d3.range(data.length); + } + function d3_layout_stackOffsetZero(data) { + var j = -1, m = data[0].length, y0 = []; + while (++j < m) y0[j] = 0; + return y0; + } + function d3_layout_stackMaxIndex(array) { + var i = 1, j = 0, v = array[0][1], k, n = array.length; + for (;i < n; ++i) { + if ((k = array[i][1]) > v) { + j = i; + v = k; + } + } + return j; + } + function d3_layout_stackReduceSum(d) { + return d.reduce(d3_layout_stackSum, 0); + } + function d3_layout_stackSum(p, d) { + return p + d[1]; + } + d3.layout.histogram = function() { + var frequency = true, valuer = Number, ranger = d3_layout_histogramRange, binner = d3_layout_histogramBinSturges; + function histogram(data, i) { + var bins = [], values = data.map(valuer, this), range = ranger.call(this, values, i), thresholds = binner.call(this, range, values, i), bin, i = -1, n = values.length, m = thresholds.length - 1, k = frequency ? 
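+ /* [editorial note] k is the weight each in-range datum adds to its bin's y: 1 when frequency is true (raw counts), 1/n otherwise, so bin heights sum to the fraction of data falling in range. The default binner, d3_layout_histogramBinSturges below, is Sturges' rule: ceil(log2(n) + 1) equal-width bins. */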
1 : 1 / n, x; + while (++i < m) { + bin = bins[i] = []; + bin.dx = thresholds[i + 1] - (bin.x = thresholds[i]); + bin.y = 0; + } + if (m > 0) { + i = -1; + while (++i < n) { + x = values[i]; + if (x >= range[0] && x <= range[1]) { + bin = bins[d3.bisect(thresholds, x, 1, m) - 1]; + bin.y += k; + bin.push(data[i]); + } + } + } + return bins; + } + histogram.value = function(x) { + if (!arguments.length) return valuer; + valuer = x; + return histogram; + }; + histogram.range = function(x) { + if (!arguments.length) return ranger; + ranger = d3_functor(x); + return histogram; + }; + histogram.bins = function(x) { + if (!arguments.length) return binner; + binner = typeof x === "number" ? function(range) { + return d3_layout_histogramBinFixed(range, x); + } : d3_functor(x); + return histogram; + }; + histogram.frequency = function(x) { + if (!arguments.length) return frequency; + frequency = !!x; + return histogram; + }; + return histogram; + }; + function d3_layout_histogramBinSturges(range, values) { + return d3_layout_histogramBinFixed(range, Math.ceil(Math.log(values.length) / Math.LN2 + 1)); + } + function d3_layout_histogramBinFixed(range, n) { + var x = -1, b = +range[0], m = (range[1] - b) / n, f = []; + while (++x <= n) f[x] = m * x + b; + return f; + } + function d3_layout_histogramRange(values) { + return [ d3.min(values), d3.max(values) ]; + } + d3.layout.pack = function() { + var hierarchy = d3.layout.hierarchy().sort(d3_layout_packSort), padding = 0, size = [ 1, 1 ], radius; + function pack(d, i) { + var nodes = hierarchy.call(this, d, i), root = nodes[0], w = size[0], h = size[1], r = radius == null ? Math.sqrt : typeof radius === "function" ? radius : function() { + return radius; + }; + root.x = root.y = 0; + d3_layout_hierarchyVisitAfter(root, function(d) { + d.r = +r(d.value); + }); + d3_layout_hierarchyVisitAfter(root, d3_layout_packSiblings); + if (padding) { + var dr = padding * (radius ? 1 : Math.max(2 * root.r / w, 2 * root.r / h)) / 2; + d3_layout_hierarchyVisitAfter(root, function(d) { + d.r += dr; + }); + d3_layout_hierarchyVisitAfter(root, d3_layout_packSiblings); + d3_layout_hierarchyVisitAfter(root, function(d) { + d.r -= dr; + }); + } + d3_layout_packTransform(root, w / 2, h / 2, radius ? 1 : 1 / Math.max(2 * root.r / w, 2 * root.r / h)); + return nodes; + } + pack.size = function(_) { + if (!arguments.length) return size; + size = _; + return pack; + }; + pack.radius = function(_) { + if (!arguments.length) return radius; + radius = _ == null || typeof _ === "function" ? 
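+ /* [editorial note] pack.radius accepts null (the default, radius = sqrt(value)), a constant, or a function of value; padding above works by temporarily inflating every radius, repacking siblings, then deflating. The sibling placement in d3_layout_packSiblings below appears to be the front-chain circle-packing heuristic of Wang et al. (2006). */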
_ : +_; + return pack; + }; + pack.padding = function(_) { + if (!arguments.length) return padding; + padding = +_; + return pack; + }; + return d3_layout_hierarchyRebind(pack, hierarchy); + }; + function d3_layout_packSort(a, b) { + return a.value - b.value; + } + function d3_layout_packInsert(a, b) { + var c = a._pack_next; + a._pack_next = b; + b._pack_prev = a; + b._pack_next = c; + c._pack_prev = b; + } + function d3_layout_packSplice(a, b) { + a._pack_next = b; + b._pack_prev = a; + } + function d3_layout_packIntersects(a, b) { + var dx = b.x - a.x, dy = b.y - a.y, dr = a.r + b.r; + return .999 * dr * dr > dx * dx + dy * dy; + } + function d3_layout_packSiblings(node) { + if (!(nodes = node.children) || !(n = nodes.length)) return; + var nodes, xMin = Infinity, xMax = -Infinity, yMin = Infinity, yMax = -Infinity, a, b, c, i, j, k, n; + function bound(node) { + xMin = Math.min(node.x - node.r, xMin); + xMax = Math.max(node.x + node.r, xMax); + yMin = Math.min(node.y - node.r, yMin); + yMax = Math.max(node.y + node.r, yMax); + } + nodes.forEach(d3_layout_packLink); + a = nodes[0]; + a.x = -a.r; + a.y = 0; + bound(a); + if (n > 1) { + b = nodes[1]; + b.x = b.r; + b.y = 0; + bound(b); + if (n > 2) { + c = nodes[2]; + d3_layout_packPlace(a, b, c); + bound(c); + d3_layout_packInsert(a, c); + a._pack_prev = c; + d3_layout_packInsert(c, b); + b = a._pack_next; + for (i = 3; i < n; i++) { + d3_layout_packPlace(a, b, c = nodes[i]); + var isect = 0, s1 = 1, s2 = 1; + for (j = b._pack_next; j !== b; j = j._pack_next, s1++) { + if (d3_layout_packIntersects(j, c)) { + isect = 1; + break; + } + } + if (isect == 1) { + for (k = a._pack_prev; k !== j._pack_prev; k = k._pack_prev, s2++) { + if (d3_layout_packIntersects(k, c)) { + break; + } + } + } + if (isect) { + if (s1 < s2 || s1 == s2 && b.r < a.r) d3_layout_packSplice(a, b = j); else d3_layout_packSplice(a = k, b); + i--; + } else { + d3_layout_packInsert(a, c); + b = c; + bound(c); + } + } + } + } + var cx = (xMin + xMax) / 2, cy = (yMin + yMax) / 2, cr = 0; + for (i = 0; i < n; i++) { + c = nodes[i]; + c.x -= cx; + c.y -= cy; + cr = Math.max(cr, c.r + Math.sqrt(c.x * c.x + c.y * c.y)); + } + node.r = cr; + nodes.forEach(d3_layout_packUnlink); + } + function d3_layout_packLink(node) { + node._pack_next = node._pack_prev = node; + } + function d3_layout_packUnlink(node) { + delete node._pack_next; + delete node._pack_prev; + } + function d3_layout_packTransform(node, x, y, k) { + var children = node.children; + node.x = x += k * node.x; + node.y = y += k * node.y; + node.r *= k; + if (children) { + var i = -1, n = children.length; + while (++i < n) d3_layout_packTransform(children[i], x, y, k); + } + } + function d3_layout_packPlace(a, b, c) { + var db = a.r + c.r, dx = b.x - a.x, dy = b.y - a.y; + if (db && (dx || dy)) { + var da = b.r + c.r, dc = dx * dx + dy * dy; + da *= da; + db *= db; + var x = .5 + (db - da) / (2 * dc), y = Math.sqrt(Math.max(0, 2 * da * (db + dc) - (db -= dc) * db - da * da)) / (2 * dc); + c.x = a.x + x * dx + y * dy; + c.y = a.y + x * dy - y * dx; + } else { + c.x = a.x + db; + c.y = a.y; + } + } + d3.layout.tree = function() { + var hierarchy = d3.layout.hierarchy().sort(null).value(null), separation = d3_layout_treeSeparation, size = [ 1, 1 ], nodeSize = null; + function tree(d, i) { + var nodes = hierarchy.call(this, d, i), root0 = nodes[0], root1 = wrapTree(root0); + d3_layout_hierarchyVisitAfter(root1, firstWalk), root1.parent.m = -root1.z; + d3_layout_hierarchyVisitBefore(root1, secondWalk); + if (nodeSize) 
d3_layout_hierarchyVisitBefore(root0, sizeNode); else { + var left = root0, right = root0, bottom = root0; + d3_layout_hierarchyVisitBefore(root0, function(node) { + if (node.x < left.x) left = node; + if (node.x > right.x) right = node; + if (node.depth > bottom.depth) bottom = node; + }); + var tx = separation(left, right) / 2 - left.x, kx = size[0] / (right.x + separation(right, left) / 2 + tx), ky = size[1] / (bottom.depth || 1); + d3_layout_hierarchyVisitBefore(root0, function(node) { + node.x = (node.x + tx) * kx; + node.y = node.depth * ky; + }); + } + return nodes; + } + function wrapTree(root0) { + var root1 = { + A: null, + children: [ root0 ] + }, queue = [ root1 ], node1; + while ((node1 = queue.pop()) != null) { + for (var children = node1.children, child, i = 0, n = children.length; i < n; ++i) { + queue.push((children[i] = child = { + _: children[i], + parent: node1, + children: (child = children[i].children) && child.slice() || [], + A: null, + a: null, + z: 0, + m: 0, + c: 0, + s: 0, + t: null, + i: i + }).a = child); + } + } + return root1.children[0]; + } + function firstWalk(v) { + var children = v.children, siblings = v.parent.children, w = v.i ? siblings[v.i - 1] : null; + if (children.length) { + d3_layout_treeShift(v); + var midpoint = (children[0].z + children[children.length - 1].z) / 2; + if (w) { + v.z = w.z + separation(v._, w._); + v.m = v.z - midpoint; + } else { + v.z = midpoint; + } + } else if (w) { + v.z = w.z + separation(v._, w._); + } + v.parent.A = apportion(v, w, v.parent.A || siblings[0]); + } + function secondWalk(v) { + v._.x = v.z + v.parent.m; + v.m += v.parent.m; + } + function apportion(v, w, ancestor) { + if (w) { + var vip = v, vop = v, vim = w, vom = vip.parent.children[0], sip = vip.m, sop = vop.m, sim = vim.m, som = vom.m, shift; + while (vim = d3_layout_treeRight(vim), vip = d3_layout_treeLeft(vip), vim && vip) { + vom = d3_layout_treeLeft(vom); + vop = d3_layout_treeRight(vop); + vop.a = v; + shift = vim.z + sim - vip.z - sip + separation(vim._, vip._); + if (shift > 0) { + d3_layout_treeMove(d3_layout_treeAncestor(vim, v, ancestor), v, shift); + sip += shift; + sop += shift; + } + sim += vim.m; + sip += vip.m; + som += vom.m; + sop += vop.m; + } + if (vim && !d3_layout_treeRight(vop)) { + vop.t = vim; + vop.m += sim - sop; + } + if (vip && !d3_layout_treeLeft(vom)) { + vom.t = vip; + vom.m += sip - som; + ancestor = v; + } + } + return ancestor; + } + function sizeNode(node) { + node.x *= size[0]; + node.y = node.depth * size[1]; + } + tree.separation = function(x) { + if (!arguments.length) return separation; + separation = x; + return tree; + }; + tree.size = function(x) { + if (!arguments.length) return nodeSize ? null : size; + nodeSize = (size = x) == null ? sizeNode : null; + return tree; + }; + tree.nodeSize = function(x) { + if (!arguments.length) return nodeSize ? size : null; + nodeSize = (size = x) == null ? null : sizeNode; + return tree; + }; + return d3_layout_hierarchyRebind(tree, hierarchy); + }; + function d3_layout_treeSeparation(a, b) { + return a.parent == b.parent ? 1 : 2; + } + function d3_layout_treeLeft(v) { + var children = v.children; + return children.length ? children[0] : v.t; + } + function d3_layout_treeRight(v) { + var children = v.children, n; + return (n = children.length) ? 
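+ /* [editorial note] firstWalk, secondWalk and apportion above implement a linear-time "tidy tree" layout: z is a node's preliminary x, m an accumulated shift applied to a whole subtree, and apportion pushes conflicting subtrees apart; this matches the Buchheim-Jünger-Leipert refinement of Reingold-Tilford. */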
children[n - 1] : v.t; + } + function d3_layout_treeMove(wm, wp, shift) { + var change = shift / (wp.i - wm.i); + wp.c -= change; + wp.s += shift; + wm.c += change; + wp.z += shift; + wp.m += shift; + } + function d3_layout_treeShift(v) { + var shift = 0, change = 0, children = v.children, i = children.length, w; + while (--i >= 0) { + w = children[i]; + w.z += shift; + w.m += shift; + shift += w.s + (change += w.c); + } + } + function d3_layout_treeAncestor(vim, v, ancestor) { + return vim.a.parent === v.parent ? vim.a : ancestor; + } + d3.layout.cluster = function() { + var hierarchy = d3.layout.hierarchy().sort(null).value(null), separation = d3_layout_treeSeparation, size = [ 1, 1 ], nodeSize = false; + function cluster(d, i) { + var nodes = hierarchy.call(this, d, i), root = nodes[0], previousNode, x = 0; + d3_layout_hierarchyVisitAfter(root, function(node) { + var children = node.children; + if (children && children.length) { + node.x = d3_layout_clusterX(children); + node.y = d3_layout_clusterY(children); + } else { + node.x = previousNode ? x += separation(node, previousNode) : 0; + node.y = 0; + previousNode = node; + } + }); + var left = d3_layout_clusterLeft(root), right = d3_layout_clusterRight(root), x0 = left.x - separation(left, right) / 2, x1 = right.x + separation(right, left) / 2; + d3_layout_hierarchyVisitAfter(root, nodeSize ? function(node) { + node.x = (node.x - root.x) * size[0]; + node.y = (root.y - node.y) * size[1]; + } : function(node) { + node.x = (node.x - x0) / (x1 - x0) * size[0]; + node.y = (1 - (root.y ? node.y / root.y : 1)) * size[1]; + }); + return nodes; + } + cluster.separation = function(x) { + if (!arguments.length) return separation; + separation = x; + return cluster; + }; + cluster.size = function(x) { + if (!arguments.length) return nodeSize ? null : size; + nodeSize = (size = x) == null; + return cluster; + }; + cluster.nodeSize = function(x) { + if (!arguments.length) return nodeSize ? size : null; + nodeSize = (size = x) != null; + return cluster; + }; + return d3_layout_hierarchyRebind(cluster, hierarchy); + }; + function d3_layout_clusterY(children) { + return 1 + d3.max(children, function(child) { + return child.y; + }); + } + function d3_layout_clusterX(children) { + return children.reduce(function(x, child) { + return x + child.x; + }, 0) / children.length; + } + function d3_layout_clusterLeft(node) { + var children = node.children; + return children && children.length ? d3_layout_clusterLeft(children[0]) : node; + } + function d3_layout_clusterRight(node) { + var children = node.children, n; + return children && (n = children.length) ? d3_layout_clusterRight(children[n - 1]) : node; + } + d3.layout.treemap = function() { + var hierarchy = d3.layout.hierarchy(), round = Math.round, size = [ 1, 1 ], padding = null, pad = d3_layout_treemapPadNull, sticky = false, stickies, mode = "squarify", ratio = .5 * (1 + Math.sqrt(5)); + function scale(children, k) { + var i = -1, n = children.length, child, area; + while (++i < n) { + area = (child = children[i]).value * (k < 0 ? 0 : k); + child.area = isNaN(area) || area <= 0 ? 0 : area; + } + } + function squarify(node) { + var children = node.children; + if (children && children.length) { + var rect = pad(node), row = [], remaining = children.slice(), child, best = Infinity, score, u = mode === "slice" ? rect.dx : mode === "dice" ? rect.dy : mode === "slice-dice" ? node.depth & 1 ? 
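+ /* [editorial note] mode picks the row length u: "slice" uses rect.dx, "dice" rect.dy, "slice-dice" alternates with depth (the node.depth & 1 test here), and the default "squarify" takes the shorter side, greedily growing a row while worst() below (the row's worst aspect ratio) keeps improving against the target ratio, which defaults to the golden ratio .5 * (1 + Math.sqrt(5)). */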
rect.dy : rect.dx : Math.min(rect.dx, rect.dy), n; + scale(remaining, rect.dx * rect.dy / node.value); + row.area = 0; + while ((n = remaining.length) > 0) { + row.push(child = remaining[n - 1]); + row.area += child.area; + if (mode !== "squarify" || (score = worst(row, u)) <= best) { + remaining.pop(); + best = score; + } else { + row.area -= row.pop().area; + position(row, u, rect, false); + u = Math.min(rect.dx, rect.dy); + row.length = row.area = 0; + best = Infinity; + } + } + if (row.length) { + position(row, u, rect, true); + row.length = row.area = 0; + } + children.forEach(squarify); + } + } + function stickify(node) { + var children = node.children; + if (children && children.length) { + var rect = pad(node), remaining = children.slice(), child, row = []; + scale(remaining, rect.dx * rect.dy / node.value); + row.area = 0; + while (child = remaining.pop()) { + row.push(child); + row.area += child.area; + if (child.z != null) { + position(row, child.z ? rect.dx : rect.dy, rect, !remaining.length); + row.length = row.area = 0; + } + } + children.forEach(stickify); + } + } + function worst(row, u) { + var s = row.area, r, rmax = 0, rmin = Infinity, i = -1, n = row.length; + while (++i < n) { + if (!(r = row[i].area)) continue; + if (r < rmin) rmin = r; + if (r > rmax) rmax = r; + } + s *= s; + u *= u; + return s ? Math.max(u * rmax * ratio / s, s / (u * rmin * ratio)) : Infinity; + } + function position(row, u, rect, flush) { + var i = -1, n = row.length, x = rect.x, y = rect.y, v = u ? round(row.area / u) : 0, o; + if (u == rect.dx) { + if (flush || v > rect.dy) v = rect.dy; + while (++i < n) { + o = row[i]; + o.x = x; + o.y = y; + o.dy = v; + x += o.dx = Math.min(rect.x + rect.dx - x, v ? round(o.area / v) : 0); + } + o.z = true; + o.dx += rect.x + rect.dx - x; + rect.y += v; + rect.dy -= v; + } else { + if (flush || v > rect.dx) v = rect.dx; + while (++i < n) { + o = row[i]; + o.x = x; + o.y = y; + o.dx = v; + y += o.dy = Math.min(rect.y + rect.dy - y, v ? round(o.area / v) : 0); + } + o.z = false; + o.dy += rect.y + rect.dy - y; + rect.x += v; + rect.dx -= v; + } + } + function treemap(d) { + var nodes = stickies || hierarchy(d), root = nodes[0]; + root.x = 0; + root.y = 0; + root.dx = size[0]; + root.dy = size[1]; + if (stickies) hierarchy.revalue(root); + scale([ root ], root.dx * root.dy / root.value); + (stickies ? stickify : squarify)(root); + if (sticky) stickies = nodes; + return nodes; + } + treemap.size = function(x) { + if (!arguments.length) return size; + size = x; + return treemap; + }; + treemap.padding = function(x) { + if (!arguments.length) return padding; + function padFunction(node) { + var p = x.call(treemap, node, node.depth); + return p == null ? d3_layout_treemapPadNull(node) : d3_layout_treemapPad(node, typeof p === "number" ? [ p, p, p, p ] : p); + } + function padConstant(node) { + return d3_layout_treemapPad(node, x); + } + var type; + pad = (padding = x) == null ? d3_layout_treemapPadNull : (type = typeof x) === "function" ? padFunction : type === "number" ? (x = [ x, x, x, x ], + padConstant) : padConstant; + return treemap; + }; + treemap.round = function(x) { + if (!arguments.length) return round != Number; + round = x ? 
Math.round : Number; + return treemap; + }; + treemap.sticky = function(x) { + if (!arguments.length) return sticky; + sticky = x; + stickies = null; + return treemap; + }; + treemap.ratio = function(x) { + if (!arguments.length) return ratio; + ratio = x; + return treemap; + }; + treemap.mode = function(x) { + if (!arguments.length) return mode; + mode = x + ""; + return treemap; + }; + return d3_layout_hierarchyRebind(treemap, hierarchy); + }; + function d3_layout_treemapPadNull(node) { + return { + x: node.x, + y: node.y, + dx: node.dx, + dy: node.dy + }; + } + function d3_layout_treemapPad(node, padding) { + var x = node.x + padding[3], y = node.y + padding[0], dx = node.dx - padding[1] - padding[3], dy = node.dy - padding[0] - padding[2]; + if (dx < 0) { + x += dx / 2; + dx = 0; + } + if (dy < 0) { + y += dy / 2; + dy = 0; + } + return { + x: x, + y: y, + dx: dx, + dy: dy + }; + } + d3.random = { + normal: function(µ, σ) { + var n = arguments.length; + if (n < 2) σ = 1; + if (n < 1) µ = 0; + return function() { + var x, y, r; + do { + x = Math.random() * 2 - 1; + y = Math.random() * 2 - 1; + r = x * x + y * y; + } while (!r || r > 1); + return µ + σ * x * Math.sqrt(-2 * Math.log(r) / r); + }; + }, + logNormal: function() { + var random = d3.random.normal.apply(d3, arguments); + return function() { + return Math.exp(random()); + }; + }, + bates: function(m) { + var random = d3.random.irwinHall(m); + return function() { + return random() / m; + }; + }, + irwinHall: function(m) { + return function() { + for (var s = 0, j = 0; j < m; j++) s += Math.random(); + return s; + }; + } + }; + d3.scale = {}; + function d3_scaleExtent(domain) { + var start = domain[0], stop = domain[domain.length - 1]; + return start < stop ? [ start, stop ] : [ stop, start ]; + } + function d3_scaleRange(scale) { + return scale.rangeExtent ? scale.rangeExtent() : d3_scaleExtent(scale.range()); + } + function d3_scale_bilinear(domain, range, uninterpolate, interpolate) { + var u = uninterpolate(domain[0], domain[1]), i = interpolate(range[0], range[1]); + return function(x) { + return i(u(x)); + }; + } + function d3_scale_nice(domain, nice) { + var i0 = 0, i1 = domain.length - 1, x0 = domain[i0], x1 = domain[i1], dx; + if (x1 < x0) { + dx = i0, i0 = i1, i1 = dx; + dx = x0, x0 = x1, x1 = dx; + } + domain[i0] = nice.floor(x0); + domain[i1] = nice.ceil(x1); + return domain; + } + function d3_scale_niceStep(step) { + return step ? { + floor: function(x) { + return Math.floor(x / step) * step; + }, + ceil: function(x) { + return Math.ceil(x / step) * step; + } + } : d3_scale_niceIdentity; + } + var d3_scale_niceIdentity = { + floor: d3_identity, + ceil: d3_identity + }; + function d3_scale_polylinear(domain, range, uninterpolate, interpolate) { + var u = [], i = [], j = 0, k = Math.min(domain.length, range.length) - 1; + if (domain[k] < domain[0]) { + domain = domain.slice().reverse(); + range = range.slice().reverse(); + } + while (++j <= k) { + u.push(uninterpolate(domain[j - 1], domain[j])); + i.push(interpolate(range[j - 1], range[j])); + } + return function(x) { + var j = d3.bisect(domain, x, 1, k) - 1; + return i[j](u[j](x)); + }; + } + d3.scale.linear = function() { + return d3_scale_linear([ 0, 1 ], [ 0, 1 ], d3_interpolate, false); + }; + function d3_scale_linear(domain, range, interpolate, clamp) { + var output, input; + function rescale() { + var linear = Math.min(domain.length, range.length) > 2 ? d3_scale_polylinear : d3_scale_bilinear, uninterpolate = clamp ? 
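+ /* [editorial note] rescale() selects the mapping kernel: d3_scale_bilinear for a simple two-point domain, d3_scale_polylinear for piecewise domains, with uninterpolate normalizing an input to a parameter t in [0, 1] (clamped when clamp is set) and interpolate mapping t onto the range. Hedged usage sketch: d3.scale.linear().domain([0, 100]).range([0, 1]).nice(). */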
d3_uninterpolateClamp : d3_uninterpolateNumber; + output = linear(domain, range, uninterpolate, interpolate); + input = linear(range, domain, uninterpolate, d3_interpolate); + return scale; + } + function scale(x) { + return output(x); + } + scale.invert = function(y) { + return input(y); + }; + scale.domain = function(x) { + if (!arguments.length) return domain; + domain = x.map(Number); + return rescale(); + }; + scale.range = function(x) { + if (!arguments.length) return range; + range = x; + return rescale(); + }; + scale.rangeRound = function(x) { + return scale.range(x).interpolate(d3_interpolateRound); + }; + scale.clamp = function(x) { + if (!arguments.length) return clamp; + clamp = x; + return rescale(); + }; + scale.interpolate = function(x) { + if (!arguments.length) return interpolate; + interpolate = x; + return rescale(); + }; + scale.ticks = function(m) { + return d3_scale_linearTicks(domain, m); + }; + scale.tickFormat = function(m, format) { + return d3_scale_linearTickFormat(domain, m, format); + }; + scale.nice = function(m) { + d3_scale_linearNice(domain, m); + return rescale(); + }; + scale.copy = function() { + return d3_scale_linear(domain, range, interpolate, clamp); + }; + return rescale(); + } + function d3_scale_linearRebind(scale, linear) { + return d3.rebind(scale, linear, "range", "rangeRound", "interpolate", "clamp"); + } + function d3_scale_linearNice(domain, m) { + return d3_scale_nice(domain, d3_scale_niceStep(d3_scale_linearTickRange(domain, m)[2])); + } + function d3_scale_linearTickRange(domain, m) { + if (m == null) m = 10; + var extent = d3_scaleExtent(domain), span = extent[1] - extent[0], step = Math.pow(10, Math.floor(Math.log(span / m) / Math.LN10)), err = m / span * step; + if (err <= .15) step *= 10; else if (err <= .35) step *= 5; else if (err <= .75) step *= 2; + extent[0] = Math.ceil(extent[0] / step) * step; + extent[1] = Math.floor(extent[1] / step) * step + step * .5; + extent[2] = step; + return extent; + } + function d3_scale_linearTicks(domain, m) { + return d3.range.apply(d3, d3_scale_linearTickRange(domain, m)); + } + function d3_scale_linearTickFormat(domain, m, format) { + var range = d3_scale_linearTickRange(domain, m); + if (format) { + var match = d3_format_re.exec(format); + match.shift(); + if (match[8] === "s") { + var prefix = d3.formatPrefix(Math.max(abs(range[0]), abs(range[1]))); + if (!match[7]) match[7] = "." + d3_scale_linearPrecision(prefix.scale(range[2])); + match[8] = "f"; + format = d3.format(match.join("")); + return function(d) { + return format(prefix.scale(d)) + prefix.symbol; + }; + } + if (!match[7]) match[7] = "." + d3_scale_linearFormatPrecision(match[8], range); + format = match.join(""); + } else { + format = ",." + d3_scale_linearPrecision(range[2]) + "f"; + } + return d3.format(format); + } + var d3_scale_linearFormatSignificant = { + s: 1, + g: 1, + p: 1, + r: 1, + e: 1 + }; + function d3_scale_linearPrecision(value) { + return -Math.floor(Math.log(value) / Math.LN10 + .01); + } + function d3_scale_linearFormatPrecision(type, range) { + var p = d3_scale_linearPrecision(range[2]); + return type in d3_scale_linearFormatSignificant ? Math.abs(p - d3_scale_linearPrecision(Math.max(abs(range[0]), abs(range[1])))) + +(type !== "e") : p - (type === "%") * 2; + } + d3.scale.log = function() { + return d3_scale_log(d3.scale.linear().domain([ 0, 1 ]), 10, true, [ 1, 10 ]); + }; + function d3_scale_log(linear, base, positive, domain) { + function log(x) { + return (positive ? Math.log(x < 0 ? 
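+ /* [editorial note] positive is inferred from the domain's sign (scale.domain below sets positive = x[0] >= 0); a negative domain maps x through -log(-x), and the clamp to 0 here turns out-of-sign inputs into -Infinity rather than NaN. nice() and ticks() then work in log space and map back through pow(). */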
0 : x) : -Math.log(x > 0 ? 0 : -x)) / Math.log(base); + } + function pow(x) { + return positive ? Math.pow(base, x) : -Math.pow(base, -x); + } + function scale(x) { + return linear(log(x)); + } + scale.invert = function(x) { + return pow(linear.invert(x)); + }; + scale.domain = function(x) { + if (!arguments.length) return domain; + positive = x[0] >= 0; + linear.domain((domain = x.map(Number)).map(log)); + return scale; + }; + scale.base = function(_) { + if (!arguments.length) return base; + base = +_; + linear.domain(domain.map(log)); + return scale; + }; + scale.nice = function() { + var niced = d3_scale_nice(domain.map(log), positive ? Math : d3_scale_logNiceNegative); + linear.domain(niced); + domain = niced.map(pow); + return scale; + }; + scale.ticks = function() { + var extent = d3_scaleExtent(domain), ticks = [], u = extent[0], v = extent[1], i = Math.floor(log(u)), j = Math.ceil(log(v)), n = base % 1 ? 2 : base; + if (isFinite(j - i)) { + if (positive) { + for (;i < j; i++) for (var k = 1; k < n; k++) ticks.push(pow(i) * k); + ticks.push(pow(i)); + } else { + ticks.push(pow(i)); + for (;i++ < j; ) for (var k = n - 1; k > 0; k--) ticks.push(pow(i) * k); + } + for (i = 0; ticks[i] < u; i++) {} + for (j = ticks.length; ticks[j - 1] > v; j--) {} + ticks = ticks.slice(i, j); + } + return ticks; + }; + scale.tickFormat = function(n, format) { + if (!arguments.length) return d3_scale_logFormat; + if (arguments.length < 2) format = d3_scale_logFormat; else if (typeof format !== "function") format = d3.format(format); + var k = Math.max(.1, n / scale.ticks().length), f = positive ? (e = 1e-12, Math.ceil) : (e = -1e-12, + Math.floor), e; + return function(d) { + return d / pow(f(log(d) + e)) <= k ? format(d) : ""; + }; + }; + scale.copy = function() { + return d3_scale_log(linear.copy(), base, positive, domain); + }; + return d3_scale_linearRebind(scale, linear); + } + var d3_scale_logFormat = d3.format(".0e"), d3_scale_logNiceNegative = { + floor: function(x) { + return -Math.ceil(-x); + }, + ceil: function(x) { + return -Math.floor(-x); + } + }; + d3.scale.pow = function() { + return d3_scale_pow(d3.scale.linear(), 1, [ 0, 1 ]); + }; + function d3_scale_pow(linear, exponent, domain) { + var powp = d3_scale_powPow(exponent), powb = d3_scale_powPow(1 / exponent); + function scale(x) { + return linear(powp(x)); + } + scale.invert = function(x) { + return powb(linear.invert(x)); + }; + scale.domain = function(x) { + if (!arguments.length) return domain; + linear.domain((domain = x.map(Number)).map(powp)); + return scale; + }; + scale.ticks = function(m) { + return d3_scale_linearTicks(domain, m); + }; + scale.tickFormat = function(m, format) { + return d3_scale_linearTickFormat(domain, m, format); + }; + scale.nice = function(m) { + return scale.domain(d3_scale_linearNice(domain, m)); + }; + scale.exponent = function(x) { + if (!arguments.length) return exponent; + powp = d3_scale_powPow(exponent = x); + powb = d3_scale_powPow(1 / exponent); + linear.domain(domain.map(powp)); + return scale; + }; + scale.copy = function() { + return d3_scale_pow(linear.copy(), exponent, domain); + }; + return d3_scale_linearRebind(scale, linear); + } + function d3_scale_powPow(e) { + return function(x) { + return x < 0 ? 
-Math.pow(-x, e) : Math.pow(x, e); + }; + } + d3.scale.sqrt = function() { + return d3.scale.pow().exponent(.5); + }; + d3.scale.ordinal = function() { + return d3_scale_ordinal([], { + t: "range", + a: [ [] ] + }); + }; + function d3_scale_ordinal(domain, ranger) { + var index, range, rangeBand; + function scale(x) { + return range[((index.get(x) || (ranger.t === "range" ? index.set(x, domain.push(x)) : NaN)) - 1) % range.length]; + } + function steps(start, step) { + return d3.range(domain.length).map(function(i) { + return start + step * i; + }); + } + scale.domain = function(x) { + if (!arguments.length) return domain; + domain = []; + index = new d3_Map(); + var i = -1, n = x.length, xi; + while (++i < n) if (!index.has(xi = x[i])) index.set(xi, domain.push(xi)); + return scale[ranger.t].apply(scale, ranger.a); + }; + scale.range = function(x) { + if (!arguments.length) return range; + range = x; + rangeBand = 0; + ranger = { + t: "range", + a: arguments + }; + return scale; + }; + scale.rangePoints = function(x, padding) { + if (arguments.length < 2) padding = 0; + var start = x[0], stop = x[1], step = (stop - start) / (Math.max(1, domain.length - 1) + padding); + range = steps(domain.length < 2 ? (start + stop) / 2 : start + step * padding / 2, step); + rangeBand = 0; + ranger = { + t: "rangePoints", + a: arguments + }; + return scale; + }; + scale.rangeBands = function(x, padding, outerPadding) { + if (arguments.length < 2) padding = 0; + if (arguments.length < 3) outerPadding = padding; + var reverse = x[1] < x[0], start = x[reverse - 0], stop = x[1 - reverse], step = (stop - start) / (domain.length - padding + 2 * outerPadding); + range = steps(start + step * outerPadding, step); + if (reverse) range.reverse(); + rangeBand = step * (1 - padding); + ranger = { + t: "rangeBands", + a: arguments + }; + return scale; + }; + scale.rangeRoundBands = function(x, padding, outerPadding) { + if (arguments.length < 2) padding = 0; + if (arguments.length < 3) outerPadding = padding; + var reverse = x[1] < x[0], start = x[reverse - 0], stop = x[1 - reverse], step = Math.floor((stop - start) / (domain.length - padding + 2 * outerPadding)), error = stop - start - (domain.length - padding) * step; + range = steps(start + Math.round(error / 2), step); + if (reverse) range.reverse(); + rangeBand = Math.round(step * (1 - padding)); + ranger = { + t: "rangeRoundBands", + a: arguments + }; + return scale; + }; + scale.rangeBand = function() { + return rangeBand; + }; + scale.rangeExtent = function() { + return d3_scaleExtent(ranger.a[0]); + }; + scale.copy = function() { + return d3_scale_ordinal(domain, ranger); + }; + return scale.domain(domain); + } + d3.scale.category10 = function() { + return d3.scale.ordinal().range(d3_category10); + }; + d3.scale.category20 = function() { + return d3.scale.ordinal().range(d3_category20); + }; + d3.scale.category20b = function() { + return d3.scale.ordinal().range(d3_category20b); + }; + d3.scale.category20c = function() { + return d3.scale.ordinal().range(d3_category20c); + }; + var d3_category10 = [ 2062260, 16744206, 2924588, 14034728, 9725885, 9197131, 14907330, 8355711, 12369186, 1556175 ].map(d3_rgbString); + var d3_category20 = [ 2062260, 11454440, 16744206, 16759672, 2924588, 10018698, 14034728, 16750742, 9725885, 12955861, 9197131, 12885140, 14907330, 16234194, 8355711, 13092807, 12369186, 14408589, 1556175, 10410725 ].map(d3_rgbString); + var d3_category20b = [ 3750777, 5395619, 7040719, 10264286, 6519097, 9216594, 11915115, 13556636, 9202993, 
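+ /* [editorial note] these palette entries are RGB triples packed as integers, e.g. 2062260 === 0x1F77B4, the familiar category10 blue; they are unpacked to "#rrggbb" strings by d3_rgbString. */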
12426809, 15186514, 15190932, 8666169, 11356490, 14049643, 15177372, 8077683, 10834324, 13528509, 14589654 ].map(d3_rgbString); + var d3_category20c = [ 3244733, 7057110, 10406625, 13032431, 15095053, 16616764, 16625259, 16634018, 3253076, 7652470, 10607003, 13101504, 7695281, 10394312, 12369372, 14342891, 6513507, 9868950, 12434877, 14277081 ].map(d3_rgbString); + d3.scale.quantile = function() { + return d3_scale_quantile([], []); + }; + function d3_scale_quantile(domain, range) { + var thresholds; + function rescale() { + var k = 0, q = range.length; + thresholds = []; + while (++k < q) thresholds[k - 1] = d3.quantile(domain, k / q); + return scale; + } + function scale(x) { + if (!isNaN(x = +x)) return range[d3.bisect(thresholds, x)]; + } + scale.domain = function(x) { + if (!arguments.length) return domain; + domain = x.filter(d3_number).sort(d3_ascending); + return rescale(); + }; + scale.range = function(x) { + if (!arguments.length) return range; + range = x; + return rescale(); + }; + scale.quantiles = function() { + return thresholds; + }; + scale.invertExtent = function(y) { + y = range.indexOf(y); + return y < 0 ? [ NaN, NaN ] : [ y > 0 ? thresholds[y - 1] : domain[0], y < thresholds.length ? thresholds[y] : domain[domain.length - 1] ]; + }; + scale.copy = function() { + return d3_scale_quantile(domain, range); + }; + return rescale(); + } + d3.scale.quantize = function() { + return d3_scale_quantize(0, 1, [ 0, 1 ]); + }; + function d3_scale_quantize(x0, x1, range) { + var kx, i; + function scale(x) { + return range[Math.max(0, Math.min(i, Math.floor(kx * (x - x0))))]; + } + function rescale() { + kx = range.length / (x1 - x0); + i = range.length - 1; + return scale; + } + scale.domain = function(x) { + if (!arguments.length) return [ x0, x1 ]; + x0 = +x[0]; + x1 = +x[x.length - 1]; + return rescale(); + }; + scale.range = function(x) { + if (!arguments.length) return range; + range = x; + return rescale(); + }; + scale.invertExtent = function(y) { + y = range.indexOf(y); + y = y < 0 ? 
NaN : y / kx + x0; + return [ y, y + 1 / kx ]; + }; + scale.copy = function() { + return d3_scale_quantize(x0, x1, range); + }; + return rescale(); + } + d3.scale.threshold = function() { + return d3_scale_threshold([ .5 ], [ 0, 1 ]); + }; + function d3_scale_threshold(domain, range) { + function scale(x) { + if (x <= x) return range[d3.bisect(domain, x)]; + } + scale.domain = function(_) { + if (!arguments.length) return domain; + domain = _; + return scale; + }; + scale.range = function(_) { + if (!arguments.length) return range; + range = _; + return scale; + }; + scale.invertExtent = function(y) { + y = range.indexOf(y); + return [ domain[y - 1], domain[y] ]; + }; + scale.copy = function() { + return d3_scale_threshold(domain, range); + }; + return scale; + } + d3.scale.identity = function() { + return d3_scale_identity([ 0, 1 ]); + }; + function d3_scale_identity(domain) { + function identity(x) { + return +x; + } + identity.invert = identity; + identity.domain = identity.range = function(x) { + if (!arguments.length) return domain; + domain = x.map(identity); + return identity; + }; + identity.ticks = function(m) { + return d3_scale_linearTicks(domain, m); + }; + identity.tickFormat = function(m, format) { + return d3_scale_linearTickFormat(domain, m, format); + }; + identity.copy = function() { + return d3_scale_identity(domain); + }; + return identity; + } + d3.svg = {}; + d3.svg.arc = function() { + var innerRadius = d3_svg_arcInnerRadius, outerRadius = d3_svg_arcOuterRadius, startAngle = d3_svg_arcStartAngle, endAngle = d3_svg_arcEndAngle; + function arc() { + var r0 = innerRadius.apply(this, arguments), r1 = outerRadius.apply(this, arguments), a0 = startAngle.apply(this, arguments) + d3_svg_arcOffset, a1 = endAngle.apply(this, arguments) + d3_svg_arcOffset, da = (a1 < a0 && (da = a0, + a0 = a1, a1 = da), a1 - a0), df = da < π ? "0" : "1", c0 = Math.cos(a0), s0 = Math.sin(a0), c1 = Math.cos(a1), s1 = Math.sin(a1); + return da >= d3_svg_arcMax ? r0 ? "M0," + r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + -r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + r1 + "M0," + r0 + "A" + r0 + "," + r0 + " 0 1,0 0," + -r0 + "A" + r0 + "," + r0 + " 0 1,0 0," + r0 + "Z" : "M0," + r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + -r1 + "A" + r1 + "," + r1 + " 0 1,1 0," + r1 + "Z" : r0 ? 
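+ /* [editorial note] once the sweep da reaches a full turn (d3_svg_arcMax = τ - ε below) the path is emitted as two half-circle arcs per radius, since a single SVG "A" command cannot describe a complete circle; otherwise this r0 test chooses between an annular sector and a plain pie slice closed through the center with "L0,0". */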
"M" + r1 * c0 + "," + r1 * s0 + "A" + r1 + "," + r1 + " 0 " + df + ",1 " + r1 * c1 + "," + r1 * s1 + "L" + r0 * c1 + "," + r0 * s1 + "A" + r0 + "," + r0 + " 0 " + df + ",0 " + r0 * c0 + "," + r0 * s0 + "Z" : "M" + r1 * c0 + "," + r1 * s0 + "A" + r1 + "," + r1 + " 0 " + df + ",1 " + r1 * c1 + "," + r1 * s1 + "L0,0" + "Z"; + } + arc.innerRadius = function(v) { + if (!arguments.length) return innerRadius; + innerRadius = d3_functor(v); + return arc; + }; + arc.outerRadius = function(v) { + if (!arguments.length) return outerRadius; + outerRadius = d3_functor(v); + return arc; + }; + arc.startAngle = function(v) { + if (!arguments.length) return startAngle; + startAngle = d3_functor(v); + return arc; + }; + arc.endAngle = function(v) { + if (!arguments.length) return endAngle; + endAngle = d3_functor(v); + return arc; + }; + arc.centroid = function() { + var r = (innerRadius.apply(this, arguments) + outerRadius.apply(this, arguments)) / 2, a = (startAngle.apply(this, arguments) + endAngle.apply(this, arguments)) / 2 + d3_svg_arcOffset; + return [ Math.cos(a) * r, Math.sin(a) * r ]; + }; + return arc; + }; + var d3_svg_arcOffset = -halfπ, d3_svg_arcMax = τ - ε; + function d3_svg_arcInnerRadius(d) { + return d.innerRadius; + } + function d3_svg_arcOuterRadius(d) { + return d.outerRadius; + } + function d3_svg_arcStartAngle(d) { + return d.startAngle; + } + function d3_svg_arcEndAngle(d) { + return d.endAngle; + } + function d3_svg_line(projection) { + var x = d3_geom_pointX, y = d3_geom_pointY, defined = d3_true, interpolate = d3_svg_lineLinear, interpolateKey = interpolate.key, tension = .7; + function line(data) { + var segments = [], points = [], i = -1, n = data.length, d, fx = d3_functor(x), fy = d3_functor(y); + function segment() { + segments.push("M", interpolate(projection(points), tension)); + } + while (++i < n) { + if (defined.call(this, d = data[i], i)) { + points.push([ +fx.call(this, d, i), +fy.call(this, d, i) ]); + } else if (points.length) { + segment(); + points = []; + } + } + if (points.length) segment(); + return segments.length ? 
segments.join("") : null; + } + line.x = function(_) { + if (!arguments.length) return x; + x = _; + return line; + }; + line.y = function(_) { + if (!arguments.length) return y; + y = _; + return line; + }; + line.defined = function(_) { + if (!arguments.length) return defined; + defined = _; + return line; + }; + line.interpolate = function(_) { + if (!arguments.length) return interpolateKey; + if (typeof _ === "function") interpolateKey = interpolate = _; else interpolateKey = (interpolate = d3_svg_lineInterpolators.get(_) || d3_svg_lineLinear).key; + return line; + }; + line.tension = function(_) { + if (!arguments.length) return tension; + tension = _; + return line; + }; + return line; + } + d3.svg.line = function() { + return d3_svg_line(d3_identity); + }; + var d3_svg_lineInterpolators = d3.map({ + linear: d3_svg_lineLinear, + "linear-closed": d3_svg_lineLinearClosed, + step: d3_svg_lineStep, + "step-before": d3_svg_lineStepBefore, + "step-after": d3_svg_lineStepAfter, + basis: d3_svg_lineBasis, + "basis-open": d3_svg_lineBasisOpen, + "basis-closed": d3_svg_lineBasisClosed, + bundle: d3_svg_lineBundle, + cardinal: d3_svg_lineCardinal, + "cardinal-open": d3_svg_lineCardinalOpen, + "cardinal-closed": d3_svg_lineCardinalClosed, + monotone: d3_svg_lineMonotone + }); + d3_svg_lineInterpolators.forEach(function(key, value) { + value.key = key; + value.closed = /-closed$/.test(key); + }); + function d3_svg_lineLinear(points) { + return points.join("L"); + } + function d3_svg_lineLinearClosed(points) { + return d3_svg_lineLinear(points) + "Z"; + } + function d3_svg_lineStep(points) { + var i = 0, n = points.length, p = points[0], path = [ p[0], ",", p[1] ]; + while (++i < n) path.push("H", (p[0] + (p = points[i])[0]) / 2, "V", p[1]); + if (n > 1) path.push("H", p[0]); + return path.join(""); + } + function d3_svg_lineStepBefore(points) { + var i = 0, n = points.length, p = points[0], path = [ p[0], ",", p[1] ]; + while (++i < n) path.push("V", (p = points[i])[1], "H", p[0]); + return path.join(""); + } + function d3_svg_lineStepAfter(points) { + var i = 0, n = points.length, p = points[0], path = [ p[0], ",", p[1] ]; + while (++i < n) path.push("H", (p = points[i])[0], "V", p[1]); + return path.join(""); + } + function d3_svg_lineCardinalOpen(points, tension) { + return points.length < 4 ? d3_svg_lineLinear(points) : points[1] + d3_svg_lineHermite(points.slice(1, points.length - 1), d3_svg_lineCardinalTangents(points, tension)); + } + function d3_svg_lineCardinalClosed(points, tension) { + return points.length < 3 ? d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite((points.push(points[0]), + points), d3_svg_lineCardinalTangents([ points[points.length - 2] ].concat(points, [ points[1] ]), tension)); + } + function d3_svg_lineCardinal(points, tension) { + return points.length < 3 ? 
d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite(points, d3_svg_lineCardinalTangents(points, tension)); + } + function d3_svg_lineHermite(points, tangents) { + if (tangents.length < 1 || points.length != tangents.length && points.length != tangents.length + 2) { + return d3_svg_lineLinear(points); + } + var quad = points.length != tangents.length, path = "", p0 = points[0], p = points[1], t0 = tangents[0], t = t0, pi = 1; + if (quad) { + path += "Q" + (p[0] - t0[0] * 2 / 3) + "," + (p[1] - t0[1] * 2 / 3) + "," + p[0] + "," + p[1]; + p0 = points[1]; + pi = 2; + } + if (tangents.length > 1) { + t = tangents[1]; + p = points[pi]; + pi++; + path += "C" + (p0[0] + t0[0]) + "," + (p0[1] + t0[1]) + "," + (p[0] - t[0]) + "," + (p[1] - t[1]) + "," + p[0] + "," + p[1]; + for (var i = 2; i < tangents.length; i++, pi++) { + p = points[pi]; + t = tangents[i]; + path += "S" + (p[0] - t[0]) + "," + (p[1] - t[1]) + "," + p[0] + "," + p[1]; + } + } + if (quad) { + var lp = points[pi]; + path += "Q" + (p[0] + t[0] * 2 / 3) + "," + (p[1] + t[1] * 2 / 3) + "," + lp[0] + "," + lp[1]; + } + return path; + } + function d3_svg_lineCardinalTangents(points, tension) { + var tangents = [], a = (1 - tension) / 2, p0, p1 = points[0], p2 = points[1], i = 1, n = points.length; + while (++i < n) { + p0 = p1; + p1 = p2; + p2 = points[i]; + tangents.push([ a * (p2[0] - p0[0]), a * (p2[1] - p0[1]) ]); + } + return tangents; + } + function d3_svg_lineBasis(points) { + if (points.length < 3) return d3_svg_lineLinear(points); + var i = 1, n = points.length, pi = points[0], x0 = pi[0], y0 = pi[1], px = [ x0, x0, x0, (pi = points[1])[0] ], py = [ y0, y0, y0, pi[1] ], path = [ x0, ",", y0, "L", d3_svg_lineDot4(d3_svg_lineBasisBezier3, px), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, py) ]; + points.push(points[n - 1]); + while (++i <= n) { + pi = points[i]; + px.shift(); + px.push(pi[0]); + py.shift(); + py.push(pi[1]); + d3_svg_lineBasisBezier(path, px, py); + } + points.pop(); + path.push("L", pi); + return path.join(""); + } + function d3_svg_lineBasisOpen(points) { + if (points.length < 4) return d3_svg_lineLinear(points); + var path = [], i = -1, n = points.length, pi, px = [ 0 ], py = [ 0 ]; + while (++i < 3) { + pi = points[i]; + px.push(pi[0]); + py.push(pi[1]); + } + path.push(d3_svg_lineDot4(d3_svg_lineBasisBezier3, px) + "," + d3_svg_lineDot4(d3_svg_lineBasisBezier3, py)); + --i; + while (++i < n) { + pi = points[i]; + px.shift(); + px.push(pi[0]); + py.shift(); + py.push(pi[1]); + d3_svg_lineBasisBezier(path, px, py); + } + return path.join(""); + } + function d3_svg_lineBasisClosed(points) { + var path, i = -1, n = points.length, m = n + 4, pi, px = [], py = []; + while (++i < 4) { + pi = points[i % n]; + px.push(pi[0]); + py.push(pi[1]); + } + path = [ d3_svg_lineDot4(d3_svg_lineBasisBezier3, px), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, py) ]; + --i; + while (++i < m) { + pi = points[i % n]; + px.shift(); + px.push(pi[0]); + py.shift(); + py.push(pi[1]); + d3_svg_lineBasisBezier(path, px, py); + } + return path.join(""); + } + function d3_svg_lineBundle(points, tension) { + var n = points.length - 1; + if (n) { + var x0 = points[0][0], y0 = points[0][1], dx = points[n][0] - x0, dy = points[n][1] - y0, i = -1, p, t; + while (++i <= n) { + p = points[i]; + t = i / n; + p[0] = tension * p[0] + (1 - tension) * (x0 + t * dx); + p[1] = tension * p[1] + (1 - tension) * (y0 + t * dy); + } + } + return d3_svg_lineBasis(points); + } + function d3_svg_lineDot4(a, b) { + return a[0] * b[0] + a[1] * b[1] + a[2] 
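+ /* [editorial note] d3_svg_lineDot4 is a four-term dot product; paired with the d3_svg_lineBasisBezier weight vectors below (e.g. [0, 1/6, 2/3, 1/6]) it evaluates uniform cubic B-spline basis combinations of a sliding four-point window, emitting one cubic Bezier ("C") segment per step. */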
* b[2] + a[3] * b[3]; + } + var d3_svg_lineBasisBezier1 = [ 0, 2 / 3, 1 / 3, 0 ], d3_svg_lineBasisBezier2 = [ 0, 1 / 3, 2 / 3, 0 ], d3_svg_lineBasisBezier3 = [ 0, 1 / 6, 2 / 3, 1 / 6 ]; + function d3_svg_lineBasisBezier(path, x, y) { + path.push("C", d3_svg_lineDot4(d3_svg_lineBasisBezier1, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier1, y), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier2, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier2, y), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, x), ",", d3_svg_lineDot4(d3_svg_lineBasisBezier3, y)); + } + function d3_svg_lineSlope(p0, p1) { + return (p1[1] - p0[1]) / (p1[0] - p0[0]); + } + function d3_svg_lineFiniteDifferences(points) { + var i = 0, j = points.length - 1, m = [], p0 = points[0], p1 = points[1], d = m[0] = d3_svg_lineSlope(p0, p1); + while (++i < j) { + m[i] = (d + (d = d3_svg_lineSlope(p0 = p1, p1 = points[i + 1]))) / 2; + } + m[i] = d; + return m; + } + function d3_svg_lineMonotoneTangents(points) { + var tangents = [], d, a, b, s, m = d3_svg_lineFiniteDifferences(points), i = -1, j = points.length - 1; + while (++i < j) { + d = d3_svg_lineSlope(points[i], points[i + 1]); + if (abs(d) < ε) { + m[i] = m[i + 1] = 0; + } else { + a = m[i] / d; + b = m[i + 1] / d; + s = a * a + b * b; + if (s > 9) { + s = d * 3 / Math.sqrt(s); + m[i] = s * a; + m[i + 1] = s * b; + } + } + } + i = -1; + while (++i <= j) { + s = (points[Math.min(j, i + 1)][0] - points[Math.max(0, i - 1)][0]) / (6 * (1 + m[i] * m[i])); + tangents.push([ s || 0, m[i] * s || 0 ]); + } + return tangents; + } + function d3_svg_lineMonotone(points) { + return points.length < 3 ? d3_svg_lineLinear(points) : points[0] + d3_svg_lineHermite(points, d3_svg_lineMonotoneTangents(points)); + } + d3.svg.line.radial = function() { + var line = d3_svg_line(d3_svg_lineRadial); + line.radius = line.x, delete line.x; + line.angle = line.y, delete line.y; + return line; + }; + function d3_svg_lineRadial(points) { + var point, i = -1, n = points.length, r, a; + while (++i < n) { + point = points[i]; + r = point[0]; + a = point[1] + d3_svg_arcOffset; + point[0] = r * Math.cos(a); + point[1] = r * Math.sin(a); + } + return points; + } + function d3_svg_area(projection) { + var x0 = d3_geom_pointX, x1 = d3_geom_pointX, y0 = 0, y1 = d3_geom_pointY, defined = d3_true, interpolate = d3_svg_lineLinear, interpolateKey = interpolate.key, interpolateReverse = interpolate, L = "L", tension = .7; + function area(data) { + var segments = [], points0 = [], points1 = [], i = -1, n = data.length, d, fx0 = d3_functor(x0), fy0 = d3_functor(y0), fx1 = x0 === x1 ? function() { + return x; + } : d3_functor(x1), fy1 = y0 === y1 ? function() { + return y; + } : d3_functor(y1), x, y; + function segment() { + segments.push("M", interpolate(projection(points1), tension), L, interpolateReverse(projection(points0.reverse()), tension), "Z"); + } + while (++i < n) { + if (defined.call(this, d = data[i], i)) { + points0.push([ x = +fx0.call(this, d, i), y = +fy0.call(this, d, i) ]); + points1.push([ +fx1.call(this, d, i), +fy1.call(this, d, i) ]); + } else if (points0.length) { + segment(); + points0 = []; + points1 = []; + } + } + if (points0.length) segment(); + return segments.length ? 
segments.join("") : null; + } + area.x = function(_) { + if (!arguments.length) return x1; + x0 = x1 = _; + return area; + }; + area.x0 = function(_) { + if (!arguments.length) return x0; + x0 = _; + return area; + }; + area.x1 = function(_) { + if (!arguments.length) return x1; + x1 = _; + return area; + }; + area.y = function(_) { + if (!arguments.length) return y1; + y0 = y1 = _; + return area; + }; + area.y0 = function(_) { + if (!arguments.length) return y0; + y0 = _; + return area; + }; + area.y1 = function(_) { + if (!arguments.length) return y1; + y1 = _; + return area; + }; + area.defined = function(_) { + if (!arguments.length) return defined; + defined = _; + return area; + }; + area.interpolate = function(_) { + if (!arguments.length) return interpolateKey; + if (typeof _ === "function") interpolateKey = interpolate = _; else interpolateKey = (interpolate = d3_svg_lineInterpolators.get(_) || d3_svg_lineLinear).key; + interpolateReverse = interpolate.reverse || interpolate; + L = interpolate.closed ? "M" : "L"; + return area; + }; + area.tension = function(_) { + if (!arguments.length) return tension; + tension = _; + return area; + }; + return area; + } + d3_svg_lineStepBefore.reverse = d3_svg_lineStepAfter; + d3_svg_lineStepAfter.reverse = d3_svg_lineStepBefore; + d3.svg.area = function() { + return d3_svg_area(d3_identity); + }; + d3.svg.area.radial = function() { + var area = d3_svg_area(d3_svg_lineRadial); + area.radius = area.x, delete area.x; + area.innerRadius = area.x0, delete area.x0; + area.outerRadius = area.x1, delete area.x1; + area.angle = area.y, delete area.y; + area.startAngle = area.y0, delete area.y0; + area.endAngle = area.y1, delete area.y1; + return area; + }; + d3.svg.chord = function() { + var source = d3_source, target = d3_target, radius = d3_svg_chordRadius, startAngle = d3_svg_arcStartAngle, endAngle = d3_svg_arcEndAngle; + function chord(d, i) { + var s = subgroup(this, source, d, i), t = subgroup(this, target, d, i); + return "M" + s.p0 + arc(s.r, s.p1, s.a1 - s.a0) + (equals(s, t) ? 
curve(s.r, s.p1, s.r, s.p0) : curve(s.r, s.p1, t.r, t.p0) + arc(t.r, t.p1, t.a1 - t.a0) + curve(t.r, t.p1, s.r, s.p0)) + "Z"; + } + function subgroup(self, f, d, i) { + var subgroup = f.call(self, d, i), r = radius.call(self, subgroup, i), a0 = startAngle.call(self, subgroup, i) + d3_svg_arcOffset, a1 = endAngle.call(self, subgroup, i) + d3_svg_arcOffset; + return { + r: r, + a0: a0, + a1: a1, + p0: [ r * Math.cos(a0), r * Math.sin(a0) ], + p1: [ r * Math.cos(a1), r * Math.sin(a1) ] + }; + } + function equals(a, b) { + return a.a0 == b.a0 && a.a1 == b.a1; + } + function arc(r, p, a) { + return "A" + r + "," + r + " 0 " + +(a > π) + ",1 " + p; + } + function curve(r0, p0, r1, p1) { + return "Q 0,0 " + p1; + } + chord.radius = function(v) { + if (!arguments.length) return radius; + radius = d3_functor(v); + return chord; + }; + chord.source = function(v) { + if (!arguments.length) return source; + source = d3_functor(v); + return chord; + }; + chord.target = function(v) { + if (!arguments.length) return target; + target = d3_functor(v); + return chord; + }; + chord.startAngle = function(v) { + if (!arguments.length) return startAngle; + startAngle = d3_functor(v); + return chord; + }; + chord.endAngle = function(v) { + if (!arguments.length) return endAngle; + endAngle = d3_functor(v); + return chord; + }; + return chord; + }; + function d3_svg_chordRadius(d) { + return d.radius; + } + d3.svg.diagonal = function() { + var source = d3_source, target = d3_target, projection = d3_svg_diagonalProjection; + function diagonal(d, i) { + var p0 = source.call(this, d, i), p3 = target.call(this, d, i), m = (p0.y + p3.y) / 2, p = [ p0, { + x: p0.x, + y: m + }, { + x: p3.x, + y: m + }, p3 ]; + p = p.map(projection); + return "M" + p[0] + "C" + p[1] + " " + p[2] + " " + p[3]; + } + diagonal.source = function(x) { + if (!arguments.length) return source; + source = d3_functor(x); + return diagonal; + }; + diagonal.target = function(x) { + if (!arguments.length) return target; + target = d3_functor(x); + return diagonal; + }; + diagonal.projection = function(x) { + if (!arguments.length) return projection; + projection = x; + return diagonal; + }; + return diagonal; + }; + function d3_svg_diagonalProjection(d) { + return [ d.x, d.y ]; + } + d3.svg.diagonal.radial = function() { + var diagonal = d3.svg.diagonal(), projection = d3_svg_diagonalProjection, projection_ = diagonal.projection; + diagonal.projection = function(x) { + return arguments.length ? 
projection_(d3_svg_diagonalRadialProjection(projection = x)) : projection; + }; + return diagonal; + }; + function d3_svg_diagonalRadialProjection(projection) { + return function() { + var d = projection.apply(this, arguments), r = d[0], a = d[1] + d3_svg_arcOffset; + return [ r * Math.cos(a), r * Math.sin(a) ]; + }; + } + d3.svg.symbol = function() { + var type = d3_svg_symbolType, size = d3_svg_symbolSize; + function symbol(d, i) { + return (d3_svg_symbols.get(type.call(this, d, i)) || d3_svg_symbolCircle)(size.call(this, d, i)); + } + symbol.type = function(x) { + if (!arguments.length) return type; + type = d3_functor(x); + return symbol; + }; + symbol.size = function(x) { + if (!arguments.length) return size; + size = d3_functor(x); + return symbol; + }; + return symbol; + }; + function d3_svg_symbolSize() { + return 64; + } + function d3_svg_symbolType() { + return "circle"; + } + function d3_svg_symbolCircle(size) { + var r = Math.sqrt(size / π); + return "M0," + r + "A" + r + "," + r + " 0 1,1 0," + -r + "A" + r + "," + r + " 0 1,1 0," + r + "Z"; + } + var d3_svg_symbols = d3.map({ + circle: d3_svg_symbolCircle, + cross: function(size) { + var r = Math.sqrt(size / 5) / 2; + return "M" + -3 * r + "," + -r + "H" + -r + "V" + -3 * r + "H" + r + "V" + -r + "H" + 3 * r + "V" + r + "H" + r + "V" + 3 * r + "H" + -r + "V" + r + "H" + -3 * r + "Z"; + }, + diamond: function(size) { + var ry = Math.sqrt(size / (2 * d3_svg_symbolTan30)), rx = ry * d3_svg_symbolTan30; + return "M0," + -ry + "L" + rx + ",0" + " 0," + ry + " " + -rx + ",0" + "Z"; + }, + square: function(size) { + var r = Math.sqrt(size) / 2; + return "M" + -r + "," + -r + "L" + r + "," + -r + " " + r + "," + r + " " + -r + "," + r + "Z"; + }, + "triangle-down": function(size) { + var rx = Math.sqrt(size / d3_svg_symbolSqrt3), ry = rx * d3_svg_symbolSqrt3 / 2; + return "M0," + ry + "L" + rx + "," + -ry + " " + -rx + "," + -ry + "Z"; + }, + "triangle-up": function(size) { + var rx = Math.sqrt(size / d3_svg_symbolSqrt3), ry = rx * d3_svg_symbolSqrt3 / 2; + return "M0," + -ry + "L" + rx + "," + ry + " " + -rx + "," + ry + "Z"; + } + }); + d3.svg.symbolTypes = d3_svg_symbols.keys(); + var d3_svg_symbolSqrt3 = Math.sqrt(3), d3_svg_symbolTan30 = Math.tan(30 * d3_radians); + function d3_transition(groups, id) { + d3_subclass(groups, d3_transitionPrototype); + groups.id = id; + return groups; + } + var d3_transitionPrototype = [], d3_transitionId = 0, d3_transitionInheritId, d3_transitionInherit; + d3_transitionPrototype.call = d3_selectionPrototype.call; + d3_transitionPrototype.empty = d3_selectionPrototype.empty; + d3_transitionPrototype.node = d3_selectionPrototype.node; + d3_transitionPrototype.size = d3_selectionPrototype.size; + d3.transition = function(selection) { + return arguments.length ? d3_transitionInheritId ? 
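+ /* [editorial note] d3_transitionInheritId is how nesting propagates: transition.each below stashes the active id globally, so d3.transition(selection) yields a derived transition instead of the bare selection; per-node state lives in node.__transition__, keyed by transition id with a count/active lock so later transitions preempt earlier ones. */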
selection.transition() : selection : d3_selectionRoot.transition(); + }; + d3.transition.prototype = d3_transitionPrototype; + d3_transitionPrototype.select = function(selector) { + var id = this.id, subgroups = [], subgroup, subnode, node; + selector = d3_selection_selector(selector); + for (var j = -1, m = this.length; ++j < m; ) { + subgroups.push(subgroup = []); + for (var group = this[j], i = -1, n = group.length; ++i < n; ) { + if ((node = group[i]) && (subnode = selector.call(node, node.__data__, i, j))) { + if ("__data__" in node) subnode.__data__ = node.__data__; + d3_transitionNode(subnode, i, id, node.__transition__[id]); + subgroup.push(subnode); + } else { + subgroup.push(null); + } + } + } + return d3_transition(subgroups, id); + }; + d3_transitionPrototype.selectAll = function(selector) { + var id = this.id, subgroups = [], subgroup, subnodes, node, subnode, transition; + selector = d3_selection_selectorAll(selector); + for (var j = -1, m = this.length; ++j < m; ) { + for (var group = this[j], i = -1, n = group.length; ++i < n; ) { + if (node = group[i]) { + transition = node.__transition__[id]; + subnodes = selector.call(node, node.__data__, i, j); + subgroups.push(subgroup = []); + for (var k = -1, o = subnodes.length; ++k < o; ) { + if (subnode = subnodes[k]) d3_transitionNode(subnode, k, id, transition); + subgroup.push(subnode); + } + } + } + } + return d3_transition(subgroups, id); + }; + d3_transitionPrototype.filter = function(filter) { + var subgroups = [], subgroup, group, node; + if (typeof filter !== "function") filter = d3_selection_filter(filter); + for (var j = 0, m = this.length; j < m; j++) { + subgroups.push(subgroup = []); + for (var group = this[j], i = 0, n = group.length; i < n; i++) { + if ((node = group[i]) && filter.call(node, node.__data__, i, j)) { + subgroup.push(node); + } + } + } + return d3_transition(subgroups, this.id); + }; + d3_transitionPrototype.tween = function(name, tween) { + var id = this.id; + if (arguments.length < 2) return this.node().__transition__[id].tween.get(name); + return d3_selection_each(this, tween == null ? function(node) { + node.__transition__[id].tween.remove(name); + } : function(node) { + node.__transition__[id].tween.set(name, tween); + }); + }; + function d3_transition_tween(groups, name, value, tween) { + var id = groups.id; + return d3_selection_each(groups, typeof value === "function" ? function(node, i, j) { + node.__transition__[id].tween.set(name, tween(value.call(node, node.__data__, i, j))); + } : (value = tween(value), function(node) { + node.__transition__[id].tween.set(name, value); + })); + } + d3_transitionPrototype.attr = function(nameNS, value) { + if (arguments.length < 2) { + for (value in nameNS) this.attr(value, nameNS[value]); + return this; + } + var interpolate = nameNS == "transform" ? d3_interpolateTransform : d3_interpolate, name = d3.ns.qualify(nameNS); + function attrNull() { + this.removeAttribute(name); + } + function attrNullNS() { + this.removeAttributeNS(name.space, name.local); + } + function attrTween(b) { + return b == null ? attrNull : (b += "", function() { + var a = this.getAttribute(name), i; + return a !== b && (i = interpolate(a, b), function(t) { + this.setAttribute(name, i(t)); + }); + }); + } + function attrTweenNS(b) { + return b == null ? 
attrNullNS : (b += "", function() { + var a = this.getAttributeNS(name.space, name.local), i; + return a !== b && (i = interpolate(a, b), function(t) { + this.setAttributeNS(name.space, name.local, i(t)); + }); + }); + } + return d3_transition_tween(this, "attr." + nameNS, value, name.local ? attrTweenNS : attrTween); + }; + d3_transitionPrototype.attrTween = function(nameNS, tween) { + var name = d3.ns.qualify(nameNS); + function attrTween(d, i) { + var f = tween.call(this, d, i, this.getAttribute(name)); + return f && function(t) { + this.setAttribute(name, f(t)); + }; + } + function attrTweenNS(d, i) { + var f = tween.call(this, d, i, this.getAttributeNS(name.space, name.local)); + return f && function(t) { + this.setAttributeNS(name.space, name.local, f(t)); + }; + } + return this.tween("attr." + nameNS, name.local ? attrTweenNS : attrTween); + }; + d3_transitionPrototype.style = function(name, value, priority) { + var n = arguments.length; + if (n < 3) { + if (typeof name !== "string") { + if (n < 2) value = ""; + for (priority in name) this.style(priority, name[priority], value); + return this; + } + priority = ""; + } + function styleNull() { + this.style.removeProperty(name); + } + function styleString(b) { + return b == null ? styleNull : (b += "", function() { + var a = d3_window.getComputedStyle(this, null).getPropertyValue(name), i; + return a !== b && (i = d3_interpolate(a, b), function(t) { + this.style.setProperty(name, i(t), priority); + }); + }); + } + return d3_transition_tween(this, "style." + name, value, styleString); + }; + d3_transitionPrototype.styleTween = function(name, tween, priority) { + if (arguments.length < 3) priority = ""; + function styleTween(d, i) { + var f = tween.call(this, d, i, d3_window.getComputedStyle(this, null).getPropertyValue(name)); + return f && function(t) { + this.style.setProperty(name, f(t), priority); + }; + } + return this.tween("style." + name, styleTween); + }; + d3_transitionPrototype.text = function(value) { + return d3_transition_tween(this, "text", value, d3_transition_text); + }; + function d3_transition_text(b) { + if (b == null) b = ""; + return function() { + this.textContent = b; + }; + } + d3_transitionPrototype.remove = function() { + return this.each("end.transition", function() { + var p; + if (this.__transition__.count < 2 && (p = this.parentNode)) p.removeChild(this); + }); + }; + d3_transitionPrototype.ease = function(value) { + var id = this.id; + if (arguments.length < 1) return this.node().__transition__[id].ease; + if (typeof value !== "function") value = d3.ease.apply(d3, arguments); + return d3_selection_each(this, function(node) { + node.__transition__[id].ease = value; + }); + }; + d3_transitionPrototype.delay = function(value) { + var id = this.id; + if (arguments.length < 1) return this.node().__transition__[id].delay; + return d3_selection_each(this, typeof value === "function" ? function(node, i, j) { + node.__transition__[id].delay = +value.call(node, node.__data__, i, j); + } : (value = +value, function(node) { + node.__transition__[id].delay = value; + })); + }; + d3_transitionPrototype.duration = function(value) { + var id = this.id; + if (arguments.length < 1) return this.node().__transition__[id].duration; + return d3_selection_each(this, typeof value === "function" ? 
function(node, i, j) { + node.__transition__[id].duration = Math.max(1, value.call(node, node.__data__, i, j)); + } : (value = Math.max(1, value), function(node) { + node.__transition__[id].duration = value; + })); + }; + d3_transitionPrototype.each = function(type, listener) { + var id = this.id; + if (arguments.length < 2) { + var inherit = d3_transitionInherit, inheritId = d3_transitionInheritId; + d3_transitionInheritId = id; + d3_selection_each(this, function(node, i, j) { + d3_transitionInherit = node.__transition__[id]; + type.call(node, node.__data__, i, j); + }); + d3_transitionInherit = inherit; + d3_transitionInheritId = inheritId; + } else { + d3_selection_each(this, function(node) { + var transition = node.__transition__[id]; + (transition.event || (transition.event = d3.dispatch("start", "end"))).on(type, listener); + }); + } + return this; + }; + d3_transitionPrototype.transition = function() { + var id0 = this.id, id1 = ++d3_transitionId, subgroups = [], subgroup, group, node, transition; + for (var j = 0, m = this.length; j < m; j++) { + subgroups.push(subgroup = []); + for (var group = this[j], i = 0, n = group.length; i < n; i++) { + if (node = group[i]) { + transition = Object.create(node.__transition__[id0]); + transition.delay += transition.duration; + d3_transitionNode(node, i, id1, transition); + } + subgroup.push(node); + } + } + return d3_transition(subgroups, id1); + }; + function d3_transitionNode(node, i, id, inherit) { + var lock = node.__transition__ || (node.__transition__ = { + active: 0, + count: 0 + }), transition = lock[id]; + if (!transition) { + var time = inherit.time; + transition = lock[id] = { + tween: new d3_Map(), + time: time, + ease: inherit.ease, + delay: inherit.delay, + duration: inherit.duration + }; + ++lock.count; + d3.timer(function(elapsed) { + var d = node.__data__, ease = transition.ease, delay = transition.delay, duration = transition.duration, timer = d3_timer_active, tweened = []; + timer.t = delay + time; + if (delay <= elapsed) return start(elapsed - delay); + timer.c = start; + function start(elapsed) { + if (lock.active > id) return stop(); + lock.active = id; + transition.event && transition.event.start.call(node, d, i); + transition.tween.forEach(function(key, value) { + if (value = value.call(node, d, i)) { + tweened.push(value); + } + }); + d3.timer(function() { + timer.c = tick(elapsed || 1) ? d3_true : tick; + return 1; + }, 0, time); + } + function tick(elapsed) { + if (lock.active !== id) return stop(); + var t = elapsed / duration, e = ease(t), n = tweened.length; + while (n > 0) { + tweened[--n].call(node, e); + } + if (t >= 1) { + transition.event && transition.event.end.call(node, d, i); + return stop(); + } + } + function stop() { + if (--lock.count) delete lock[id]; else delete node.__transition__; + return 1; + } + }, 0, time); + } + } + d3.svg.axis = function() { + var scale = d3.scale.linear(), orient = d3_svg_axisDefaultOrient, innerTickSize = 6, outerTickSize = 6, tickPadding = 3, tickArguments_ = [ 10 ], tickValues = null, tickFormat_; + function axis(g) { + g.each(function() { + var g = d3.select(this); + var scale0 = this.__chart__ || scale, scale1 = this.__chart__ = scale.copy(); + var ticks = tickValues == null ? scale1.ticks ? scale1.ticks.apply(scale1, tickArguments_) : scale1.domain() : tickValues, tickFormat = tickFormat_ == null ? scale1.tickFormat ? 
scale1.tickFormat.apply(scale1, tickArguments_) : d3_identity : tickFormat_, tick = g.selectAll(".tick").data(ticks, scale1), tickEnter = tick.enter().insert("g", ".domain").attr("class", "tick").style("opacity", ε), tickExit = d3.transition(tick.exit()).style("opacity", ε).remove(), tickUpdate = d3.transition(tick.order()).style("opacity", 1), tickTransform; + var range = d3_scaleRange(scale1), path = g.selectAll(".domain").data([ 0 ]), pathUpdate = (path.enter().append("path").attr("class", "domain"), + d3.transition(path)); + tickEnter.append("line"); + tickEnter.append("text"); + var lineEnter = tickEnter.select("line"), lineUpdate = tickUpdate.select("line"), text = tick.select("text").text(tickFormat), textEnter = tickEnter.select("text"), textUpdate = tickUpdate.select("text"); + switch (orient) { + case "bottom": + { + tickTransform = d3_svg_axisX; + lineEnter.attr("y2", innerTickSize); + textEnter.attr("y", Math.max(innerTickSize, 0) + tickPadding); + lineUpdate.attr("x2", 0).attr("y2", innerTickSize); + textUpdate.attr("x", 0).attr("y", Math.max(innerTickSize, 0) + tickPadding); + text.attr("dy", ".71em").style("text-anchor", "middle"); + pathUpdate.attr("d", "M" + range[0] + "," + outerTickSize + "V0H" + range[1] + "V" + outerTickSize); + break; + } + + case "top": + { + tickTransform = d3_svg_axisX; + lineEnter.attr("y2", -innerTickSize); + textEnter.attr("y", -(Math.max(innerTickSize, 0) + tickPadding)); + lineUpdate.attr("x2", 0).attr("y2", -innerTickSize); + textUpdate.attr("x", 0).attr("y", -(Math.max(innerTickSize, 0) + tickPadding)); + text.attr("dy", "0em").style("text-anchor", "middle"); + pathUpdate.attr("d", "M" + range[0] + "," + -outerTickSize + "V0H" + range[1] + "V" + -outerTickSize); + break; + } + + case "left": + { + tickTransform = d3_svg_axisY; + lineEnter.attr("x2", -innerTickSize); + textEnter.attr("x", -(Math.max(innerTickSize, 0) + tickPadding)); + lineUpdate.attr("x2", -innerTickSize).attr("y2", 0); + textUpdate.attr("x", -(Math.max(innerTickSize, 0) + tickPadding)).attr("y", 0); + text.attr("dy", ".32em").style("text-anchor", "end"); + pathUpdate.attr("d", "M" + -outerTickSize + "," + range[0] + "H0V" + range[1] + "H" + -outerTickSize); + break; + } + + case "right": + { + tickTransform = d3_svg_axisY; + lineEnter.attr("x2", innerTickSize); + textEnter.attr("x", Math.max(innerTickSize, 0) + tickPadding); + lineUpdate.attr("x2", innerTickSize).attr("y2", 0); + textUpdate.attr("x", Math.max(innerTickSize, 0) + tickPadding).attr("y", 0); + text.attr("dy", ".32em").style("text-anchor", "start"); + pathUpdate.attr("d", "M" + outerTickSize + "," + range[0] + "H0V" + range[1] + "H" + outerTickSize); + break; + } + } + if (scale1.rangeBand) { + var x = scale1, dx = x.rangeBand() / 2; + scale0 = scale1 = function(d) { + return x(d) + dx; + }; + } else if (scale0.rangeBand) { + scale0 = scale1; + } else { + tickExit.call(tickTransform, scale1); + } + tickEnter.call(tickTransform, scale0); + tickUpdate.call(tickTransform, scale1); + }); + } + axis.scale = function(x) { + if (!arguments.length) return scale; + scale = x; + return axis; + }; + axis.orient = function(x) { + if (!arguments.length) return orient; + orient = x in d3_svg_axisOrients ? 
x + "" : d3_svg_axisDefaultOrient; + return axis; + }; + axis.ticks = function() { + if (!arguments.length) return tickArguments_; + tickArguments_ = arguments; + return axis; + }; + axis.tickValues = function(x) { + if (!arguments.length) return tickValues; + tickValues = x; + return axis; + }; + axis.tickFormat = function(x) { + if (!arguments.length) return tickFormat_; + tickFormat_ = x; + return axis; + }; + axis.tickSize = function(x) { + var n = arguments.length; + if (!n) return innerTickSize; + innerTickSize = +x; + outerTickSize = +arguments[n - 1]; + return axis; + }; + axis.innerTickSize = function(x) { + if (!arguments.length) return innerTickSize; + innerTickSize = +x; + return axis; + }; + axis.outerTickSize = function(x) { + if (!arguments.length) return outerTickSize; + outerTickSize = +x; + return axis; + }; + axis.tickPadding = function(x) { + if (!arguments.length) return tickPadding; + tickPadding = +x; + return axis; + }; + axis.tickSubdivide = function() { + return arguments.length && axis; + }; + return axis; + }; + var d3_svg_axisDefaultOrient = "bottom", d3_svg_axisOrients = { + top: 1, + right: 1, + bottom: 1, + left: 1 + }; + function d3_svg_axisX(selection, x) { + selection.attr("transform", function(d) { + return "translate(" + x(d) + ",0)"; + }); + } + function d3_svg_axisY(selection, y) { + selection.attr("transform", function(d) { + return "translate(0," + y(d) + ")"; + }); + } + d3.svg.brush = function() { + var event = d3_eventDispatch(brush, "brushstart", "brush", "brushend"), x = null, y = null, xExtent = [ 0, 0 ], yExtent = [ 0, 0 ], xExtentDomain, yExtentDomain, xClamp = true, yClamp = true, resizes = d3_svg_brushResizes[0]; + function brush(g) { + g.each(function() { + var g = d3.select(this).style("pointer-events", "all").style("-webkit-tap-highlight-color", "rgba(0,0,0,0)").on("mousedown.brush", brushstart).on("touchstart.brush", brushstart); + var background = g.selectAll(".background").data([ 0 ]); + background.enter().append("rect").attr("class", "background").style("visibility", "hidden").style("cursor", "crosshair"); + g.selectAll(".extent").data([ 0 ]).enter().append("rect").attr("class", "extent").style("cursor", "move"); + var resize = g.selectAll(".resize").data(resizes, d3_identity); + resize.exit().remove(); + resize.enter().append("g").attr("class", function(d) { + return "resize " + d; + }).style("cursor", function(d) { + return d3_svg_brushCursor[d]; + }).append("rect").attr("x", function(d) { + return /[ew]$/.test(d) ? -3 : null; + }).attr("y", function(d) { + return /^[ns]/.test(d) ? -3 : null; + }).attr("width", 6).attr("height", 6).style("visibility", "hidden"); + resize.style("display", brush.empty() ? 
"none" : null); + var gUpdate = d3.transition(g), backgroundUpdate = d3.transition(background), range; + if (x) { + range = d3_scaleRange(x); + backgroundUpdate.attr("x", range[0]).attr("width", range[1] - range[0]); + redrawX(gUpdate); + } + if (y) { + range = d3_scaleRange(y); + backgroundUpdate.attr("y", range[0]).attr("height", range[1] - range[0]); + redrawY(gUpdate); + } + redraw(gUpdate); + }); + } + brush.event = function(g) { + g.each(function() { + var event_ = event.of(this, arguments), extent1 = { + x: xExtent, + y: yExtent, + i: xExtentDomain, + j: yExtentDomain + }, extent0 = this.__chart__ || extent1; + this.__chart__ = extent1; + if (d3_transitionInheritId) { + d3.select(this).transition().each("start.brush", function() { + xExtentDomain = extent0.i; + yExtentDomain = extent0.j; + xExtent = extent0.x; + yExtent = extent0.y; + event_({ + type: "brushstart" + }); + }).tween("brush:brush", function() { + var xi = d3_interpolateArray(xExtent, extent1.x), yi = d3_interpolateArray(yExtent, extent1.y); + xExtentDomain = yExtentDomain = null; + return function(t) { + xExtent = extent1.x = xi(t); + yExtent = extent1.y = yi(t); + event_({ + type: "brush", + mode: "resize" + }); + }; + }).each("end.brush", function() { + xExtentDomain = extent1.i; + yExtentDomain = extent1.j; + event_({ + type: "brush", + mode: "resize" + }); + event_({ + type: "brushend" + }); + }); + } else { + event_({ + type: "brushstart" + }); + event_({ + type: "brush", + mode: "resize" + }); + event_({ + type: "brushend" + }); + } + }); + }; + function redraw(g) { + g.selectAll(".resize").attr("transform", function(d) { + return "translate(" + xExtent[+/e$/.test(d)] + "," + yExtent[+/^s/.test(d)] + ")"; + }); + } + function redrawX(g) { + g.select(".extent").attr("x", xExtent[0]); + g.selectAll(".extent,.n>rect,.s>rect").attr("width", xExtent[1] - xExtent[0]); + } + function redrawY(g) { + g.select(".extent").attr("y", yExtent[0]); + g.selectAll(".extent,.e>rect,.w>rect").attr("height", yExtent[1] - yExtent[0]); + } + function brushstart() { + var target = this, eventTarget = d3.select(d3.event.target), event_ = event.of(target, arguments), g = d3.select(target), resizing = eventTarget.datum(), resizingX = !/^(n|s)$/.test(resizing) && x, resizingY = !/^(e|w)$/.test(resizing) && y, dragging = eventTarget.classed("extent"), dragRestore = d3_event_dragSuppress(), center, origin = d3.mouse(target), offset; + var w = d3.select(d3_window).on("keydown.brush", keydown).on("keyup.brush", keyup); + if (d3.event.changedTouches) { + w.on("touchmove.brush", brushmove).on("touchend.brush", brushend); + } else { + w.on("mousemove.brush", brushmove).on("mouseup.brush", brushend); + } + g.interrupt().selectAll("*").interrupt(); + if (dragging) { + origin[0] = xExtent[0] - origin[0]; + origin[1] = yExtent[0] - origin[1]; + } else if (resizing) { + var ex = +/w$/.test(resizing), ey = +/^n/.test(resizing); + offset = [ xExtent[1 - ex] - origin[0], yExtent[1 - ey] - origin[1] ]; + origin[0] = xExtent[ex]; + origin[1] = yExtent[ey]; + } else if (d3.event.altKey) center = origin.slice(); + g.style("pointer-events", "none").selectAll(".resize").style("display", null); + d3.select("body").style("cursor", eventTarget.style("cursor")); + event_({ + type: "brushstart" + }); + brushmove(); + function keydown() { + if (d3.event.keyCode == 32) { + if (!dragging) { + center = null; + origin[0] -= xExtent[1]; + origin[1] -= yExtent[1]; + dragging = 2; + } + d3_eventPreventDefault(); + } + } + function keyup() { + if (d3.event.keyCode == 32 && 
dragging == 2) { + origin[0] += xExtent[1]; + origin[1] += yExtent[1]; + dragging = 0; + d3_eventPreventDefault(); + } + } + function brushmove() { + var point = d3.mouse(target), moved = false; + if (offset) { + point[0] += offset[0]; + point[1] += offset[1]; + } + if (!dragging) { + if (d3.event.altKey) { + if (!center) center = [ (xExtent[0] + xExtent[1]) / 2, (yExtent[0] + yExtent[1]) / 2 ]; + origin[0] = xExtent[+(point[0] < center[0])]; + origin[1] = yExtent[+(point[1] < center[1])]; + } else center = null; + } + if (resizingX && move1(point, x, 0)) { + redrawX(g); + moved = true; + } + if (resizingY && move1(point, y, 1)) { + redrawY(g); + moved = true; + } + if (moved) { + redraw(g); + event_({ + type: "brush", + mode: dragging ? "move" : "resize" + }); + } + } + function move1(point, scale, i) { + var range = d3_scaleRange(scale), r0 = range[0], r1 = range[1], position = origin[i], extent = i ? yExtent : xExtent, size = extent[1] - extent[0], min, max; + if (dragging) { + r0 -= position; + r1 -= size + position; + } + min = (i ? yClamp : xClamp) ? Math.max(r0, Math.min(r1, point[i])) : point[i]; + if (dragging) { + max = (min += position) + size; + } else { + if (center) position = Math.max(r0, Math.min(r1, 2 * center[i] - min)); + if (position < min) { + max = min; + min = position; + } else { + max = position; + } + } + if (extent[0] != min || extent[1] != max) { + if (i) yExtentDomain = null; else xExtentDomain = null; + extent[0] = min; + extent[1] = max; + return true; + } + } + function brushend() { + brushmove(); + g.style("pointer-events", "all").selectAll(".resize").style("display", brush.empty() ? "none" : null); + d3.select("body").style("cursor", null); + w.on("mousemove.brush", null).on("mouseup.brush", null).on("touchmove.brush", null).on("touchend.brush", null).on("keydown.brush", null).on("keyup.brush", null); + dragRestore(); + event_({ + type: "brushend" + }); + } + } + brush.x = function(z) { + if (!arguments.length) return x; + x = z; + resizes = d3_svg_brushResizes[!x << 1 | !y]; + return brush; + }; + brush.y = function(z) { + if (!arguments.length) return y; + y = z; + resizes = d3_svg_brushResizes[!x << 1 | !y]; + return brush; + }; + brush.clamp = function(z) { + if (!arguments.length) return x && y ? [ xClamp, yClamp ] : x ? xClamp : y ? yClamp : null; + if (x && y) xClamp = !!z[0], yClamp = !!z[1]; else if (x) xClamp = !!z; else if (y) yClamp = !!z; + return brush; + }; + brush.extent = function(z) { + var x0, x1, y0, y1, t; + if (!arguments.length) { + if (x) { + if (xExtentDomain) { + x0 = xExtentDomain[0], x1 = xExtentDomain[1]; + } else { + x0 = xExtent[0], x1 = xExtent[1]; + if (x.invert) x0 = x.invert(x0), x1 = x.invert(x1); + if (x1 < x0) t = x0, x0 = x1, x1 = t; + } + } + if (y) { + if (yExtentDomain) { + y0 = yExtentDomain[0], y1 = yExtentDomain[1]; + } else { + y0 = yExtent[0], y1 = yExtent[1]; + if (y.invert) y0 = y.invert(y0), y1 = y.invert(y1); + if (y1 < y0) t = y0, y0 = y1, y1 = t; + } + } + return x && y ? [ [ x0, y0 ], [ x1, y1 ] ] : x ? 
[ x0, x1 ] : y && [ y0, y1 ]; + } + if (x) { + x0 = z[0], x1 = z[1]; + if (y) x0 = x0[0], x1 = x1[0]; + xExtentDomain = [ x0, x1 ]; + if (x.invert) x0 = x(x0), x1 = x(x1); + if (x1 < x0) t = x0, x0 = x1, x1 = t; + if (x0 != xExtent[0] || x1 != xExtent[1]) xExtent = [ x0, x1 ]; + } + if (y) { + y0 = z[0], y1 = z[1]; + if (x) y0 = y0[1], y1 = y1[1]; + yExtentDomain = [ y0, y1 ]; + if (y.invert) y0 = y(y0), y1 = y(y1); + if (y1 < y0) t = y0, y0 = y1, y1 = t; + if (y0 != yExtent[0] || y1 != yExtent[1]) yExtent = [ y0, y1 ]; + } + return brush; + }; + brush.clear = function() { + if (!brush.empty()) { + xExtent = [ 0, 0 ], yExtent = [ 0, 0 ]; + xExtentDomain = yExtentDomain = null; + } + return brush; + }; + brush.empty = function() { + return !!x && xExtent[0] == xExtent[1] || !!y && yExtent[0] == yExtent[1]; + }; + return d3.rebind(brush, event, "on"); + }; + var d3_svg_brushCursor = { + n: "ns-resize", + e: "ew-resize", + s: "ns-resize", + w: "ew-resize", + nw: "nwse-resize", + ne: "nesw-resize", + se: "nwse-resize", + sw: "nesw-resize" + }; + var d3_svg_brushResizes = [ [ "n", "e", "s", "w", "nw", "ne", "se", "sw" ], [ "e", "w" ], [ "n", "s" ], [] ]; + var d3_time_format = d3_time.format = d3_locale_enUS.timeFormat; + var d3_time_formatUtc = d3_time_format.utc; + var d3_time_formatIso = d3_time_formatUtc("%Y-%m-%dT%H:%M:%S.%LZ"); + d3_time_format.iso = Date.prototype.toISOString && +new Date("2000-01-01T00:00:00.000Z") ? d3_time_formatIsoNative : d3_time_formatIso; + function d3_time_formatIsoNative(date) { + return date.toISOString(); + } + d3_time_formatIsoNative.parse = function(string) { + var date = new Date(string); + return isNaN(date) ? null : date; + }; + d3_time_formatIsoNative.toString = d3_time_formatIso.toString; + d3_time.second = d3_time_interval(function(date) { + return new d3_date(Math.floor(date / 1e3) * 1e3); + }, function(date, offset) { + date.setTime(date.getTime() + Math.floor(offset) * 1e3); + }, function(date) { + return date.getSeconds(); + }); + d3_time.seconds = d3_time.second.range; + d3_time.seconds.utc = d3_time.second.utc.range; + d3_time.minute = d3_time_interval(function(date) { + return new d3_date(Math.floor(date / 6e4) * 6e4); + }, function(date, offset) { + date.setTime(date.getTime() + Math.floor(offset) * 6e4); + }, function(date) { + return date.getMinutes(); + }); + d3_time.minutes = d3_time.minute.range; + d3_time.minutes.utc = d3_time.minute.utc.range; + d3_time.hour = d3_time_interval(function(date) { + var timezone = date.getTimezoneOffset() / 60; + return new d3_date((Math.floor(date / 36e5 - timezone) + timezone) * 36e5); + }, function(date, offset) { + date.setTime(date.getTime() + Math.floor(offset) * 36e5); + }, function(date) { + return date.getHours(); + }); + d3_time.hours = d3_time.hour.range; + d3_time.hours.utc = d3_time.hour.utc.range; + d3_time.month = d3_time_interval(function(date) { + date = d3_time.day(date); + date.setDate(1); + return date; + }, function(date, offset) { + date.setMonth(date.getMonth() + offset); + }, function(date) { + return date.getMonth(); + }); + d3_time.months = d3_time.month.range; + d3_time.months.utc = d3_time.month.utc.range; + function d3_time_scale(linear, methods, format) { + function scale(x) { + return linear(x); + } + scale.invert = function(x) { + return d3_time_scaleDate(linear.invert(x)); + }; + scale.domain = function(x) { + if (!arguments.length) return linear.domain().map(d3_time_scaleDate); + linear.domain(x); + return scale; + }; + function tickMethod(extent, count) { + var span = 
extent[1] - extent[0], target = span / count, i = d3.bisect(d3_time_scaleSteps, target); + return i == d3_time_scaleSteps.length ? [ methods.year, d3_scale_linearTickRange(extent.map(function(d) { + return d / 31536e6; + }), count)[2] ] : !i ? [ d3_time_scaleMilliseconds, d3_scale_linearTickRange(extent, count)[2] ] : methods[target / d3_time_scaleSteps[i - 1] < d3_time_scaleSteps[i] / target ? i - 1 : i]; + } + scale.nice = function(interval, skip) { + var domain = scale.domain(), extent = d3_scaleExtent(domain), method = interval == null ? tickMethod(extent, 10) : typeof interval === "number" && tickMethod(extent, interval); + if (method) interval = method[0], skip = method[1]; + function skipped(date) { + return !isNaN(date) && !interval.range(date, d3_time_scaleDate(+date + 1), skip).length; + } + return scale.domain(d3_scale_nice(domain, skip > 1 ? { + floor: function(date) { + while (skipped(date = interval.floor(date))) date = d3_time_scaleDate(date - 1); + return date; + }, + ceil: function(date) { + while (skipped(date = interval.ceil(date))) date = d3_time_scaleDate(+date + 1); + return date; + } + } : interval)); + }; + scale.ticks = function(interval, skip) { + var extent = d3_scaleExtent(scale.domain()), method = interval == null ? tickMethod(extent, 10) : typeof interval === "number" ? tickMethod(extent, interval) : !interval.range && [ { + range: interval + }, skip ]; + if (method) interval = method[0], skip = method[1]; + return interval.range(extent[0], d3_time_scaleDate(+extent[1] + 1), skip < 1 ? 1 : skip); + }; + scale.tickFormat = function() { + return format; + }; + scale.copy = function() { + return d3_time_scale(linear.copy(), methods, format); + }; + return d3_scale_linearRebind(scale, linear); + } + function d3_time_scaleDate(t) { + return new Date(t); + } + var d3_time_scaleSteps = [ 1e3, 5e3, 15e3, 3e4, 6e4, 3e5, 9e5, 18e5, 36e5, 108e5, 216e5, 432e5, 864e5, 1728e5, 6048e5, 2592e6, 7776e6, 31536e6 ]; + var d3_time_scaleLocalMethods = [ [ d3_time.second, 1 ], [ d3_time.second, 5 ], [ d3_time.second, 15 ], [ d3_time.second, 30 ], [ d3_time.minute, 1 ], [ d3_time.minute, 5 ], [ d3_time.minute, 15 ], [ d3_time.minute, 30 ], [ d3_time.hour, 1 ], [ d3_time.hour, 3 ], [ d3_time.hour, 6 ], [ d3_time.hour, 12 ], [ d3_time.day, 1 ], [ d3_time.day, 2 ], [ d3_time.week, 1 ], [ d3_time.month, 1 ], [ d3_time.month, 3 ], [ d3_time.year, 1 ] ]; + var d3_time_scaleLocalFormat = d3_time_format.multi([ [ ".%L", function(d) { + return d.getMilliseconds(); + } ], [ ":%S", function(d) { + return d.getSeconds(); + } ], [ "%I:%M", function(d) { + return d.getMinutes(); + } ], [ "%I %p", function(d) { + return d.getHours(); + } ], [ "%a %d", function(d) { + return d.getDay() && d.getDate() != 1; + } ], [ "%b %d", function(d) { + return d.getDate() != 1; + } ], [ "%B", function(d) { + return d.getMonth(); + } ], [ "%Y", d3_true ] ]); + var d3_time_scaleMilliseconds = { + range: function(start, stop, step) { + return d3.range(Math.ceil(start / step) * step, +stop, step).map(d3_time_scaleDate); + }, + floor: d3_identity, + ceil: d3_identity + }; + d3_time_scaleLocalMethods.year = d3_time.year; + d3_time.scale = function() { + return d3_time_scale(d3.scale.linear(), d3_time_scaleLocalMethods, d3_time_scaleLocalFormat); + }; + var d3_time_scaleUtcMethods = d3_time_scaleLocalMethods.map(function(m) { + return [ m[0].utc, m[1] ]; + }); + var d3_time_scaleUtcFormat = d3_time_formatUtc.multi([ [ ".%L", function(d) { + return d.getUTCMilliseconds(); + } ], [ ":%S", function(d) { + return 
d.getUTCSeconds(); + } ], [ "%I:%M", function(d) { + return d.getUTCMinutes(); + } ], [ "%I %p", function(d) { + return d.getUTCHours(); + } ], [ "%a %d", function(d) { + return d.getUTCDay() && d.getUTCDate() != 1; + } ], [ "%b %d", function(d) { + return d.getUTCDate() != 1; + } ], [ "%B", function(d) { + return d.getUTCMonth(); + } ], [ "%Y", d3_true ] ]); + d3_time_scaleUtcMethods.year = d3_time.year.utc; + d3_time.scale.utc = function() { + return d3_time_scale(d3.scale.linear(), d3_time_scaleUtcMethods, d3_time_scaleUtcFormat); + }; + d3.text = d3_xhrType(function(request) { + return request.responseText; + }); + d3.json = function(url, callback) { + return d3_xhr(url, "application/json", d3_json, callback); + }; + function d3_json(request) { + return JSON.parse(request.responseText); + } + d3.html = function(url, callback) { + return d3_xhr(url, "text/html", d3_html, callback); + }; + function d3_html(request) { + var range = d3_document.createRange(); + range.selectNode(d3_document.body); + return range.createContextualFragment(request.responseText); + } + d3.xml = d3_xhrType(function(request) { + return request.responseXML; + }); + if (typeof define === "function" && define.amd) { + define(d3); + } else if (typeof module === "object" && module.exports) { + module.exports = d3; + } else { + this.d3 = d3; + } +}(); diff --git a/nipype/external/due.py b/nipype/external/due.py new file mode 100644 index 0000000000..c360435bae --- /dev/null +++ b/nipype/external/due.py @@ -0,0 +1,72 @@ +# emacs: at the end of the file +# ex: set sts=4 ts=4 sw=4 et: +# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### # +""" + +Stub file for a guaranteed safe import of duecredit constructs: if duecredit +is not available. + +To use it, place it into your project codebase to be imported, e.g. copy as + + cp stub.py /path/tomodule/module/due.py + +Note that it might be better to avoid naming it duecredit.py to avoid shadowing +installed duecredit. + +Then use in your code as + + from .due import due, Doi, BibTeX + +See https://github.com/duecredit/duecredit/blob/master/README.md for examples. + +Origin: Originally a part of the duecredit +Copyright: 2015-2016 DueCredit developers +License: BSD-2 +""" + +__version__ = '0.0.5' + + +class InactiveDueCreditCollector(object): + """Just a stub at the Collector which would not do anything""" + + def _donothing(self, *args, **kwargs): + """Perform no good and no bad""" + pass + + def dcite(self, *args, **kwargs): + """If I could cite I would""" + + def nondecorating_decorator(func): + return func + + return nondecorating_decorator + + cite = load = add = _donothing + + def __repr__(self): + return '{}()'.format(self.__class__.__name__) + + +def _donothing_func(*args, **kwargs): + """Perform no good and no bad""" + pass + + +try: + from duecredit import due, BibTeX, Doi, Url + if 'due' in locals() and not hasattr(due, 'cite'): + raise RuntimeError( + "Imported due lacks .cite. 
DueCredit is now disabled") +except ImportError: + # Initiate due stub + due = InactiveDueCreditCollector() + BibTeX = Doi = Url = _donothing_func + +# Emacs mode definitions +# Local Variables: +# mode: python +# py-indent-offset: 4 +# tab-width: 4 +# indent-tabs-mode: nil +# End: diff --git a/nipype/external/fsl_imglob.py b/nipype/external/fsl_imglob.py new file mode 100755 index 0000000000..3b9a4eea4d --- /dev/null +++ b/nipype/external/fsl_imglob.py @@ -0,0 +1,149 @@ +#!/usr/bin/env python +# imglob - expand list of image filenames +# Stephen Smith, Mark Jenkinson & Matthew Webster FMRIB Image Analysis Group +# Copyright (C) 2009 University of Oxford +# Part of FSL - FMRIB's Software Library +# http://www.fmrib.ox.ac.uk/fsl +# fsl@fmrib.ox.ac.uk +# +# Developed at FMRIB (Oxford Centre for Functional Magnetic Resonance +# Imaging of the Brain), Department of Clinical Neurology, Oxford +# University, Oxford, UK +# +# +# LICENCE +# +# FMRIB Software Library, Release 5.0 (c) 2012, The University of +# Oxford (the "Software") +# +# The Software remains the property of the University of Oxford ("the +# University"). +# +# The Software is distributed "AS IS" under this Licence solely for +# non-commercial use in the hope that it will be useful, but in order +# that the University as a charitable foundation protects its assets for +# the benefit of its educational and research purposes, the University +# makes clear that no condition is made or to be implied, nor is any +# warranty given or to be implied, as to the accuracy of the Software, +# or that it will be suitable for any particular purpose or for use +# under any specific conditions. Furthermore, the University disclaims +# all responsibility for the use which is made of the Software. It +# further disclaims any liability for the outcomes arising from using +# the Software. +# +# The Licensee agrees to indemnify the University and hold the +# University harmless from and against any and all claims, damages and +# liabilities asserted by third parties (including claims for +# negligence) which arise directly or indirectly from the use of the +# Software or the sale of any products based on the Software. +# +# No part of the Software may be reproduced, modified, transmitted or +# transferred in any form or by any means, electronic or mechanical, +# without the express permission of the University. The permission of +# the University is not required if the said reproduction, modification, +# transmission or transference is done without financial return, the +# conditions of this Licence are imposed upon the receiver of the +# product, and all original and amended source code is included in any +# transmitted product. You may be held legally responsible for any +# copyright infringement that is caused or encouraged by your failure to +# abide by these terms and conditions. +# +# You are not permitted under this Licence to use this Software +# commercially. 
Use for which any financial return is received shall be +# defined as commercial use, and includes (1) integration of all or part +# of the source code or the Software into a product for sale or license +# by or on behalf of Licensee to third parties or (2) use of the +# Software or any derivative of it for research with the final aim of +# developing software products for sale or license to a third party or +# (3) use of the Software or any derivative of it for research with the +# final aim of developing non-software products for sale or license to a +# third party, or (4) use of the Software to provide any service to an +# external organisation for which payment is received. If you are +# interested in using the Software commercially, please contact Isis +# Innovation Limited ("Isis"), the technology transfer company of the +# University, to negotiate a licence. Contact details are: +# innovation@isis.ox.ac.uk quoting reference DE/9564. +from __future__ import print_function +import sys +import glob +from builtins import range + + +def usage(): + print("Usage: $0 [-extension/extensions] ") + print(" -extension for one image with full extension") + print(" -extensions for image list with full extensions") + sys.exit(1) + + +# Returns whether an input filename has an image extension ( and the +# basename and extension pair ) +def isImage(input, allExtensions): + for extension in allExtensions: + if input[-len(extension):] == extension: + return True, input[:-len(extension)], extension + return False, input, '' + + +def removeImageExtension(input, allExtensions): + return isImage(input, allExtensions)[1] + + +def main(): + if len(sys.argv) <= 1: + usage() + + if sys.version_info < (2, 4): + import sets + from sets import Set + setAvailable = False + else: + setAvailable = True + + deleteExtensions = True + primaryExtensions = ['.nii.gz', '.nii', '.hdr.gz', '.hdr'] + secondaryExtensions = ['.img.gz', '.img'] + allExtensions = primaryExtensions + secondaryExtensions + validExtensions = primaryExtensions + startingArg = 1 + + if sys.argv[1] == "-extensions": + validExtensions = allExtensions + deleteExtensions = False + startingArg = 2 + if sys.argv[1] == "-extension": + deleteExtensions = False + startingArg = 2 + + filelist = [] + for arg in range(startingArg, len(sys.argv)): + # #These if enables a "pedantic" style mode currently not used + # if isImage(sys.argv[arg],allExtensions)[0]: + # filelist.extend(glob.glob(sys.argv[arg])) + # else: + # for currentExtension in validExtensions: + # filelist.extend(glob.glob(sys.argv[arg]+currentExtension)) + for currentExtension in validExtensions: + filelist.extend( + glob.glob( + removeImageExtension(sys.argv[arg], allExtensions) + + currentExtension)) + + if deleteExtensions: + for file in range(0, len(filelist)): + filelist[file] = removeImageExtension(filelist[file], + allExtensions) + if setAvailable: + filelist = list(set(filelist)) + else: + filelist = list(Set(filelist)) + filelist.sort() + + for file in range(0, len(filelist)): + print(filelist[file], end=' ') + if file < len(filelist) - 1: + print(" ", end=' ') + + +if __name__ == "__main__": + main() diff --git a/nipype/external/portalocker.py b/nipype/external/portalocker.py new file mode 100644 index 0000000000..1da24d894c --- /dev/null +++ b/nipype/external/portalocker.py @@ -0,0 +1,145 @@ +# -*- coding: utf-8 -*- +# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking. +# Requires python 1.5.2 or better. 
+'''Cross-platform (posix/nt) API for flock-style file locking. + +Synopsis: + + import portalocker + file = open('somefile', 'r+') + portalocker.lock(file, portalocker.LOCK_EX) + file.seek(12) + file.write('foo') + file.close() + +If you know what you're doing, you may choose to + + portalocker.unlock(file) + +before closing the file, but why? + +Methods: + + lock( file, flags ) + unlock( file ) + +Constants: + + LOCK_EX + LOCK_SH + LOCK_NB + +Exceptions: + + LockException + +Notes: + +For the 'nt' platform, this module requires the Python Extensions for Windows. +Be aware that this may not work as expected on Windows 95/98/ME. + +History: + +I learned the win32 technique for locking files from sample code +provided by John Nielsen in the documentation +that accompanies the win32 modules. + +Author: Jonathan Feinberg , + Lowell Alleman +Version: $Id: portalocker.py 5474 2008-05-16 20:53:50Z lowell $ + +''' +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +__all__ = [ + 'lock', + 'unlock', + 'LOCK_EX', + 'LOCK_SH', + 'LOCK_NB', + 'LockException', +] + +import os + + +class LockException(Exception): + # Error codes: + LOCK_FAILED = 1 + + +if os.name == 'nt': + import win32con + import win32file + import pywintypes + LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK + LOCK_SH = 0 # the default + LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY + # is there any reason not to reuse the following structure? + __overlapped = pywintypes.OVERLAPPED() +elif os.name == 'posix': + import fcntl + LOCK_EX = fcntl.LOCK_EX + LOCK_SH = fcntl.LOCK_SH + LOCK_NB = fcntl.LOCK_NB +else: + raise RuntimeError('PortaLocker only defined for nt and posix platforms') + +if os.name == 'nt': + + def lock(file, flags): + hfile = win32file._get_osfhandle(file.fileno()) + try: + win32file.LockFileEx(hfile, flags, 0, -0x10000, __overlapped) + except pywintypes.error as exc_value: + # error: (33, 'LockFileEx', 'The process cannot access the file + # because another process has locked a portion of the file.') + if exc_value[0] == 33: + raise LockException(LockException.LOCK_FAILED, exc_value[2]) + else: + # Q: Are there exceptions/codes we should be dealing with here? + raise + + def unlock(file): + hfile = win32file._get_osfhandle(file.fileno()) + try: + win32file.UnlockFileEx(hfile, 0, -0x10000, __overlapped) + except pywintypes.error as exc_value: + if exc_value[0] == 158: + # error: (158, 'UnlockFileEx', 'The segment is already + # unlocked.') To match the 'posix' implementation, silently + # ignore this error + pass + else: + # Q: Are there exceptions/codes we should be dealing with here? + raise + +elif os.name == 'posix': + + def lock(file, flags): + try: + fcntl.flock(file.fileno(), flags) + except IOError as exc_value: + # The exception code varies on different systems so we'll catch + # every IO error + raise LockException(*exc_value) + + def unlock(file): + fcntl.flock(file.fileno(), fcntl.LOCK_UN) + + +if __name__ == '__main__': + from time import time, strftime, localtime + import sys + from . import portalocker + + log = open('log.txt', 'a+') + portalocker.lock(log, portalocker.LOCK_EX) + timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time())) + log.write(timestamp) + + print('Wrote lines. 
Hit enter to release lock.')
+    dummy = sys.stdin.readline()
+    log.close()
diff --git a/nipype/info.py b/nipype/info.py
new file mode 100644
index 0000000000..6b60da6603
--- /dev/null
+++ b/nipype/info.py
@@ -0,0 +1,183 @@
+""" This file defines parameters for nipype that we use to fill
+settings in setup.py, the nipype top-level docstring, and for building the
+docs. In setup.py in particular, we exec this file, so it cannot import nipype
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import sys
+
+# nipype version information. An empty version_extra corresponds to a
+# full release. '.dev' as a version_extra string means this is a development
+# version
+# Remove -dev for release
+__version__ = '1.1.1-dev'
+
+
+def get_nipype_gitversion():
+    """Nipype version as reported by the last commit in git
+
+    Returns
+    -------
+    None or str
+        Version of Nipype according to git.
+    """
+    import os
+    import subprocess
+    try:
+        import nipype
+        gitpath = os.path.realpath(
+            os.path.join(os.path.dirname(nipype.__file__), os.path.pardir))
+    except ImportError:
+        gitpath = os.getcwd()
+    gitpathgit = os.path.join(gitpath, '.git')
+    if not os.path.exists(gitpathgit):
+        return None
+    ver = None
+    try:
+        o, _ = subprocess.Popen(
+            'git describe', shell=True, cwd=gitpath,
+            stdout=subprocess.PIPE).communicate()
+    except Exception:
+        pass
+    else:
+        ver = o.decode().strip().split('-')[-1]
+    return ver
+
+
+if __version__.endswith('-dev'):
+    gitversion = get_nipype_gitversion()
+    if gitversion:
+        __version__ = '{}+{}'.format(__version__, gitversion)
+
+CLASSIFIERS = [
+    'Development Status :: 5 - Production/Stable', 'Environment :: Console',
+    'Intended Audience :: Science/Research',
+    'License :: OSI Approved :: Apache Software License',
+    'Operating System :: MacOS :: MacOS X',
+    'Operating System :: POSIX :: Linux',
+    'Programming Language :: Python :: 2.7',
+    'Programming Language :: Python :: 3.4',
+    'Programming Language :: Python :: 3.5',
+    'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering'
+]
+
+description = 'Neuroimaging in Python: Pipelines and Interfaces'
+
+# Note: this long_description is actually a copy/paste from the top-level
+# README.txt, so that it shows up nicely on PyPI. So please remember to edit
+# it only in one place and sync it correctly.
+long_description = """========================================================
+NIPYPE: Neuroimaging in Python: Pipelines and Interfaces
+========================================================
+
+Current neuroimaging software offers users an incredible opportunity to
+analyze data using a variety of different algorithms. However, this has
+resulted in a heterogeneous collection of specialized applications
+without transparent interoperability or a uniform operating interface.
+
+*Nipype*, an open-source, community-developed initiative under the
+umbrella of `NiPy <http://nipy.org>`_, is a Python project that provides a
+uniform interface to existing neuroimaging software and facilitates interaction
+between these packages within a single workflow. Nipype provides an environment
+that encourages interactive exploration of algorithms from different
+packages (e.g., AFNI, ANTS, BRAINS, BrainSuite, Camino, FreeSurfer, FSL, MNE,
+MRtrix, Nipy, Slicer, SPM), eases the design of workflows within and
+between packages, and reduces the learning curve necessary to use different \
+packages. 
Nipype is creating a collaborative platform for neuroimaging \ +software development in a high-level language and addressing limitations of \ +existing pipeline systems. + +*Nipype* allows you to: + +* easily interact with tools from different software packages +* combine processing steps from different software packages +* develop new workflows faster by reusing common steps from old ones +* process data faster by running it in parallel on many cores/machines +* make your research easily reproducible +* share your processing workflows with the community +""" + +# versions +NIBABEL_MIN_VERSION = '2.1.0' +NETWORKX_MIN_VERSION = '1.9' +NUMPY_MIN_VERSION = '1.9.0' +SCIPY_MIN_VERSION = '0.14' +TRAITS_MIN_VERSION = '4.6' +DATEUTIL_MIN_VERSION = '2.2' +PYTEST_MIN_VERSION = '3.0' +FUTURE_MIN_VERSION = '0.16.0' +SIMPLEJSON_MIN_VERSION = '3.8.0' +PROV_VERSION = '1.5.0' +CLICK_MIN_VERSION = '6.6.0' +PYDOT_MIN_VERSION = '1.2.3' + +NAME = 'nipype' +MAINTAINER = 'nipype developers' +MAINTAINER_EMAIL = 'neuroimaging@python.org' +DESCRIPTION = description +LONG_DESCRIPTION = long_description +URL = 'http://nipy.org/nipype' +DOWNLOAD_URL = 'http://github.com/nipy/nipype/archives/master' +LICENSE = 'Apache License, 2.0' +AUTHOR = 'nipype developers' +AUTHOR_EMAIL = 'neuroimaging@python.org' +PLATFORMS = 'OS Independent' +MAJOR = __version__.split('.')[0] +MINOR = __version__.split('.')[1] +MICRO = __version__.replace('-', '.').split('.')[2] +ISRELEASE = (len(__version__.replace('-', '.').split('.')) == 3 + or 'post' in __version__.replace('-', '.').split('.')[-1]) +VERSION = __version__ +PROVIDES = ['nipype'] +REQUIRES = [ + 'nibabel>=%s' % NIBABEL_MIN_VERSION, + 'networkx>=%s' % NETWORKX_MIN_VERSION, + 'numpy>=%s' % NUMPY_MIN_VERSION, + 'python-dateutil>=%s' % DATEUTIL_MIN_VERSION, + 'scipy>=%s' % SCIPY_MIN_VERSION, + 'traits>=%s' % TRAITS_MIN_VERSION, + 'future>=%s' % FUTURE_MIN_VERSION, + 'simplejson>=%s' % SIMPLEJSON_MIN_VERSION, + 'prov==%s' % PROV_VERSION, + 'click>=%s' % CLICK_MIN_VERSION, + 'funcsigs', + 'pytest>=%s' % PYTEST_MIN_VERSION, + 'pytest-xdist', + 'mock', + 'pydotplus', + 'pydot>=%s' % PYDOT_MIN_VERSION, + 'packaging', + 'futures; python_version == "2.7"', +] + +if sys.version_info <= (3, 4): + REQUIRES.append('configparser') + +TESTS_REQUIRES = ['pytest-cov', 'codecov', 'pytest-env'] + +EXTRA_REQUIRES = { + 'doc': ['Sphinx>=1.4', 'numpydoc', 'matplotlib', 'pydotplus', 'pydot>=1.2.3'], + 'tests': TESTS_REQUIRES, + 'specs': ['yapf'], + 'nipy': ['nitime', 'nilearn', 'dipy', 'nipy', 'matplotlib'], + 'profiler': ['psutil>=5.0'], + 'duecredit': ['duecredit'], + 'xvfbwrapper': ['xvfbwrapper'], + 'pybids': ['pybids'], + 'ssh': ['paramiko'], + # 'mesh': ['mayavi'] # Enable when it works +} + + +def _list_union(iterable): + return list(set(sum(iterable, []))) + + +# Enable a handle to install all extra dependencies at once +EXTRA_REQUIRES['all'] = _list_union(EXTRA_REQUIRES.values()) +# dev = doc + tests + specs +EXTRA_REQUIRES['dev'] = _list_union(val for key, val in EXTRA_REQUIRES.items() + if key in ('doc', 'tests', 'specs')) + +STATUS = 'stable' diff --git a/nipype/interfaces/__init__.py b/nipype/interfaces/__init__.py new file mode 100644 index 0000000000..a19efa64e5 --- /dev/null +++ b/nipype/interfaces/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Package contains interfaces for using existing functionality in other packages + +Requires Packages to be installed 
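+
+A minimal usage sketch (illustrative only; ``IdentityInterface`` is one of
+the convenience re-exports at the bottom of this file):
+
+    >>> from nipype.interfaces import IdentityInterface  # doctest: +SKIP
+    >>> ii = IdentityInterface(fields=['subject_id'])  # doctest: +SKIP
+    >>> ii.inputs.subject_id = 'sub-01'  # doctest: +SKIP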
+""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +__docformat__ = 'restructuredtext' + +from .io import DataGrabber, DataSink, SelectFiles, BIDSDataGrabber +from .utility import IdentityInterface, Rename, Function, Select, Merge diff --git a/nipype/interfaces/afni/__init__.py b/nipype/interfaces/afni/__init__.py new file mode 100644 index 0000000000..7c3498f7c6 --- /dev/null +++ b/nipype/interfaces/afni/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The afni module provides classes for interfacing with the `AFNI +`_ command line tools. + +Top-level namespace for afni. +""" + +from .base import Info +from .preprocess import ( + AlignEpiAnatPy, Allineate, Automask, AutoTcorrelate, AutoTLRC, Bandpass, + BlurInMask, BlurToFWHM, ClipLevel, DegreeCentrality, Despike, Detrend, ECM, + Fim, Fourier, Hist, LFCD, Maskave, Means, OutlierCount, QualityIndex, + ROIStats, Retroicor, Seg, SkullStrip, TCorr1D, TCorrMap, TCorrelate, TNorm, + TProject, TShift, Volreg, Warp, QwarpPlusMinus, Qwarp) +from .svm import (SVMTest, SVMTrain) +from .utils import ( + ABoverlap, AFNItoNIFTI, Autobox, Axialize, BrickStat, Bucket, Calc, Cat, + CatMatvec, CenterMass, ConvertDset, Copy, Dot, Edge3, Eval, FWHMx, + LocalBistat, MaskTool, Merge, Notes, NwarpApply, NwarpAdjust, NwarpCat, + OneDToolPy, Refit, Resample, TCat, TCatSubBrick, TStat, To3D, Unifize, + Undump, ZCutUp, GCOR, Zcat, Zeropad) +from .model import (Deconvolve, Remlfit, Synthesize) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py new file mode 100644 index 0000000000..d4b8e474ff --- /dev/null +++ b/nipype/interfaces/afni/base.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provide interface to AFNI commands.""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object, str +from future.utils import raise_from + +import os +from sys import platform +from distutils import spawn + +from ... import logging, LooseVersion +from ...utils.filemanip import split_filename, fname_presuffix + +from ..base import (CommandLine, traits, CommandLineInputSpec, isdefined, File, + TraitedSpec, PackageInfo) +from ...external.due import BibTeX + +# Use nipype's logging system +IFLOGGER = logging.getLogger('nipype.interface') + + +class Info(PackageInfo): + """Handle afni output type and version information. + """ + __outputtype = 'AFNI' + ftypes = {'NIFTI': '.nii', 'AFNI': '', 'NIFTI_GZ': '.nii.gz'} + version_cmd = 'afni --version' + + @staticmethod + def parse_version(raw_info): + version_stamp = raw_info.split('\n')[0].split('Version ')[1] + if version_stamp.startswith('AFNI'): + version_stamp = version_stamp.split('AFNI_')[1] + elif version_stamp.startswith('Debian'): + version_stamp = version_stamp.split('Debian-')[1].split('~')[0] + else: + return None + + version = LooseVersion(version_stamp.replace('_', '.')).version[:3] + if version[0] < 1000: + version[0] = version[0] + 2000 + return tuple(version) + + @classmethod + def output_type_to_ext(cls, outputtype): + """Get the file extension for the given output type. + + Parameters + ---------- + outputtype : {'NIFTI', 'NIFTI_GZ', 'AFNI'} + String specifying the output type. + + Returns + ------- + extension : str + The file extension for the output type. 
+ """ + + try: + return cls.ftypes[outputtype] + except KeyError as e: + msg = 'Invalid AFNIOUTPUTTYPE: ', outputtype + raise_from(KeyError(msg), e) + + @classmethod + def outputtype(cls): + """AFNI has no environment variables, + Output filetypes get set in command line calls + Nipype uses AFNI as default + + + Returns + ------- + None + """ + # warn(('AFNI has no environment variable that sets filetype ' + # 'Nipype uses NIFTI_GZ as default')) + return 'AFNI' + + @staticmethod + def standard_image(img_name): + '''Grab an image from the standard location. + + Could be made more fancy to allow for more relocatability''' + clout = CommandLine( + 'which afni', + ignore_exception=True, + resource_monitor=False, + terminal_output='allatonce').run() + if clout.runtime.returncode is not 0: + return None + + out = clout.runtime.stdout + basedir = os.path.split(out)[0] + return os.path.join(basedir, img_name) + + +class AFNICommandBase(CommandLine): + """ + A base class to fix a linking problem in OSX and afni. + See http://afni.nimh.nih.gov/afni/community/board/read.php?1,145346,145347#msg-145347 + """ + + def _run_interface(self, runtime): + if platform == 'darwin': + runtime.environ['DYLD_FALLBACK_LIBRARY_PATH'] = '/usr/local/afni/' + return super(AFNICommandBase, self)._run_interface(runtime) + + +class AFNICommandInputSpec(CommandLineInputSpec): + num_threads = traits.Int( + 1, usedefault=True, nohash=True, desc='set number of threads') + outputtype = traits.Enum( + 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + out_file = File( + name_template="%s_afni", + desc='output image file name', + argstr='-prefix %s', + name_source=["in_file"]) + + +class AFNICommandOutputSpec(TraitedSpec): + out_file = File(desc='output file', exists=True) + + +class AFNICommand(AFNICommandBase): + """Shared options for several AFNI commands """ + input_spec = AFNICommandInputSpec + _outputtype = None + + references_ = [{ + 'entry': + BibTeX('@article{Cox1996,' + 'author={R.W. Cox},' + 'title={AFNI: software for analysis and ' + 'visualization of functional magnetic ' + 'resonance neuroimages},' + 'journal={Computers and Biomedical research},' + 'volume={29},' + 'number={3},' + 'pages={162-173},' + 'year={1996},' + '}'), + 'tags': ['implementation'], + }, { + 'entry': + BibTeX('@article{CoxHyde1997,' + 'author={R.W. Cox and J.S. Hyde},' + 'title={Software tools for analysis and ' + 'visualization of fMRI data},' + 'journal={NMR in Biomedicine},' + 'volume={10},' + 'number={45},' + 'pages={171-178},' + 'year={1997},' + '}'), + 'tags': ['implementation'], + }] + + @property + def num_threads(self): + return self.inputs.num_threads + + @num_threads.setter + def num_threads(self, value): + self.inputs.num_threads = value + + @classmethod + def set_default_output_type(cls, outputtype): + """Set the default output type for AFNI classes. + + This method is used to set the default output type for all afni + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.outputtype. 
+ """ + + if outputtype in Info.ftypes: + cls._outputtype = outputtype + else: + raise AttributeError('Invalid AFNI outputtype: %s' % outputtype) + + def __init__(self, **inputs): + super(AFNICommand, self).__init__(**inputs) + self.inputs.on_trait_change(self._output_update, 'outputtype') + + if hasattr(self.inputs, 'num_threads'): + self.inputs.on_trait_change(self._nthreads_update, 'num_threads') + + if self._outputtype is None: + self._outputtype = Info.outputtype() + + if not isdefined(self.inputs.outputtype): + self.inputs.outputtype = self._outputtype + else: + self._output_update() + + def _nthreads_update(self): + """Update environment with new number of threads""" + self.inputs.environ['OMP_NUM_THREADS'] = '%d' % self.inputs.num_threads + + def _output_update(self): + """ i think? updates class private attribute based on instance input + in fsl also updates ENVIRON variable....not valid in afni + as it uses no environment variables + """ + self._outputtype = self.inputs.outputtype + + def _overload_extension(self, value, name=None): + path, base, _ = split_filename(value) + return os.path.join( + path, base + Info.output_type_to_ext(self.inputs.outputtype)) + + def _list_outputs(self): + outputs = super(AFNICommand, self)._list_outputs() + metadata = dict(name_source=lambda t: t is not None) + out_names = list(self.inputs.traits(**metadata).keys()) + if out_names: + for name in out_names: + if outputs[name]: + _, _, ext = split_filename(outputs[name]) + if ext == "": + outputs[name] = outputs[name] + "+orig.BRIK" + return outputs + + def _gen_fname(self, + basename, + cwd=None, + suffix=None, + change_ext=True, + ext=None): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. + If change_ext is True, it will use the extentions specified in + intputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (defaults is '' ) + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' 
+ raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.outputtype) + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix( + basename, suffix=suffix, use_ext=False, newpath=cwd) + return fname + + +def no_afni(): + """ Checks if AFNI is available """ + if Info.version() is None: + return True + return False + + +class AFNIPythonCommandInputSpec(CommandLineInputSpec): + outputtype = traits.Enum( + 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + py27_path = traits.Either( + 'python2', File(exists=True), usedefault=True, default='python2') + + +class AFNIPythonCommand(AFNICommand): + @property + def cmd(self): + orig_cmd = super(AFNIPythonCommand, self).cmd + found = spawn.find_executable(orig_cmd) + return found if found is not None else orig_cmd + + @property + def _cmd_prefix(self): + return "{} ".format(self.inputs.py27_path) diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py new file mode 100644 index 0000000000..2cccdfe869 --- /dev/null +++ b/nipype/interfaces/afni/model.py @@ -0,0 +1,666 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft = python sts = 4 ts = 4 sw = 4 et: +"""AFNI modeling interfaces + +Examples +-------- +See the docstrings of the individual classes for examples. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ..base import (CommandLineInputSpec, CommandLine, Directory, TraitedSpec, + traits, isdefined, File, InputMultiPath, Undefined, Str) +from ...external.due import BibTeX + +from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, + AFNICommandOutputSpec) + + +class DeconvolveInputSpec(AFNICommandInputSpec): + in_files = InputMultiPath( + File(exists=True), + desc='filenames of 3D+time input datasets. More than one filename can ' + 'be given and the datasets will be auto-catenated in time. ' + 'You can input a 1D time series file here, but the time axis ' + 'should run along the ROW direction, not the COLUMN direction as ' + 'in the \'input1D\' option.', + argstr='-input %s', + copyfile=False, + sep=" ", + position=1) + sat = traits.Bool( + desc='check the dataset time series for initial saturation transients,' + ' which should normally have been excised before data analysis.', + argstr='-sat', + xor=['trans']) + trans = traits.Bool( + desc='check the dataset time series for initial saturation transients,' + ' which should normally have been excised before data analysis.', + argstr='-trans', + xor=['sat']) + noblock = traits.Bool( + desc='normally, if you input multiple datasets with \'input\', then ' + 'the separate datasets are taken to be separate image runs that ' + 'get separate baseline models. Use this options if you want to ' + 'have the program consider these to be all one big run.' + '* If any of the input dataset has only 1 sub-brick, then this ' + 'option is automatically invoked!' + '* If the auto-catenation feature isn\'t used, then this option ' + 'has no effect, no how, no way.', + argstr='-noblock') + force_TR = traits.Float( + desc='use this value instead of the TR in the \'input\' ' + 'dataset. 
(It\'s better to fix the input using Refit.)',
+        argstr='-force_TR %f',
+        position=0)
+    input1D = File(
+        desc='filename of single (fMRI) .1D time series where time runs down '
+             'the column.',
+        argstr='-input1D %s',
+        exists=True)
+    TR_1D = traits.Float(
+        desc='TR to use with \'input1D\'. This option has no effect if you do '
+             'not also use \'input1D\'.',
+        argstr='-TR_1D %f')
+    legendre = traits.Bool(
+        desc='use Legendre polynomials for null hypothesis (baseline model)',
+        argstr='-legendre')
+    nolegendre = traits.Bool(
+        desc='use power polynomials for null hypotheses. Don\'t do this '
+             'unless you are crazy!',
+        argstr='-nolegendre')
+    nodmbase = traits.Bool(
+        desc='don\'t de-mean baseline time series', argstr='-nodmbase')
+    dmbase = traits.Bool(
+        desc='de-mean baseline time series (default if \'polort\' >= 0)',
+        argstr='-dmbase')
+    svd = traits.Bool(
+        desc='use SVD instead of Gaussian elimination (default)',
+        argstr='-svd')
+    nosvd = traits.Bool(
+        desc='use Gaussian elimination instead of SVD', argstr='-nosvd')
+    rmsmin = traits.Float(
+        desc='minimum rms error to reject reduced model (default = 0; don\'t '
+             'use this option normally!)',
+        argstr='-rmsmin %f')
+    nocond = traits.Bool(
+        desc='DON\'T calculate matrix condition number', argstr='-nocond')
+    singvals = traits.Bool(
+        desc='print out the matrix singular values', argstr='-singvals')
+    goforit = traits.Int(
+        desc='use this to proceed even if the matrix has bad problems (e.g., '
+             'duplicate columns, large condition number, etc.).',
+        argstr='-GOFORIT %i')
+    allzero_OK = traits.Bool(
+        desc='don\'t consider all zero matrix columns to be the type of error '
+             'that \'goforit\' is needed to ignore.',
+        argstr='-allzero_OK')
+    dname = traits.Tuple(
+        Str,
+        Str,
+        desc='set environment variable to provided value',
+        argstr='-D%s=%s')
+    mask = File(
+        desc='filename of 3D mask dataset; only data time series from within '
+             'the mask will be analyzed; results for voxels outside the mask '
+             'will be set to zero.',
+        argstr='-mask %s',
+        exists=True)
+    automask = traits.Bool(
+        desc='build a mask automatically from input data (will be slow for '
+             'long time series datasets)',
+        argstr='-automask')
+    STATmask = File(
+        desc='build a mask from provided file, and use this mask for the '
+             'purpose of reporting truncation-to float issues AND for '
+             'computing the FDR curves. The actual results ARE not masked '
+             'with this option (only with \'mask\' or \'automask\' options).',
+        argstr='-STATmask %s',
+        exists=True)
+    censor = File(
+        desc='filename of censor .1D time series. This is a file of 1s and '
+             '0s, indicating which time points are to be included (1) and '
+             'which are to be excluded (0).',
+        argstr='-censor %s',
+        exists=True)
+    polort = traits.Int(
+        desc='degree of polynomial corresponding to the null hypothesis '
+             '[default: 1]',
+        argstr='-polort %d')
+    ortvec = traits.Tuple(
+        File(desc='filename', exists=True),
+        Str(desc='label'),
+        desc='this option lets you input a rectangular array of 1 or more '
+             'baseline vectors from a file. This method is a fast way to '
+             'include a lot of baseline regressors in one step. ',
+        argstr='-ortvec %s %s')
+    x1D = File(desc='specify name for saved X matrix', argstr='-x1D %s')
+    x1D_stop = traits.Bool(
+        desc='stop running after writing .xmat.1D file', argstr='-x1D_stop')
+    cbucket = traits.Str(
+        desc='Name for dataset in which to save the regression '
+             'coefficients (no statistics).
This dataset '
+             'will be used in a -xrestore run [not yet implemented] '
+             'instead of the bucket dataset, if possible.',
+        argstr='-cbucket %s')
+    out_file = File(desc='output statistics file', argstr='-bucket %s')
+    num_threads = traits.Int(
+        desc='run the program with provided number of sub-processes',
+        argstr='-jobs %d',
+        nohash=True)
+    fout = traits.Bool(
+        desc='output F-statistic for each stimulus', argstr='-fout')
+    rout = traits.Bool(
+        desc='output the R^2 statistic for each stimulus', argstr='-rout')
+    tout = traits.Bool(
+        desc='output the T-statistic for each stimulus', argstr='-tout')
+    vout = traits.Bool(
+        desc='output the sample variance (MSE) for each stimulus',
+        argstr='-vout')
+    nofdr = traits.Bool(
+        desc="Don't compute the statistic-vs-FDR curves for the bucket "
+             "dataset.",
+        argstr='-noFDR')
+    global_times = traits.Bool(
+        desc='use global timing for stimulus timing files',
+        argstr='-global_times',
+        xor=['local_times'])
+    local_times = traits.Bool(
+        desc='use local timing for stimulus timing files',
+        argstr='-local_times',
+        xor=['global_times'])
+    num_stimts = traits.Int(
+        desc='number of stimulus timing files',
+        argstr='-num_stimts %d',
+        position=-6)
+    stim_times = traits.List(
+        traits.Tuple(
+            traits.Int(desc='k-th response model'),
+            File(desc='stimulus timing file', exists=True),
+            Str(desc='model')),
+        desc='generate a response model from a set of stimulus times'
+             ' given in file.',
+        argstr='-stim_times %d %s \'%s\'...',
+        position=-5)
+    stim_label = traits.List(
+        traits.Tuple(
+            traits.Int(desc='k-th input stimulus'),
+            Str(desc='stimulus label')),
+        desc='label for kth input stimulus (e.g., Label1)',
+        argstr='-stim_label %d %s...',
+        requires=['stim_times'],
+        position=-4)
+    stim_times_subtract = traits.Float(
+        desc='this option means to subtract specified seconds from each time '
+             'encountered in any \'stim_times\' option. The purpose of this '
+             'option is to make it simple to adjust timing files for the '
+             'removal of images from the start of each imaging run.',
+        argstr='-stim_times_subtract %f')
+    num_glt = traits.Int(
+        desc='number of general linear tests (i.e., contrasts)',
+        argstr='-num_glt %d',
+        position=-3)
+    gltsym = traits.List(
+        Str(desc='symbolic general linear test'),
+        desc='general linear tests (i.e., contrasts) using symbolic '
+             'conventions (e.g., \'+Label1 -Label2\')',
+        argstr='-gltsym \'SYM: %s\'...',
+        position=-2)
+    glt_label = traits.List(
+        traits.Tuple(
+            traits.Int(desc='k-th general linear test'),
+            Str(desc='GLT label')),
+        desc='general linear test (i.e., contrast) labels',
+        argstr='-glt_label %d %s...',
+        requires=['gltsym'],
+        position=-1)
+
+
+class DeconvolveOutputSpec(TraitedSpec):
+    out_file = File(desc='output statistics file', exists=True)
+    reml_script = File(
+        desc='automatically generated script to run 3dREMLfit', exists=True)
+    x1D = File(desc='save out X matrix', exists=True)
+    cbucket = File(desc='output regression coefficients file (if generated)')
+
+
+class Deconvolve(AFNICommand):
+    """Performs OLS regression given a 4D neuroimage file and stimulus timings
+
+    For complete details, see the `3dDeconvolve Documentation.
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> deconvolve = afni.Deconvolve() + >>> deconvolve.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> deconvolve.inputs.out_file = 'output.nii' + >>> deconvolve.inputs.x1D = 'output.1D' + >>> stim_times = [(1, 'timeseries.txt', 'SPMG1(4)')] + >>> deconvolve.inputs.stim_times = stim_times + >>> deconvolve.inputs.stim_label = [(1, 'Houses')] + >>> deconvolve.inputs.gltsym = ['SYM: +Houses'] + >>> deconvolve.inputs.glt_label = [(1, 'Houses')] + >>> deconvolve.cmdline + "3dDeconvolve -input functional.nii functional2.nii -bucket output.nii -x1D output.1D -num_stimts 1 -stim_times 1 timeseries.txt 'SPMG1(4)' -stim_label 1 Houses -num_glt 1 -gltsym 'SYM: +Houses' -glt_label 1 Houses" + >>> res = deconvolve.run() # doctest: +SKIP + """ + + _cmd = '3dDeconvolve' + input_spec = DeconvolveInputSpec + output_spec = DeconvolveOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'gltsym': + for n, val in enumerate(value): + if val.startswith('SYM: '): + value[n] = val.lstrip('SYM: ') + + return super(Deconvolve, self)._format_arg(name, trait_spec, value) + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + if len(self.inputs.stim_times) and not isdefined( + self.inputs.num_stimts): + self.inputs.num_stimts = len(self.inputs.stim_times) + if len(self.inputs.gltsym) and not isdefined(self.inputs.num_glt): + self.inputs.num_glt = len(self.inputs.gltsym) + if not isdefined(self.inputs.out_file): + self.inputs.out_file = 'Decon.nii' + + return super(Deconvolve, self)._parse_inputs(skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + + _gen_fname_opts = {} + _gen_fname_opts['basename'] = self.inputs.out_file + _gen_fname_opts['cwd'] = os.getcwd() + + if isdefined(self.inputs.x1D): + if not self.inputs.x1D.endswith('.xmat.1D'): + outputs['x1D'] = os.path.abspath(self.inputs.x1D + '.xmat.1D') + else: + outputs['x1D'] = os.path.abspath(self.inputs.x1D) + else: + outputs['x1D'] = self._gen_fname( + suffix='.xmat.1D', **_gen_fname_opts) + + if isdefined(self.inputs.cbucket): + outputs['cbucket'] = os.path.abspath(self.inputs.cbucket) + + outputs['reml_script'] = self._gen_fname( + suffix='.REML_cmd', **_gen_fname_opts) + # remove out_file from outputs if x1d_stop set to True + if self.inputs.x1D_stop: + del outputs['out_file'], outputs['cbucket'] + else: + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + + return outputs + + +class RemlfitInputSpec(AFNICommandInputSpec): + # mandatory files + in_files = InputMultiPath( + File(exists=True), + desc='Read time series dataset', + argstr='-input "%s"', + mandatory=True, + copyfile=False, + sep=" ") + matrix = File( + desc='the design matrix file, which should have been output from ' + 'Deconvolve via the \'x1D\' option', + argstr='-matrix %s', + mandatory=True) + # "Semi-Hidden Alternative Ways to Define the Matrix" + polort = traits.Int( + desc='if no \'matrix\' option is given, AND no \'matim\' option, ' + 'create a matrix with Legendre polynomial regressors' + 'up to the specified order. The default value is 0, which' + 'produces a matrix with a single column of all ones', + argstr='-polort %d', + xor=['matrix']) + matim = traits.File( + desc='read a standard file as the matrix. You can use only Col as ' + 'a name in GLTs with these nonstandard matrix input methods, ' + 'since the other names come from the \'matrix\' file. 
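Note that _parse_inputs above counts the stim_times and gltsym entries itself, so num_stimts and num_glt can normally be omitted, and out_file falls back to 'Decon.nii'. A minimal, illustrative sketch:

    >>> from nipype.interfaces import afni
    >>> decon = afni.Deconvolve()
    >>> decon.inputs.in_files = ['functional.nii']
    >>> decon.inputs.stim_times = [(1, 'timeseries.txt', 'SPMG1(4)')]
    >>> decon.inputs.stim_label = [(1, 'Houses')]
    >>> decon.inputs.gltsym = ['SYM: +Houses']
    >>> decon.inputs.glt_label = [(1, 'Houses')]
    >>> _ = decon.cmdline   # building the command line runs _parse_inputs
    >>> # num_stimts and num_glt are now both 1; out_file defaults to 'Decon.nii'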
'
+             'These mutually exclusive options are ignored if \'matrix\' '
+             'is used.',
+        argstr='-matim %s',
+        xor=['matrix'])
+    # Other arguments
+    mask = File(
+        desc='filename of 3D mask dataset; only data time series from within '
+             'the mask will be analyzed; results for voxels outside the mask '
+             'will be set to zero.',
+        argstr='-mask %s',
+        exists=True)
+    automask = traits.Bool(
+        usedefault=True,
+        argstr='-automask',
+        desc='build a mask automatically from input data (will be slow for '
+             'long time series datasets)')
+    STATmask = File(
+        desc='filename of 3D mask dataset to be used for the purpose '
+             'of reporting truncation-to float issues AND for computing the '
+             'FDR curves. The actual results ARE not masked with this option '
+             '(only with \'mask\' or \'automask\' options).',
+        argstr='-STATmask %s',
+        exists=True)
+    addbase = InputMultiPath(
+        File(
+            exists=True,
+            desc='file containing columns to add to regression matrix'),
+        desc='file(s) to add baseline model columns to the matrix with this '
+             'option. Each column in the specified file(s) will be appended '
+             'to the matrix. File(s) must have at least as many rows as the '
+             'matrix does.',
+        copyfile=False,
+        sep=" ",
+        argstr='-addbase %s')
+    slibase = InputMultiPath(
+        File(
+            exists=True,
+            desc='file containing columns to add to regression matrix'),
+        desc='similar to \'addbase\' in concept, BUT each specified file '
+             'must have an integer multiple of the number of slices '
+             'in the input dataset(s); then, separate regression '
+             'matrices are generated for each slice, with the '
+             'first column of the file appended to the matrix for '
+             'the first slice of the dataset, the second column of the file '
+             'appended to the matrix for the second slice of the dataset, '
+             'and so on. Intended to help model physiological noise in FMRI, '
+             'or other effects you want to regress out that might '
+             'change significantly in the inter-slice time intervals. This '
+             'will slow the program down, and make it use a lot more memory '
+             '(to hold all the matrix stuff).',
+        argstr='-slibase %s')
+    slibase_sm = InputMultiPath(
+        File(
+            exists=True,
+            desc='file containing columns to add to regression matrix'),
+        desc='similar to \'slibase\', BUT each file must be in slice major '
+             'order (i.e. all slice0 columns come first, then all slice1 '
+             'columns, etc).',
+        argstr='-slibase_sm %s')
+    usetemp = traits.Bool(
+        desc='write intermediate stuff to disk, to economize on RAM. '
+             'Using this option might be necessary to run with '
+             '\'slibase\' and with \'Grid\' values above the default, '
+             'since the program has to store a large number of '
+             'matrices for such a problem: two for every slice and '
+             'for every (a,b) pair in the ARMA parameter grid.
Temporary ' + 'files are written to the directory given in environment ' + 'variable TMPDIR, or in /tmp, or in ./ (preference is in that ' + 'order)', + argstr='-usetemp') + nodmbase = traits.Bool( + desc='by default, baseline columns added to the matrix via ' + '\'addbase\' or \'slibase\' or \'dsort\' will each have their ' + 'mean removed (as is done in Deconvolve); this option turns this ' + 'centering off', + argstr='-nodmbase', + requires=['addbase', 'dsort']) + dsort = File( + desc='4D dataset to be used as voxelwise baseline regressor', + exists=True, + copyfile=False, + argstr='-dsort %s') + dsort_nods = traits.Bool( + desc='if \'dsort\' option is used, this command will output ' + 'additional results files excluding the \'dsort\' file', + argstr='-dsort_nods', + requires=['dsort']) + fout = traits.Bool( + desc='output F-statistic for each stimulus', argstr='-fout') + rout = traits.Bool( + desc='output the R^2 statistic for each stimulus', argstr='-rout') + tout = traits.Bool( + desc='output the T-statistic for each stimulus; if you use ' + '\'out_file\' and do not give any of \'fout\', \'tout\',' + 'or \'rout\', then the program assumes \'fout\' is activated.', + argstr='-tout') + nofdr = traits.Bool( + desc='do NOT add FDR curve data to bucket datasets; FDR curves can ' + 'take a long time if \'tout\' is used', + argstr='-noFDR') + nobout = traits.Bool( + desc='do NOT add baseline (null hypothesis) regressor betas ' + 'to the \'rbeta_file\' and/or \'obeta_file\' output datasets.', + argstr='-nobout') + gltsym = traits.List( + traits.Either( + traits.Tuple(File(exists=True), Str()), traits.Tuple(Str(), + Str())), + desc='read a symbolic GLT from input file and associate it with a ' + 'label. As in Deconvolve, you can also use the \'SYM:\' method ' + 'to provide the definition of the GLT directly as a string ' + '(e.g., with \'SYM: +Label1 -Label2\'). Unlike Deconvolve, you ' + 'MUST specify \'SYM: \' if providing the GLT directly as a ' + 'string instead of from a file', + argstr='-gltsym "%s" %s...') + out_file = File( + desc='output dataset for beta + statistics from the REML estimation; ' + 'also contains the results of any GLT analysis requested ' + 'in the Deconvolve setup, similar to the \'bucket\' output ' + 'from Deconvolve. This dataset does NOT get the betas ' + '(or statistics) of those regressors marked as \'baseline\' ' + 'in the matrix file.', + argstr='-Rbuck %s') + var_file = File( + desc='output dataset for REML variance parameters', argstr='-Rvar %s') + rbeta_file = File( + desc='output dataset for beta weights from the REML estimation, ' + 'similar to the \'cbucket\' output from Deconvolve. 
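The column-ordering contract of slibase versus slibase_sm is easy to get wrong: slibase expects column j to belong to slice j modulo the number of slices, while slibase_sm expects all of slice 0's columns first, then slice 1's, and so on. A sketch of writing a slice-major file (illustrative; assumes numpy, and 'physio_sm.1D' is a made-up name):

    >>> import numpy as np
    >>> n_tp, n_slices, n_reg = 200, 3, 2
    >>> per_slice = [np.zeros((n_tp, n_reg)) for _ in range(n_slices)]  # one block per slice
    >>> slice_major = np.hstack(per_slice)    # slice 0 columns, then slice 1, ...
    >>> np.savetxt('physio_sm.1D', slice_major)
    >>> remlfit.inputs.slibase_sm = ['physio_sm.1D']  # doctest: +SKIP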
This dataset '
+             'will contain all the beta weights, for baseline and stimulus '
+             'regressors alike, unless the \'-nobout\' option is given -- '
+             'in that case, this dataset will only get the betas for the '
+             'stimulus regressors.',
+        argstr='-Rbeta %s')
+    glt_file = File(
+        desc='output dataset for beta + statistics from the REML estimation, '
+             'but ONLY for the GLTs added on the REMLfit command line itself '
+             'via \'gltsym\'; GLTs from Deconvolve\'s command line will NOT '
+             'be included.',
+        argstr='-Rglt %s')
+    fitts_file = File(
+        desc='output dataset for REML fitted model', argstr='-Rfitts %s')
+    errts_file = File(
+        desc='output dataset for REML residuals = data - fitted model',
+        argstr='-Rerrts %s')
+    wherr_file = File(
+        desc='dataset for REML residual, whitened using the estimated '
+             'ARMA(1,1) correlation matrix of the noise',
+        argstr='-Rwherr %s')
+    quiet = traits.Bool(
+        desc='turn off most progress messages', argstr='-quiet')
+    verb = traits.Bool(
+        desc='turns on more progress messages, including memory usage '
+             'progress reports at various stages',
+        argstr='-verb')
+    ovar = File(
+        desc='dataset for OLSQ st.dev. parameter (kind of boring)',
+        argstr='-Ovar %s')
+    obeta = File(
+        desc='dataset for beta weights from the OLSQ estimation',
+        argstr='-Obeta %s')
+    obuck = File(
+        desc='dataset for beta + statistics from the OLSQ estimation',
+        argstr='-Obuck %s')
+    oglt = File(
+        desc='dataset for beta + statistics from \'gltsym\' options',
+        argstr='-Oglt %s')
+    ofitts = File(desc='dataset for OLSQ fitted model', argstr='-Ofitts %s')
+    oerrts = File(
+        desc='dataset for OLSQ residuals (data - fitted model)',
+        argstr='-Oerrts %s')
+
+
+class RemlfitOutputSpec(AFNICommandOutputSpec):
+    out_file = File(
+        desc='dataset for beta + statistics from the REML estimation (if '
+             'generated)')
+    var_file = File(desc='dataset for REML variance parameters (if generated)')
+    rbeta_file = File(
+        desc='output dataset for beta weights from the REML estimation (if '
+             'generated)')
+    glt_file = File(
+        desc='output dataset for beta + statistics from the REML estimation, '
+             'but ONLY for the GLTs added on the REMLfit command '
+             'line itself via \'gltsym\' (if generated)')
+    fitts_file = File(
+        desc='output dataset for REML fitted model (if generated)')
+    errts_file = File(
+        desc='output dataset for REML residuals = data - fitted model (if '
+             'generated)')
+    wherr_file = File(
+        desc='dataset for REML residual, whitened using the estimated '
+             'ARMA(1,1) correlation matrix of the noise (if generated)')
+    ovar = File(desc='dataset for OLSQ st.dev. parameter (if generated)')
+    obeta = File(desc='dataset for beta weights from the OLSQ estimation (if '
+                      'generated)')
+    obuck = File(
+        desc='dataset for beta + statistics from the OLSQ estimation (if '
+             'generated)')
+    oglt = File(
+        desc='dataset for beta + statistics from \'gltsym\' options (if '
+             'generated)')
+    ofitts = File(desc='dataset for OLSQ fitted model (if generated)')
+    oerrts = File(desc='dataset for OLSQ residuals = data - fitted model (if '
+                       'generated)')
+
+
+class Remlfit(AFNICommand):
+    """Performs Generalized least squares time series fit with Restricted
+    Maximum Likelihood (REML) estimation of the temporal auto-correlation
+    structure.
+
+    For complete details, see the `3dREMLfit Documentation.
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> remlfit = afni.Remlfit() + >>> remlfit.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> remlfit.inputs.out_file = 'output.nii' + >>> remlfit.inputs.matrix = 'output.1D' + >>> remlfit.inputs.gltsym = [('SYM: +Lab1 -Lab2', 'TestSYM'), ('timeseries.txt', 'TestFile')] + >>> remlfit.cmdline + '3dREMLfit -gltsym "SYM: +Lab1 -Lab2" TestSYM -gltsym "timeseries.txt" TestFile -input "functional.nii functional2.nii" -matrix output.1D -Rbuck output.nii' + >>> res = remlfit.run() # doctest: +SKIP + """ + + _cmd = '3dREMLfit' + input_spec = RemlfitInputSpec + output_spec = RemlfitOutputSpec + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + return super(Remlfit, self)._parse_inputs(skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + + for key in outputs.keys(): + if isdefined(self.inputs.get()[key]): + outputs[key] = os.path.abspath(self.inputs.get()[key]) + + return outputs + + +class SynthesizeInputSpec(AFNICommandInputSpec): + cbucket = File( + desc='Read the dataset output from ' + '3dDeconvolve via the \'-cbucket\' option.', + argstr='-cbucket %s', + copyfile=False, + mandatory=True) + matrix = File( + desc='Read the matrix output from ' + '3dDeconvolve via the \'-x1D\' option.', + argstr='-matrix %s', + copyfile=False, + mandatory=True) + select = traits.List( + Str(desc='selected columns to synthesize'), + argstr='-select %s', + desc='A list of selected columns from the matrix (and the ' + 'corresponding coefficient sub-bricks from the ' + 'cbucket). Valid types include \'baseline\', ' + ' \'polort\', \'allfunc\', \'allstim\', \'all\', ' + 'Can also provide \'something\' where something matches ' + 'a stim_label from 3dDeconvolve, and \'digits\' where digits ' + 'are the numbers of the select matrix columns by ' + 'numbers (starting at 0), or number ranges of the form ' + '\'3..7\' and \'3-7\'.', + mandatory=True) + out_file = File( + name_template='syn', + desc='output dataset prefix name (default \'syn\')', + argstr='-prefix %s') + dry_run = traits.Bool( + desc='Don\'t compute the output, just ' + 'check the inputs.', + argstr='-dry') + TR = traits.Float( + desc='TR to set in the output. The default value of ' + 'TR is read from the header of the matrix file.', + argstr='-TR %f') + cenfill = traits.Enum( + 'zero', + 'nbhr', + 'none', + argstr='-cenfill %s', + desc='Determines how censored time points from the ' + '3dDeconvolve run will be filled. Valid types ' + 'are \'zero\', \'nbhr\' and \'none\'.') + + +class Synthesize(AFNICommand): + """Reads a '-cbucket' dataset and a '.xmat.1D' matrix from 3dDeconvolve, + and synthesizes a fit dataset using user-selected sub-bricks and + matrix columns. + + For complete details, see the `3dSynthesize Documentation. 
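Since Remlfit's _list_outputs above simply mirrors every defined input path into the output spec, additional REML datasets are requested just by naming them. A sketch (file names illustrative):

    >>> from nipype.interfaces import afni
    >>> remlfit = afni.Remlfit()
    >>> remlfit.inputs.in_files = ['functional.nii']
    >>> remlfit.inputs.matrix = 'output.1D'
    >>> remlfit.inputs.out_file = 'rbuck.nii'    # -Rbuck
    >>> remlfit.inputs.var_file = 'rvar.nii'     # -Rvar
    >>> remlfit.inputs.rbeta_file = 'rbeta.nii'  # -Rbeta
    >>> res = remlfit.run()  # doctest: +SKIP
    >>> # res.outputs.var_file would then hold the absolute path of 'rvar.nii'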
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> synthesize = afni.Synthesize() + >>> synthesize.inputs.cbucket = 'functional.nii' + >>> synthesize.inputs.matrix = 'output.1D' + >>> synthesize.inputs.select = ['baseline'] + >>> synthesize.cmdline + '3dSynthesize -cbucket functional.nii -matrix output.1D -select baseline' + >>> syn = synthesize.run() # doctest: +SKIP + """ + + _cmd = '3dSynthesize' + input_spec = SynthesizeInputSpec + output_spec = AFNICommandOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + for key in outputs.keys(): + if isdefined(self.inputs.get()[key]): + outputs[key] = os.path.abspath(self.inputs.get()[key]) + + return outputs diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py new file mode 100644 index 0000000000..4e56f7578b --- /dev/null +++ b/nipype/interfaces/afni/preprocess.py @@ -0,0 +1,3724 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""AFNI preprocessing interfaces +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os +import os.path as op + +from ...utils.filemanip import (load_json, save_json, split_filename, + fname_presuffix) +from ..base import (CommandLineInputSpec, CommandLine, TraitedSpec, traits, + isdefined, File, InputMultiPath, Undefined, Str, + InputMultiObject) + +from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec, + AFNICommandOutputSpec, AFNIPythonCommandInputSpec, + AFNIPythonCommand, Info, no_afni) + +from ...import logging +iflogger = logging.getLogger('nipype.interface') + + +class CentralityInputSpec(AFNICommandInputSpec): + """Common input spec class for all centrality-related commands + """ + + mask = File( + desc='mask file to mask input data', argstr='-mask %s', exists=True) + thresh = traits.Float( + desc='threshold to exclude connections where corr <= thresh', + argstr='-thresh %f') + polort = traits.Int(desc='', argstr='-polort %d') + autoclip = traits.Bool( + desc='Clip off low-intensity regions in the dataset', + argstr='-autoclip') + automask = traits.Bool( + desc='Mask the dataset to target brain-only voxels', + argstr='-automask') + + +class AlignEpiAnatPyInputSpec(AFNIPythonCommandInputSpec): + in_file = File( + desc='EPI dataset to align', + argstr='-epi %s', + mandatory=True, + exists=True, + copyfile=False) + anat = File( + desc='name of structural dataset', + argstr='-anat %s', + mandatory=True, + exists=True, + copyfile=False) + epi_base = traits.Either( + traits.Range(low=0), + traits.Enum('mean', 'median', 'max'), + desc='the epi base used in alignment' + 'should be one of (0/mean/median/max/subbrick#)', + mandatory=True, + argstr='-epi_base %s') + anat2epi = traits.Bool( + desc='align anatomical to EPI dataset (default)', argstr='-anat2epi') + epi2anat = traits.Bool( + desc='align EPI to anatomical dataset', argstr='-epi2anat') + save_skullstrip = traits.Bool( + desc='save skull-stripped (not aligned)', argstr='-save_skullstrip') + suffix = traits.Str( + '_al', + desc='append suffix to the original anat/epi dataset to use' + 'in the resulting dataset names (default is "_al")', + usedefault=True, + argstr='-suffix %s') + epi_strip = traits.Enum( + ('3dSkullStrip', '3dAutomask', 'None'), + desc='method to mask brain in EPI data' + 'should be one of[3dSkullStrip]/3dAutomask/None)', + argstr='-epi_strip %s') + volreg = traits.Enum( 
+ 'on', + 'off', + usedefault=True, + desc='do volume registration on EPI dataset before alignment' + 'should be \'on\' or \'off\', defaults to \'on\'', + argstr='-volreg %s') + tshift = traits.Enum( + 'on', + 'off', + usedefault=True, + desc='do time shifting of EPI dataset before alignment' + 'should be \'on\' or \'off\', defaults to \'on\'', + argstr='-tshift %s') + + +class AlignEpiAnatPyOutputSpec(TraitedSpec): + anat_al_orig = File( + desc="A version of the anatomy that is aligned to the EPI") + epi_al_orig = File( + desc="A version of the EPI dataset aligned to the anatomy") + epi_tlrc_al = File( + desc="A version of the EPI dataset aligned to a standard template") + anat_al_mat = File(desc="matrix to align anatomy to the EPI") + epi_al_mat = File(desc="matrix to align EPI to anatomy") + epi_vr_al_mat = File(desc="matrix to volume register EPI") + epi_reg_al_mat = File( + desc="matrix to volume register and align epi to anatomy") + epi_al_tlrc_mat = File(desc="matrix to volume register and align epi" + "to anatomy and put into standard space") + epi_vr_motion = File(desc="motion parameters from EPI time-series" + "registration (tsh included in name if slice" + "timing correction is also included).") + skullstrip = File(desc="skull-stripped (not aligned) volume") + + +class AlignEpiAnatPy(AFNIPythonCommand): + """Align EPI to anatomical datasets or vice versa + This Python script computes the alignment between two datasets, typically + an EPI and an anatomical structural dataset, and applies the resulting + transformation to one or the other to bring them into alignment. + + This script computes the transforms needed to align EPI and + anatomical datasets using a cost function designed for this purpose. The + script combines multiple transformations, thereby minimizing the amount of + interpolation applied to the data. + + Basic Usage: + align_epi_anat.py -anat anat+orig -epi epi+orig -epi_base 5 + + The user must provide EPI and anatomical datasets and specify the EPI + sub-brick to use as a base in the alignment. + + Internally, the script always aligns the anatomical to the EPI dataset, + and the resulting transformation is saved to a 1D file. + As a user option, the inverse of this transformation may be applied to the + EPI dataset in order to align it to the anatomical data instead. + + This program generates several kinds of output in the form of datasets + and transformation matrices which can be applied to other datasets if + needed. Time-series volume registration, oblique data transformations and + Talairach (standard template) transformations will be combined as needed + and requested (with options to turn on and off each of the steps) in + order to create the aligned datasets. + + For complete details, see the `align_epi_anat.py' Documentation. 
+    `_
+
+    Examples
+    ========
+    >>> from nipype.interfaces import afni
+    >>> al_ea = afni.AlignEpiAnatPy()
+    >>> al_ea.inputs.anat = "structural.nii"
+    >>> al_ea.inputs.in_file = "functional.nii"
+    >>> al_ea.inputs.epi_base = 0
+    >>> al_ea.inputs.epi_strip = '3dAutomask'
+    >>> al_ea.inputs.volreg = 'off'
+    >>> al_ea.inputs.tshift = 'off'
+    >>> al_ea.inputs.save_skullstrip = True
+    >>> al_ea.cmdline # doctest: +ELLIPSIS
+    'python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off'
+    >>> res = al_ea.run() # doctest: +SKIP
+    """
+    _cmd = 'align_epi_anat.py'
+    input_spec = AlignEpiAnatPyInputSpec
+    output_spec = AlignEpiAnatPyOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        anat_prefix = ''.join(
+            self._gen_fname(self.inputs.anat).split('+')[:-1])
+        epi_prefix = ''.join(
+            self._gen_fname(self.inputs.in_file).split('+')[:-1])
+        outputtype = self.inputs.outputtype
+        if outputtype == 'AFNI':
+            ext = '.HEAD'
+        else:
+            ext = Info.output_type_to_ext(outputtype)
+        matext = '.1D'
+        suffix = self.inputs.suffix
+        if self.inputs.anat2epi:
+            outputs['anat_al_orig'] = self._gen_fname(
+                anat_prefix, suffix=suffix + '+orig', ext=ext)
+            outputs['anat_al_mat'] = self._gen_fname(
+                anat_prefix, suffix=suffix + '_mat.aff12', ext=matext)
+        if self.inputs.epi2anat:
+            outputs['epi_al_orig'] = self._gen_fname(
+                epi_prefix, suffix=suffix + '+orig', ext=ext)
+            outputs['epi_al_mat'] = self._gen_fname(
+                epi_prefix, suffix=suffix + '_mat.aff12', ext=matext)
+        if self.inputs.volreg == 'on':
+            outputs['epi_vr_al_mat'] = self._gen_fname(
+                epi_prefix, suffix='_vr' + suffix + '_mat.aff12', ext=matext)
+            if self.inputs.tshift == 'on':
+                outputs['epi_vr_motion'] = self._gen_fname(
+                    epi_prefix, suffix='tsh_vr_motion', ext=matext)
+            elif self.inputs.tshift == 'off':
+                outputs['epi_vr_motion'] = self._gen_fname(
+                    epi_prefix, suffix='vr_motion', ext=matext)
+        if self.inputs.volreg == 'on' and self.inputs.epi2anat:
+            outputs['epi_reg_al_mat'] = self._gen_fname(
+                epi_prefix, suffix='_reg' + suffix + '_mat.aff12', ext=matext)
+        if self.inputs.save_skullstrip:
+            outputs['skullstrip'] = self._gen_fname(
+                anat_prefix, suffix='_ns' + '+orig', ext=ext)
+        return outputs
+
+
+class AllineateInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file to 3dAllineate',
+        argstr='-source %s',
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    reference = File(
+        exists=True,
+        argstr='-base %s',
+        desc='file to be used as reference, the first volume will be used if '
+             'not given the reference will be the first volume of in_file.')
+    out_file = File(
+        desc='output file from 3dAllineate',
+        argstr='-prefix %s',
+        name_template='%s_allineate',
+        name_source='in_file',
+        hash_files=False,
+        xor=['allcostx'])
+    out_param_file = File(
+        argstr='-1Dparam_save %s',
+        desc='Save the warp parameters in ASCII (.1D) format.',
+        xor=['in_param_file', 'allcostx'])
+    in_param_file = File(
+        exists=True,
+        argstr='-1Dparam_apply %s',
+        desc='Read warp parameters from file and apply them to '
+             'the source dataset, and produce a new dataset',
+        xor=['out_param_file'])
+    out_matrix = File(
+        argstr='-1Dmatrix_save %s',
+        desc='Save the transformation matrix for each volume.',
+        xor=['in_matrix', 'allcostx'])
+    in_matrix = File(
+        desc='matrix to align input file',
+        argstr='-1Dmatrix_apply %s',
+        position=-3,
+        xor=['out_matrix'])
+    overwrite = traits.Bool(
+        desc='overwrite output file if it already exists',
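As a sketch of the suffix-driven naming in AlignEpiAnatPy._list_outputs above (illustrative, assuming the default AFNI outputtype and hence a '.HEAD' extension):

    >>> from nipype.interfaces import afni
    >>> al_ea = afni.AlignEpiAnatPy(anat='structural.nii',
    ...                             in_file='functional.nii',
    ...                             epi_base=0, anat2epi=True)
    >>> # with the default suffix '_al', the reported outputs would be named
    >>> # 'structural_al+orig.HEAD' (anat_al_orig) and
    >>> # 'structural_al_mat.aff12.1D' (anat_al_mat)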
argstr='-overwrite') + + allcostx = File( + desc= + 'Compute and print ALL available cost functionals for the un-warped inputs' + 'AND THEN QUIT. If you use this option none of the other expected outputs will be produced', + argstr='-allcostx |& tee %s', + position=-1, + xor=['out_file', 'out_matrix', 'out_param_file', 'out_weight_file']) + _cost_funcs = [ + 'leastsq', 'ls', 'mutualinfo', 'mi', 'corratio_mul', 'crM', + 'norm_mutualinfo', 'nmi', 'hellinger', 'hel', 'corratio_add', 'crA', + 'corratio_uns', 'crU' + ] + + cost = traits.Enum( + *_cost_funcs, + argstr='-cost %s', + desc='Defines the \'cost\' function that defines the matching between ' + 'the source and the base') + _interp_funcs = [ + 'nearestneighbour', 'linear', 'cubic', 'quintic', 'wsinc5' + ] + interpolation = traits.Enum( + *_interp_funcs[:-1], + argstr='-interp %s', + desc='Defines interpolation method to use during matching') + final_interpolation = traits.Enum( + *_interp_funcs, + argstr='-final %s', + desc='Defines interpolation method used to create the output dataset') + + # TECHNICAL OPTIONS (used for fine control of the program): + nmatch = traits.Int( + argstr='-nmatch %d', + desc='Use at most n scattered points to match the datasets.') + no_pad = traits.Bool( + argstr='-nopad', desc='Do not use zero-padding on the base image.') + zclip = traits.Bool( + argstr='-zclip', + desc='Replace negative values in the input datasets (source & base) ' + 'with zero.') + convergence = traits.Float( + argstr='-conv %f', + desc='Convergence test in millimeters (default 0.05mm).') + usetemp = traits.Bool(argstr='-usetemp', desc='temporary file use') + check = traits.List( + traits.Enum(*_cost_funcs), + argstr='-check %s', + desc='After cost functional optimization is done, start at the final ' + 'parameters and RE-optimize using this new cost functions. If ' + 'the results are too different, a warning message will be ' + 'printed. However, the final parameters from the original ' + 'optimization will be used to create the output dataset.') + + # ** PARAMETERS THAT AFFECT THE COST OPTIMIZATION STRATEGY ** + one_pass = traits.Bool( + argstr='-onepass', + desc='Use only the refining pass -- do not try a coarse resolution ' + 'pass first. Useful if you know that only small amounts of ' + 'image alignment are needed.') + two_pass = traits.Bool( + argstr='-twopass', + desc='Use a two pass alignment strategy for all volumes, searching ' + 'for a large rotation+shift and then refining the alignment.') + two_blur = traits.Float( + argstr='-twoblur %f', + desc='Set the blurring radius for the first pass in mm.') + two_first = traits.Bool( + argstr='-twofirst', + desc='Use -twopass on the first image to be registered, and ' + 'then on all subsequent images from the source dataset, ' + 'use results from the first image\'s coarse pass to start ' + 'the fine pass.') + two_best = traits.Int( + argstr='-twobest %d', + desc='In the coarse pass, use the best \'bb\' set of initial' + 'points to search for the starting point for the fine' + 'pass. If bb==0, then no search is made for the best' + 'starting point, and the identity transformation is' + 'used as the starting point. [Default=5; min=0 max=11]') + fine_blur = traits.Float( + argstr='-fineblur %f', + desc='Set the blurring radius to use in the fine resolution ' + 'pass to \'x\' mm. A small amount (1-2 mm?) of blurring at ' + 'the fine step may help with convergence, if there is ' + 'some problem, especially if the base volume is very noisy. 
' + '[Default == 0 mm = no blurring at the final alignment pass]') + center_of_mass = Str( + argstr='-cmass%s', + desc='Use the center-of-mass calculation to bracket the shifts.') + autoweight = Str( + argstr='-autoweight%s', + desc='Compute a weight function using the 3dAutomask ' + 'algorithm plus some blurring of the base image.') + automask = traits.Int( + argstr='-automask+%d', + desc='Compute a mask function, set a value for dilation or 0.') + autobox = traits.Bool( + argstr='-autobox', + desc='Expand the -automask function to enclose a rectangular ' + 'box that holds the irregular mask.') + nomask = traits.Bool( + argstr='-nomask', + desc='Don\'t compute the autoweight/mask; if -weight is not ' + 'also used, then every voxel will be counted equally.') + weight_file = File( + argstr='-weight %s', + exists=True, + deprecated='1.0.0', + new_name='weight', + desc='Set the weighting for each voxel in the base dataset; ' + 'larger weights mean that voxel count more in the cost function. ' + 'Must be defined on the same grid as the base dataset') + weight = traits.Either( + File(exists=True), + traits.Float(), + argstr='-weight %s', + desc='Set the weighting for each voxel in the base dataset; ' + 'larger weights mean that voxel count more in the cost function. ' + 'If an image file is given, the volume must be defined on the ' + 'same grid as the base dataset') + out_weight_file = traits.File( + argstr='-wtprefix %s', + desc='Write the weight volume to disk as a dataset', + xor=['allcostx']) + source_mask = File( + exists=True, argstr='-source_mask %s', desc='mask the input dataset') + source_automask = traits.Int( + argstr='-source_automask+%d', + desc='Automatically mask the source dataset with dilation or 0.') + warp_type = traits.Enum( + 'shift_only', + 'shift_rotate', + 'shift_rotate_scale', + 'affine_general', + argstr='-warp %s', + desc='Set the warp type.') + warpfreeze = traits.Bool( + argstr='-warpfreeze', + desc='Freeze the non-rigid body parameters after first volume.') + replacebase = traits.Bool( + argstr='-replacebase', + desc='If the source has more than one volume, then after the first ' + 'volume is aligned to the base.') + replacemeth = traits.Enum( + *_cost_funcs, + argstr='-replacemeth %s', + desc='After first volume is aligned, switch method for later volumes. ' + 'For use with \'-replacebase\'.') + epi = traits.Bool( + argstr='-EPI', + desc='Treat the source dataset as being composed of warped ' + 'EPI slices, and the base as comprising anatomically ' + '\'true\' images. 
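A sketch of wiring the coarse-then-fine strategy described by the options above (the parameter values are illustrative, not recommendations):

    >>> from nipype.interfaces import afni
    >>> allineate = afni.Allineate()
    >>> allineate.inputs.in_file = 'functional.nii'
    >>> allineate.inputs.reference = 'structural.nii'
    >>> allineate.inputs.two_pass = True    # coarse search, then refinement
    >>> allineate.inputs.two_blur = 6.0     # coarse-pass blur radius (mm)
    >>> allineate.inputs.fine_blur = 1.5    # light blur for the fine pass
    >>> allineate.inputs.cost = 'nmi'
    >>> res = allineate.run()  # doctest: +SKIP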
Only phase-encoding direction image ' + 'shearing and scaling will be allowed with this option.') + maxrot = traits.Float( + argstr='-maxrot %f', desc='Maximum allowed rotation in degrees.') + maxshf = traits.Float( + argstr='-maxshf %f', desc='Maximum allowed shift in mm.') + maxscl = traits.Float( + argstr='-maxscl %f', desc='Maximum allowed scaling factor.') + maxshr = traits.Float( + argstr='-maxshr %f', desc='Maximum allowed shearing factor.') + master = File( + exists=True, + argstr='-master %s', + desc='Write the output dataset on the same grid as this file.') + newgrid = traits.Float( + argstr='-newgrid %f', + desc='Write the output dataset using isotropic grid spacing in mm.') + + # Non-linear experimental + _nwarp_types = [ + 'bilinear', 'cubic', 'quintic', 'heptic', 'nonic', 'poly3', 'poly5', + 'poly7', 'poly9' + ] # same non-hellenistic + nwarp = traits.Enum( + *_nwarp_types, + argstr='-nwarp %s', + desc='Experimental nonlinear warping: bilinear or legendre poly.') + _dirs = ['X', 'Y', 'Z', 'I', 'J', 'K'] + nwarp_fixmot = traits.List( + traits.Enum(*_dirs), + argstr='-nwarp_fixmot%s...', + desc='To fix motion along directions.') + nwarp_fixdep = traits.List( + traits.Enum(*_dirs), + argstr='-nwarp_fixdep%s...', + desc='To fix non-linear warp dependency along directions.') + verbose = traits.Bool( + argstr='-verb', desc='Print out verbose progress reports.') + quiet = traits.Bool( + argstr='-quiet', desc="Don't print out verbose progress reports.") + + +class AllineateOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image file name') + out_matrix = File(exists=True, desc='matrix to align input file') + out_param_file = File(exists=True, desc='warp parameters') + out_weight_file = File(exists=True, desc='weight volume') + allcostx = File( + desc= + 'Compute and print ALL available cost functionals for the un-warped inputs' + ) + + +class Allineate(AFNICommand): + """Program to align one dataset (the 'source') to a base dataset + + For complete details, see the `3dAllineate Documentation. 
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> allineate = afni.Allineate()
+    >>> allineate.inputs.in_file = 'functional.nii'
+    >>> allineate.inputs.out_file = 'functional_allineate.nii'
+    >>> allineate.inputs.in_matrix = 'cmatrix.mat'
+    >>> allineate.cmdline
+    '3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat'
+    >>> res = allineate.run() # doctest: +SKIP
+
+    >>> allineate = afni.Allineate()
+    >>> allineate.inputs.in_file = 'functional.nii'
+    >>> allineate.inputs.reference = 'structural.nii'
+    >>> allineate.inputs.allcostx = 'out.allcostX.txt'
+    >>> allineate.cmdline
+    '3dAllineate -source functional.nii -base structural.nii -allcostx |& tee out.allcostX.txt'
+    >>> res = allineate.run() # doctest: +SKIP
+
+    >>> allineate = afni.Allineate()
+    >>> allineate.inputs.in_file = 'functional.nii'
+    >>> allineate.inputs.reference = 'structural.nii'
+    >>> allineate.inputs.nwarp_fixmot = ['X', 'Y']
+    >>> allineate.cmdline
+    '3dAllineate -source functional.nii -nwarp_fixmotX -nwarp_fixmotY -prefix functional_allineate -base structural.nii'
+    >>> res = allineate.run() # doctest: +SKIP
+    """
+
+    _cmd = '3dAllineate'
+    input_spec = AllineateInputSpec
+    output_spec = AllineateOutputSpec
+
+    def _list_outputs(self):
+        outputs = super(Allineate, self)._list_outputs()
+
+        if self.inputs.out_weight_file:
+            outputs['out_weight_file'] = op.abspath(
+                self.inputs.out_weight_file)
+
+        if self.inputs.out_matrix:
+            path, base, ext = split_filename(self.inputs.out_matrix)
+            if ext.lower() not in ['.1d', '.1D']:
+                outputs['out_matrix'] = self._gen_fname(
+                    self.inputs.out_matrix, suffix='.aff12.1D')
+            else:
+                outputs['out_matrix'] = op.abspath(self.inputs.out_matrix)
+
+        if self.inputs.out_param_file:
+            path, base, ext = split_filename(self.inputs.out_param_file)
+            if ext.lower() not in ['.1d', '.1D']:
+                outputs['out_param_file'] = self._gen_fname(
+                    self.inputs.out_param_file, suffix='.param.1D')
+            else:
+                outputs['out_param_file'] = op.abspath(
+                    self.inputs.out_param_file)
+
+        if self.inputs.allcostx:
+            outputs['allcostx'] = os.path.abspath(self.inputs.allcostx)
+        return outputs
+
+
+class AutoTcorrelateInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='timeseries x space (volume or surface) file',
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    polort = traits.Int(
+        desc='Remove polynomial trend of order m or -1 for no detrending',
+        argstr='-polort %d')
+    eta2 = traits.Bool(desc='eta^2 similarity', argstr='-eta2')
+    mask = File(exists=True, desc='mask of voxels', argstr='-mask %s')
+    mask_only_targets = traits.Bool(
+        desc='use mask only on targets voxels',
+        argstr='-mask_only_targets',
+        xor=['mask_source'])
+    mask_source = File(
+        exists=True,
+        desc='mask for source voxels',
+        argstr='-mask_source %s',
+        xor=['mask_only_targets'])
+    out_file = File(
+        name_template='%s_similarity_matrix.1D',
+        desc='output image file name',
+        argstr='-prefix %s',
+        name_source='in_file')
+
+
+class AutoTcorrelate(AFNICommand):
+    """Computes the correlation coefficient between the time series of each
+    pair of voxels in the input dataset, and stores the output into a
+    new anatomical bucket dataset [scaled to shorts to save memory space].
+
+    For complete details, see the `3dAutoTcorrelate Documentation.
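A common pattern with the Allineate matrix options above is estimate-once, apply-many: save the transform with out_matrix (-1Dmatrix_save), then re-apply it with in_matrix (-1Dmatrix_apply). A sketch (file names illustrative; note that _list_outputs appends '.aff12.1D' to a bare prefix):

    >>> from nipype.interfaces import afni
    >>> est = afni.Allineate()
    >>> est.inputs.in_file = 'functional.nii'
    >>> est.inputs.reference = 'structural.nii'
    >>> est.inputs.out_matrix = 'func2struct'   # saved as func2struct.aff12.1D
    >>> res = est.run()  # doctest: +SKIP
    >>> apply_tf = afni.Allineate()
    >>> apply_tf.inputs.in_file = 'functional2.nii'
    >>> apply_tf.inputs.in_matrix = 'func2struct.aff12.1D'
    >>> apply_tf.inputs.out_file = 'functional2_aligned.nii'
    >>> res = apply_tf.run()  # doctest: +SKIP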
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> corr = afni.AutoTcorrelate() + >>> corr.inputs.in_file = 'functional.nii' + >>> corr.inputs.polort = -1 + >>> corr.inputs.eta2 = True + >>> corr.inputs.mask = 'mask.nii' + >>> corr.inputs.mask_only_targets = True + >>> corr.cmdline # doctest: +ELLIPSIS + '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii' + >>> res = corr.run() # doctest: +SKIP + """ + + input_spec = AutoTcorrelateInputSpec + output_spec = AFNICommandOutputSpec + _cmd = '3dAutoTcorrelate' + + def _overload_extension(self, value, name=None): + path, base, ext = split_filename(value) + if ext.lower() not in ['.1d', '.1D', '.nii.gz', '.nii']: + ext = ext + '.1D' + return os.path.join(path, base + ext) + + +class AutomaskInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dAutomask', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_mask', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + brain_file = File( + name_template='%s_masked', + desc='output file from 3dAutomask', + argstr='-apply_prefix %s', + name_source='in_file') + clfrac = traits.Float( + desc='sets the clip level fraction (must be 0.1-0.9). A small value ' + 'will tend to make the mask larger [default = 0.5].', + argstr='-clfrac %s') + dilate = traits.Int(desc='dilate the mask outwards', argstr='-dilate %s') + erode = traits.Int(desc='erode the mask inwards', argstr='-erode %s') + + +class AutomaskOutputSpec(TraitedSpec): + out_file = File(desc='mask file', exists=True) + brain_file = File(desc='brain file (skull stripped)', exists=True) + + +class Automask(AFNICommand): + """Create a brain-only mask of the image using AFNI 3dAutomask command + + For complete details, see the `3dAutomask Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> automask = afni.Automask() + >>> automask.inputs.in_file = 'functional.nii' + >>> automask.inputs.dilate = 1 + >>> automask.inputs.outputtype = 'NIFTI' + >>> automask.cmdline # doctest: +ELLIPSIS + '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii' + >>> res = automask.run() # doctest: +SKIP + + """ + + _cmd = '3dAutomask' + input_spec = AutomaskInputSpec + output_spec = AutomaskOutputSpec + + +class AutoTLRCInputSpec(CommandLineInputSpec): + outputtype = traits.Enum( + 'AFNI', list(Info.ftypes.keys()), desc='AFNI output filetype') + in_file = File( + desc='Original anatomical volume (+orig).' + 'The skull is removed by this script' + 'unless instructed otherwise (-no_ss).', + argstr='-input %s', + mandatory=True, + exists=True, + copyfile=False) + base = traits.Str( + desc=' Reference anatomical volume' + ' Usually this volume is in some standard space like' + ' TLRC or MNI space and with afni dataset view of' + ' (+tlrc).' + ' Preferably, this reference volume should have had' + ' the skull removed but that is not mandatory.' + ' AFNI\'s distribution contains several templates.' + ' For a longer list, use "whereami -show_templates"' + 'TT_N27+tlrc --> Single subject, skull stripped volume.' + ' This volume is also known as ' + ' N27_SurfVol_NoSkull+tlrc elsewhere in ' + ' AFNI and SUMA land.' + ' (www.loni.ucla.edu, www.bic.mni.mcgill.ca)' + ' This template has a full set of FreeSurfer' + ' (surfer.nmr.mgh.harvard.edu)' + ' surface models that can be used in SUMA. 
' + ' For details, see Talairach-related link:' + ' https://afni.nimh.nih.gov/afni/suma' + 'TT_icbm452+tlrc --> Average volume of 452 normal brains.' + ' Skull Stripped. (www.loni.ucla.edu)' + 'TT_avg152T1+tlrc --> Average volume of 152 normal brains.' + ' Skull Stripped.(www.bic.mni.mcgill.ca)' + 'TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1' + ' TT_avg152 and TT_EPI volume sources are from' + ' SPM\'s distribution. (www.fil.ion.ucl.ac.uk/spm/)' + 'If you do not specify a path for the template, the script' + 'will attempt to locate the template AFNI\'s binaries directory.' + 'NOTE: These datasets have been slightly modified from' + ' their original size to match the standard TLRC' + ' dimensions (Jean Talairach and Pierre Tournoux' + ' Co-Planar Stereotaxic Atlas of the Human Brain' + ' Thieme Medical Publishers, New York, 1988). ' + ' That was done for internal consistency in AFNI.' + ' You may use the original form of these' + ' volumes if you choose but your TLRC coordinates' + ' will not be consistent with AFNI\'s TLRC database' + ' (San Antonio Talairach Daemon database), for example.', + mandatory=True, + argstr='-base %s') + no_ss = traits.Bool( + desc='Do not strip skull of input data set' + '(because skull has already been removed' + 'or because template still has the skull)' + 'NOTE: The -no_ss option is not all that optional.' + ' Here is a table of when you should and should not use -no_ss' + ' Template Template' + ' WITH skull WITHOUT skull' + ' Dset.' + ' WITH skull -no_ss xxx ' + ' ' + ' WITHOUT skull No Cigar -no_ss' + ' ' + ' Template means: Your template of choice' + ' Dset. means: Your anatomical dataset' + ' -no_ss means: Skull stripping should not be attempted on Dset' + ' xxx means: Don\'t put anything, the script will strip Dset' + ' No Cigar means: Don\'t try that combination, it makes no sense.', + argstr='-no_ss') + + +class AutoTLRC(AFNICommand): + """A minmal wrapper for the AutoTLRC script + The only option currently supported is no_ss. + For complete details, see the `3dQwarp Documentation. + `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> autoTLRC = afni.AutoTLRC() + >>> autoTLRC.inputs.in_file = 'structural.nii' + >>> autoTLRC.inputs.no_ss = True + >>> autoTLRC.inputs.base = "TT_N27+tlrc" + >>> autoTLRC.cmdline + '@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss' + >>> res = autoTLRC.run() # doctest: +SKIP + + """ + _cmd = '@auto_tlrc' + input_spec = AutoTLRCInputSpec + output_spec = AFNICommandOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + ext = '.HEAD' + outputs['out_file'] = os.path.abspath( + self._gen_fname(self.inputs.in_file, suffix='+tlrc') + ext) + return outputs + + +class BandpassInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dBandpass', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_bp', + desc='output file from 3dBandpass', + argstr='-prefix %s', + position=1, + name_source='in_file', + genfile=True) + lowpass = traits.Float( + desc='lowpass', argstr='%f', position=-2, mandatory=True) + highpass = traits.Float( + desc='highpass', argstr='%f', position=-3, mandatory=True) + mask = File(desc='mask file', position=2, argstr='-mask %s', exists=True) + despike = traits.Bool( + argstr='-despike', + desc='Despike each time series before other processing. 
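Per AutoTLRC._list_outputs above, the @auto_tlrc wrapper always reports its result as the input prefix plus '+tlrc.HEAD'. An illustrative sketch:

    >>> from nipype.interfaces import afni
    >>> autoTLRC = afni.AutoTLRC(in_file='structural.nii',
    ...                          base='TT_N27+tlrc', no_ss=True)
    >>> res = autoTLRC.run()  # doctest: +SKIP
    >>> # res.outputs.out_file would end in 'structural+tlrc.HEAD'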
Hopefully, ' + 'you don\'t actually need to do this, which is why it is ' + 'optional.') + orthogonalize_file = InputMultiPath( + File(exists=True), + argstr='-ort %s', + desc='Also orthogonalize input to columns in f.1D. Multiple \'-ort\' ' + 'options are allowed.') + orthogonalize_dset = File( + exists=True, + argstr='-dsort %s', + desc='Orthogonalize each voxel to the corresponding voxel time series ' + 'in dataset \'fset\', which must have the same spatial and ' + 'temporal grid structure as the main input dataset. At present, ' + 'only one \'-dsort\' option is allowed.') + no_detrend = traits.Bool( + argstr='-nodetrend', + desc='Skip the quadratic detrending of the input that occurs before ' + 'the FFT-based bandpassing. You would only want to do this if ' + 'the dataset had been detrended already in some other program.') + tr = traits.Float( + argstr='-dt %f', + desc='Set time step (TR) in sec [default=from dataset header].') + nfft = traits.Int( + argstr='-nfft %d', desc='Set the FFT length [must be a legal value].') + normalize = traits.Bool( + argstr='-norm', + desc='Make all output time series have L2 norm = 1 (i.e., sum of ' + 'squares = 1).') + automask = traits.Bool( + argstr='-automask', desc='Create a mask from the input dataset.') + blur = traits.Float( + argstr='-blur %f', + desc='Blur (inside the mask only) with a filter width (FWHM) of ' + '\'fff\' millimeters.') + localPV = traits.Float( + argstr='-localPV %f', + desc='Replace each vector by the local Principal Vector (AKA first ' + 'singular vector) from a neighborhood of radius \'rrr\' ' + 'millimeters. Note that the PV time series is L2 normalized. ' + 'This option is mostly for Bob Cox to have fun with.') + notrans = traits.Bool( + argstr='-notrans', + desc='Don\'t check for initial positive transients in the data. ' + 'The test is a little slow, so skipping it is OK, if you KNOW ' + 'the data time series are transient-free.') + + +class Bandpass(AFNICommand): + """Program to lowpass and/or highpass each voxel time series in a + dataset, offering more/different options than Fourier + + For complete details, see the `3dBandpass Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> from nipype.testing import example_data + >>> bandpass = afni.Bandpass() + >>> bandpass.inputs.in_file = 'functional.nii' + >>> bandpass.inputs.highpass = 0.005 + >>> bandpass.inputs.lowpass = 0.1 + >>> bandpass.cmdline + '3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii' + >>> res = bandpass.run() # doctest: +SKIP + + """ + + _cmd = '3dBandpass' + input_spec = BandpassInputSpec + output_spec = AFNICommandOutputSpec + + +class BlurInMaskInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dSkullStrip', + argstr='-input %s', + position=1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_blur', + desc='output to the file', + argstr='-prefix %s', + name_source='in_file', + position=-1) + mask = File( + desc='Mask dataset, if desired. Blurring will occur only within the ' + 'mask. 
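The Bandpass options above compose naturally with nuisance removal. A sketch (illustrative; 'motion.1D' is a made-up regressor file):

    >>> from nipype.interfaces import afni
    >>> bandpass = afni.Bandpass()
    >>> bandpass.inputs.in_file = 'functional.nii'
    >>> bandpass.inputs.highpass = 0.005
    >>> bandpass.inputs.lowpass = 0.1
    >>> bandpass.inputs.automask = True                      # -automask
    >>> bandpass.inputs.orthogonalize_file = ['motion.1D']   # -ort, repeatable
    >>> res = bandpass.run()  # doctest: +SKIP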
Voxels NOT in the mask will be set to zero in the output.', + argstr='-mask %s') + multimask = File( + desc='Multi-mask dataset -- each distinct nonzero value in dataset ' + 'will be treated as a separate mask for blurring purposes.', + argstr='-Mmask %s') + automask = traits.Bool( + desc='Create an automask from the input dataset.', argstr='-automask') + fwhm = traits.Float( + desc='fwhm kernel size', argstr='-FWHM %f', mandatory=True) + preserve = traits.Bool( + desc='Normally, voxels not in the mask will be set to zero in the ' + 'output. If you want the original values in the dataset to be ' + 'preserved in the output, use this option.', + argstr='-preserve') + float_out = traits.Bool( + desc='Save dataset as floats, no matter what the input data type is.', + argstr='-float') + options = Str(desc='options', argstr='%s', position=2) + + +class BlurInMask(AFNICommand): + """Blurs a dataset spatially inside a mask. That's all. Experimental. + + For complete details, see the `3dBlurInMask Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> bim = afni.BlurInMask() + >>> bim.inputs.in_file = 'functional.nii' + >>> bim.inputs.mask = 'mask.nii' + >>> bim.inputs.fwhm = 5.0 + >>> bim.cmdline # doctest: +ELLIPSIS + '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur' + >>> res = bim.run() # doctest: +SKIP + + """ + + _cmd = '3dBlurInMask' + input_spec = BlurInMaskInputSpec + output_spec = AFNICommandOutputSpec + + +class BlurToFWHMInputSpec(AFNICommandInputSpec): + in_file = File( + desc='The dataset that will be smoothed', + argstr='-input %s', + mandatory=True, + exists=True) + automask = traits.Bool( + desc='Create an automask from the input dataset.', argstr='-automask') + fwhm = traits.Float( + desc='Blur until the 3D FWHM reaches this value (in mm)', + argstr='-FWHM %f') + fwhmxy = traits.Float( + desc='Blur until the 2D (x,y)-plane FWHM reaches this value (in mm)', + argstr='-FWHMxy %f') + blurmaster = File( + desc='The dataset whose smoothness controls the process.', + argstr='-blurmaster %s', + exists=True) + mask = File( + desc='Mask dataset, if desired. Voxels NOT in mask will be set to zero ' + 'in output.', + argstr='-mask %s', + exists=True) + + +class BlurToFWHM(AFNICommand): + """Blurs a 'master' dataset until it reaches a specified FWHM smoothness + (approximately). 
+
+    For complete details, see the `3dBlurToFWHM Documentation
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> blur = afni.preprocess.BlurToFWHM()
+    >>> blur.inputs.in_file = 'epi.nii'
+    >>> blur.inputs.fwhm = 2.5
+    >>> blur.cmdline # doctest: +ELLIPSIS
+    '3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni'
+    >>> res = blur.run() # doctest: +SKIP
+
+    """
+    _cmd = '3dBlurToFWHM'
+    input_spec = BlurToFWHMInputSpec
+    output_spec = AFNICommandOutputSpec
+
+
+class ClipLevelInputSpec(CommandLineInputSpec):
+    in_file = File(
+        desc='input file to 3dClipLevel',
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        exists=True)
+    mfrac = traits.Float(
+        desc='Use the number ff instead of 0.50 in the algorithm',
+        argstr='-mfrac %s',
+        position=2)
+    doall = traits.Bool(
+        desc='Apply the algorithm to each sub-brick separately.',
+        argstr='-doall',
+        position=3,
+        xor=['grad'])
+    grad = traits.File(
+        desc='Also compute a \'gradual\' clip level as a function of voxel '
+             'position, and output that to a dataset.',
+        argstr='-grad %s',
+        position=3,
+        xor=['doall'])
+
+
+class ClipLevelOutputSpec(TraitedSpec):
+    clip_val = traits.Float(desc='output')
+
+
+class ClipLevel(AFNICommandBase):
+    """Estimates the value at which to clip the anatomical dataset so
+    that background regions are set to zero.
+
+    For complete details, see the `3dClipLevel Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces.afni import preprocess
+    >>> cliplevel = preprocess.ClipLevel()
+    >>> cliplevel.inputs.in_file = 'anatomical.nii'
+    >>> cliplevel.cmdline
+    '3dClipLevel anatomical.nii'
+    >>> res = cliplevel.run() # doctest: +SKIP
+
+    """
+    _cmd = '3dClipLevel'
+    input_spec = ClipLevelInputSpec
+    output_spec = ClipLevelOutputSpec
+
+    def aggregate_outputs(self, runtime=None, needed_outputs=None):
+
+        outputs = self._outputs()
+
+        outfile = os.path.join(os.getcwd(), 'stat_result.json')
+
+        if runtime is None:
+            # no fresh run: fall back to a previously cached result, or
+            # re-run the interface if the cache is missing
+            try:
+                clip_val = load_json(outfile)['stat']
+            except IOError:
+                return self.run().outputs
+        else:
+            # parse the clip value(s) printed by 3dClipLevel on stdout
+            clip_val = []
+            for line in runtime.stdout.split('\n'):
+                if line:
+                    values = line.split()
+                    if len(values) > 1:
+                        clip_val.append([float(val) for val in values])
+                    else:
+                        clip_val.extend([float(val) for val in values])
+
+            if len(clip_val) == 1:
+                clip_val = clip_val[0]
+            save_json(outfile, dict(stat=clip_val))
+        outputs.clip_val = clip_val
+
+        return outputs
+
+
+class DegreeCentralityInputSpec(CentralityInputSpec):
+    """DegreeCentrality inputspec
+    """
+
+    in_file = File(
+        desc='input file to 3dDegreeCentrality',
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    sparsity = traits.Float(
+        desc='only take the top percent of connections', argstr='-sparsity %f')
+    oned_file = Str(
+        desc='output filepath to text dump of correlation matrix',
+        argstr='-out1D %s')
+
+
+class DegreeCentralityOutputSpec(AFNICommandOutputSpec):
+    """DegreeCentrality outputspec
+    """
+
+    oned_file = File(
+        desc='The text output of the similarity matrix computed after '
+             'thresholding with one-dimensional and ijk voxel indices, '
+             'correlations, image extents, and affine matrix.')
+
+
+class DegreeCentrality(AFNICommand):
+    """Performs degree centrality on a dataset using a given maskfile
+    via 3dDegreeCentrality
+
+    For complete details, see the `3dDegreeCentrality Documentation.
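Because aggregate_outputs above parses the value printed by 3dClipLevel on stdout (or re-reads the cached stat_result.json), the clip level comes back as a plain float. A sketch of consuming it (illustrative):

    >>> from nipype.interfaces.afni import preprocess
    >>> cliplevel = preprocess.ClipLevel()
    >>> cliplevel.inputs.in_file = 'anatomical.nii'
    >>> res = cliplevel.run()  # doctest: +SKIP
    >>> # res.outputs.clip_val is then a float, usable e.g. as a mask threshold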
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> degree = afni.DegreeCentrality() + >>> degree.inputs.in_file = 'functional.nii' + >>> degree.inputs.mask = 'mask.nii' + >>> degree.inputs.sparsity = 1 # keep the top one percent of connections + >>> degree.inputs.out_file = 'out.nii' + >>> degree.cmdline + '3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii' + >>> res = degree.run() # doctest: +SKIP + + """ + + _cmd = '3dDegreeCentrality' + input_spec = DegreeCentralityInputSpec + output_spec = DegreeCentralityOutputSpec + + # Re-define generated inputs + def _list_outputs(self): + # Import packages + import os + + # Update outputs dictionary if oned file is defined + outputs = super(DegreeCentrality, self)._list_outputs() + if self.inputs.oned_file: + outputs['oned_file'] = os.path.abspath(self.inputs.oned_file) + + return outputs + + +class DespikeInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dDespike', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_despike', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + + +class Despike(AFNICommand): + """Removes 'spikes' from the 3D+time input dataset + + For complete details, see the `3dDespike Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> despike = afni.Despike() + >>> despike.inputs.in_file = 'functional.nii' + >>> despike.cmdline + '3dDespike -prefix functional_despike functional.nii' + >>> res = despike.run() # doctest: +SKIP + + """ + + _cmd = '3dDespike' + input_spec = DespikeInputSpec + output_spec = AFNICommandOutputSpec + + +class DetrendInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dDetrend', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_detrend', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + + +class Detrend(AFNICommand): + """This program removes components from voxel time series using + linear least squares + + For complete details, see the `3dDetrend Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> detrend = afni.Detrend() + >>> detrend.inputs.in_file = 'functional.nii' + >>> detrend.inputs.args = '-polort 2' + >>> detrend.inputs.outputtype = 'AFNI' + >>> detrend.cmdline + '3dDetrend -polort 2 -prefix functional_detrend functional.nii' + >>> res = detrend.run() # doctest: +SKIP + + """ + + _cmd = '3dDetrend' + input_spec = DetrendInputSpec + output_spec = AFNICommandOutputSpec + + +class ECMInputSpec(CentralityInputSpec): + """ECM inputspec + """ + + in_file = File( + desc='input file to 3dECM', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + sparsity = traits.Float( + desc='only take the top percent of connections', argstr='-sparsity %f') + full = traits.Bool( + desc='Full power method; enables thresholding; automatically selected ' + 'if -thresh or -sparsity are set', + argstr='-full') + fecm = traits.Bool( + desc='Fast centrality method; substantial speed increase but cannot ' + 'accomodate thresholding; automatically selected if -thresh or ' + '-sparsity are not set', + argstr='-fecm') + shift = traits.Float( + desc='shift correlation coefficients in similarity matrix to enforce ' + 'non-negativity, s >= 0.0; default = 0.0 for -full, 1.0 for -fecm', + argstr='-shift %f') + scale = traits.Float( + desc='scale correlation coefficients in similarity matrix to after ' + 'shifting, x >= 0.0; default = 1.0 for -full, 0.5 for -fecm', + argstr='-scale %f') + eps = traits.Float( + desc='sets the stopping criterion for the power iteration; ' + 'l2|v_old - v_new| < eps*|v_old|; default = 0.001', + argstr='-eps %f') + max_iter = traits.Int( + desc='sets the maximum number of iterations to use in the power ' + 'iteration; default = 1000', + argstr='-max_iter %d') + memory = traits.Float( + desc='Limit memory consumption on system by setting the amount of GB ' + 'to limit the algorithm to; default = 2GB', + argstr='-memory %f') + + +class ECM(AFNICommand): + """Performs degree centrality on a dataset using a given maskfile + via the 3dECM command + + For complete details, see the `3dECM Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> ecm = afni.ECM() + >>> ecm.inputs.in_file = 'functional.nii' + >>> ecm.inputs.mask = 'mask.nii' + >>> ecm.inputs.sparsity = 0.1 # keep top 0.1% of connections + >>> ecm.inputs.out_file = 'out.nii' + >>> ecm.cmdline + '3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii' + >>> res = ecm.run() # doctest: +SKIP + + """ + + _cmd = '3dECM' + input_spec = ECMInputSpec + output_spec = AFNICommandOutputSpec + + +class FimInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dfim+', + argstr='-input %s', + position=1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_fim', + desc='output image file name', + argstr='-bucket %s', + name_source='in_file') + ideal_file = File( + desc='ideal time series file name', + argstr='-ideal_file %s', + position=2, + mandatory=True, + exists=True) + fim_thr = traits.Float( + desc='fim internal mask threshold value', + argstr='-fim_thr %f', + position=3) + out = Str( + desc='Flag to output the specified parameter', + argstr='-out %s', + position=4) + + +class Fim(AFNICommand): + """Program to calculate the cross-correlation of an ideal reference + waveform with the measured FMRI time series for each voxel. + + For complete details, see the `3dfim+ Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> fim = afni.Fim() + >>> fim.inputs.in_file = 'functional.nii' + >>> fim.inputs.ideal_file= 'seed.1D' + >>> fim.inputs.out_file = 'functional_corr.nii' + >>> fim.inputs.out = 'Correlation' + >>> fim.inputs.fim_thr = 0.0009 + >>> fim.cmdline + '3dfim+ -input functional.nii -ideal_file seed.1D -fim_thr 0.000900 -out Correlation -bucket functional_corr.nii' + >>> res = fim.run() # doctest: +SKIP + + """ + + _cmd = '3dfim+' + input_spec = FimInputSpec + output_spec = AFNICommandOutputSpec + + +class FourierInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dFourier', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_fourier', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + lowpass = traits.Float( + desc='lowpass', argstr='-lowpass %f', mandatory=True) + highpass = traits.Float( + desc='highpass', argstr='-highpass %f', mandatory=True) + retrend = traits.Bool( + desc='Any mean and linear trend are removed before filtering. This ' + 'will restore the trend after filtering.', + argstr='-retrend') + + +class Fourier(AFNICommand): + """Program to lowpass and/or highpass each voxel time series in a + dataset, via the FFT + + For complete details, see the `3dFourier Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> fourier = afni.Fourier() + >>> fourier.inputs.in_file = 'functional.nii' + >>> fourier.inputs.retrend = True + >>> fourier.inputs.highpass = 0.005 + >>> fourier.inputs.lowpass = 0.1 + >>> fourier.cmdline + '3dFourier -highpass 0.005000 -lowpass 0.100000 -prefix functional_fourier -retrend functional.nii' + >>> res = fourier.run() # doctest: +SKIP + + """ + + _cmd = '3dFourier' + input_spec = FourierInputSpec + output_spec = AFNICommandOutputSpec + + +class HistInputSpec(CommandLineInputSpec): + in_file = File( + desc='input file to 3dHist', + argstr='-input %s', + position=1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + desc='Write histogram to niml file with this prefix', + name_template='%s_hist', + keep_extension=False, + argstr='-prefix %s', + name_source=['in_file']) + showhist = traits.Bool( + False, + usedefault=True, + desc='write a text visual histogram', + argstr='-showhist') + out_show = File( + name_template='%s_hist.out', + desc='output image file name', + keep_extension=False, + argstr='> %s', + name_source='in_file', + position=-1) + mask = File( + desc='matrix to align input file', argstr='-mask %s', exists=True) + nbin = traits.Int(desc='number of bins', argstr='-nbin %d') + max_value = traits.Float(argstr='-max %f', desc='maximum intensity value') + min_value = traits.Float(argstr='-min %f', desc='minimum intensity value') + bin_width = traits.Float(argstr='-binwidth %f', desc='bin width') + + +class HistOutputSpec(TraitedSpec): + out_file = File(desc='output file', exists=True) + out_show = File(desc='output visual histogram') + + +class Hist(AFNICommandBase): + """Computes average of all voxels in the input dataset + which satisfy the criterion in the options list + + For complete details, see the `3dHist Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> hist = afni.Hist() + >>> hist.inputs.in_file = 'functional.nii' + >>> hist.cmdline + '3dHist -input functional.nii -prefix functional_hist' + >>> res = hist.run() # doctest: +SKIP + + """ + + _cmd = '3dHist' + input_spec = HistInputSpec + output_spec = HistOutputSpec + _redirect_x = True + + def __init__(self, **inputs): + super(Hist, self).__init__(**inputs) + if not no_afni(): + version = Info.version() + + # As of AFNI 16.0.00, redirect_x is not needed + if version[0] > 2015: + self._redirect_x = False + + def _parse_inputs(self, skip=None): + if not self.inputs.showhist: + if skip is None: + skip = [] + skip += ['out_show'] + return super(Hist, self)._parse_inputs(skip=skip) + + def _list_outputs(self): + outputs = super(Hist, self)._list_outputs() + outputs['out_file'] += '.niml.hist' + if not self.inputs.showhist: + outputs['out_show'] = Undefined + return outputs + + +class LFCDInputSpec(CentralityInputSpec): + """LFCD inputspec + """ + + in_file = File( + desc='input file to 3dLFCD', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + + +class LFCD(AFNICommand): + """Performs degree centrality on a dataset using a given maskfile + via the 3dLFCD command + + For complete details, see the `3dLFCD Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> lfcd = afni.LFCD() + >>> lfcd.inputs.in_file = 'functional.nii' + >>> lfcd.inputs.mask = 'mask.nii' + >>> lfcd.inputs.thresh = 0.8 # keep all connections with corr >= 0.8 + >>> lfcd.inputs.out_file = 'out.nii' + >>> lfcd.cmdline + '3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii' + >>> res = lfcd.run() # doctest: +SKIP + """ + + _cmd = '3dLFCD' + input_spec = LFCDInputSpec + output_spec = AFNICommandOutputSpec + + +class MaskaveInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dmaskave', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_maskave.1D', + desc='output image file name', + keep_extension=True, + argstr='> %s', + name_source='in_file', + position=-1) + mask = File( + desc='matrix to align input file', + argstr='-mask %s', + position=1, + exists=True) + quiet = traits.Bool( + desc='matrix to align input file', argstr='-quiet', position=2) + + +class Maskave(AFNICommand): + """Computes average of all voxels in the input dataset + which satisfy the criterion in the options list + + For complete details, see the `3dmaskave Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> maskave = afni.Maskave() + >>> maskave.inputs.in_file = 'functional.nii' + >>> maskave.inputs.mask= 'seed_mask.nii' + >>> maskave.inputs.quiet= True + >>> maskave.cmdline # doctest: +ELLIPSIS + '3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D' + >>> res = maskave.run() # doctest: +SKIP + + """ + + _cmd = '3dmaskave' + input_spec = MaskaveInputSpec + output_spec = AFNICommandOutputSpec + + +class MeansInputSpec(AFNICommandInputSpec): + in_file_a = File( + desc='input file to 3dMean', + argstr='%s', + position=-2, + mandatory=True, + exists=True) + in_file_b = File( + desc='another input file to 3dMean', + argstr='%s', + position=-1, + exists=True) + datum = traits.Str( + desc='Sets the data type of the output dataset', argstr='-datum %s') + out_file = File( + name_template='%s_mean', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file_a') + scale = Str(desc='scaling of output', argstr='-%sscale') + non_zero = traits.Bool(desc='use only non-zero values', argstr='-non_zero') + std_dev = traits.Bool(desc='calculate std dev', argstr='-stdev') + sqr = traits.Bool(desc='mean square instead of value', argstr='-sqr') + summ = traits.Bool(desc='take sum, (not average)', argstr='-sum') + count = traits.Bool( + desc='compute count of non-zero voxels', argstr='-count') + mask_inter = traits.Bool( + desc='create intersection mask', argstr='-mask_inter') + mask_union = traits.Bool(desc='create union mask', argstr='-mask_union') + + +class Means(AFNICommand): + """Takes the voxel-by-voxel mean of all input datasets using 3dMean + + For complete details, see the `3dMean Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> means = afni.Means() + >>> means.inputs.in_file_a = 'im1.nii' + >>> means.inputs.in_file_b = 'im2.nii' + >>> means.inputs.out_file = 'output.nii' + >>> means.cmdline + '3dMean -prefix output.nii im1.nii im2.nii' + >>> res = means.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> means = afni.Means() + >>> means.inputs.in_file_a = 'im1.nii' + >>> means.inputs.out_file = 'output.nii' + >>> means.inputs.datum = 'short' + >>> means.cmdline + '3dMean -datum short -prefix output.nii im1.nii' + >>> res = means.run() # doctest: +SKIP + + """ + + _cmd = '3dMean' + input_spec = MeansInputSpec + output_spec = AFNICommandOutputSpec + + +class OutlierCountInputSpec(CommandLineInputSpec): + in_file = File( + argstr='%s', + mandatory=True, + exists=True, + position=-2, + desc='input dataset') + mask = File( + exists=True, + argstr='-mask %s', + xor=['autoclip', 'automask'], + desc='only count voxels within the given mask') + qthr = traits.Range( + value=1e-3, + low=0.0, + high=1.0, + usedefault=True, + argstr='-qthr %.5f', + desc='indicate a value for q to compute alpha') + autoclip = traits.Bool( + False, + usedefault=True, + argstr='-autoclip', + xor=['mask'], + desc='clip off small voxels') + automask = traits.Bool( + False, + usedefault=True, + argstr='-automask', + xor=['mask'], + desc='clip off small voxels') + fraction = traits.Bool( + False, + usedefault=True, + argstr='-fraction', + desc='write out the fraction of masked voxels which are outliers at ' + 'each timepoint') + interval = traits.Bool( + False, + usedefault=True, + argstr='-range', + desc='write out the median + 3.5 MAD of outlier count with each ' + 'timepoint') + save_outliers = traits.Bool( + False, usedefault=True, desc='enables out_file 
option') + outliers_file = File( + name_template='%s_outliers', + argstr='-save %s', + name_source=['in_file'], + output_name='out_outliers', + keep_extension=True, + desc='output image file name') + polort = traits.Int( + argstr='-polort %d', + desc='detrend each voxel timeseries with polynomials') + legendre = traits.Bool( + False, + usedefault=True, + argstr='-legendre', + desc='use Legendre polynomials') + out_file = File( + name_template='%s_outliers', + name_source=['in_file'], + keep_extension=False, + desc='capture standard output') + + +class OutlierCountOutputSpec(TraitedSpec): + out_outliers = File(exists=True, desc='output image file name') + out_file = File(desc='capture standard output') + + +class OutlierCount(CommandLine): + """Calculates number of 'outliers' at each time point of a + a 3D+time dataset. + + For complete details, see the `3dToutcount Documentation + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> toutcount = afni.OutlierCount() + >>> toutcount.inputs.in_file = 'functional.nii' + >>> toutcount.cmdline # doctest: +ELLIPSIS + '3dToutcount -qthr 0.00100 functional.nii' + >>> res = toutcount.run() # doctest: +SKIP + + """ + + _cmd = '3dToutcount' + input_spec = OutlierCountInputSpec + output_spec = OutlierCountOutputSpec + _terminal_output = 'file_split' + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + + # This is not strictly an input, but needs be + # set before run() is called. + if self.terminal_output == 'none': + self.terminal_output = 'file_split' + + if not self.inputs.save_outliers: + skip += ['outliers_file'] + return super(OutlierCount, self)._parse_inputs(skip) + + def _run_interface(self, runtime): + runtime = super(OutlierCount, self)._run_interface(runtime) + + # Read from runtime.stdout or runtime.merged + with open(op.abspath(self.inputs.out_file), 'w') as outfh: + outfh.write(runtime.stdout or runtime.merged) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + if self.inputs.save_outliers: + outputs['out_outliers'] = op.abspath(self.inputs.outliers_file) + return outputs + + +class QualityIndexInputSpec(CommandLineInputSpec): + in_file = File( + argstr='%s', + mandatory=True, + exists=True, + position=-2, + desc='input dataset') + mask = File( + exists=True, + argstr='-mask %s', + xor=['autoclip', 'automask'], + desc='compute correlation only across masked voxels') + spearman = traits.Bool( + False, + usedefault=True, + argstr='-spearman', + desc='Quality index is 1 minus the Spearman (rank) correlation ' + 'coefficient of each sub-brick with the median sub-brick. 
' + '(default).') + quadrant = traits.Bool( + False, + usedefault=True, + argstr='-quadrant', + desc='Similar to -spearman, but using 1 minus the quadrant correlation ' + 'coefficient as the quality index.') + autoclip = traits.Bool( + False, + usedefault=True, + argstr='-autoclip', + xor=['mask'], + desc='clip off small voxels') + automask = traits.Bool( + False, + usedefault=True, + argstr='-automask', + xor=['mask'], + desc='clip off small voxels') + clip = traits.Float(argstr='-clip %f', desc='clip off values below') + interval = traits.Bool( + False, + usedefault=True, + argstr='-range', + desc='write out the median + 3.5 MAD of outlier count with each ' + 'timepoint') + out_file = File( + name_template='%s_tqual', + name_source=['in_file'], + argstr='> %s', + keep_extension=False, + position=-1, + desc='capture standard output') + + +class QualityIndexOutputSpec(TraitedSpec): + out_file = File(desc='file containing the captured standard output') + + +class QualityIndex(CommandLine): + """Computes a `quality index' for each sub-brick in a 3D+time dataset. + The output is a 1D time series with the index for each sub-brick. + The results are written to stdout. + + For complete details, see the `3dTqual Documentation + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tqual = afni.QualityIndex() + >>> tqual.inputs.in_file = 'functional.nii' + >>> tqual.cmdline # doctest: +ELLIPSIS + '3dTqual functional.nii > functional_tqual' + >>> res = tqual.run() # doctest: +SKIP + + """ + _cmd = '3dTqual' + input_spec = QualityIndexInputSpec + output_spec = QualityIndexOutputSpec + + +class ROIStatsInputSpec(CommandLineInputSpec): + in_file = File( + desc='input file to 3dROIstats', + argstr='%s', + position=-1, + mandatory=True, + exists=True) + mask = File(desc='input mask', argstr='-mask %s', position=3, exists=True) + mask_f2short = traits.Bool( + desc='Tells the program to convert a float mask to short integers, ' + 'by simple rounding.', + argstr='-mask_f2short', + position=2) + quiet = traits.Bool(desc='execute quietly', argstr='-quiet', position=1) + + +class ROIStatsOutputSpec(TraitedSpec): + stats = File(desc='output tab separated values file', exists=True) + + +class ROIStats(AFNICommandBase): + """Display statistics over masked regions + + For complete details, see the `3dROIstats Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> roistats = afni.ROIStats() + >>> roistats.inputs.in_file = 'functional.nii' + >>> roistats.inputs.mask = 'skeleton_mask.nii.gz' + >>> roistats.inputs.quiet = True + >>> roistats.cmdline + '3dROIstats -quiet -mask skeleton_mask.nii.gz functional.nii' + >>> res = roistats.run() # doctest: +SKIP + + """ + _cmd = '3dROIstats' + _terminal_output = 'allatonce' + input_spec = ROIStatsInputSpec + output_spec = ROIStatsOutputSpec + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + output_filename = 'roi_stats.csv' + with open(output_filename, 'w') as f: + f.write(runtime.stdout) + + outputs.stats = os.path.abspath(output_filename) + return outputs + + +class RetroicorInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dretroicor', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_retroicor', + name_source=['in_file'], + desc='output image file name', + argstr='-prefix %s', + position=1) + card = File( + desc='1D cardiac data file for cardiac correction', + argstr='-card %s', + position=-2, + exists=True) + resp = File( + desc='1D respiratory waveform data for correction', + argstr='-resp %s', + position=-3, + exists=True) + threshold = traits.Int( + desc='Threshold for detection of R-wave peaks in input (Make sure it ' + 'is above the background noise level, Try 3/4 or 4/5 times range ' + 'plus minimum)', + argstr='-threshold %d', + position=-4) + order = traits.Int( + desc='The order of the correction (2 is typical)', + argstr='-order %s', + position=-5) + cardphase = File( + desc='Filename for 1D cardiac phase output', + argstr='-cardphase %s', + position=-6, + hash_files=False) + respphase = File( + desc='Filename for 1D resp phase output', + argstr='-respphase %s', + position=-7, + hash_files=False) + + +class Retroicor(AFNICommand): + """Performs Retrospective Image Correction for physiological + motion effects, using a slightly modified version of the + RETROICOR algorithm + + The durations of the physiological inputs are assumed to equal + the duration of the dataset. Any constant sampling rate may be + used, but 40 Hz seems to be acceptable. This program's cardiac + peak detection algorithm is rather simplistic, so you might try + using the scanner's cardiac gating output (transform it to a + spike wave if necessary). + + This program uses slice timing information embedded in the + dataset to estimate the proper cardiac/respiratory phase for + each slice. It makes sense to run this program before any + program that may destroy the slice timings (e.g. 3dvolreg for + motion correction). + + For complete details, see the `3dretroicor Documentation. 
+ `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> ret = afni.Retroicor() + >>> ret.inputs.in_file = 'functional.nii' + >>> ret.inputs.card = 'mask.1D' + >>> ret.inputs.resp = 'resp.1D' + >>> ret.inputs.outputtype = 'NIFTI' + >>> ret.cmdline + '3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii' + >>> res = ret.run() # doctest: +SKIP + + """ + + _cmd = '3dretroicor' + input_spec = RetroicorInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'in_file': + if not isdefined(self.inputs.card) and not isdefined( + self.inputs.resp): + return None + return super(Retroicor, self)._format_arg(name, trait_spec, value) + + +class SegInputSpec(CommandLineInputSpec): + in_file = File( + desc='ANAT is the volume to segment', + argstr='-anat %s', + position=-1, + mandatory=True, + exists=True, + copyfile=True) + mask = traits.Either( + traits.Enum('AUTO'), + File(exists=True), + desc='only non-zero voxels in mask are analyzed. mask can either be a ' + 'dataset or the string "AUTO" which would use AFNI\'s automask ' + 'function to create the mask.', + argstr='-mask %s', + position=-2, + mandatory=True) + blur_meth = traits.Enum( + 'BFT', + 'BIM', + argstr='-blur_meth %s', + desc='set the blurring method for bias field estimation') + bias_fwhm = traits.Float( + desc='The amount of blurring used when estimating the field bias with ' + 'the Wells method', + argstr='-bias_fwhm %f') + classes = Str( + desc='CLASS_STRING is a semicolon delimited string of class labels', + argstr='-classes %s') + bmrf = traits.Float( + desc='Weighting factor controlling spatial homogeneity of the ' + 'classifications', + argstr='-bmrf %f') + bias_classes = Str( + desc='A semicolon delimited string of classes that contribute to the ' + 'estimation of the bias field', + argstr='-bias_classes %s') + prefix = Str( + desc='the prefix for the output folder containing all output volumes', + argstr='-prefix %s') + mixfrac = Str( + desc='MIXFRAC sets up the volume-wide (within mask) tissue fractions ' + 'while initializing the segmentation (see IGNORE for exception)', + argstr='-mixfrac %s') + mixfloor = traits.Float( + desc='Set the minimum value for any class\'s mixing fraction', + argstr='-mixfloor %f') + main_N = traits.Int( + desc='Number of iterations to perform.', argstr='-main_N %d') + + +class Seg(AFNICommandBase): + """3dSeg segments brain volumes into tissue classes. The program allows + for adding a variety of global and voxelwise priors. However for the + moment, only mixing fractions and MRF are documented. + + For complete details, see the `3dSeg Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces.afni import preprocess + >>> seg = preprocess.Seg() + >>> seg.inputs.in_file = 'structural.nii' + >>> seg.inputs.mask = 'AUTO' + >>> seg.cmdline + '3dSeg -mask AUTO -anat structural.nii' + >>> res = seg.run() # doctest: +SKIP + + """ + + _cmd = '3dSeg' + input_spec = SegInputSpec + output_spec = AFNICommandOutputSpec + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + + import glob + + outputs = self._outputs() + + if isdefined(self.inputs.prefix): + outfile = os.path.join(os.getcwd(), self.inputs.prefix, + 'Classes+*.BRIK') + else: + outfile = os.path.join(os.getcwd(), 'Segsy', 'Classes+*.BRIK') + + outputs.out_file = glob.glob(outfile)[0] + + return outputs + + +class SkullStripInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dSkullStrip', + argstr='-input %s', + position=1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_skullstrip', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + + +class SkullStrip(AFNICommand): + """A program to extract the brain from surrounding tissue from MRI + T1-weighted images. + TODO Add optional arguments. + + For complete details, see the `3dSkullStrip Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> skullstrip = afni.SkullStrip() + >>> skullstrip.inputs.in_file = 'functional.nii' + >>> skullstrip.inputs.args = '-o_ply' + >>> skullstrip.cmdline + '3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip' + >>> res = skullstrip.run() # doctest: +SKIP + + """ + _cmd = '3dSkullStrip' + _redirect_x = True + input_spec = SkullStripInputSpec + output_spec = AFNICommandOutputSpec + + def __init__(self, **inputs): + super(SkullStrip, self).__init__(**inputs) + + if not no_afni(): + v = Info.version() + + # Between AFNI 16.0.00 and 16.2.07, redirect_x is not needed + if v >= (2016, 0, 0) and v < (2016, 2, 7): + self._redirect_x = False + + +class TCorr1DInputSpec(AFNICommandInputSpec): + xset = File( + desc='3d+time dataset input', + argstr=' %s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + y_1d = File( + desc='1D time series file input', + argstr=' %s', + position=-1, + mandatory=True, + exists=True) + out_file = File( + desc='output filename prefix', + name_template='%s_correlation.nii.gz', + argstr='-prefix %s', + name_source='xset', + keep_extension=True) + pearson = traits.Bool( + desc='Correlation is the normal Pearson correlation coefficient', + argstr=' -pearson', + xor=['spearman', 'quadrant', 'ktaub'], + position=1) + spearman = traits.Bool( + desc='Correlation is the Spearman (rank) correlation coefficient', + argstr=' -spearman', + xor=['pearson', 'quadrant', 'ktaub'], + position=1) + quadrant = traits.Bool( + desc='Correlation is the quadrant correlation coefficient', + argstr=' -quadrant', + xor=['pearson', 'spearman', 'ktaub'], + position=1) + ktaub = traits.Bool( + desc='Correlation is the Kendall\'s tau_b correlation coefficient', + argstr=' -ktaub', + xor=['pearson', 'spearman', 'quadrant'], + position=1) + + +class TCorr1DOutputSpec(TraitedSpec): + out_file = File(desc='output file containing correlations', exists=True) + + +class TCorr1D(AFNICommand): + """Computes the correlation coefficient between each voxel time series + in the input 3D+time dataset. + + For complete details, see the `3dTcorr1D Documentation. 
+ `_ + + >>> from nipype.interfaces import afni + >>> tcorr1D = afni.TCorr1D() + >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii' + >>> tcorr1D.inputs.y_1d = 'seed.1D' + >>> tcorr1D.cmdline + '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D' + >>> res = tcorr1D.run() # doctest: +SKIP + + """ + + _cmd = '3dTcorr1D' + input_spec = TCorr1DInputSpec + output_spec = TCorr1DOutputSpec + + +class TCorrMapInputSpec(AFNICommandInputSpec): + in_file = File( + exists=True, argstr='-input %s', mandatory=True, copyfile=False) + seeds = File(exists=True, argstr='-seed %s', xor=('seeds_width')) + mask = File(exists=True, argstr='-mask %s') + automask = traits.Bool(argstr='-automask') + polort = traits.Int(argstr='-polort %d') + bandpass = traits.Tuple( + (traits.Float(), traits.Float()), argstr='-bpass %f %f') + regress_out_timeseries = traits.File(exists=True, argstr='-ort %s') + blur_fwhm = traits.Float(argstr='-Gblur %f') + seeds_width = traits.Float(argstr='-Mseed %f', xor=('seeds')) + + # outputs + mean_file = File(argstr='-Mean %s', suffix='_mean', name_source='in_file') + zmean = File(argstr='-Zmean %s', suffix='_zmean', name_source='in_file') + qmean = File(argstr='-Qmean %s', suffix='_qmean', name_source='in_file') + pmean = File(argstr='-Pmean %s', suffix='_pmean', name_source='in_file') + + _thresh_opts = ('absolute_threshold', 'var_absolute_threshold', + 'var_absolute_threshold_normalize') + thresholds = traits.List(traits.Int()) + absolute_threshold = File( + argstr='-Thresh %f %s', + suffix='_thresh', + name_source='in_file', + xor=_thresh_opts) + var_absolute_threshold = File( + argstr='-VarThresh %f %f %f %s', + suffix='_varthresh', + name_source='in_file', + xor=_thresh_opts) + var_absolute_threshold_normalize = File( + argstr='-VarThreshN %f %f %f %s', + suffix='_varthreshn', + name_source='in_file', + xor=_thresh_opts) + + correlation_maps = File(argstr='-CorrMap %s', name_source='in_file') + correlation_maps_masked = File( + argstr='-CorrMask %s', name_source='in_file') + + _expr_opts = ('average_expr', 'average_expr_nonzero', 'sum_expr') + expr = Str() + average_expr = File( + argstr='-Aexpr %s %s', + suffix='_aexpr', + name_source='in_file', + xor=_expr_opts) + average_expr_nonzero = File( + argstr='-Cexpr %s %s', + suffix='_cexpr', + name_source='in_file', + xor=_expr_opts) + sum_expr = File( + argstr='-Sexpr %s %s', + suffix='_sexpr', + name_source='in_file', + xor=_expr_opts) + histogram_bin_numbers = traits.Int() + histogram = File( + name_source='in_file', argstr='-Hist %d %s', suffix='_hist') + + +class TCorrMapOutputSpec(TraitedSpec): + mean_file = File() + zmean = File() + qmean = File() + pmean = File() + absolute_threshold = File() + var_absolute_threshold = File() + var_absolute_threshold_normalize = File() + correlation_maps = File() + correlation_maps_masked = File() + average_expr = File() + average_expr_nonzero = File() + sum_expr = File() + histogram = File() + + +class TCorrMap(AFNICommand): + """For each voxel time series, computes the correlation between it + and all other voxels, and combines this set of values into the + output dataset(s) in some way. + + For complete details, see the `3dTcorrMap Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tcm = afni.TCorrMap() + >>> tcm.inputs.in_file = 'functional.nii' + >>> tcm.inputs.mask = 'mask.nii' + >>> tcm.mean_file = 'functional_meancorr.nii' + >>> tcm.cmdline # doctest: +SKIP + '3dTcorrMap -input functional.nii -mask mask.nii -Mean functional_meancorr.nii' + >>> res = tcm.run() # doctest: +SKIP + + """ + + _cmd = '3dTcorrMap' + input_spec = TCorrMapInputSpec + output_spec = TCorrMapOutputSpec + _additional_metadata = ['suffix'] + + def _format_arg(self, name, trait_spec, value): + if name in self.inputs._thresh_opts: + return trait_spec.argstr % self.inputs.thresholds + [value] + elif name in self.inputs._expr_opts: + return trait_spec.argstr % (self.inputs.expr, value) + elif name == 'histogram': + return trait_spec.argstr % (self.inputs.histogram_bin_numbers, + value) + else: + return super(TCorrMap, self)._format_arg(name, trait_spec, value) + + +class TCorrelateInputSpec(AFNICommandInputSpec): + xset = File( + desc='input xset', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + yset = File( + desc='input yset', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tcorr', + desc='output image file name', + argstr='-prefix %s', + name_source='xset') + pearson = traits.Bool( + desc='Correlation is the normal Pearson correlation coefficient', + argstr='-pearson') + polort = traits.Int( + desc='Remove polynomical trend of order m', argstr='-polort %d') + + +class TCorrelate(AFNICommand): + """Computes the correlation coefficient between corresponding voxel + time series in two input 3D+time datasets 'xset' and 'yset' + + For complete details, see the `3dTcorrelate Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tcorrelate = afni.TCorrelate() + >>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii' + >>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii' + >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz' + >>> tcorrelate.inputs.polort = -1 + >>> tcorrelate.inputs.pearson = True + >>> tcorrelate.cmdline + '3dTcorrelate -prefix functional_tcorrelate.nii.gz -pearson -polort -1 u_rc1s1_Template.nii u_rc1s2_Template.nii' + >>> res = tcarrelate.run() # doctest: +SKIP + + """ + + _cmd = '3dTcorrelate' + input_spec = TCorrelateInputSpec + output_spec = AFNICommandOutputSpec + + +class TNormInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTNorm', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tnorm', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + norm2 = traits.Bool( + desc='L2 normalize (sum of squares = 1) [DEFAULT]', argstr='-norm2') + normR = traits.Bool( + desc= + 'normalize so sum of squares = number of time points * e.g., so RMS = 1.', + argstr='-normR') + norm1 = traits.Bool( + desc='L1 normalize (sum of absolute values = 1)', argstr='-norm1') + normx = traits.Bool( + desc='Scale so max absolute value = 1 (L_infinity norm)', + argstr='-normx') + polort = traits.Int( + desc="""Detrend with polynomials of order p before normalizing + [DEFAULT = don't do this] + * Use '-polort 0' to remove the mean, for example""", + argstr='-polort %s') + L1fit = traits.Bool( + desc="""Detrend with L1 regression (L2 is the default) + * This option is here just for the hell of it""", + argstr='-L1fit') + + +class TNorm(AFNICommand): + """Shifts voxel time series from input so that seperate slices are aligned + to the same temporal origin. + + For complete details, see the `3dTnorm Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tnorm = afni.TNorm() + >>> tnorm.inputs.in_file = 'functional.nii' + >>> tnorm.inputs.norm2 = True + >>> tnorm.inputs.out_file = 'rm.errts.unit errts+tlrc' + >>> tnorm.cmdline + '3dTnorm -norm2 -prefix rm.errts.unit errts+tlrc functional.nii' + >>> res = tshift.run() # doctest: +SKIP + + """ + _cmd = '3dTnorm' + input_spec = TNormInputSpec + output_spec = AFNICommandOutputSpec + + +class TProjectInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTproject', + argstr='-input %s', + position=1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tproject', + desc='output image file name', + position=-1, + argstr='-prefix %s', + name_source='in_file') + censor = File( + desc="""filename of censor .1D time series + * This is a file of 1s and 0s, indicating which + time points are to be included (1) and which are + to be excluded (0).""", + argstr="-censor %s", + exists=True) + censortr = traits.List( + traits.Str(), + desc="""list of strings that specify time indexes + to be removed from the analysis. Each string is + of one of the following forms: + 37 => remove global time index #37 + 2:37 => remove time index #37 in run #2 + 37..47 => remove global time indexes #37-47 + 37-47 => same as above + 2:37..47 => remove time indexes #37-47 in run #2 + *:0-2 => remove time indexes #0-2 in all runs + +Time indexes within each run start at 0. + +Run indexes start at 1 (just be to confusing). 
+ +N.B.: 2:37,47 means index #37 in run #2 and + global time index 47; it does NOT mean + index #37 in run #2 AND index #47 in run #2.""", + argstr="-CENSORTR %s") + cenmode = traits.Enum( + 'KILL', 'ZERO', 'NTRP', + desc="""specifies how censored time points are treated in + the output dataset: + + mode = ZERO ==> put zero values in their place + ==> output datset is same length as input + + mode = KILL ==> remove those time points + ==> output dataset is shorter than input + + mode = NTRP ==> censored values are replaced by interpolated + neighboring (in time) non-censored values, + BEFORE any projections, and then the + analysis proceeds without actual removal + of any time points -- this feature is to + keep the Spanish Inquisition happy. + * The default mode is KILL !!!""", + argstr='-cenmode %s') + concat = File( + desc="""The catenation file, as in 3dDeconvolve, containing the + TR indexes of the start points for each contiguous run + within the input dataset (the first entry should be 0). + ++ Also as in 3dDeconvolve, if the input dataset is + automatically catenated from a collection of datasets, + then the run start indexes are determined directly, + and '-concat' is not needed (and will be ignored). + ++ Each run must have at least 9 time points AFTER + censoring, or the program will not work! + ++ The only use made of this input is in setting up + the bandpass/stopband regressors. + ++ '-ort' and '-dsort' regressors run through all time + points, as read in. If you want separate projections + in each run, then you must either break these ort files + into appropriate components, OR you must run 3dTproject + for each run separately, using the appropriate pieces + from the ort files via the '{...}' selector for the + 1D files and the '[...]' selector for the datasets.""", + exists=True, + argstr='-concat %s') + noblock = traits.Bool( + desc="""Also as in 3dDeconvolve, if you want the program to treat + an auto-catenated dataset as one long run, use this option. + ++ However, '-noblock' will not affect catenation if you use + the '-concat' option.""", + argstr='-noblock') + ort = File( + desc="""Remove each column in file + ++ Each column will have its mean removed.""", + exists=True, + argstr="-ort %s") + polort = traits.Int( + desc="""Remove polynomials up to and including degree pp. + ++ Default value is 2. + ++ It makes no sense to use a value of pp greater than + 2, if you are bandpassing out the lower frequencies! + ++ For catenated datasets, each run gets a separate set + set of pp+1 Legendre polynomial regressors. + ++ Use of -polort -1 is not advised (if data mean != 0), + even if -ort contains constant terms, as all means are + removed.""", + argstr="-polort %d") + dsort = InputMultiObject( + File( + exists=True, + copyfile=False), + argstr="-dsort %s...", + desc="""Remove the 3D+time time series in dataset fset. + ++ That is, 'fset' contains a different nuisance time + series for each voxel (e.g., from AnatICOR). 
+ ++ Multiple -dsort options are allowed.""") + bandpass = traits.Tuple( + traits.Float, traits.Float, + desc="""Remove all frequencies EXCEPT those in the range""", + argstr='-bandpass %g %g') + stopband = traits.Tuple( + traits.Float, traits.Float, + desc="""Remove all frequencies in the range""", + argstr='-stopband %g %g') + TR = traits.Float( + desc="""Use time step dd for the frequency calculations, + rather than the value stored in the dataset header.""", + argstr='-TR %g') + mask = File( + exists=True, + desc="""Only operate on voxels nonzero in the mset dataset. + ++ Voxels outside the mask will be filled with zeros. + ++ If no masking option is given, then all voxels + will be processed.""", + argstr='-mask %s') + automask = traits.Bool( + desc="""Generate a mask automatically""", + xor=['mask'], + argstr='-automask') + blur = traits.Float( + desc="""Blur (inside the mask only) with a filter that has + width (FWHM) of fff millimeters. + ++ Spatial blurring (if done) is after the time + series filtering.""", + argstr='-blur %g') + norm = traits.Bool( + desc="""Normalize each output time series to have sum of + squares = 1. This is the LAST operation.""", + argstr='-norm') + + +class TProject(AFNICommand): + """ + This program projects (detrends) out various 'nuisance' time series from + each voxel in the input dataset. Note that all the projections are done + via linear regression, including the frequency-based options such + as '-passband'. In this way, you can bandpass time-censored data, and at + the same time, remove other time series of no interest + (e.g., physiological estimates, motion parameters). + Shifts voxel time series from input so that seperate slices are aligned to + the same temporal origin. + + For complete details, see the `3dTproject Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tproject = afni.TProject() + >>> tproject.inputs.in_file = 'functional.nii' + >>> tproject.inputs.bandpass = (0.00667, 99999) + >>> tproject.inputs.polort = 3 + >>> tproject.inputs.automask = True + >>> tproject.inputs.out_file = 'projected.nii.gz' + >>> tproject.cmdline + '3dTproject -input functional.nii -automask -bandpass 0.00667 99999 -polort 3 -prefix projected.nii.gz' + >>> res = tproject.run() # doctest: +SKIP + + """ + _cmd = '3dTproject' + input_spec = TProjectInputSpec + output_spec = AFNICommandOutputSpec + + + +class TShiftInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTshift', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tshift', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + tr = Str( + desc='manually set the TR. 
You can attach suffix "s" for seconds ' + 'or "ms" for milliseconds.', + argstr='-TR %s') + tzero = traits.Float( + desc='align each slice to given time offset', + argstr='-tzero %s', + xor=['tslice']) + tslice = traits.Int( + desc='align each slice to time offset of given slice', + argstr='-slice %s', + xor=['tzero']) + ignore = traits.Int( + desc='ignore the first set of points specified', argstr='-ignore %s') + interp = traits.Enum( + ('Fourier', 'linear', 'cubic', 'quintic', 'heptic'), + desc='different interpolation methods (see 3dTshift for details) ' + 'default = Fourier', + argstr='-%s') + tpattern = traits.Either( + traits.Enum('alt+z', 'altplus', # Synonyms + 'alt+z2', + 'alt-z', 'altminus', # Synonyms + 'alt-z2', + 'seq+z', 'seqplus', # Synonyms + 'seq-z', 'seqminus'), # Synonyms + Str, # For backwards compatibility + desc='use specified slice time pattern rather than one in header', + argstr='-tpattern %s', + xor=['slice_timing']) + slice_timing = traits.Either( + File(exists=True), + traits.List(traits.Float), + desc='time offsets from the volume acquisition onset for each slice', + argstr='-tpattern @%s', + xor=['tpattern']) + rlt = traits.Bool( + desc='Before shifting, remove the mean and linear trend', + argstr='-rlt') + rltplus = traits.Bool( + desc='Before shifting, remove the mean and linear trend and later put ' + 'back the mean', + argstr='-rlt+') + + +class TShiftOutputSpec(AFNICommandOutputSpec): + timing_file = File(desc="AFNI formatted timing file, if ``slice_timing`` is a list") + + +class TShift(AFNICommand): + """Shifts voxel time series from input so that seperate slices are aligned + to the same temporal origin. + + For complete details, see the `3dTshift Documentation. + `_ + + Examples + ======== + + Slice timing details may be specified explicitly via the ``slice_timing`` + input: + + >>> from nipype.interfaces import afni + >>> TR = 2.5 + >>> tshift = afni.TShift() + >>> tshift.inputs.in_file = 'functional.nii' + >>> tshift.inputs.tzero = 0.0 + >>> tshift.inputs.tr = '%.1fs' % TR + >>> tshift.inputs.slice_timing = list(np.arange(40) / TR) + >>> tshift.cmdline + '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' + + When the ``slice_timing`` input is used, the ``timing_file`` output is populated, + in this case with the generated file. + + >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS + '.../slice_timing.1D' + + This method creates a ``slice_timing.1D`` file to be passed to ``3dTshift``. + A pre-existing slice-timing file may be used in the same way: + + >>> tshift = afni.TShift() + >>> tshift.inputs.in_file = 'functional.nii' + >>> tshift.inputs.tzero = 0.0 + >>> tshift.inputs.tr = '%.1fs' % TR + >>> tshift.inputs.slice_timing = 'slice_timing.1D' + >>> tshift.cmdline + '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' + + When a pre-existing file is provided, ``timing_file`` is simply passed through. + + >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS + '.../slice_timing.1D' + + Alternatively, pre-specified slice timing patterns may be specified with the + ``tpattern`` input. 
+ For example, to specify an alternating, ascending slice timing pattern: + + >>> tshift = afni.TShift() + >>> tshift.inputs.in_file = 'functional.nii' + >>> tshift.inputs.tzero = 0.0 + >>> tshift.inputs.tr = '%.1fs' % TR + >>> tshift.inputs.tpattern = 'alt+z' + >>> tshift.cmdline + '3dTshift -prefix functional_tshift -tpattern alt+z -TR 2.5s -tzero 0.0 functional.nii' + + For backwards compatibility, ``tpattern`` may also take filenames prefixed + with ``@``. + However, in this case, filenames are not validated, so this usage will be + deprecated in future versions of Nipype. + + >>> tshift = afni.TShift() + >>> tshift.inputs.in_file = 'functional.nii' + >>> tshift.inputs.tzero = 0.0 + >>> tshift.inputs.tr = '%.1fs' % TR + >>> tshift.inputs.tpattern = '@slice_timing.1D' + >>> tshift.cmdline + '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' + + In these cases, ``timing_file`` is undefined. + + >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS + + + In any configuration, the interface may be run as usual: + + >>> res = tshift.run() # doctest: +SKIP + """ + _cmd = '3dTshift' + input_spec = TShiftInputSpec + output_spec = TShiftOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'tpattern' and value.startswith('@'): + iflogger.warning('Passing a file prefixed by "@" will be deprecated' + '; please use the `slice_timing` input') + elif name == 'slice_timing' and isinstance(value, list): + value = self._write_slice_timing() + return super(TShift, self)._format_arg(name, trait_spec, value) + + def _write_slice_timing(self): + fname = 'slice_timing.1D' + with open(fname, 'w') as fobj: + fobj.write('\t'.join(map(str, self.inputs.slice_timing))) + return fname + + def _list_outputs(self): + outputs = super(TShift, self)._list_outputs() + if isdefined(self.inputs.slice_timing): + if isinstance(self.inputs.slice_timing, list): + outputs['timing_file'] = os.path.abspath('slice_timing.1D') + else: + outputs['timing_file'] = os.path.abspath(self.inputs.slice_timing) + return outputs + + +class VolregInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dvolreg', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + in_weight_volume = traits.Either( + traits.Tuple(File(exists=True), traits.Int), + File(exists=True), + desc='weights for each voxel specified by a file with an ' + 'optional volume number (defaults to 0)', + argstr="-weight '%s[%d]'") + out_file = File( + name_template='%s_volreg', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + basefile = File( + desc='base file for registration', + argstr='-base %s', + position=-6, + exists=True) + zpad = traits.Int( + desc='Zeropad around the edges by \'n\' voxels during rotations', + argstr='-zpad %d', + position=-5) + md1d_file = File( + name_template='%s_md.1D', + desc='max displacement output file', + argstr='-maxdisp1D %s', + name_source='in_file', + keep_extension=True, + position=-4) + oned_file = File( + name_template='%s.1D', + desc='1D movement parameters output file', + argstr='-1Dfile %s', + name_source='in_file', + keep_extension=True) + verbose = traits.Bool( + desc='more detailed description of the process', argstr='-verbose') + timeshift = traits.Bool( + desc='time shift to mean slice time offset', argstr='-tshift 0') + copyorigin = traits.Bool( + desc='copy base file origin coords to output', argstr='-twodup') + oned_matrix_save = File( + name_template='%s.aff12.1D', + 
desc='Save the matrix transformation', + argstr='-1Dmatrix_save %s', + keep_extension=True, + name_source='in_file') + interp = traits.Enum( + ('Fourier', 'cubic', 'heptic', 'quintic', 'linear'), + desc='spatial interpolation methods [default = heptic]', + argstr='-%s') + + +class VolregOutputSpec(TraitedSpec): + out_file = File(desc='registered file', exists=True) + md1d_file = File(desc='max displacement info file', exists=True) + oned_file = File(desc='movement parameters info file', exists=True) + oned_matrix_save = File( + desc='matrix transformation from base to input', exists=True) + + +class Volreg(AFNICommand): + """Register input volumes to a base volume using AFNI 3dvolreg command + + For complete details, see the `3dvolreg Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> volreg = afni.Volreg() + >>> volreg.inputs.in_file = 'functional.nii' + >>> volreg.inputs.args = '-Fourier -twopass' + >>> volreg.inputs.zpad = 4 + >>> volreg.inputs.outputtype = 'NIFTI' + >>> volreg.cmdline # doctest: +ELLIPSIS + '3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii' + >>> res = volreg.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> volreg = afni.Volreg() + >>> volreg.inputs.in_file = 'functional.nii' + >>> volreg.inputs.interp = 'cubic' + >>> volreg.inputs.verbose = True + >>> volreg.inputs.zpad = 1 + >>> volreg.inputs.basefile = 'functional.nii' + >>> volreg.inputs.out_file = 'rm.epi.volreg.r1' + >>> volreg.inputs.oned_file = 'dfile.r1.1D' + >>> volreg.inputs.oned_matrix_save = 'mat.r1.tshift+orig.1D' + >>> volreg.cmdline + '3dvolreg -cubic -1Dfile dfile.r1.1D -1Dmatrix_save mat.r1.tshift+orig.1D -prefix rm.epi.volreg.r1 -verbose -base functional.nii -zpad 1 -maxdisp1D functional_md.1D functional.nii' + >>> res = volreg.run() # doctest: +SKIP + + """ + + _cmd = '3dvolreg' + input_spec = VolregInputSpec + output_spec = VolregOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'in_weight_volume' and not isinstance(value, tuple): + value = (value, 0) + return super(Volreg, self)._format_arg(name, trait_spec, value) + + +class WarpInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dWarp', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_warp', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file', + keep_extension=True) + tta2mni = traits.Bool( + desc='transform dataset from Talairach to MNI152', argstr='-tta2mni') + mni2tta = traits.Bool( + desc='transform dataset from MNI152 to Talaraich', argstr='-mni2tta') + matparent = File( + desc='apply transformation from 3dWarpDrive', + argstr='-matparent %s', + exists=True) + oblique_parent = File( + desc='Read in the oblique transformation matrix from an oblique ' + 'dataset and make cardinal dataset oblique to match', + argstr='-oblique_parent %s', + exists=True) + deoblique = traits.Bool( + desc='transform dataset from oblique to cardinal', argstr='-deoblique') + interp = traits.Enum( + ('linear', 'cubic', 'NN', 'quintic'), + desc='spatial interpolation methods [default = linear]', + argstr='-%s') + gridset = File( + desc='copy grid of specified dataset', + argstr='-gridset %s', + exists=True) + newgrid = traits.Float( + desc='specify grid of this size (mm)', argstr='-newgrid %f') + zpad = traits.Int( + desc='pad input dataset with 
N planes of zero on all sides.', + argstr='-zpad %d') + verbose = traits.Bool( + desc='Print out some information along the way.', argstr='-verb') + save_warp = traits.Bool( + desc='save warp as .mat file', requires=['verbose']) + + +class WarpOutputSpec(TraitedSpec): + out_file = File(desc='Warped file.', exists=True) + warp_file = File(desc='warp transform .mat file') + + +class Warp(AFNICommand): + """Use 3dWarp for spatially transforming a dataset + + For complete details, see the `3dWarp Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> warp = afni.Warp() + >>> warp.inputs.in_file = 'structural.nii' + >>> warp.inputs.deoblique = True + >>> warp.inputs.out_file = 'trans.nii.gz' + >>> warp.cmdline + '3dWarp -deoblique -prefix trans.nii.gz structural.nii' + >>> res = warp.run() # doctest: +SKIP + + >>> warp_2 = afni.Warp() + >>> warp_2.inputs.in_file = 'structural.nii' + >>> warp_2.inputs.newgrid = 1.0 + >>> warp_2.inputs.out_file = 'trans.nii.gz' + >>> warp_2.cmdline + '3dWarp -newgrid 1.000000 -prefix trans.nii.gz structural.nii' + >>> res = warp_2.run() # doctest: +SKIP + + """ + _cmd = '3dWarp' + input_spec = WarpInputSpec + output_spec = WarpOutputSpec + + def _run_interface(self, runtime): + runtime = super(Warp, self)._run_interface(runtime) + + if self.inputs.save_warp: + import numpy as np + warp_file = self._list_outputs()['warp_file'] + np.savetxt(warp_file, [runtime.stdout], fmt=str('%s')) + return runtime + + def _list_outputs(self): + outputs = super(Warp, self)._list_outputs() + if self.inputs.save_warp: + outputs['warp_file'] = fname_presuffix(outputs['out_file'], + suffix='_transform.mat', + use_ext=False) + + return outputs + + +class QwarpPlusMinusInputSpec(CommandLineInputSpec): + source_file = File( + desc= + 'Source image (opposite phase encoding direction than base image).', + argstr='-source %s', + mandatory=True, + exists=True, + copyfile=False) + base_file = File( + desc= + 'Base image (opposite phase encoding direction than source image).', + argstr='-base %s', + mandatory=True, + exists=True, + copyfile=False) + pblur = traits.List( + traits.Float(), + desc='The fraction of the patch size that' + 'is used for the progressive blur by providing a ' + 'value between 0 and 0.25. If you provide TWO ' + 'values, the first fraction is used for ' + 'progressively blurring the base image and the ' + 'second for the source image.', + argstr='-pblur %s', + minlen=1, + maxlen=2) + blur = traits.List( + traits.Float(), + desc="Gaussian blur the input images by (FWHM) voxels " + "before doing the alignment (the output dataset " + "will not be blurred). The default is 2.345 (for " + "no good reason). Optionally, you can provide 2 " + "values, and then the first one is applied to the " + "base volume, the second to the source volume. A " + "negative blur radius means to use 3D median " + "filtering, rather than Gaussian blurring. This " + "type of filtering will better preserve edges, " + "which can be important in alignment.", + argstr='-blur %s', + minlen=1, + maxlen=2) + noweight = traits.Bool( + desc='If you want a binary weight (the old default), use this option.' 
+class QwarpPlusMinusInputSpec(CommandLineInputSpec): + source_file = File( + desc= + 'Source image (opposite phase encoding direction than base image).', + argstr='-source %s', + mandatory=True, + exists=True, + copyfile=False) + base_file = File( + desc= + 'Base image (opposite phase encoding direction than source image).', + argstr='-base %s', + mandatory=True, + exists=True, + copyfile=False) + pblur = traits.List( + traits.Float(), + desc='The fraction of the patch size that' + 'is used for the progressive blur by providing a ' + 'value between 0 and 0.25. If you provide TWO ' + 'values, the first fraction is used for ' + 'progressively blurring the base image and the ' + 'second for the source image.', + argstr='-pblur %s', + minlen=1, + maxlen=2) + blur = traits.List( + traits.Float(), + desc="Gaussian blur the input images by (FWHM) voxels " + "before doing the alignment (the output dataset " + "will not be blurred). The default is 2.345 (for " + "no good reason). Optionally, you can provide 2 " + "values, and then the first one is applied to the " + "base volume, the second to the source volume. A " + "negative blur radius means to use 3D median " + "filtering, rather than Gaussian blurring. This " + "type of filtering will better preserve edges, " + "which can be important in alignment.", + argstr='-blur %s', + minlen=1, + maxlen=2) + noweight = traits.Bool( + desc='If you want a binary weight (the old default), use this option.' + 'That is, each voxel in the base volume automask will be' + 'weighted the same in the computation of the cost functional.', + argstr='-noweight') + minpatch = traits.Int( + desc="Set the minimum patch size for warp searching to 'mm' voxels.", + argstr='-minpatch %d') + nopadWARP = traits.Bool( + desc='If for some reason you require the warp volume to' + 'match the base volume, then use this option to have the output' + 'WARP dataset(s) truncated.', + argstr='-nopadWARP') + + +class QwarpPlusMinusOutputSpec(TraitedSpec): + warped_source = File(desc='Undistorted source file.', exists=True) + warped_base = File(desc='Undistorted base file.', exists=True) + source_warp = File( + desc="Field susceptibility correction warp (in 'mm') for source image.", + exists=True) + base_warp = File( + desc="Field susceptibility correction warp (in 'mm') for base image.", + exists=True) + + +class QwarpPlusMinus(CommandLine): + """A version of 3dQwarp for performing field susceptibility correction + using two images with opposing phase encoding directions. + + For complete details, see the `3dQwarp Documentation. + <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dQwarp.html>`_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> qwarp = afni.QwarpPlusMinus() + >>> qwarp.inputs.source_file = 'sub-01_dir-LR_epi.nii.gz' + >>> qwarp.inputs.nopadWARP = True + >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' + >>> qwarp.cmdline + '3dQwarp -prefix Qwarp.nii.gz -plusminus -base sub-01_dir-RL_epi.nii.gz -nopadWARP -source sub-01_dir-LR_epi.nii.gz' + >>> res = qwarp.run() # doctest: +SKIP + + """ + _cmd = '3dQwarp -prefix Qwarp.nii.gz -plusminus' + input_spec = QwarpPlusMinusInputSpec + output_spec = QwarpPlusMinusOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['warped_source'] = os.path.abspath("Qwarp_PLUS.nii.gz") + outputs['warped_base'] = os.path.abspath("Qwarp_MINUS.nii.gz") + outputs['source_warp'] = os.path.abspath("Qwarp_PLUS_WARP.nii.gz") + outputs['base_warp'] = os.path.abspath("Qwarp_MINUS_WARP.nii.gz") + + return outputs
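[Editor's note: illustrative aside, not part of the patch.] A minimal sketch of running QwarpPlusMinus inside a nipype Workflow, e.g. as the susceptibility-distortion-correction step of a larger pipeline; the file names and the 'work' base directory are placeholders:

    from nipype import Node, Workflow
    from nipype.interfaces import afni

    unwarp = Node(afni.QwarpPlusMinus(), name='unwarp')
    unwarp.inputs.source_file = 'sub-01_dir-LR_epi.nii.gz'
    unwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz'

    wf = Workflow(name='sdc', base_dir='work')
    wf.add_nodes([unwarp])
    wf.run()  # undistorted outputs land under work/sdc/unwarp/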
+class QwarpInputSpec(AFNICommandInputSpec): + in_file = File( + desc= + 'Source image (opposite phase encoding direction than base image).', + argstr='-source %s', + mandatory=True, + exists=True, + copyfile=False) + base_file = File( + desc= + 'Base image (opposite phase encoding direction than source image).', + argstr='-base %s', + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + argstr='-prefix %s', + name_template='%s_QW', + name_source=['in_file'], + genfile=True, + desc='out_file ppp' + 'Sets the prefix for the output datasets.' + '* The source dataset is warped to match the base' + 'and gets prefix \'ppp\'. (Except if \'-plusminus\' is used.)' + '* The final interpolation to this output dataset is' + 'done using the \'wsinc5\' method. See the output of' + ' 3dAllineate -HELP' + '(in the "Modifying \'-final wsinc5\'" section) for' + 'the lengthy technical details.' + '* The 3D warp used is saved in a dataset with' + 'prefix \'ppp_WARP\' -- this dataset can be used' + 'with 3dNwarpApply and 3dNwarpCat, for example.' + '* To be clear, this is the warp from source dataset' + ' coordinates to base dataset coordinates, where the' + ' values at each base grid point are the xyz displacements' + ' needed to move that grid point\'s xyz values to the' + ' corresponding xyz values in the source dataset:' + ' base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z)' + ' Another way to think of this warp is that it \'pulls\'' + ' values back from source space to base space.' + '* 3dNwarpApply would use \'ppp_WARP\' to transform datasets' + 'aligned with the source dataset to be aligned with the' + 'base dataset.' + '** If you do NOT want this warp saved, use the option \'-nowarp\'.' + '-->> (However, this warp is usually the most valuable possible output!)' + '* If you want to calculate and save the inverse 3D warp,' + 'use the option \'-iwarp\'. This inverse warp will then be' + 'saved in a dataset with prefix \'ppp_WARPINV\'.' + '* This inverse warp could be used to transform data from base' + 'space to source space, if you need to do such an operation.' + '* You can easily compute the inverse later, say by a command like' + ' 3dNwarpCat -prefix Z_WARPINV \'INV(Z_WARP+tlrc)\'' + 'or the inverse can be computed as needed in 3dNwarpApply, like' + ' 3dNwarpApply -nwarp \'INV(Z_WARP+tlrc)\' -source Dataset.nii ...') + resample = traits.Bool( + desc='This option simply resamples the source dataset to match the' + 'base dataset grid. You can use this if the two datasets' + 'overlap well (as seen in the AFNI GUI), but are not on the' + 'same 3D grid.' + '* If they don\'t overlap well, allineate them first' + '* The resampling here is done with the' + '\'wsinc5\' method, which has very little blurring artifact.' + '* If the base and source datasets ARE on the same 3D grid,' + 'then the -resample option will be ignored.' + '* You CAN use -resample with these 3dQwarp options:' + '-plusminus -inilev -iniwarp -duplo', + argstr='-resample') + allineate = traits.Bool( + desc='This option will make 3dQwarp run 3dAllineate first, to align ' + 'the source dataset to the base with an affine transformation. ' + 'It will then use that alignment as a starting point for the ' + 'nonlinear warping.', + argstr='-allineate') + allineate_opts = traits.Str( + desc='add extra options to the 3dAllineate command to be run by ' + '3dQwarp.', + argstr='-allineate_opts %s', + requires=['allineate']) + nowarp = traits.Bool(desc='Do not save the _WARP file.', argstr='-nowarp') + iwarp = traits.Bool( + desc='Do compute and save the _WARPINV file.', + argstr='-iwarp', + xor=['plusminus']) + pear = traits.Bool( + desc='Use strict Pearson correlation for matching.' + '* Not usually recommended, since the \'clipped Pearson\' method' + 'used by default will reduce the impact of outlier values.', + argstr='-pear') + noneg = traits.Bool( + desc='Replace negative values in either input volume with 0.' + '* If there ARE negative input values, and you do NOT use -noneg,' + 'then strict Pearson correlation will be used, since the \'clipped\'' + 'method only is implemented for non-negative volumes.' + '* \'-noneg\' is not the default, since there might be situations where' + 'you want to align datasets with positive and negative values mixed.' + '* But, in many cases, the negative values in a dataset are just the' + 'result of interpolation artifacts (or other peculiarities), and so' + 'they should be ignored. That is what \'-noneg\' is for.', + argstr='-noneg')
+ nopenalty = traits.Bool( + desc='Don\'t use a penalty on the cost functional; the goodness' + 'of fit is then the only criterion. Equivalent to \'-penfac 0\'.', + argstr='-nopenalty') + penfac = traits.Float( + desc='Use this value to weight the penalty.' + 'The default value is 1. Larger values mean the' + 'penalty counts more, reducing grid distortions,' + 'insha\'Allah; \'-nopenalty\' is the same as \'-penfac 0\'.' + ' -->>* [23 Sep 2013] -- Zhark increased the default value of' + ' the penalty by a factor of 5, and also made it get' + ' progressively larger with each level of refinement.' + ' Thus, warping results will vary from earlier instances' + ' of 3dQwarp.' + ' * The progressive increase in the penalty at higher levels' + ' means that the \'cost function\' can actually look like the' + ' alignment is getting worse when the levels change.' + ' * IF you wish to turn off this progression, for whatever' + ' reason (e.g., to keep compatibility with older results),' + ' use the option \'-penold\'. To be completely compatible with' + ' the older 3dQwarp, you\'ll also have to use \'-penfac 0.2\'.', + argstr='-penfac %f') + noweight = traits.Bool( + desc='If you want a binary weight (the old default), use this option.' + 'That is, each voxel in the base volume automask will be' + 'weighted the same in the computation of the cost functional.', + argstr='-noweight') + weight = File( + desc='Instead of computing the weight from the base dataset,' + 'directly input the weight volume from dataset \'www\'.' + '* Useful if you know over what parts of the base image you' + 'want to emphasize or de-emphasize the matching functional.', + argstr='-weight %s', + exists=True)
+ wball = traits.List( + traits.Int(), + desc='-wball x y z r f' + 'Enhance automatic weight from \'-useweight\' by a factor' + 'of 1+f*Gaussian(FWHM=r) centered in the base image at' + 'DICOM coordinates (x,y,z) and with radius \'r\'. The' + 'goal of this option is to try and make the alignment' + 'better in a specific part of the brain.' + '* Example: -wball 0 14 6 30 40' + 'to emphasize the thalamic area (in MNI/Talairach space).' + '* The \'r\' parameter must be positive!' + '* The \'f\' parameter must be between 1 and 100 (inclusive).' + '* \'-wball\' does nothing if you input your own weight' + 'with the \'-weight\' option.' + '* \'-wball\' does change the binary weight created by' + 'the \'-noweight\' option.' + '* You can only use \'-wball\' once in a run of 3dQwarp.' + '*** The effect of \'-wball\' is not dramatic. The example' + 'above makes the average brain image across a collection' + 'of subjects a little sharper in the thalamic area, which' + 'might have some small value. If you care enough about' + 'alignment to use \'-wball\', then you should examine the' + 'results from 3dQwarp for each subject, to see if the' + 'alignments are good enough for your purposes.', + argstr='-wball %s', + minlen=5, + maxlen=5) + bpass = traits.Tuple( + (traits.Float(), traits.Float()), + desc='Two floats passed to the -bpass option.', + argstr='-bpass %f %f') + wmask = traits.Tuple( + (File(exists=True), traits.Float()), + desc='-wmask ws f' + 'Similar to \'-wball\', but here, you provide a dataset \'ws\'' + 'that indicates where to increase the weight.' + '* The \'ws\' dataset must be on the same 3D grid as the base dataset.' + '* \'ws\' is treated as a mask -- it only matters where it' + 'is nonzero -- otherwise, the values inside are not used.' + '* After \'ws\' comes the factor \'f\' by which to increase the' + 'automatically computed weight. Where \'ws\' is nonzero,' + 'the weighting will be multiplied by (1+f).' + '* As with \'-wball\', the factor \'f\' should be between 1 and 100.' + '* You cannot use \'-wball\' and \'-wmask\' together!', + argstr='-wmask %s %f') + out_weight_file = traits.File( + argstr='-wtprefix %s', + desc='Write the weight volume to disk as a dataset') + blur = traits.List( + traits.Float(), + desc='Gaussian blur the input images by \'bb\' (FWHM) voxels before' + 'doing the alignment (the output dataset will not be blurred).' + 'The default is 2.345 (for no good reason).' + '* Optionally, you can provide 2 values for \'bb\', and then' + 'the first one is applied to the base volume, the second' + 'to the source volume.' + '-->>* e.g., \'-blur 0 3\' to skip blurring the base image' + '(if the base is a blurry template, for example).' + '* A negative blur radius means to use 3D median filtering,' + 'rather than Gaussian blurring. This type of filtering will' + 'better preserve edges, which can be important in alignment.' + '* If the base is a template volume that is already blurry,' + 'you probably don\'t want to blur it again, but blurring' + 'the source volume a little is probably a good idea, to' + 'help the program avoid trying to match tiny features.' + '* Note that -duplo will blur the volumes some extra' + 'amount for the initial small-scale warping, to make' + 'that phase of the program converge more rapidly.', + argstr='-blur %s', + minlen=1, + maxlen=2) + pblur = traits.List( + traits.Float(), + desc='Use progressive blurring; that is, for larger patch sizes,' + 'the amount of blurring is larger. The general idea is to' + 'avoid trying to match finer details when the patch size' + 'and incremental warps are coarse. When \'-blur\' is used' + 'as well, it sets a minimum amount of blurring that will' + 'be used. [06 Aug 2014 -- \'-pblur\' may become the default someday].' + '* You can optionally give the fraction of the patch size that' + 'is used for the progressive blur by providing a value between' + '0 and 0.25 after \'-pblur\'. If you provide TWO values,' + 'the first fraction is used for progressively blurring the' + 'base image and the second for the source image. The default' + 'parameters when just \'-pblur\' is given is the same as giving' + 'the options as \'-pblur 0.09 0.09\'.' + '* \'-pblur\' is useful when trying to match 2 volumes with high' + 'amounts of detail; e.g., warping one subject\'s brain image to' + 'match another\'s, or trying to warp to match a detailed template.' + '* Note that using negative values with \'-blur\' means that the' + 'progressive blurring will be done with median filters, rather' + 'than Gaussian linear blurring.'
+ '-->>*** The combination of the -allineate and -pblur options will make' + 'the results of using 3dQwarp to align to a template somewhat' + 'less sensitive to initial head position and scaling.', + argstr='-pblur %s', + minlen=1, + maxlen=2) + emask = File( + desc='Here, \'ee\' is a dataset to specify a mask of voxels' + 'to EXCLUDE from the analysis -- all voxels in \'ee\'' + 'that are NONZERO will not be used in the alignment.' + '* The base image is always automasked -- the emask is' + 'extra, to indicate voxels you definitely DON\'T want' + 'included in the matching process, even if they are' + 'inside the brain.', + argstr='-emask %s', + exists=True, + copyfile=False) + noXdis = traits.Bool( + desc='Warp will not displace in x direction', argstr='-noXdis') + noYdis = traits.Bool( + desc='Warp will not displace in y direction', argstr='-noYdis') + noZdis = traits.Bool( + desc='Warp will not displace in z direction', argstr='-noZdis') + iniwarp = traits.List( + File(exists=True, copyfile=False), + desc='A dataset with an initial nonlinear warp to use.' + '* If this option is not used, the initial warp is the identity.' + '* You can specify a catenation of warps (in quotes) here, as in' + 'program 3dNwarpApply.' + '* As a special case, if you just input an affine matrix in a .1D' + 'file, that will work also -- it is treated as giving the initial' + 'warp via the string "IDENT(base_dataset) matrix_file.aff12.1D".' + '* You CANNOT use this option with -duplo !!' + '* -iniwarp is usually used with -inilev to re-start 3dQwarp from' + 'a previous stopping point.', + argstr='-iniwarp %s', + xor=['duplo']) + inilev = traits.Int( + desc='The initial refinement \'level\' at which to start.' + '* Usually used with -iniwarp; CANNOT be used with -duplo.' + '* The combination of -inilev and -iniwarp lets you take the' + 'results of a previous 3dQwarp run and refine them further:' + 'Note that the source dataset in the second run is the SAME as' + 'in the first run. If you don\'t see why this is necessary,' + 'then you probably need to seek help from an AFNI guru.', + argstr='-inilev %d', + xor=['duplo']) + minpatch = traits.Int( + desc='* The value of mm should be an odd integer.' + '* The default value of mm is 25.' + '* For more accurate results than mm=25, try 19 or 13.' + '* The smallest allowed patch size is 5.' + '* You may want to stop at a larger patch size (say 7 or 9) and use' + 'the -Qfinal option to run that final level with quintic warps,' + 'which might run faster and provide the same degree of warp detail.' + '* Trying to make two different brain volumes match in fine detail' + 'is usually a waste of time, especially in humans. There is too' + 'much variability in anatomy to match gyrus to gyrus accurately.' + 'For this reason, the default minimum patch size is 25 voxels.' + 'Using a smaller \'-minpatch\' might try to force the warp to' + 'match features that do not match, and the result can be useless' + 'image distortions -- another reason to LOOK AT THE RESULTS.', + argstr='-minpatch %d') + maxlev = traits.Int( + desc='The maximum refinement \'level\' to use. Stopping at a coarser' + 'level than the default can save time, at the cost of a less' + 'detailed warp. CANNOT be used with -duplo.', + argstr='-maxlev %d', + xor=['duplo'], + position=-1) + gridlist = File( + desc='This option provides an alternate way to specify the patch' + 'grid sizes used in the warp optimization process. \'gl\' is' + 'a 1D file with a list of patches to use -- in most cases,' + 'you will want to use it in the following form:' + '-gridlist \'1D: 0 151 101 75 51\'' + '* Here, a 0 patch size means the global domain. Patch sizes' + 'otherwise should be odd integers >= 5.' + '* If you use the \'0\' patch size again after the first position,' + 'you will actually get an iteration at the size of the' + 'default patch level 1, where the patch sizes are 75% of' + 'the volume dimension. There is no way to force the program' + 'to literally repeat the sui generis step of lev=0.' + '* You cannot use -gridlist with -duplo or -plusminus!', + argstr='-gridlist %s', + exists=True, + copyfile=False, + xor=['duplo', 'plusminus']) + allsave = traits.Bool( + desc='This option lets you save the output warps from each level' + 'of the refinement process. Mostly used for experimenting.' + '* Cannot be used with -nopadWARP, -duplo, or -plusminus.' + '* Will only save all the outputs if the program terminates' + 'normally -- if it crashes, or freezes, then all these' + 'warps are lost.', + argstr='-allsave', + xor=['nopadWARP', 'duplo', 'plusminus']) + duplo = traits.Bool( + desc='Start off with 1/2 scale versions of the volumes,' + 'for getting a speedy coarse first alignment.' + '* Then scales back up to register the full volumes.' + 'The goal is greater speed, and it seems to help this' + 'positively piggish program to be more expeditious.' + '* However, accuracy is somewhat lower with \'-duplo\',' + 'for reasons that currently elude Zhark; for this reason,' + 'the Emperor does not usually use \'-duplo\'.', + argstr='-duplo', + xor=[ + 'gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', 'allsave' + ]) + workhard = traits.Bool( + desc='Iterate more times, which can help when the volumes are' + 'hard to align at all, or when you hope to get a more precise' + 'alignment.' + '* Slows the program down (possibly a lot), of course.' + '* When you combine \'-workhard\' with \'-duplo\', only the' + 'full size volumes get the extra iterations.' + '* For finer control over which refinement levels work hard,' + 'you can use this option in the form (for example)' + ' -workhard:4:7' + 'which implies the extra iterations will be done at levels' + '4, 5, 6, and 7, but not otherwise.' + '* You can also use \'-superhard\' to iterate even more, but' + 'this extra option will REALLY slow things down.' + '-->>* Under most circumstances, you should not need to use either' + '-workhard or -superhard.' + '-->>* The fastest way to register to a template image is via the' + '-duplo option, and without the -workhard or -superhard options.' + '-->>* If you use this option in the form \'-Workhard\' (first letter' + 'in upper case), then the second iteration at each level is' + 'done with quintic polynomial warps.', + argstr='-workhard', + xor=['boxopt', 'ballopt']) + Qfinal = traits.Bool( + desc='At the finest patch size (the final level), use Hermite' + 'quintic polynomials for the warp instead of cubic polynomials.' + '* In a 3D \'patch\', there are 2x2x2x3=24 cubic polynomial basis' + 'function parameters over which to optimize (2 polynomials' + 'dependent on each of the x,y,z directions, and 3 different' + 'directions of displacement).'
+ '* There are 3x3x3x3=81 quintic polynomial parameters per patch.' + '* With -Qfinal, the final level will have more detail in' + 'the allowed warps, at the cost of yet more CPU time.' + '* However, no patch below 7x7x7 in size will be done with quintic' + 'polynomials.' + '* This option is also not usually needed, and is experimental.', + argstr='-Qfinal') + Qonly = traits.Bool( + desc='Use Hermite quintic polynomials at all levels.' + '* Very slow (about 4 times longer). Also experimental.' + '* Will produce a (discrete representation of a) C2 warp.', + argstr='-Qonly') + plusminus = traits.Bool( + desc='Normally, the warp displacements dis(x) are defined to match' + 'base(x) to source(x+dis(x)). With this option, the match' + 'is between base(x-dis(x)) and source(x+dis(x)) -- the two' + 'images \'meet in the middle\'.' + '* One goal is to mimic the warping done to MRI EPI data by' + 'field inhomogeneities, when registering between a \'blip up\'' + 'and a \'blip down\' volume, which will have opposite' + 'distortions.' + '* Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since' + 'base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x))' + 'wherever we see x, we have base(x) matches source(Wp(INV(Wm(x))));' + 'that is, the warp V(x) that one would get from the \'usual\' way' + 'of running 3dQwarp is V(x) = Wp(INV(Wm(x))).' + '* Conversely, we can calculate Wp(x) in terms of V(x) as follows:' + 'If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2;' + 'then Wp(x) = V(INV(Vh(x)))' + '* With the above formulas, it is possible to compute Wp(x) from' + 'V(x) and vice-versa, using program 3dNwarpCalc. The requisite' + 'commands are left as an exercise for the aspiring AFNI Jedi Master.' + '* You can use the semi-secret \'-pmBASE\' option to get the V(x)' + 'warp and the source dataset warped to base space, in addition to' + 'the Wp(x) \'_PLUS\' and Wm(x) \'_MINUS\' warps.' + '-->>* Alas: -plusminus does not work with -duplo or -allineate :-(' + '* However, you can use -iniwarp with -plusminus :-)' + '-->>* The outputs have _PLUS (from the source dataset) and _MINUS' + '(from the base dataset) in their filenames, in addition to' + 'the prefix. The -iwarp option, if present, will be ignored.', + argstr='-plusminus', + xor=['duplo', 'allsave', 'iwarp']) + nopad = traits.Bool( + desc='Do NOT use zero-padding on the 3D base and source images.' + '[Default == zero-pad, if needed]' + '* The underlying model for deformations goes to zero at the' + 'edge of the volume being warped. However, if there is' + 'significant data near an edge of the volume, then it won\'t' + 'get displaced much, and so the results might not be good.' + '* Zero padding is designed as a way to work around this potential' + 'problem. You should NOT need the \'-nopad\' option for any' + 'reason that Zhark can think of, but it is here to be symmetrical' + 'with 3dAllineate.' + '* Note that the output (warped from source) dataset will be on the' + 'base dataset grid whether or not zero-padding is allowed.
However,' + 'unless you use the following option, allowing zero-padding (i.e.,' + 'the default operation) will make the output WARP dataset(s) be' + 'on a larger grid (also see \'-expad\' below).', + argstr='-nopad') + nopadWARP = traits.Bool( + desc='If for some reason you require the warp volume to' + 'match the base volume, then use this option to have the output' + 'WARP dataset(s) truncated.', + argstr='-nopadWARP', + xor=['allsave', 'expad']) + expad = traits.Int( + desc='This option instructs the program to pad the warp by an extra' + '\'EE\' voxels (and then 3dQwarp starts optimizing it).' + '* This option is seldom needed, but can be useful if you' + 'might later catenate the nonlinear warp -- via 3dNwarpCat --' + 'with an affine transformation that contains a large shift.' + 'Under that circumstance, the nonlinear warp might be shifted' + 'partially outside its original grid, so expanding that grid' + 'can avoid this problem.' + '* Note that this option perforce turns off \'-nopadWARP\'.', + argstr='-expad %d', + xor=['nopadWARP']) + ballopt = traits.Bool( + desc='Normally, the incremental warp parameters are optimized inside' + 'a rectangular \'box\' (24 dimensional for cubic patches, 81 for' + 'quintic patches), whose limits define the amount of distortion' + 'allowed at each step. Using \'-ballopt\' switches these limits' + 'to be applied to a \'ball\' (interior of a hypersphere), which' + 'can allow for larger incremental displacements. Use this' + 'option if you think things need to be able to move farther.', + argstr='-ballopt', + xor=['workhard', 'boxopt']) + boxopt = traits.Bool( + desc='Use the \'box\' optimization limits instead of the \'ball\'' + '[this is the default at present].' + '* Note that if \'-workhard\' is used, then ball and box optimization' + 'are alternated in the different iterations at each level, so' + 'these two options have no effect in that case.', + argstr='-boxopt', + xor=['workhard', 'ballopt']) + verb = traits.Bool( + desc='more detailed description of the process', + argstr='-verb', + xor=['quiet']) + quiet = traits.Bool( + desc='Cut out most of the fun fun fun progress messages :-(', + argstr='-quiet', + xor=['verb']) + # Hidden and semi-hidden options + overwrite = traits.Bool(desc='Overwrite outputs', argstr='-overwrite') + lpc = traits.Bool( + desc='Local Pearson minimization (i.e., EPI-T1 registration).' + 'This option has not been extensively tested.' + 'If you use \'-lpc\', then \'-maxlev 0\' is automatically set.'
+ 'If you want to go to more refined levels, you can set \'-maxlev\'.' + 'This should be set up to have lpc as the second-to-last argument' + 'and maxlev as the last argument, as needed by AFNI.' + 'Using maxlev > 1 is not recommended for EPI-T1 alignment.', + argstr='-lpc', + xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], + position=-2) + lpa = traits.Bool( + desc='Local Pearson maximization.' + 'This option has not been extensively tested.', + argstr='-lpa', + xor=['nmi', 'mi', 'lpc', 'hel', 'pear']) + hel = traits.Bool( + desc='Hellinger distance: a matching function for the adventurous.' + 'This option has NOT been extensively tested for usefulness' + 'and should be considered experimental at this infundibulum.', + argstr='-hel', + xor=['nmi', 'mi', 'lpc', 'lpa', 'pear']) + mi = traits.Bool( + desc='Mutual Information: a matching function for the adventurous.' + 'This option has NOT been extensively tested for usefulness' + 'and should be considered experimental at this infundibulum.', + argstr='-mi', + xor=['nmi', 'hel', 'lpc', 'lpa', 'pear']) + nmi = traits.Bool( + desc= + 'Normalized Mutual Information: a matching function for the adventurous.' + 'This option has NOT been extensively tested for usefulness' + 'and should be considered experimental at this infundibulum.', + argstr='-nmi', + xor=['mi', 'hel', 'lpc', 'lpa', 'pear']) + + +class QwarpOutputSpec(TraitedSpec): + warped_source = File( + desc='Warped source file. If plusminus is used, this is the undistorted' + 'source file.') + warped_base = File(desc='Undistorted base file.') + source_warp = File( + desc="Displacement in mm for the source image." + "If plusminus is used this is the field susceptibility correction" + "warp (in 'mm') for source image.") + base_warp = File( + desc="Displacement in mm for the base image." + "If plusminus is used, this is the field susceptibility correction" + "warp (in 'mm') for base image. This is only output if plusminus" + "or iwarp options are passed") + weights = File(desc="Auto-computed weight volume.")
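[Editor's note: illustrative aside, not part of the patch.] The source_warp produced by a default (non-plusminus) run is the 'ppp_WARP' dataset described in the out_file help above, so it can be handed straight to AFNI's warp-application tools. A minimal sketch, assuming this module's NwarpApply interface exposes in_file/warp/master inputs (as in recent nipype releases) and using hypothetical file names:

    from nipype.interfaces import afni

    qwarp = afni.Qwarp()
    qwarp.inputs.in_file = 'structural.nii'
    qwarp.inputs.base_file = 'mni.nii'
    res = qwarp.run()

    nwarp = afni.NwarpApply()
    nwarp.inputs.in_file = 'companion_image.nii'  # anything aligned with the source
    nwarp.inputs.warp = res.outputs.source_warp   # the ppp_WARP dataset
    nwarp.inputs.master = 'mni.nii'               # write on the base grid
    res2 = nwarp.run()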
+class Qwarp(AFNICommand): + """A version of 3dQwarp. + Allineate your images prior to passing them to this workflow. + + For complete details, see the `3dQwarp Documentation. + <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dQwarp.html>`_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' + >>> qwarp.inputs.nopadWARP = True + >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' + >>> qwarp.inputs.plusminus = True + >>> qwarp.cmdline + '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix sub-01_dir-LR_epi_QW -plusminus' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.resample = True + >>> qwarp.cmdline + '3dQwarp -base mni.nii -source structural.nii -prefix structural_QW -resample' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'epi.nii' + >>> qwarp.inputs.out_file = 'anatSSQ.nii.gz' + >>> qwarp.inputs.resample = True + >>> qwarp.inputs.lpc = True + >>> qwarp.inputs.verb = True + >>> qwarp.inputs.iwarp = True + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.cmdline + '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz -resample -verb -lpc' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.duplo = True + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.cmdline + '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix structural_QW' + >>> res = qwarp.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> qwarp = afni.Qwarp() + >>> qwarp.inputs.in_file = 'structural.nii' + >>> qwarp.inputs.base_file = 'mni.nii' + >>> qwarp.inputs.duplo = True + >>> qwarp.inputs.minpatch = 25 + >>> qwarp.inputs.blur = [0,3] + >>> qwarp.inputs.out_file = 'Q25' + >>> qwarp.cmdline + '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -minpatch 25 -prefix Q25' + >>> res = qwarp.run() # doctest: +SKIP + >>> qwarp2 = afni.Qwarp() + >>> qwarp2.inputs.in_file = 'structural.nii' + >>> qwarp2.inputs.base_file = 'mni.nii' + >>> qwarp2.inputs.blur = [0,2] + >>> qwarp2.inputs.out_file = 'Q11' + >>> qwarp2.inputs.inilev = 7 + >>> qwarp2.inputs.iniwarp = ['Q25_warp+tlrc.HEAD'] + >>> qwarp2.cmdline + '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_warp+tlrc.HEAD -prefix Q11' + >>> res2 = qwarp2.run() # doctest: +SKIP + >>> qwarp3 = afni.Qwarp() + >>> qwarp3.inputs.in_file = 'structural.nii' + >>> qwarp3.inputs.base_file = 'mni.nii' + >>> qwarp3.inputs.allineate = True + >>> qwarp3.inputs.allineate_opts = '-cose lpa -verb' + >>> qwarp3.cmdline + "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii -prefix structural_QW" + >>> res3 = qwarp3.run() # doctest: +SKIP + """ + _cmd = '3dQwarp' + input_spec = QwarpInputSpec + output_spec = QwarpOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'allineate_opts': + return spec.argstr % ("'" + value + "'") + return super(Qwarp, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + + if not isdefined(self.inputs.out_file): + prefix = self._gen_fname(self.inputs.in_file, suffix='_QW') + ext = '.HEAD' + suffix = '+tlrc' + else: + prefix = 
self.inputs.out_file + ext_ind = max([ + prefix.lower().rfind('.nii.gz'), + prefix.lower().rfind('.nii.') + ]) + if ext_ind == -1: + ext = '.HEAD' + suffix = '+tlrc' + else: + ext = prefix[ext_ind:] + suffix = '' + outputs['warped_source'] = fname_presuffix( + prefix, suffix=suffix, use_ext=False) + ext + if not self.inputs.nowarp: + outputs['source_warp'] = fname_presuffix( + prefix, suffix='_WARP' + suffix, use_ext=False) + ext + if self.inputs.iwarp: + outputs['base_warp'] = fname_presuffix( + prefix, suffix='_WARPINV' + suffix, use_ext=False) + ext + if isdefined(self.inputs.out_weight_file): + outputs['weights'] = os.path.abspath(self.inputs.out_weight_file) + + if self.inputs.plusminus: + outputs['warped_source'] = fname_presuffix( + prefix, suffix='_PLUS' + suffix, use_ext=False) + ext + outputs['warped_base'] = fname_presuffix( + prefix, suffix='_MINUS' + suffix, use_ext=False) + ext + outputs['source_warp'] = fname_presuffix( + prefix, suffix='_PLUS_WARP' + suffix, use_ext=False) + ext + outputs['base_warp'] = fname_presuffix( + prefix, suffix='_MINUS_WARP' + suffix, use_ext=False) + ext + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.in_file, suffix='_QW')
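[Editor's note: illustrative aside, not part of the patch.] The naming logic in _list_outputs above is easy to trip over, so here is a minimal standalone sketch of it, built on the same fname_presuffix helper, that prints the warped-source file name a run should produce; the example prefixes are arbitrary:

    from nipype.utils.filemanip import fname_presuffix

    def expected_warped_source(prefix, plusminus=False):
        # NIfTI prefixes keep their own extension; anything else gets
        # AFNI's '+tlrc' view and '.HEAD' extension, mirroring _list_outputs.
        ext_ind = max(prefix.lower().rfind('.nii.gz'), prefix.lower().rfind('.nii.'))
        suffix, ext = ('', prefix[ext_ind:]) if ext_ind != -1 else ('+tlrc', '.HEAD')
        tag = '_PLUS' if plusminus else ''
        return fname_presuffix(prefix, suffix=tag + suffix, use_ext=False) + ext

    print(expected_warped_source('anatSSQ.nii.gz'))       # anatSSQ.nii.gz
    print(expected_warped_source('Q25'))                  # Q25+tlrc.HEAD
    print(expected_warped_source('Q25', plusminus=True))  # Q25_PLUS+tlrc.HEAD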
diff --git a/nipype/interfaces/afni/svm.py b/nipype/interfaces/afni/svm.py new file mode 100644 index 0000000000..d465c1caaa --- /dev/null +++ b/nipype/interfaces/afni/svm.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft = python sts = 4 ts = 4 sw = 4 et: +"""AFNI SVM interfaces +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from ..base import TraitedSpec, traits, File +from .base import AFNICommand, AFNICommandInputSpec, AFNICommandOutputSpec + + +class SVMTrainInputSpec(AFNICommandInputSpec): + # training options + ttype = traits.Str( + desc='tname: classification or regression', + argstr='-type %s', + mandatory=True) + in_file = File( + desc='A 3D+t AFNI brik dataset to be used for training.', + argstr='-trainvol %s', + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template="%s_vectors", + desc='output sum of weighted linear support vectors file name', + argstr='-bucket %s', + suffix='_bucket', + name_source="in_file") + model = File( + name_template="%s_model", + desc='basename for the brik containing the SVM model', + argstr='-model %s', + suffix='_model', + name_source="in_file") + alphas = File( + name_template="%s_alphas", + desc='output alphas file name', + argstr='-alpha %s', + suffix='_alphas', + name_source="in_file") + mask = File( + desc='byte-format brik file used to mask voxels in the analysis', + argstr='-mask %s', + position=-1, + exists=True, + copyfile=False) + nomodelmask = traits.Bool( + desc='Flag to enable the omission of a mask file', + argstr='-nomodelmask') + trainlabels = File( + desc= + '.1D labels corresponding to the stimulus paradigm for the training data.', + argstr='-trainlabels %s', + exists=True) + censor = File( + desc= + '.1D censor file that allows the user to ignore certain samples in the training data.', + argstr='-censor %s', + exists=True) + kernel = traits.Str( + desc= + 'string specifying type of kernel function: linear, polynomial, rbf, sigmoid', + argstr='-kernel %s') + max_iterations = traits.Int( + desc='Specify the maximum number of iterations for the optimization.', + argstr='-max_iterations %d') + w_out = traits.Bool( + desc='output sum of weighted linear support vectors', argstr='-wout') + options = traits.Str(desc='additional options for SVM-light', argstr='%s') + + +class SVMTrainOutputSpec(TraitedSpec): + out_file = File(desc='sum of weighted linear support vectors file name') + model = File(desc='brik containing the SVM model file name') + alphas = File(desc='output alphas file name') + + +class SVMTrain(AFNICommand): + """Temporally predictive modeling with the support vector machine + SVM Train Only + For complete details, see the `3dsvm Documentation. + <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dsvm.html>`_ + + Examples + ======== + + >>> from nipype.interfaces import afni as afni + >>> svmTrain = afni.SVMTrain() + >>> svmTrain.inputs.in_file = 'run1+orig' + >>> svmTrain.inputs.trainlabels = 'run1_categories.1D' + >>> svmTrain.inputs.ttype = 'regression' + >>> svmTrain.inputs.mask = 'mask.nii' + >>> svmTrain.inputs.model = 'model_run1' + >>> svmTrain.inputs.alphas = 'alphas_run1' + >>> res = svmTrain.run() # doctest: +SKIP + + """ + + _cmd = '3dsvm' + input_spec = SVMTrainInputSpec + output_spec = SVMTrainOutputSpec + _additional_metadata = ['suffix'] + + def _format_arg(self, name, trait_spec, value): + return super(SVMTrain, self)._format_arg(name, trait_spec, value) + + +class SVMTestInputSpec(AFNICommandInputSpec): + # testing options + model = traits.Str( + desc='modname is the basename for the brik containing the SVM model', + argstr='-model %s', + mandatory=True) + in_file = File( + desc='A 3D or 3D+t AFNI brik dataset to be used for testing.', + argstr='-testvol %s', + exists=True, + mandatory=True) + out_file = File( + name_template="%s_predictions", + desc='filename for .1D prediction file(s).', + argstr='-predictions %s') + testlabels = File( + desc= + '*true* class category .1D labels for the test dataset. It is used to calculate the prediction accuracy performance', + exists=True, + argstr='-testlabels %s') + classout = traits.Bool( + desc= + 'Flag to specify that pname files should be integer-valued, corresponding to class category decisions.', + argstr='-classout') + nopredcensord = traits.Bool( + desc= + 'Flag to prevent writing predicted values for censored time-points', + argstr='-nopredcensord') + nodetrend = traits.Bool( + desc= + 'Flag to specify that pname files should not be linearly detrended', + argstr='-nodetrend') + multiclass = traits.Bool( + desc='Specifies multiclass algorithm for classification', + argstr='-multiclass %s') + options = traits.Str(desc='additional options for SVM-light', argstr='%s') + + +class SVMTest(AFNICommand): + """Temporally predictive modeling with the support vector machine + SVM Test Only + For complete details, see the `3dsvm Documentation. + <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dsvm.html>
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni as afni + >>> svmTest = afni.SVMTest() + >>> svmTest.inputs.in_file= 'run2+orig' + >>> svmTest.inputs.model= 'run1+orig_model' + >>> svmTest.inputs.testlabels= 'run2_categories.1D' + >>> svmTest.inputs.out_file= 'pred2_model1' + >>> res = svmTest.run() # doctest: +SKIP + + """ + _cmd = '3dsvm' + input_spec = SVMTestInputSpec + output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/afni/tests/__init__.py b/nipype/interfaces/afni/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/interfaces/afni/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/interfaces/afni/tests/test_auto_ABoverlap.py b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py new file mode 100644 index 0000000000..55cb12c27f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ABoverlap.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ABoverlap + + +def test_ABoverlap_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file_a=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-3, + ), + in_file_b=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + no_automask=dict(argstr='-no_automask', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=' |& tee %s', + position=-1, + ), + outputtype=dict(), + quiet=dict(argstr='-quiet', ), + verb=dict(argstr='-verb', ), + ) + inputs = ABoverlap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ABoverlap_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ABoverlap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommand.py b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py new file mode 100644 index 0000000000..724c98dcb2 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommand.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import AFNICommand + + +def test_AFNICommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + ) + inputs = AFNICommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py new file mode 100644 index 0000000000..431baec30b --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AFNICommandBase.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import AFNICommandBase + + +def 
test_AFNICommandBase_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = AFNICommandBase.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py new file mode 100644 index 0000000000..ba2411edfb --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AFNIPythonCommand.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import AFNIPythonCommand + + +def test_AFNIPythonCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + ) + inputs = AFNIPythonCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py new file mode 100644 index 0000000000..d89519d571 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AFNItoNIFTI.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AFNItoNIFTI + + +def test_AFNItoNIFTI_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + denote=dict(argstr='-denote', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + newid=dict( + argstr='-newid', + xor=['oldid'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oldid=dict( + argstr='-oldid', + xor=['newid'], + ), + out_file=dict( + argstr='-prefix %s', + hash_files=False, + name_source='in_file', + name_template='%s.nii', + ), + outputtype=dict(), + pure=dict(argstr='-pure', ), + ) + inputs = AFNItoNIFTI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AFNItoNIFTI_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AFNItoNIFTI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py new file mode 100644 index 0000000000..dfbfc648f7 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AlignEpiAnatPy.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import AlignEpiAnatPy + + +def test_AlignEpiAnatPy_inputs(): + input_map = dict( + anat=dict( + argstr='-anat %s', + copyfile=False, + mandatory=True, + ), + anat2epi=dict(argstr='-anat2epi', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi2anat=dict(argstr='-epi2anat', ), + epi_base=dict( + argstr='-epi_base %s', + mandatory=True, + ), + epi_strip=dict(argstr='-epi_strip %s', ), + 
in_file=dict( + argstr='-epi %s', + copyfile=False, + mandatory=True, + ), + outputtype=dict(), + py27_path=dict(usedefault=True, ), + save_skullstrip=dict(argstr='-save_skullstrip', ), + suffix=dict( + argstr='-suffix %s', + usedefault=True, + ), + tshift=dict( + argstr='-tshift %s', + usedefault=True, + ), + volreg=dict( + argstr='-volreg %s', + usedefault=True, + ), + ) + inputs = AlignEpiAnatPy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AlignEpiAnatPy_outputs(): + output_map = dict( + anat_al_mat=dict(), + anat_al_orig=dict(), + epi_al_mat=dict(), + epi_al_orig=dict(), + epi_al_tlrc_mat=dict(), + epi_reg_al_mat=dict(), + epi_tlrc_al=dict(), + epi_vr_al_mat=dict(), + epi_vr_motion=dict(), + skullstrip=dict(), + ) + outputs = AlignEpiAnatPy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Allineate.py b/nipype/interfaces/afni/tests/test_auto_Allineate.py new file mode 100644 index 0000000000..be02af14da --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Allineate.py @@ -0,0 +1,122 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Allineate + + +def test_Allineate_inputs(): + input_map = dict( + allcostx=dict( + argstr='-allcostx |& tee %s', + position=-1, + xor=[ + 'out_file', 'out_matrix', 'out_param_file', 'out_weight_file' + ], + ), + args=dict(argstr='%s', ), + autobox=dict(argstr='-autobox', ), + automask=dict(argstr='-automask+%d', ), + autoweight=dict(argstr='-autoweight%s', ), + center_of_mass=dict(argstr='-cmass%s', ), + check=dict(argstr='-check %s', ), + convergence=dict(argstr='-conv %f', ), + cost=dict(argstr='-cost %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict(argstr='-EPI', ), + final_interpolation=dict(argstr='-final %s', ), + fine_blur=dict(argstr='-fineblur %f', ), + in_file=dict( + argstr='-source %s', + copyfile=False, + mandatory=True, + ), + in_matrix=dict( + argstr='-1Dmatrix_apply %s', + position=-3, + xor=['out_matrix'], + ), + in_param_file=dict( + argstr='-1Dparam_apply %s', + xor=['out_param_file'], + ), + interpolation=dict(argstr='-interp %s', ), + master=dict(argstr='-master %s', ), + maxrot=dict(argstr='-maxrot %f', ), + maxscl=dict(argstr='-maxscl %f', ), + maxshf=dict(argstr='-maxshf %f', ), + maxshr=dict(argstr='-maxshr %f', ), + newgrid=dict(argstr='-newgrid %f', ), + nmatch=dict(argstr='-nmatch %d', ), + no_pad=dict(argstr='-nopad', ), + nomask=dict(argstr='-nomask', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nwarp=dict(argstr='-nwarp %s', ), + nwarp_fixdep=dict(argstr='-nwarp_fixdep%s...', ), + nwarp_fixmot=dict(argstr='-nwarp_fixmot%s...', ), + one_pass=dict(argstr='-onepass', ), + out_file=dict( + argstr='-prefix %s', + hash_files=False, + name_source='in_file', + name_template='%s_allineate', + xor=['allcostx'], + ), + out_matrix=dict( + argstr='-1Dmatrix_save %s', + xor=['in_matrix', 'allcostx'], + ), + out_param_file=dict( + argstr='-1Dparam_save %s', + xor=['in_param_file', 'allcostx'], + ), + out_weight_file=dict( + argstr='-wtprefix %s', + xor=['allcostx'], + ), + outputtype=dict(), + overwrite=dict(argstr='-overwrite', ), + quiet=dict(argstr='-quiet', ), + reference=dict(argstr='-base %s', ), + 
replacebase=dict(argstr='-replacebase', ), + replacemeth=dict(argstr='-replacemeth %s', ), + source_automask=dict(argstr='-source_automask+%d', ), + source_mask=dict(argstr='-source_mask %s', ), + two_best=dict(argstr='-twobest %d', ), + two_blur=dict(argstr='-twoblur %f', ), + two_first=dict(argstr='-twofirst', ), + two_pass=dict(argstr='-twopass', ), + usetemp=dict(argstr='-usetemp', ), + verbose=dict(argstr='-verb', ), + warp_type=dict(argstr='-warp %s', ), + warpfreeze=dict(argstr='-warpfreeze', ), + weight=dict(argstr='-weight %s', ), + weight_file=dict( + argstr='-weight %s', + deprecated='1.0.0', + new_name='weight', + ), + zclip=dict(argstr='-zclip', ), + ) + inputs = Allineate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Allineate_outputs(): + output_map = dict( + allcostx=dict(), + out_file=dict(), + out_matrix=dict(), + out_param_file=dict(), + out_weight_file=dict(), + ) + outputs = Allineate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py new file mode 100644 index 0000000000..bb00b3b585 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AutoTLRC.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import AutoTLRC + + +def test_AutoTLRC_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base=dict( + argstr='-base %s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + ), + no_ss=dict(argstr='-no_ss', ), + outputtype=dict(), + ) + inputs = AutoTLRC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AutoTLRC_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AutoTLRC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py new file mode 100644 index 0000000000..424b7d25b1 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_AutoTcorrelate.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import AutoTcorrelate + + +def test_AutoTcorrelate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + eta2=dict(argstr='-eta2', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict(argstr='-mask %s', ), + mask_only_targets=dict( + argstr='-mask_only_targets', + xor=['mask_source'], + ), + mask_source=dict( + argstr='-mask_source %s', + xor=['mask_only_targets'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_similarity_matrix.1D', + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + ) + inputs = AutoTcorrelate.input_spec() + + for key, metadata 
in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AutoTcorrelate_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AutoTcorrelate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Autobox.py b/nipype/interfaces/afni/tests/test_auto_Autobox.py new file mode 100644 index 0000000000..f158263c54 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Autobox.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Autobox + + +def test_Autobox_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + ), + no_clustering=dict(argstr='-noclust', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_autobox', + ), + outputtype=dict(), + padding=dict(argstr='-npad %d', ), + ) + inputs = Autobox.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Autobox_outputs(): + output_map = dict( + out_file=dict(), + x_max=dict(), + x_min=dict(), + y_max=dict(), + y_min=dict(), + z_max=dict(), + z_min=dict(), + ) + outputs = Autobox.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Automask.py b/nipype/interfaces/afni/tests/test_auto_Automask.py new file mode 100644 index 0000000000..efffd19cba --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Automask.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Automask + + +def test_Automask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brain_file=dict( + argstr='-apply_prefix %s', + name_source='in_file', + name_template='%s_masked', + ), + clfrac=dict(argstr='-clfrac %s', ), + dilate=dict(argstr='-dilate %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict(argstr='-erode %s', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_mask', + ), + outputtype=dict(), + ) + inputs = Automask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Automask_outputs(): + output_map = dict( + brain_file=dict(), + out_file=dict(), + ) + outputs = Automask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Axialize.py b/nipype/interfaces/afni/tests/test_auto_Axialize.py new file mode 100644 index 0000000000..62b425c932 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Axialize.py @@ -0,0 +1,55 @@ +# 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Axialize + + +def test_Axialize_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + axial=dict( + argstr='-axial', + xor=['coronal', 'sagittal'], + ), + coronal=dict( + argstr='-coronal', + xor=['sagittal', 'axial'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict(argstr='-orient %s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_axialize', + ), + outputtype=dict(), + sagittal=dict( + argstr='-sagittal', + xor=['coronal', 'axial'], + ), + verb=dict(argstr='-verb', ), + ) + inputs = Axialize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Axialize_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Axialize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Bandpass.py b/nipype/interfaces/afni/tests/test_auto_Bandpass.py new file mode 100644 index 0000000000..fb0861a747 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Bandpass.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Bandpass + + +def test_Bandpass_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + automask=dict(argstr='-automask', ), + blur=dict(argstr='-blur %f', ), + despike=dict(argstr='-despike', ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr='%f', + mandatory=True, + position=-3, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + localPV=dict(argstr='-localPV %f', ), + lowpass=dict( + argstr='%f', + mandatory=True, + position=-2, + ), + mask=dict( + argstr='-mask %s', + position=2, + ), + nfft=dict(argstr='-nfft %d', ), + no_detrend=dict(argstr='-nodetrend', ), + normalize=dict(argstr='-norm', ), + notrans=dict(argstr='-notrans', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orthogonalize_dset=dict(argstr='-dsort %s', ), + orthogonalize_file=dict(argstr='-ort %s', ), + out_file=dict( + argstr='-prefix %s', + genfile=True, + name_source='in_file', + name_template='%s_bp', + position=1, + ), + outputtype=dict(), + tr=dict(argstr='-dt %f', ), + ) + inputs = Bandpass.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Bandpass_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Bandpass.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_BlurInMask.py b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py new file mode 100644 index 0000000000..334116d945 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_BlurInMask.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import BlurInMask + + +def 
test_BlurInMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + automask=dict(argstr='-automask', ), + environ=dict( + nohash=True, + usedefault=True, + ), + float_out=dict(argstr='-float', ), + fwhm=dict( + argstr='-FWHM %f', + mandatory=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=1, + ), + mask=dict(argstr='-mask %s', ), + multimask=dict(argstr='-Mmask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict( + argstr='%s', + position=2, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_blur', + position=-1, + ), + outputtype=dict(), + preserve=dict(argstr='-preserve', ), + ) + inputs = BlurInMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BlurInMask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BlurInMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py new file mode 100644 index 0000000000..2f88a1edcb --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_BlurToFWHM.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import BlurToFWHM + + +def test_BlurToFWHM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + automask=dict(argstr='-automask', ), + blurmaster=dict(argstr='-blurmaster %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict(argstr='-FWHM %f', ), + fwhmxy=dict(argstr='-FWHMxy %f', ), + in_file=dict( + argstr='-input %s', + mandatory=True, + ), + mask=dict(argstr='-mask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + ) + inputs = BlurToFWHM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BlurToFWHM_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BlurToFWHM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_BrickStat.py b/nipype/interfaces/afni/tests/test_auto_BrickStat.py new file mode 100644 index 0000000000..0056df5980 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_BrickStat.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import BrickStat + + +def test_BrickStat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + mask=dict( + argstr='-mask %s', + position=2, + ), + max=dict(argstr='-max', ), + mean=dict(argstr='-mean', ), + min=dict( + argstr='-min', + position=1, + ), + percentile=dict(argstr='-percentile %.3f %.3f %.3f', ), + slow=dict(argstr='-slow', ), + sum=dict(argstr='-sum', ), + var=dict(argstr='-var', ), + ) + inputs = BrickStat.input_spec() + + for key, metadata in 
list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BrickStat_outputs(): + output_map = dict(min_val=dict(), ) + outputs = BrickStat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Bucket.py b/nipype/interfaces/afni/tests/test_auto_Bucket.py new file mode 100644 index 0000000000..c3faf87a09 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Bucket.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Bucket + + +def test_Bucket_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_template='buck', + ), + outputtype=dict(), + ) + inputs = Bucket.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Bucket_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Bucket.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Calc.py b/nipype/interfaces/afni/tests/test_auto_Calc.py new file mode 100644 index 0000000000..28863d5a4b --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Calc.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Calc + + +def test_Calc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + argstr='-a %s', + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr='-b %s', + position=1, + ), + in_file_c=dict( + argstr='-c %s', + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict(argstr='', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file_a', + name_template='%s_calc', + ), + outputtype=dict(), + overwrite=dict(argstr='-overwrite', ), + single_idx=dict(), + start_idx=dict(requires=['stop_idx'], ), + stop_idx=dict(requires=['start_idx'], ), + ) + inputs = Calc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Calc_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Calc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Cat.py b/nipype/interfaces/afni/tests/test_auto_Cat.py new file mode 100644 index 0000000000..3da86c66d6 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Cat.py @@ -0,0 +1,75 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Cat + + +def test_Cat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + keepfree=dict(argstr='-nonfixed', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + omitconst=dict(argstr='-nonconst', ), + out_cint=dict( + xor=[ + 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_int' + ], ), + out_double=dict( + argstr='-d', + xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'], + ), + out_file=dict( + argstr='> %s', + mandatory=True, + position=-1, + usedefault=True, + ), + out_fint=dict( + argstr='-f', + xor=[ + 'out_format', 'out_nice', 'out_double', 'out_int', 'out_cint' + ], + ), + out_format=dict( + argstr='-form %s', + xor=['out_int', 'out_nice', 'out_double', 'out_fint', 'out_cint'], + ), + out_int=dict( + argstr='-i', + xor=[ + 'out_format', 'out_nice', 'out_double', 'out_fint', 'out_cint' + ], + ), + out_nice=dict( + argstr='-n', + xor=[ + 'out_format', 'out_int', 'out_double', 'out_fint', 'out_cint' + ], + ), + outputtype=dict(), + sel=dict(argstr='-sel %s', ), + stack=dict(argstr='-stack', ), + ) + inputs = Cat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Cat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Cat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_CatMatvec.py b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py new file mode 100644 index 0000000000..b67ab485d4 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_CatMatvec.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CatMatvec + + +def test_CatMatvec_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fourxfour=dict( + argstr='-4x4', + xor=['matrix', 'oneline'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + matrix=dict( + argstr='-MATRIX', + xor=['oneline', 'fourxfour'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oneline=dict( + argstr='-ONELINE', + xor=['matrix', 'fourxfour'], + ), + out_file=dict( + argstr=' > %s', + keep_extension=False, + mandatory=True, + name_source='in_file', + name_template='%s_cat.aff12.1D', + position=-1, + ), + outputtype=dict(), + ) + inputs = CatMatvec.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CatMatvec_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CatMatvec.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_CenterMass.py b/nipype/interfaces/afni/tests/test_auto_CenterMass.py new file mode 100644 index 0000000000..322218a50d --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_CenterMass.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CenterMass + + +def test_CenterMass_inputs(): + input_map = dict( + all_rois=dict(argstr='-all_rois', ), + args=dict(argstr='%s', ), + 
automask=dict(argstr='-automask', ), + cm_file=dict( + argstr='> %s', + hash_files=False, + keep_extension=False, + name_source='in_file', + name_template='%s_cm.out', + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + local_ijk=dict(argstr='-local_ijk', ), + mask_file=dict(argstr='-mask %s', ), + roi_vals=dict(argstr='-roi_vals %s', ), + set_cm=dict(argstr='-set %f %f %f', ), + ) + inputs = CenterMass.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CenterMass_outputs(): + output_map = dict( + cm=dict(), + cm_file=dict(), + out_file=dict(), + ) + outputs = CenterMass.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ClipLevel.py b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py new file mode 100644 index 0000000000..8b8c61208d --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ClipLevel.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ClipLevel + + +def test_ClipLevel_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + doall=dict( + argstr='-doall', + position=3, + xor='grad', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad=dict( + argstr='-grad %s', + position=3, + xor='doall', + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + mfrac=dict( + argstr='-mfrac %s', + position=2, + ), + ) + inputs = ClipLevel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ClipLevel_outputs(): + output_map = dict(clip_val=dict(), ) + outputs = ClipLevel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ConvertDset.py b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py new file mode 100644 index 0000000000..06ba3a54f2 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ConvertDset.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ConvertDset + + +def test_ConvertDset_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + mandatory=True, + position=-1, + ), + out_type=dict( + argstr='-o_%s', + mandatory=True, + position=0, + ), + outputtype=dict(), + ) + inputs = ConvertDset.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConvertDset_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ConvertDset.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/afni/tests/test_auto_Copy.py b/nipype/interfaces/afni/tests/test_auto_Copy.py new file mode 100644 index 0000000000..a8e67d5607 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Copy.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Copy + + +def test_Copy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='%s', + name_source='in_file', + name_template='%s_copy', + position=-1, + ), + outputtype=dict(), + verbose=dict(argstr='-verb', ), + ) + inputs = Copy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Copy_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Copy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Deconvolve.py b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py new file mode 100644 index 0000000000..ae42a77019 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Deconvolve.py @@ -0,0 +1,118 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Deconvolve + + +def test_Deconvolve_inputs(): + input_map = dict( + STATmask=dict(argstr='-STATmask %s', ), + TR_1D=dict(argstr='-TR_1D %f', ), + allzero_OK=dict(argstr='-allzero_OK', ), + args=dict(argstr='%s', ), + automask=dict(argstr='-automask', ), + cbucket=dict(argstr='-cbucket %s', ), + censor=dict(argstr='-censor %s', ), + dmbase=dict(argstr='-dmbase', ), + dname=dict(argstr='-D%s=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_TR=dict( + argstr='-force_TR %f', + position=0, + ), + fout=dict(argstr='-fout', ), + global_times=dict( + argstr='-global_times', + xor=['local_times'], + ), + glt_label=dict( + argstr='-glt_label %d %s...', + position=-1, + requires=['gltsym'], + ), + gltsym=dict( + argstr="-gltsym 'SYM: %s'...", + position=-2, + ), + goforit=dict(argstr='-GOFORIT %i', ), + in_files=dict( + argstr='-input %s', + copyfile=False, + position=1, + sep=' ', + ), + input1D=dict(argstr='-input1D %s', ), + legendre=dict(argstr='-legendre', ), + local_times=dict( + argstr='-local_times', + xor=['global_times'], + ), + mask=dict(argstr='-mask %s', ), + noblock=dict(argstr='-noblock', ), + nocond=dict(argstr='-nocond', ), + nodmbase=dict(argstr='-nodmbase', ), + nofdr=dict(argstr='-noFDR', ), + nolegendre=dict(argstr='-nolegendre', ), + nosvd=dict(argstr='-nosvd', ), + num_glt=dict( + argstr='-num_glt %d', + position=-3, + ), + num_stimts=dict( + argstr='-num_stimts %d', + position=-6, + ), + num_threads=dict( + argstr='-jobs %d', + nohash=True, + ), + ortvec=dict(argstr='-ortvec %s %s', ), + out_file=dict(argstr='-bucket %s', ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + rmsmin=dict(argstr='-rmsmin %f', ), + rout=dict(argstr='-rout', ), + sat=dict( + argstr='-sat', + xor=['trans'], + ), + singvals=dict(argstr='-singvals', ), + stim_label=dict( + argstr='-stim_label %d %s...', + position=-4, + requires=['stim_times'], + ), + stim_times=dict( + argstr="-stim_times 
%d %s '%s'...", + position=-5, + ), + stim_times_subtract=dict(argstr='-stim_times_subtract %f', ), + svd=dict(argstr='-svd', ), + tout=dict(argstr='-tout', ), + trans=dict( + argstr='-trans', + xor=['sat'], + ), + vout=dict(argstr='-vout', ), + x1D=dict(argstr='-x1D %s', ), + x1D_stop=dict(argstr='-x1D_stop', ), + ) + inputs = Deconvolve.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Deconvolve_outputs(): + output_map = dict( + cbucket=dict(), + out_file=dict(), + reml_script=dict(), + x1D=dict(), + ) + outputs = Deconvolve.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py new file mode 100644 index 0000000000..664cca5985 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_DegreeCentrality.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DegreeCentrality + + +def test_DegreeCentrality_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + autoclip=dict(argstr='-autoclip', ), + automask=dict(argstr='-automask', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict(argstr='-mask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oned_file=dict(argstr='-out1D %s', ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + sparsity=dict(argstr='-sparsity %f', ), + thresh=dict(argstr='-thresh %f', ), + ) + inputs = DegreeCentrality.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DegreeCentrality_outputs(): + output_map = dict( + oned_file=dict(), + out_file=dict(), + ) + outputs = DegreeCentrality.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Despike.py b/nipype/interfaces/afni/tests/test_auto_Despike.py new file mode 100644 index 0000000000..ff0b8b532a --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Despike.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Despike + + +def test_Despike_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_despike', + ), + outputtype=dict(), + ) + inputs = Despike.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Despike_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Despike.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Detrend.py b/nipype/interfaces/afni/tests/test_auto_Detrend.py new file mode 100644 index 0000000000..1938529cf7 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Detrend.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Detrend + + +def test_Detrend_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_detrend', + ), + outputtype=dict(), + ) + inputs = Detrend.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Detrend_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Detrend.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Dot.py b/nipype/interfaces/afni/tests/test_auto_Dot.py new file mode 100644 index 0000000000..7623e90ca7 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Dot.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Dot + + +def test_Dot_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + demean=dict(argstr='-demean', ), + docoef=dict(argstr='-docoef', ), + docor=dict(argstr='-docor', ), + dodice=dict(argstr='-dodice', ), + dodot=dict(argstr='-dodot', ), + doeta2=dict(argstr='-doeta2', ), + dosums=dict(argstr='-dosums', ), + environ=dict( + nohash=True, + usedefault=True, + ), + full=dict(argstr='-full', ), + in_files=dict( + argstr='%s ...', + position=-2, + ), + mask=dict(argstr='-mask %s', ), + mrange=dict(argstr='-mrange %s %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr=' |& tee %s', + position=-1, + ), + outputtype=dict(), + show_labels=dict(argstr='-show_labels', ), + upper=dict(argstr='-upper', ), + ) + inputs = Dot.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dot_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Dot.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ECM.py b/nipype/interfaces/afni/tests/test_auto_ECM.py new file mode 100644 index 0000000000..8a4793fb7f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ECM.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ECM + + +def test_ECM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + autoclip=dict(argstr='-autoclip', ), + automask=dict(argstr='-automask', ), + environ=dict( + nohash=True, + usedefault=True, + ), + eps=dict(argstr='-eps %f', ), + fecm=dict(argstr='-fecm', ), 
+ full=dict(argstr='-full', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict(argstr='-mask %s', ), + max_iter=dict(argstr='-max_iter %d', ), + memory=dict(argstr='-memory %f', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + scale=dict(argstr='-scale %f', ), + shift=dict(argstr='-shift %f', ), + sparsity=dict(argstr='-sparsity %f', ), + thresh=dict(argstr='-thresh %f', ), + ) + inputs = ECM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ECM_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ECM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Edge3.py b/nipype/interfaces/afni/tests/test_auto_Edge3.py new file mode 100644 index 0000000000..8fc6953c28 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Edge3.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Edge3 + + +def test_Edge3_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + datum=dict(argstr='-datum %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr='-fscale', + xor=['gscale', 'nscale', 'scale_floats'], + ), + gscale=dict( + argstr='-gscale', + xor=['fscale', 'nscale', 'scale_floats'], + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=0, + ), + nscale=dict( + argstr='-nscale', + xor=['fscale', 'gscale', 'scale_floats'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + position=-1, + ), + outputtype=dict(), + scale_floats=dict( + argstr='-scale_floats %f', + xor=['fscale', 'gscale', 'nscale'], + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = Edge3.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Edge3_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Edge3.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Eval.py b/nipype/interfaces/afni/tests/test_auto_Eval.py new file mode 100644 index 0000000000..35c1360a6e --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Eval.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Eval + + +def test_Eval_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict( + argstr='-expr "%s"', + mandatory=True, + position=3, + ), + in_file_a=dict( + argstr='-a %s', + mandatory=True, + position=0, + ), + in_file_b=dict( + argstr='-b %s', + position=1, + ), + in_file_c=dict( + argstr='-c %s', + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + other=dict(argstr='', ), + out1D=dict(argstr='-1D', ), + out_file=dict( + argstr='-prefix %s', + 
name_source='in_file_a', + name_template='%s_calc', + ), + outputtype=dict(), + single_idx=dict(), + start_idx=dict(requires=['stop_idx'], ), + stop_idx=dict(requires=['start_idx'], ), + ) + inputs = Eval.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Eval_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Eval.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_FWHMx.py b/nipype/interfaces/afni/tests/test_auto_FWHMx.py new file mode 100644 index 0000000000..14ab26fa4f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_FWHMx.py @@ -0,0 +1,84 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import FWHMx + + +def test_FWHMx_inputs(): + input_map = dict( + acf=dict( + argstr='-acf', + usedefault=True, + ), + args=dict(argstr='%s', ), + arith=dict( + argstr='-arith', + xor=['geom'], + ), + automask=dict( + argstr='-automask', + usedefault=True, + ), + combine=dict(argstr='-combine', ), + compat=dict(argstr='-compat', ), + demed=dict( + argstr='-demed', + xor=['detrend'], + ), + detrend=dict( + argstr='-detrend', + usedefault=True, + xor=['demed'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + geom=dict( + argstr='-geom', + xor=['arith'], + ), + in_file=dict( + argstr='-input %s', + mandatory=True, + ), + mask=dict(argstr='-mask %s', ), + out_detrend=dict( + argstr='-detprefix %s', + keep_extension=False, + name_source='in_file', + name_template='%s_detrend', + ), + out_file=dict( + argstr='> %s', + keep_extension=False, + name_source='in_file', + name_template='%s_fwhmx.out', + position=-1, + ), + out_subbricks=dict( + argstr='-out %s', + keep_extension=False, + name_source='in_file', + name_template='%s_subbricks.out', + ), + unif=dict(argstr='-unif', ), + ) + inputs = FWHMx.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FWHMx_outputs(): + output_map = dict( + acf_param=dict(), + fwhm=dict(), + out_acf=dict(), + out_detrend=dict(), + out_file=dict(), + out_subbricks=dict(), + ) + outputs = FWHMx.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Fim.py b/nipype/interfaces/afni/tests/test_auto_Fim.py new file mode 100644 index 0000000000..931e5ff244 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Fim.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Fim + + +def test_Fim_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fim_thr=dict( + argstr='-fim_thr %f', + position=3, + ), + ideal_file=dict( + argstr='-ideal_file %s', + mandatory=True, + position=2, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out=dict( + argstr='-out %s', + position=4, + ), + out_file=dict( + argstr='-bucket %s', + name_source='in_file', + 
name_template='%s_fim', + ), + outputtype=dict(), + ) + inputs = Fim.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Fim_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Fim.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Fourier.py b/nipype/interfaces/afni/tests/test_auto_Fourier.py new file mode 100644 index 0000000000..69cd955cbc --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Fourier.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Fourier + + +def test_Fourier_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr='-highpass %f', + mandatory=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + lowpass=dict( + argstr='-lowpass %f', + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_fourier', + ), + outputtype=dict(), + retrend=dict(argstr='-retrend', ), + ) + inputs = Fourier.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Fourier_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Fourier.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_GCOR.py b/nipype/interfaces/afni/tests/test_auto_GCOR.py new file mode 100644 index 0000000000..9d603dda36 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_GCOR.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import GCOR + + +def test_GCOR_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict( + argstr='-mask %s', + copyfile=False, + ), + nfirst=dict(argstr='-nfirst %d', ), + no_demean=dict(argstr='-no_demean', ), + ) + inputs = GCOR.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GCOR_outputs(): + output_map = dict(out=dict(), ) + outputs = GCOR.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Hist.py b/nipype/interfaces/afni/tests/test_auto_Hist.py new file mode 100644 index 0000000000..48499a9605 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Hist.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Hist + + +def test_Hist_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bin_width=dict(argstr='-binwidth %f', ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=1, + ), + mask=dict(argstr='-mask %s', ), + max_value=dict(argstr='-max %f', ), + min_value=dict(argstr='-min %f', ), + nbin=dict(argstr='-nbin %d', ), + out_file=dict( + argstr='-prefix %s', + keep_extension=False, + name_source=['in_file'], + name_template='%s_hist', + ), + out_show=dict( + argstr='> %s', + keep_extension=False, + name_source='in_file', + name_template='%s_hist.out', + position=-1, + ), + showhist=dict( + argstr='-showhist', + usedefault=True, + ), + ) + inputs = Hist.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Hist_outputs(): + output_map = dict( + out_file=dict(), + out_show=dict(), + ) + outputs = Hist.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_LFCD.py b/nipype/interfaces/afni/tests/test_auto_LFCD.py new file mode 100644 index 0000000000..9cbde10b56 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_LFCD.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import LFCD + + +def test_LFCD_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + autoclip=dict(argstr='-autoclip', ), + automask=dict(argstr='-automask', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict(argstr='-mask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + thresh=dict(argstr='-thresh %f', ), + ) + inputs = LFCD.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LFCD_outputs(): + output_map = dict(out_file=dict(), ) + outputs = LFCD.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_LocalBistat.py b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py new file mode 100644 index 0000000000..22f0b717d8 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_LocalBistat.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import LocalBistat + + +def test_LocalBistat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + automask=dict( + argstr='-automask', + xor=['weight_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_file2=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + mask_file=dict(argstr='-mask %s', ), + neighborhood=dict( + argstr="-nbhd '%s(%s)'", + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + keep_extension=True, + name_source='in_file1', + name_template='%s_bistat', + position=0, + ), + outputtype=dict(), + stat=dict( + 
argstr='-stat %s...', + mandatory=True, + ), + weight_file=dict( + argstr='-weight %s', + xor=['automask'], + ), + ) + inputs = LocalBistat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LocalBistat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = LocalBistat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_MaskTool.py b/nipype/interfaces/afni/tests/test_auto_MaskTool.py new file mode 100644 index 0000000000..1644bee878 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_MaskTool.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MaskTool + + +def test_MaskTool_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + count=dict( + argstr='-count', + position=2, + ), + datum=dict(argstr='-datum %s', ), + dilate_inputs=dict(argstr='-dilate_inputs %s', ), + dilate_results=dict(argstr='-dilate_results %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_dirs=dict( + argstr='-fill_dirs %s', + requires=['fill_holes'], + ), + fill_holes=dict(argstr='-fill_holes', ), + frac=dict(argstr='-frac %s', ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=-1, + ), + inter=dict(argstr='-inter', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_mask', + ), + outputtype=dict(), + union=dict(argstr='-union', ), + verbose=dict(argstr='-verb %s', ), + ) + inputs = MaskTool.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MaskTool_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MaskTool.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Maskave.py b/nipype/interfaces/afni/tests/test_auto_Maskave.py new file mode 100644 index 0000000000..a318e685a9 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Maskave.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Maskave + + +def test_Maskave_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + mask=dict( + argstr='-mask %s', + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='> %s', + keep_extension=True, + name_source='in_file', + name_template='%s_maskave.1D', + position=-1, + ), + outputtype=dict(), + quiet=dict( + argstr='-quiet', + position=2, + ), + ) + inputs = Maskave.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Maskave_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Maskave.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value 
in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Means.py b/nipype/interfaces/afni/tests/test_auto_Means.py new file mode 100644 index 0000000000..2e422e68db --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Means.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Means + + +def test_Means_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + count=dict(argstr='-count', ), + datum=dict(argstr='-datum %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file_a=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_file_b=dict( + argstr='%s', + position=-1, + ), + mask_inter=dict(argstr='-mask_inter', ), + mask_union=dict(argstr='-mask_union', ), + non_zero=dict(argstr='-non_zero', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file_a', + name_template='%s_mean', + ), + outputtype=dict(), + scale=dict(argstr='-%sscale', ), + sqr=dict(argstr='-sqr', ), + std_dev=dict(argstr='-stdev', ), + summ=dict(argstr='-sum', ), + ) + inputs = Means.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Means_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Means.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Merge.py b/nipype/interfaces/afni/tests/test_auto_Merge.py new file mode 100644 index 0000000000..c36ee6f7a1 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Merge.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Merge + + +def test_Merge_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + blurfwhm=dict( + argstr='-1blur_fwhm %d', + units='mm', + ), + doall=dict(argstr='-doall', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_files', + name_template='%s_merge', + ), + outputtype=dict(), + ) + inputs = Merge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Merge_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Merge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Notes.py b/nipype/interfaces/afni/tests/test_auto_Notes.py new file mode 100644 index 0000000000..965bacb000 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Notes.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Notes + + +def test_Notes_inputs(): + input_map = dict( + add=dict(argstr='-a "%s"', ), + add_history=dict( + argstr='-h "%s"', + xor=['rep_history'], + ), + args=dict(argstr='%s', ), + delete=dict(argstr='-d %d', ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict(argstr='%s', ), + outputtype=dict(), + rep_history=dict( + argstr='-HH "%s"', + xor=['add_history'], + ), + ses=dict(argstr='-ses', ), + ) + inputs = Notes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Notes_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Notes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py new file mode 100644 index 0000000000..48b59a2968 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_NwarpAdjust.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import NwarpAdjust + + +def test_NwarpAdjust_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict(argstr='-source %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + keep_extension=True, + name_source='in_files', + name_template='%s_NwarpAdjust', + requires=['in_files'], + ), + outputtype=dict(), + warps=dict( + argstr='-nwarp %s', + mandatory=True, + ), + ) + inputs = NwarpAdjust.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NwarpAdjust_outputs(): + output_map = dict(out_file=dict(), ) + outputs = NwarpAdjust.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpApply.py b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py new file mode 100644 index 0000000000..63f6baa044 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_NwarpApply.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import NwarpApply + + +def test_NwarpApply_inputs(): + input_map = dict( + ainterp=dict(argstr='-ainterp %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-source %s', + mandatory=True, + ), + interp=dict( + argstr='-interp %s', + usedefault=True, + ), + inv_warp=dict(argstr='-iwarp', ), + master=dict(argstr='-master %s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_Nwarp', + ), + quiet=dict( + argstr='-quiet', + xor=['verb'], + ), + short=dict(argstr='-short', ), + verb=dict( + argstr='-verb', + xor=['quiet'], + ), + warp=dict( + argstr='-nwarp %s', + mandatory=True, + ), + ) + inputs = NwarpApply.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NwarpApply_outputs(): + output_map = dict(out_file=dict(), ) + outputs = NwarpApply.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value 
in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_NwarpCat.py b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py new file mode 100644 index 0000000000..c6b3689c9e --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_NwarpCat.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import NwarpCat + + +def test_NwarpCat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict(argstr='-expad %d', ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + interp=dict( + argstr='-interp %s', + usedefault=True, + ), + inv_warp=dict(argstr='-iwarp', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_files', + name_template='%s_NwarpCat', + ), + outputtype=dict(), + space=dict(argstr='-space %s', ), + verb=dict(argstr='-verb', ), + ) + inputs = NwarpCat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NwarpCat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = NwarpCat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py new file mode 100644 index 0000000000..fe037851af --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_OneDToolPy.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import OneDToolPy + + +def test_OneDToolPy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + censor_motion=dict(argstr='-censor_motion %f %s', ), + censor_prev_TR=dict(argstr='-censor_prev_TR', ), + demean=dict(argstr='-demean', ), + derivative=dict(argstr='-derivative', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-infile %s', + mandatory=True, + ), + out_file=dict( + argstr='-write %s', + xor=['show_cormat_warnings'], + ), + outputtype=dict(), + py27_path=dict(usedefault=True, ), + set_nruns=dict(argstr='-set_nruns %d', ), + show_censor_count=dict(argstr='-show_censor_count', ), + show_cormat_warnings=dict( + argstr='-show_cormat_warnings |& tee %s', + position=-1, + xor=['out_file'], + ), + show_indices_interest=dict(argstr='-show_indices_interest', ), + show_trs_run=dict(argstr='-show_trs_run %d', ), + show_trs_uncensored=dict(argstr='-show_trs_uncensored %s', ), + ) + inputs = OneDToolPy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OneDToolPy_outputs(): + output_map = dict(out_file=dict(), ) + outputs = OneDToolPy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_OutlierCount.py b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py new file mode 100644 index 0000000000..a63fbb8bef --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_OutlierCount.py @@ -0,0 +1,77 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import OutlierCount + + +def test_OutlierCount_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + autoclip=dict( + argstr='-autoclip', + usedefault=True, + xor=['mask'], + ), + automask=dict( + argstr='-automask', + usedefault=True, + xor=['mask'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr='-fraction', + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + interval=dict( + argstr='-range', + usedefault=True, + ), + legendre=dict( + argstr='-legendre', + usedefault=True, + ), + mask=dict( + argstr='-mask %s', + xor=['autoclip', 'automask'], + ), + out_file=dict( + keep_extension=False, + name_source=['in_file'], + name_template='%s_outliers', + ), + outliers_file=dict( + argstr='-save %s', + keep_extension=True, + name_source=['in_file'], + name_template='%s_outliers', + output_name='out_outliers', + ), + polort=dict(argstr='-polort %d', ), + qthr=dict( + argstr='-qthr %.5f', + usedefault=True, + ), + save_outliers=dict(usedefault=True, ), + ) + inputs = OutlierCount.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OutlierCount_outputs(): + output_map = dict( + out_file=dict(), + out_outliers=dict(), + ) + outputs = OutlierCount.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_QualityIndex.py b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py new file mode 100644 index 0000000000..679a3e0393 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_QualityIndex.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import QualityIndex + + +def test_QualityIndex_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + autoclip=dict( + argstr='-autoclip', + usedefault=True, + xor=['mask'], + ), + automask=dict( + argstr='-automask', + usedefault=True, + xor=['mask'], + ), + clip=dict(argstr='-clip %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + interval=dict( + argstr='-range', + usedefault=True, + ), + mask=dict( + argstr='-mask %s', + xor=['autoclip', 'automask'], + ), + out_file=dict( + argstr='> %s', + keep_extension=False, + name_source=['in_file'], + name_template='%s_tqual', + position=-1, + ), + quadrant=dict( + argstr='-quadrant', + usedefault=True, + ), + spearman=dict( + argstr='-spearman', + usedefault=True, + ), + ) + inputs = QualityIndex.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_QualityIndex_outputs(): + output_map = dict(out_file=dict(), ) + outputs = QualityIndex.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Qwarp.py b/nipype/interfaces/afni/tests/test_auto_Qwarp.py new file mode 100644 index 0000000000..f6df3d0ab5 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Qwarp.py @@ -0,0 +1,168 @@ +# 
AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Qwarp + + +def test_Qwarp_inputs(): + input_map = dict( + Qfinal=dict(argstr='-Qfinal', ), + Qonly=dict(argstr='-Qonly', ), + allineate=dict(argstr='-allineate', ), + allineate_opts=dict( + argstr='-allineate_opts %s', + requires=['allineate'], + ), + allsave=dict( + argstr='-allsave', + xor=['nopadWARP', 'duplo', 'plusminus'], + ), + args=dict(argstr='%s', ), + ballopt=dict( + argstr='-ballopt', + xor=['workhard', 'boxopt'], + ), + base_file=dict( + argstr='-base %s', + copyfile=False, + mandatory=True, + ), + baxopt=dict( + argstr='-boxopt', + xor=['workhard', 'ballopt'], + ), + blur=dict(argstr='-blur %s', ), + duplo=dict( + argstr='-duplo', + xor=[ + 'gridlist', 'maxlev', 'inilev', 'iniwarp', 'plusminus', + 'allsave' + ], + ), + emask=dict( + argstr='-emask %s', + copyfile=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expad=dict( + argstr='-expad %d', + xor=['nopadWARP'], + ), + gridlist=dict( + argstr='-gridlist %s', + copyfile=False, + xor=['duplo', 'plusminus'], + ), + hel=dict( + argstr='-hel', + xor=['nmi', 'mi', 'lpc', 'lpa', 'pear'], + ), + in_file=dict( + argstr='-source %s', + copyfile=False, + mandatory=True, + ), + inilev=dict( + argstr='-inilev %d', + xor=['duplo'], + ), + iniwarp=dict( + argstr='-iniwarp %s', + xor=['duplo'], + ), + iwarp=dict( + argstr='-iwarp', + xor=['plusminus'], + ), + lpa=dict( + argstr='-lpa', + xor=['nmi', 'mi', 'lpc', 'hel', 'pear'], + ), + lpc=dict( + argstr='-lpc', + position=-2, + xor=['nmi', 'mi', 'hel', 'lpa', 'pear'], + ), + maxlev=dict( + argstr='-maxlev %d', + position=-1, + xor=['duplo'], + ), + mi=dict( + argstr='-mi', + xor=['mi', 'hel', 'lpc', 'lpa', 'pear'], + ), + minpatch=dict(argstr='-minpatch %d', ), + nmi=dict( + argstr='-nmi', + xor=['nmi', 'hel', 'lpc', 'lpa', 'pear'], + ), + noXdis=dict(argstr='-noXdis', ), + noYdis=dict(argstr='-noYdis', ), + noZdis=dict(argstr='-noZdis', ), + noneg=dict(argstr='-noneg', ), + nopad=dict(argstr='-nopad', ), + nopadWARP=dict( + argstr='-nopadWARP', + xor=['allsave', 'expad'], + ), + nopenalty=dict(argstr='-nopenalty', ), + nowarp=dict(argstr='-nowarp', ), + noweight=dict(argstr='-noweight', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + genfile=True, + name_source=['in_file'], + name_template='%s_QW', + ), + out_weight_file=dict(argstr='-wtprefix %s', ), + outputtype=dict(), + overwrite=dict(argstr='-overwrite', ), + pblur=dict(argstr='-pblur %s', ), + pear=dict(argstr='-pear', ), + penfac=dict(argstr='-penfac %f', ), + plusminus=dict( + argstr='-plusminus', + xor=['duplo', 'allsave', 'iwarp'], + ), + quiet=dict( + argstr='-quiet', + xor=['verb'], + ), + resample=dict(argstr='-resample', ), + verb=dict( + argstr='-verb', + xor=['quiet'], + ), + wball=dict(argstr='-wball %s', ), + weight=dict(argstr='-weight %s', ), + wmask=dict(argstr='-wpass %s %f', ), + workhard=dict( + argstr='-workhard', + xor=['boxopt', 'ballopt'], + ), + ) + inputs = Qwarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Qwarp_outputs(): + output_map = dict( + base_warp=dict(), + source_warp=dict(), + warped_base=dict(), + warped_source=dict(), + weights=dict(), + ) + outputs = Qwarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + 
assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py new file mode 100644 index 0000000000..2b823cf8af --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_QwarpPlusMinus.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import QwarpPlusMinus + + +def test_QwarpPlusMinus_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base_file=dict( + argstr='-base %s', + copyfile=False, + mandatory=True, + ), + blur=dict(argstr='-blur %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + minpatch=dict(argstr='-minpatch %d', ), + nopadWARP=dict(argstr='-nopadWARP', ), + noweight=dict(argstr='-noweight', ), + pblur=dict(argstr='-pblur %s', ), + source_file=dict( + argstr='-source %s', + copyfile=False, + mandatory=True, + ), + ) + inputs = QwarpPlusMinus.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_QwarpPlusMinus_outputs(): + output_map = dict( + base_warp=dict(), + source_warp=dict(), + warped_base=dict(), + warped_source=dict(), + ) + outputs = QwarpPlusMinus.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ROIStats.py b/nipype/interfaces/afni/tests/test_auto_ROIStats.py new file mode 100644 index 0000000000..57eb59d284 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ROIStats.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ROIStats + + +def test_ROIStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + mask=dict( + argstr='-mask %s', + position=3, + ), + mask_f2short=dict( + argstr='-mask_f2short', + position=2, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = ROIStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ROIStats_outputs(): + output_map = dict(stats=dict(), ) + outputs = ROIStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Refit.py b/nipype/interfaces/afni/tests/test_auto_Refit.py new file mode 100644 index 0000000000..4983eafc81 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Refit.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Refit + + +def test_Refit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atrcopy=dict(argstr='-atrcopy %s %s', ), + atrfloat=dict(argstr='-atrfloat %s %s', ), + atrint=dict(argstr='-atrint %s %s', ), + atrstring=dict(argstr='-atrstring %s %s', ), + deoblique=dict(argstr='-deoblique', ), + duporigin_file=dict(argstr='-duporigin %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, 
+ position=-1, + ), + nosaveatr=dict(argstr='-nosaveatr', ), + saveatr=dict(argstr='-saveatr', ), + space=dict(argstr='-space %s', ), + xdel=dict(argstr='-xdel %f', ), + xorigin=dict(argstr='-xorigin %s', ), + xyzscale=dict(argstr='-xyzscale %f', ), + ydel=dict(argstr='-ydel %f', ), + yorigin=dict(argstr='-yorigin %s', ), + zdel=dict(argstr='-zdel %f', ), + zorigin=dict(argstr='-zorigin %s', ), + ) + inputs = Refit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Refit_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Refit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Remlfit.py b/nipype/interfaces/afni/tests/test_auto_Remlfit.py new file mode 100644 index 0000000000..4cdc8b2ff2 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Remlfit.py @@ -0,0 +1,109 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Remlfit + + +def test_Remlfit_inputs(): + input_map = dict( + STATmask=dict(argstr='-STATmask %s', ), + addbase=dict( + argstr='-addbase %s', + copyfile=False, + sep=' ', + ), + args=dict(argstr='%s', ), + automask=dict( + argstr='-automask', + usedefault=True, + ), + dsort=dict( + argstr='-dsort %s', + copyfile=False, + ), + dsort_nods=dict( + argstr='-dsort_nods', + requires=['dsort'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + errts_file=dict(argstr='-Rerrts %s', ), + fitts_file=dict(argstr='-Rfitts %s', ), + fout=dict(argstr='-fout', ), + glt_file=dict(argstr='-Rglt %s', ), + gltsym=dict(argstr='-gltsym "%s" %s...', ), + in_files=dict( + argstr='-input "%s"', + copyfile=False, + mandatory=True, + sep=' ', + ), + mask=dict(argstr='-mask %s', ), + matim=dict( + argstr='-matim %s', + xor=['matrix'], + ), + matrix=dict( + argstr='-matrix %s', + mandatory=True, + ), + nobout=dict(argstr='-nobout', ), + nodmbase=dict( + argstr='-nodmbase', + requires=['addbase', 'dsort'], + ), + nofdr=dict(argstr='-noFDR', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + obeta=dict(argstr='-Obeta %s', ), + obuck=dict(argstr='-Obuck %s', ), + oerrts=dict(argstr='-Oerrts %s', ), + ofitts=dict(argstr='-Ofitts %s', ), + oglt=dict(argstr='-Oglt %s', ), + out_file=dict(argstr='-Rbuck %s', ), + outputtype=dict(), + ovar=dict(argstr='-Ovar %s', ), + polort=dict( + argstr='-polort %d', + xor=['matrix'], + ), + quiet=dict(argstr='-quiet', ), + rbeta_file=dict(argstr='-Rbeta %s', ), + rout=dict(argstr='-rout', ), + slibase=dict(argstr='-slibase %s', ), + slibase_sm=dict(argstr='-slibase_sm %s', ), + tout=dict(argstr='-tout', ), + usetemp=dict(argstr='-usetemp', ), + var_file=dict(argstr='-Rvar %s', ), + verb=dict(argstr='-verb', ), + wherr_file=dict(argstr='-Rwherr %s', ), + ) + inputs = Remlfit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Remlfit_outputs(): + output_map = dict( + errts_file=dict(), + fitts_file=dict(), + glt_file=dict(), + obeta=dict(), + obuck=dict(), + oerrts=dict(), + ofitts=dict(), + oglt=dict(), + out_file=dict(), + ovar=dict(), + rbeta_file=dict(), + var_file=dict(), + wherr_file=dict(), + ) + outputs = Remlfit.output_spec() + + for key, 
metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Resample.py b/nipype/interfaces/afni/tests/test_auto_Resample.py new file mode 100644 index 0000000000..9ab2309307 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Resample.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Resample + + +def test_Resample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-inset %s', + copyfile=False, + mandatory=True, + position=-1, + ), + master=dict(argstr='-master %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orientation=dict(argstr='-orient %s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_resample', + ), + outputtype=dict(), + resample_mode=dict(argstr='-rmode %s', ), + voxel_size=dict(argstr='-dxyz %f %f %f', ), + ) + inputs = Resample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Resample_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Resample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Retroicor.py b/nipype/interfaces/afni/tests/test_auto_Retroicor.py new file mode 100644 index 0000000000..4c2a1c2393 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Retroicor.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Retroicor + + +def test_Retroicor_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + card=dict( + argstr='-card %s', + position=-2, + ), + cardphase=dict( + argstr='-cardphase %s', + hash_files=False, + position=-6, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr='-order %s', + position=-5, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_retroicor', + position=1, + ), + outputtype=dict(), + resp=dict( + argstr='-resp %s', + position=-3, + ), + respphase=dict( + argstr='-respphase %s', + hash_files=False, + position=-7, + ), + threshold=dict( + argstr='-threshold %d', + position=-4, + ), + ) + inputs = Retroicor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Retroicor_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Retroicor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTest.py b/nipype/interfaces/afni/tests/test_auto_SVMTest.py new file mode 100644 index 0000000000..4b31d3a11c --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_SVMTest.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import 
unicode_literals +from ..svm import SVMTest + + +def test_SVMTest_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + classout=dict(argstr='-classout', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-testvol %s', + mandatory=True, + ), + model=dict( + argstr='-model %s', + mandatory=True, + ), + multiclass=dict(argstr='-multiclass %s', ), + nodetrend=dict(argstr='-nodetrend', ), + nopredcensord=dict(argstr='-nopredcensord', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict(argstr='%s', ), + out_file=dict( + argstr='-predictions %s', + name_template='%s_predictions', + ), + outputtype=dict(), + testlabels=dict(argstr='-testlabels %s', ), + ) + inputs = SVMTest.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVMTest_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SVMTest.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_SVMTrain.py b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py new file mode 100644 index 0000000000..ddb3f8c05f --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_SVMTrain.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..svm import SVMTrain + + +def test_SVMTrain_inputs(): + input_map = dict( + alphas=dict( + argstr='-alpha %s', + name_source='in_file', + name_template='%s_alphas', + suffix='_alphas', + ), + args=dict(argstr='%s', ), + censor=dict(argstr='-censor %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-trainvol %s', + copyfile=False, + mandatory=True, + ), + kernel=dict(argstr='-kernel %s', ), + mask=dict( + argstr='-mask %s', + copyfile=False, + position=-1, + ), + max_iterations=dict(argstr='-max_iterations %d', ), + model=dict( + argstr='-model %s', + name_source='in_file', + name_template='%s_model', + suffix='_model', + ), + nomodelmask=dict(argstr='-nomodelmask', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict(argstr='%s', ), + out_file=dict( + argstr='-bucket %s', + name_source='in_file', + name_template='%s_vectors', + suffix='_bucket', + ), + outputtype=dict(), + trainlabels=dict(argstr='-trainlabels %s', ), + ttype=dict( + argstr='-type %s', + mandatory=True, + ), + w_out=dict(argstr='-wout', ), + ) + inputs = SVMTrain.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVMTrain_outputs(): + output_map = dict( + alphas=dict(), + model=dict(), + out_file=dict(), + ) + outputs = SVMTrain.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Seg.py b/nipype/interfaces/afni/tests/test_auto_Seg.py new file mode 100644 index 0000000000..e93d81a7aa --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Seg.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Seg + + +def test_Seg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + 
bias_classes=dict(argstr='-bias_classes %s', ), + bias_fwhm=dict(argstr='-bias_fwhm %f', ), + blur_meth=dict(argstr='-blur_meth %s', ), + bmrf=dict(argstr='-bmrf %f', ), + classes=dict(argstr='-classes %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-anat %s', + copyfile=True, + mandatory=True, + position=-1, + ), + main_N=dict(argstr='-main_N %d', ), + mask=dict( + argstr='-mask %s', + mandatory=True, + position=-2, + ), + mixfloor=dict(argstr='-mixfloor %f', ), + mixfrac=dict(argstr='-mixfrac %s', ), + prefix=dict(argstr='-prefix %s', ), + ) + inputs = Seg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Seg_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Seg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_SkullStrip.py b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py new file mode 100644 index 0000000000..e960369787 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_SkullStrip.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SkullStrip + + +def test_SkullStrip_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_skullstrip', + ), + outputtype=dict(), + ) + inputs = SkullStrip.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SkullStrip_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SkullStrip.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Synthesize.py b/nipype/interfaces/afni/tests/test_auto_Synthesize.py new file mode 100644 index 0000000000..87278098fb --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Synthesize.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Synthesize + + +def test_Synthesize_inputs(): + input_map = dict( + TR=dict(argstr='-TR %f', ), + args=dict(argstr='%s', ), + cbucket=dict( + argstr='-cbucket %s', + copyfile=False, + mandatory=True, + ), + cenfill=dict(argstr='-cenfill %s', ), + dry_run=dict(argstr='-dry', ), + environ=dict( + nohash=True, + usedefault=True, + ), + matrix=dict( + argstr='-matrix %s', + copyfile=False, + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_template='syn', + ), + outputtype=dict(), + select=dict( + argstr='-select %s', + mandatory=True, + ), + ) + inputs = Synthesize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Synthesize_outputs(): + output_map = dict(out_file=dict(), 
) + outputs = Synthesize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCat.py b/nipype/interfaces/afni/tests/test_auto_TCat.py new file mode 100644 index 0000000000..5c51b02637 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCat.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TCat + + +def test_TCat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr=' %s', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_files', + name_template='%s_tcat', + ), + outputtype=dict(), + rlt=dict( + argstr='-rlt%s', + position=1, + ), + verbose=dict(argstr='-verb', ), + ) + inputs = TCat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TCat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py new file mode 100644 index 0000000000..09b21ea408 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCatSubBrick.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TCatSubBrick + + +def test_TCatSubBrick_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s%s ...', + copyfile=False, + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + genfile=True, + ), + outputtype=dict(), + rlt=dict( + argstr='-rlt%s', + position=1, + ), + ) + inputs = TCatSubBrick.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCatSubBrick_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TCatSubBrick.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCorr1D.py b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py new file mode 100644 index 0000000000..e1b23a3387 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCorr1D.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TCorr1D + + +def test_TCorr1D_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ktaub=dict( + argstr=' -ktaub', + position=1, + xor=['pearson', 'spearman', 'quadrant'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + keep_extension=True, + name_source='xset', + 
name_template='%s_correlation.nii.gz', + ), + outputtype=dict(), + pearson=dict( + argstr=' -pearson', + position=1, + xor=['spearman', 'quadrant', 'ktaub'], + ), + quadrant=dict( + argstr=' -quadrant', + position=1, + xor=['pearson', 'spearman', 'ktaub'], + ), + spearman=dict( + argstr=' -spearman', + position=1, + xor=['pearson', 'quadrant', 'ktaub'], + ), + xset=dict( + argstr=' %s', + copyfile=False, + mandatory=True, + position=-2, + ), + y_1d=dict( + argstr=' %s', + mandatory=True, + position=-1, + ), + ) + inputs = TCorr1D.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCorr1D_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TCorr1D.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrMap.py b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py new file mode 100644 index 0000000000..32778fcf11 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCorrMap.py @@ -0,0 +1,143 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TCorrMap + + +def test_TCorrMap_inputs(): + input_map = dict( + absolute_threshold=dict( + argstr='-Thresh %f %s', + name_source='in_file', + suffix='_thresh', + xor=('absolute_threshold', 'var_absolute_threshold', + 'var_absolute_threshold_normalize'), + ), + args=dict(argstr='%s', ), + automask=dict(argstr='-automask', ), + average_expr=dict( + argstr='-Aexpr %s %s', + name_source='in_file', + suffix='_aexpr', + xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + ), + average_expr_nonzero=dict( + argstr='-Cexpr %s %s', + name_source='in_file', + suffix='_cexpr', + xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + ), + bandpass=dict(argstr='-bpass %f %f', ), + blur_fwhm=dict(argstr='-Gblur %f', ), + correlation_maps=dict( + argstr='-CorrMap %s', + name_source='in_file', + ), + correlation_maps_masked=dict( + argstr='-CorrMask %s', + name_source='in_file', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expr=dict(), + histogram=dict( + argstr='-Hist %d %s', + name_source='in_file', + suffix='_hist', + ), + histogram_bin_numbers=dict(), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + ), + mask=dict(argstr='-mask %s', ), + mean_file=dict( + argstr='-Mean %s', + name_source='in_file', + suffix='_mean', + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_file'], + name_template='%s_afni', + ), + outputtype=dict(), + pmean=dict( + argstr='-Pmean %s', + name_source='in_file', + suffix='_pmean', + ), + polort=dict(argstr='-polort %d', ), + qmean=dict( + argstr='-Qmean %s', + name_source='in_file', + suffix='_qmean', + ), + regress_out_timeseries=dict(argstr='-ort %s', ), + seeds=dict( + argstr='-seed %s', + xor='seeds_width', + ), + seeds_width=dict( + argstr='-Mseed %f', + xor='seeds', + ), + sum_expr=dict( + argstr='-Sexpr %s %s', + name_source='in_file', + suffix='_sexpr', + xor=('average_expr', 'average_expr_nonzero', 'sum_expr'), + ), + thresholds=dict(), + var_absolute_threshold=dict( + argstr='-VarThresh %f %f %f %s', + name_source='in_file', + suffix='_varthresh', + xor=('absolute_threshold', 'var_absolute_threshold', + 
'var_absolute_threshold_normalize'), + ), + var_absolute_threshold_normalize=dict( + argstr='-VarThreshN %f %f %f %s', + name_source='in_file', + suffix='_varthreshn', + xor=('absolute_threshold', 'var_absolute_threshold', + 'var_absolute_threshold_normalize'), + ), + zmean=dict( + argstr='-Zmean %s', + name_source='in_file', + suffix='_zmean', + ), + ) + inputs = TCorrMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCorrMap_outputs(): + output_map = dict( + absolute_threshold=dict(), + average_expr=dict(), + average_expr_nonzero=dict(), + correlation_maps=dict(), + correlation_maps_masked=dict(), + histogram=dict(), + mean_file=dict(), + pmean=dict(), + qmean=dict(), + sum_expr=dict(), + var_absolute_threshold=dict(), + var_absolute_threshold_normalize=dict(), + zmean=dict(), + ) + outputs = TCorrMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TCorrelate.py b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py new file mode 100644 index 0000000000..8d773f429e --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TCorrelate.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TCorrelate + + +def test_TCorrelate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='xset', + name_template='%s_tcorr', + ), + outputtype=dict(), + pearson=dict(argstr='-pearson', ), + polort=dict(argstr='-polort %d', ), + xset=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-2, + ), + yset=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + ) + inputs = TCorrelate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCorrelate_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TCorrelate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TNorm.py b/nipype/interfaces/afni/tests/test_auto_TNorm.py new file mode 100644 index 0000000000..3fb246684b --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TNorm.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TNorm + + +def test_TNorm_inputs(): + input_map = dict( + L1fit=dict(argstr='-L1fit', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + norm1=dict(argstr='-norm1', ), + norm2=dict(argstr='-norm2', ), + normR=dict(argstr='-normR', ), + normx=dict(argstr='-normx', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_tnorm', + ), + outputtype=dict(), + polort=dict(argstr='-polort %s', ), + ) + inputs = TNorm.input_spec() + + for key, 
metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TNorm_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TNorm.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TProject.py b/nipype/interfaces/afni/tests/test_auto_TProject.py new file mode 100644 index 0000000000..ba4efd6189 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TProject.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TProject + + +def test_TProject_inputs(): + input_map = dict( + TR=dict(argstr='-TR %g', ), + args=dict(argstr='%s', ), + automask=dict( + argstr='-automask', + xor=['mask'], + ), + bandpass=dict(argstr='-bandpass %g %g', ), + blur=dict(argstr='-blur %g', ), + cenmode=dict(argstr='-cenmode %s', ), + censor=dict(argstr='-censor %s', ), + censortr=dict(argstr='-CENSORTR %s', ), + concat=dict(argstr='-concat %s', ), + dsort=dict(argstr='-dsort %s...', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=1, + ), + mask=dict(argstr='-mask %s', ), + noblock=dict(argstr='-noblock', ), + norm=dict(argstr='-norm', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + ort=dict(argstr='-ort %s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_tproject', + position=-1, + ), + outputtype=dict(), + polort=dict(argstr='-polort %d', ), + stopband=dict(argstr='-stopband %g %g', ), + ) + inputs = TProject.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TProject_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TProject.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TShift.py b/nipype/interfaces/afni/tests/test_auto_TShift.py new file mode 100644 index 0000000000..a1208753d9 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TShift.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import TShift + + +def test_TShift_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore=dict(argstr='-ignore %s', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + interp=dict(argstr='-%s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_tshift', + ), + outputtype=dict(), + rlt=dict(argstr='-rlt', ), + rltplus=dict(argstr='-rlt+', ), + slice_timing=dict( + argstr='-tpattern @%s', + xor=['tpattern'], + ), + tpattern=dict( + argstr='-tpattern %s', + xor=['slice_timing'], + ), + tr=dict(argstr='-TR %s', ), + tslice=dict( + argstr='-slice %s', + xor=['tzero'], + ), + tzero=dict( + argstr='-tzero %s', + xor=['tslice'], + ), + ) + inputs = TShift.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TShift_outputs(): + output_map = dict( + out_file=dict(), + timing_file=dict(), + ) + outputs = TShift.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_TStat.py b/nipype/interfaces/afni/tests/test_auto_TStat.py new file mode 100644 index 0000000000..5a3ebd8a21 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_TStat.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TStat + + +def test_TStat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask=dict(argstr='-mask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + options=dict(argstr='%s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_tstat', + ), + outputtype=dict(), + ) + inputs = TStat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TStat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TStat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_To3D.py b/nipype/interfaces/afni/tests/test_auto_To3D.py new file mode 100644 index 0000000000..66ccabefcb --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_To3D.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import To3D + + +def test_To3D_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + assumemosaic=dict(argstr='-assume_dicom_mosaic', ), + datatype=dict(argstr='-datum %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filetype=dict(argstr='-%s', ), + funcparams=dict(argstr='-time:zt %s alt+z2', ), + in_folder=dict( + argstr='%s/*.dcm', + mandatory=True, + position=-1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source=['in_folder'], + name_template='%s', + ), + outputtype=dict(), + skipoutliers=dict(argstr='-skip_outliers', ), + ) + inputs = To3D.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_To3D_outputs(): + output_map = dict(out_file=dict(), ) + outputs = To3D.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Undump.py b/nipype/interfaces/afni/tests/test_auto_Undump.py new file mode 100644 index 0000000000..07d0ff8e81 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Undump.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Undump + + +def test_Undump_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + coordinates_specification=dict(argstr='-%s', 
), + datatype=dict(argstr='-datum %s', ), + default_value=dict(argstr='-dval %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_value=dict(argstr='-fval %f', ), + head_only=dict(argstr='-head_only', ), + in_file=dict( + argstr='-master %s', + copyfile=False, + mandatory=True, + position=-1, + ), + mask_file=dict(argstr='-mask %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + orient=dict(argstr='-orient %s', ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + ), + outputtype=dict(), + srad=dict(argstr='-srad %f', ), + ) + inputs = Undump.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Undump_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Undump.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Unifize.py b/nipype/interfaces/afni/tests/test_auto_Unifize.py new file mode 100644 index 0000000000..3215d51e64 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Unifize.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Unifize + + +def test_Unifize_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cl_frac=dict(argstr='-clfrac %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr='-EPI', + requires=['no_duplo', 't2'], + xor=['gm'], + ), + gm=dict(argstr='-GM', ), + in_file=dict( + argstr='-input %s', + copyfile=False, + mandatory=True, + position=-1, + ), + no_duplo=dict(argstr='-noduplo', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_unifized', + ), + outputtype=dict(), + quiet=dict(argstr='-quiet', ), + rbt=dict(argstr='-rbt %f %f %f', ), + scale_file=dict(argstr='-ssave %s', ), + t2=dict(argstr='-T2', ), + t2_up=dict(argstr='-T2up %f', ), + urad=dict(argstr='-Urad %s', ), + ) + inputs = Unifize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Unifize_outputs(): + output_map = dict( + out_file=dict(), + scale_file=dict(), + ) + outputs = Unifize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Volreg.py b/nipype/interfaces/afni/tests/test_auto_Volreg.py new file mode 100644 index 0000000000..293b7613e8 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Volreg.py @@ -0,0 +1,78 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Volreg + + +def test_Volreg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + basefile=dict( + argstr='-base %s', + position=-6, + ), + copyorigin=dict(argstr='-twodup', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + in_weight_volume=dict(argstr="-weight '%s[%d]'", ), + interp=dict(argstr='-%s', ), + md1d_file=dict( + argstr='-maxdisp1D %s', + keep_extension=True, + name_source='in_file', + 
name_template='%s_md.1D', + position=-4, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oned_file=dict( + argstr='-1Dfile %s', + keep_extension=True, + name_source='in_file', + name_template='%s.1D', + ), + oned_matrix_save=dict( + argstr='-1Dmatrix_save %s', + keep_extension=True, + name_source='in_file', + name_template='%s.aff12.1D', + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_file', + name_template='%s_volreg', + ), + outputtype=dict(), + timeshift=dict(argstr='-tshift 0', ), + verbose=dict(argstr='-verbose', ), + zpad=dict( + argstr='-zpad %d', + position=-5, + ), + ) + inputs = Volreg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Volreg_outputs(): + output_map = dict( + md1d_file=dict(), + oned_file=dict(), + oned_matrix_save=dict(), + out_file=dict(), + ) + outputs = Volreg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Warp.py b/nipype/interfaces/afni/tests/test_auto_Warp.py new file mode 100644 index 0000000000..b85692310a --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Warp.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Warp + + +def test_Warp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deoblique=dict(argstr='-deoblique', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridset=dict(argstr='-gridset %s', ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + interp=dict(argstr='-%s', ), + matparent=dict(argstr='-matparent %s', ), + mni2tta=dict(argstr='-mni2tta', ), + newgrid=dict(argstr='-newgrid %f', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + oblique_parent=dict(argstr='-oblique_parent %s', ), + out_file=dict( + argstr='-prefix %s', + keep_extension=True, + name_source='in_file', + name_template='%s_warp', + ), + outputtype=dict(), + save_warp=dict(requires=['verbose'], ), + tta2mni=dict(argstr='-tta2mni', ), + verbose=dict(argstr='-verb', ), + zpad=dict(argstr='-zpad %d', ), + ) + inputs = Warp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Warp_outputs(): + output_map = dict( + out_file=dict(), + warp_file=dict(), + ) + outputs = Warp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_ZCutUp.py b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py new file mode 100644 index 0000000000..cacb6b1534 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_ZCutUp.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ZCutUp + + +def test_ZCutUp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + keep=dict(argstr='-keep %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + 
name_source='in_file', + name_template='%s_zcutup', + ), + outputtype=dict(), + ) + inputs = ZCutUp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ZCutUp_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ZCutUp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Zcat.py b/nipype/interfaces/afni/tests/test_auto_Zcat.py new file mode 100644 index 0000000000..3a2b7ff853 --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Zcat.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Zcat + + +def test_Zcat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + datum=dict(argstr='-datum %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fscale=dict( + argstr='-fscale', + xor=['nscale'], + ), + in_files=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + nscale=dict( + argstr='-nscale', + xor=['fscale'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_source='in_files', + name_template='%s_zcat', + ), + outputtype=dict(), + verb=dict(argstr='-verb', ), + ) + inputs = Zcat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Zcat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Zcat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/afni/tests/test_auto_Zeropad.py b/nipype/interfaces/afni/tests/test_auto_Zeropad.py new file mode 100644 index 0000000000..cfba8e2f3c --- /dev/null +++ b/nipype/interfaces/afni/tests/test_auto_Zeropad.py @@ -0,0 +1,88 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Zeropad + + +def test_Zeropad_inputs(): + input_map = dict( + A=dict( + argstr='-A %i', + xor=['master'], + ), + AP=dict( + argstr='-AP %i', + xor=['master'], + ), + I=dict( + argstr='-I %i', + xor=['master'], + ), + IS=dict( + argstr='-IS %i', + xor=['master'], + ), + L=dict( + argstr='-L %i', + xor=['master'], + ), + P=dict( + argstr='-P %i', + xor=['master'], + ), + R=dict( + argstr='-R %i', + xor=['master'], + ), + RL=dict( + argstr='-RL %i', + xor=['master'], + ), + S=dict( + argstr='-S %i', + xor=['master'], + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + master=dict( + argstr='-master %s', + xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm'], + ), + mm=dict( + argstr='-mm', + xor=['master'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='-prefix %s', + name_template='zeropad', + ), + outputtype=dict(), + z=dict( + argstr='-z %i', + xor=['master'], + ), + ) + inputs = Zeropad.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_Zeropad_outputs():
+    output_map = dict(out_file=dict(), )
+    outputs = Zeropad.output_spec()
+
+    for key, metadata in list(output_map.items()):
+        for metakey, value in list(metadata.items()):
+            assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/afni/tests/test_extra_Deconvolve.py b/nipype/interfaces/afni/tests/test_extra_Deconvolve.py
new file mode 100644
index 0000000000..93adc3b748
--- /dev/null
+++ b/nipype/interfaces/afni/tests/test_extra_Deconvolve.py
@@ -0,0 +1,11 @@
+"""Test afni deconvolve"""
+
+from ..model import Deconvolve
+
+def test_x1dstop():
+    deconv = Deconvolve()
+    deconv.inputs.out_file = 'file.nii'
+    assert 'out_file' in deconv._list_outputs()
+    deconv.inputs.x1D_stop = True
+    assert 'out_file' not in deconv._list_outputs()
+    assert 'cbucket' not in deconv._list_outputs()
diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py
new file mode 100644
index 0000000000..ce32d183ef
--- /dev/null
+++ b/nipype/interfaces/afni/utils.py
@@ -0,0 +1,2985 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""AFNI utility interfaces
+
+Examples
+--------
+See the docstrings of the individual classes for examples.
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import str, bytes
+
+import os
+import os.path as op
+import re
+import numpy as np
+
+from ...utils.filemanip import (load_json, save_json, split_filename)
+from ..base import (CommandLineInputSpec, CommandLine, Directory, TraitedSpec,
+                    traits, isdefined, File, InputMultiPath, Undefined, Str)
+from ...external.due import BibTeX
+from .base import (AFNICommandBase, AFNICommand, AFNICommandInputSpec,
+                   AFNICommandOutputSpec, AFNIPythonCommandInputSpec,
+                   AFNIPythonCommand)
+
+
+class ABoverlapInputSpec(AFNICommandInputSpec):
+    in_file_a = File(
+        desc='input file A',
+        argstr='%s',
+        position=-3,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    in_file_b = File(
+        desc='input file B',
+        argstr='%s',
+        position=-2,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    out_file = File(
+        desc='collect output to a file', argstr=' |& tee %s', position=-1)
+    no_automask = traits.Bool(
+        desc='consider input datasets as masks', argstr='-no_automask')
+    quiet = traits.Bool(
+        desc='be as quiet as possible (without being entirely mute)',
+        argstr='-quiet')
+    verb = traits.Bool(
+        desc='print out some progress reports (to stderr)', argstr='-verb')
+
+
+class ABoverlap(AFNICommand):
+    """Output (to screen) is a count of various things about how
+    the automasks of datasets A and B overlap or don't overlap.
+
+    For complete details, see the `3dABoverlap Documentation.
+    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dABoverlap.html>`_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> aboverlap = afni.ABoverlap()
+    >>> aboverlap.inputs.in_file_a = 'functional.nii'
+    >>> aboverlap.inputs.in_file_b = 'structural.nii'
+    >>> aboverlap.inputs.out_file = 'out.mask_ae_overlap.txt'
+    >>> aboverlap.cmdline
+    '3dABoverlap functional.nii structural.nii |& tee out.mask_ae_overlap.txt'
+    >>> res = aboverlap.run()  # doctest: +SKIP
+
+    """
+
+    _cmd = '3dABoverlap'
+    input_spec = ABoverlapInputSpec
+    output_spec = AFNICommandOutputSpec
+
+
+class AFNItoNIFTIInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file to 3dAFNItoNIFTI',
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    out_file = File(
+        name_template='%s.nii',
+        desc='output image file name',
+        argstr='-prefix %s',
+        name_source='in_file',
+        hash_files=False)
+    float_ = traits.Bool(
+        desc='Force the output dataset to be 32-bit floats. This option '
+        'should be used when the input AFNI dataset has different float '
+        'scale factors for different sub-bricks, an option that '
+        'NIfTI-1.1 does not support.',
+        argstr='-float')
+    pure = traits.Bool(
+        desc='Do NOT write an AFNI extension field into the output file. Only '
+        'use this option if needed. You can also use the \'nifti_tool\' '
+        'program to strip extensions from a file.',
+        argstr='-pure')
+    denote = traits.Bool(
+        desc='When writing the AFNI extension field, remove text notes that '
+        'might contain subject identifying information.',
+        argstr='-denote')
+    oldid = traits.Bool(
+        desc='Give the new dataset the input dataset'
+        's AFNI ID code.',
+        argstr='-oldid',
+        xor=['newid'])
+    newid = traits.Bool(
+        desc='Give the new dataset a new AFNI ID code, to distinguish it from '
+        'the input dataset.',
+        argstr='-newid',
+        xor=['oldid'])
+
+
+class AFNItoNIFTI(AFNICommand):
+    """Converts AFNI format files to NIFTI format. This can also convert 2D or
+    1D data, which you can numpy.squeeze() to remove extra dimensions.
+
+    For complete details, see the `3dAFNItoNIFTI Documentation.
+    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAFNItoNIFTI.html>`_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> a2n = afni.AFNItoNIFTI()
+    >>> a2n.inputs.in_file = 'afni_output.3D'
+    >>> a2n.inputs.out_file = 'afni_output.nii'
+    >>> a2n.cmdline
+    '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D'
+    >>> res = a2n.run()  # doctest: +SKIP
+
+    """
+
+    _cmd = '3dAFNItoNIFTI'
+    input_spec = AFNItoNIFTIInputSpec
+    output_spec = AFNICommandOutputSpec
+
+    def _overload_extension(self, value):
+        path, base, ext = split_filename(value)
+        if ext.lower() not in ['.nii', '.nii.gz', '.1d', '.1D']:
+            ext += '.nii'
+        return os.path.join(path, base + ext)
+
+    def _gen_filename(self, name):
+        return os.path.abspath(super(AFNItoNIFTI, self)._gen_filename(name))
+
+
+class AutoboxInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        argstr='-input %s',
+        desc='input file',
+        copyfile=False)
+    padding = traits.Int(
+        argstr='-npad %d',
+        desc='Number of extra voxels to pad on each side of box')
+    out_file = File(
+        argstr='-prefix %s', name_source='in_file', name_template='%s_autobox')
+    no_clustering = traits.Bool(
+        argstr='-noclust',
+        desc='Don\'t do any clustering to find box. Any non-zero voxel will '
+        'be preserved in the cropped volume. The default method uses '
+        'some clustering to find the cropping box, and will clip off '
+        'small isolated blobs.')
+
+
+class AutoboxOutputSpec(TraitedSpec):  # out_file not mandatory
+    x_min = traits.Int()
+    x_max = traits.Int()
+    y_min = traits.Int()
+    y_max = traits.Int()
+    z_min = traits.Int()
+    z_max = traits.Int()
+
+    out_file = File(desc='output file')
+
+
+class Autobox(AFNICommand):
+    """Computes size of a box that fits around the volume.
+    Also can be used to crop the volume to that box.
+
+    For complete details, see the `3dAutobox Documentation.
+    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dAutobox.html>`_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> abox = afni.Autobox()
+    >>> abox.inputs.in_file = 'structural.nii'
+    >>> abox.inputs.padding = 5
+    >>> abox.cmdline
+    '3dAutobox -input structural.nii -prefix structural_autobox -npad 5'
+    >>> res = abox.run()  # doctest: +SKIP
+
+    """
+
+    _cmd = '3dAutobox'
+    input_spec = AutoboxInputSpec
+    output_spec = AutoboxOutputSpec
+
+    def aggregate_outputs(self, runtime=None, needed_outputs=None):
+        outputs = super(Autobox, self).aggregate_outputs(
+            runtime, needed_outputs)
+        # The named groups match the traits of AutoboxOutputSpec above.
+        pattern = 'x=(?P<x_min>-?\d+)\.\.(?P<x_max>-?\d+) '\
+                  'y=(?P<y_min>-?\d+)\.\.(?P<y_max>-?\d+) '\
+                  'z=(?P<z_min>-?\d+)\.\.(?P<z_max>-?\d+)'
+        for line in runtime.stderr.split('\n'):
+            m = re.search(pattern, line)
+            if m:
+                d = m.groupdict()
+                outputs.trait_set(**{k: int(d[k]) for k in d.keys()})
+        return outputs
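+
+
+# Illustrative note (an editor's sketch, not taken from the AFNI docs): the
+# ``aggregate_outputs`` above assumes 3dAutobox reports the clip box on
+# stderr as a line of the form
+#
+#   x=4..124 y=11..153 z=0..97
+#
+# from which the named groups give x_min=4, x_max=124, and so on; the
+# coordinate values here are made up for illustration.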
+
+
+class BrickStatInputSpec(CommandLineInputSpec):
+    in_file = File(
+        desc='input file to 3dmaskave',
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        exists=True)
+    mask = File(
+        desc='-mask dset = use dset as mask to include/exclude voxels',
+        argstr='-mask %s',
+        position=2,
+        exists=True)
+    min = traits.Bool(
+        desc='print the minimum value in dataset', argstr='-min', position=1)
+    slow = traits.Bool(
+        desc='read the whole dataset to find the min and max values',
+        argstr='-slow')
+    max = traits.Bool(
+        desc='print the maximum value in the dataset', argstr='-max')
+    mean = traits.Bool(
+        desc='print the mean value in the dataset', argstr='-mean')
+    sum = traits.Bool(
+        desc='print the sum of values in the dataset', argstr='-sum')
+    var = traits.Bool(desc='print the variance in the dataset', argstr='-var')
+    percentile = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc='p0 ps p1 write the percentile values starting '
+        'at p0% and ending at p1% at a step of ps%. '
+        'only one sub-brick is accepted.',
+        argstr='-percentile %.3f %.3f %.3f')
+
+
+class BrickStatOutputSpec(TraitedSpec):
+    min_val = traits.Float(desc='output')
+
+
+class BrickStat(AFNICommandBase):
+    """Computes maximum and/or minimum voxel values of an input dataset.
+    TODO Add optional arguments.
+
+    For complete details, see the `3dBrickStat Documentation.
+    <https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBrickStat.html>`_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> brickstat = afni.BrickStat()
+    >>> brickstat.inputs.in_file = 'functional.nii'
+    >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz'
+    >>> brickstat.inputs.min = True
+    >>> brickstat.cmdline
+    '3dBrickStat -min -mask skeleton_mask.nii.gz functional.nii'
+    >>> res = brickstat.run()  # doctest: +SKIP
+
+    """
+    _cmd = '3dBrickStat'
+    input_spec = BrickStatInputSpec
+    output_spec = BrickStatOutputSpec
+
+    def aggregate_outputs(self, runtime=None, needed_outputs=None):
+
+        outputs = self._outputs()
+
+        outfile = os.path.join(os.getcwd(), 'stat_result.json')
+
+        if runtime is None:
+            try:
+                min_val = load_json(outfile)['stat']
+            except IOError:
+                return self.run().outputs
+        else:
+            min_val = []
+            for line in runtime.stdout.split('\n'):
+                if line:
+                    values = line.split()
+                    if len(values) > 1:
+                        min_val.append([float(val) for val in values])
+                    else:
+                        min_val.extend([float(val) for val in values])
+
+            if len(min_val) == 1:
+                min_val = min_val[0]
+            save_json(outfile, dict(stat=min_val))
+        outputs.min_val = min_val
+
+        return outputs
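+
+
+# Illustrative note (an editor's sketch, not taken from the 3dBrickStat
+# docs): given the doctest above, 3dBrickStat writes the requested statistic
+# to stdout as plain numbers, e.g. a single line '0.0', so ``min_val``
+# becomes the float 0.0, while a multi-column line such as '-2.5 3.75' would
+# be collected as [[-2.5, 3.75]]. The result is cached in stat_result.json so
+# a re-run with runtime=None can recover it through ``load_json``.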
Thus, to create a bucket dataset using the' + ' intensity from dataset A and the threshold from dataset B, and' + ' calling the output dataset C, you would type' + ' 3dbucket -prefix C -fbuc \'A+orig[0]\' -fbuc \'B+orig[1]\'' + 'WARNING: using this program, it is possible to create a dataset that' + ' has different basic datum types for different sub-bricks' + ' (e.g., shorts for brick 0, floats for brick 1).' + ' Do NOT do this! Very few AFNI programs will work correctly' + ' with such datasets!') + out_file = File(argstr='-prefix %s', name_template='buck') + + +class Bucket(AFNICommand): + """Concatenate sub-bricks from input datasets into one big + 'bucket' dataset. + + For complete details, see the `3dbucket Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> bucket = afni.Bucket() + >>> bucket.inputs.in_file = [('functional.nii',"{2..$}"), ('functional.nii',"{1}")] + >>> bucket.inputs.out_file = 'vr_base' + >>> bucket.cmdline + "3dbucket -prefix vr_base functional.nii'{2..$}' functional.nii'{1}'" + >>> res = bucket.run() # doctest: +SKIP + + """ + + _cmd = '3dbucket' + input_spec = BucketInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_file': + return spec.argstr % ( + ' '.join([i[0] + "'" + i[1] + "'" for i in value])) + return super(Bucket, self)._format_arg(name, spec, value) + + +class CalcInputSpec(AFNICommandInputSpec): + in_file_a = File( + desc='input file to 3dcalc', + argstr='-a %s', + position=0, + mandatory=True, + exists=True) + in_file_b = File( + desc='operand file to 3dcalc', argstr='-b %s', position=1, exists=True) + in_file_c = File( + desc='operand file to 3dcalc', argstr='-c %s', position=2, exists=True) + out_file = File( + name_template='%s_calc', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file_a') + expr = Str(desc='expr', argstr='-expr "%s"', position=3, mandatory=True) + start_idx = traits.Int( + desc='start index for in_file_a', requires=['stop_idx']) + stop_idx = traits.Int( + desc='stop index for in_file_a', requires=['start_idx']) + single_idx = traits.Int(desc='volume index for in_file_a') + overwrite = traits.Bool(desc='overwrite output', argstr='-overwrite') + other = File(desc='other options', argstr='') + + +class Calc(AFNICommand): + """This program does voxel-by-voxel arithmetic on 3D datasets. + + For complete details, see the `3dcalc Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> calc = afni.Calc() + >>> calc.inputs.in_file_a = 'functional.nii' + >>> calc.inputs.in_file_b = 'functional2.nii' + >>> calc.inputs.expr='a*b' + >>> calc.inputs.out_file = 'functional_calc.nii.gz' + >>> calc.inputs.outputtype = 'NIFTI' + >>> calc.cmdline # doctest: +ELLIPSIS + '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz' + >>> res = calc.run() # doctest: +SKIP + + >>> from nipype.interfaces import afni + >>> calc = afni.Calc() + >>> calc.inputs.in_file_a = 'functional.nii' + >>> calc.inputs.expr = '1' + >>> calc.inputs.out_file = 'rm.epi.all1' + >>> calc.inputs.overwrite = True + >>> calc.cmdline + '3dcalc -a functional.nii -expr "1" -prefix rm.epi.all1 -overwrite' + >>> res = calc.run() # doctest: +SKIP + + """ + + _cmd = '3dcalc' + input_spec = CalcInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'in_file_a': + arg = trait_spec.argstr % value + if isdefined(self.inputs.start_idx): + arg += '[%d..%d]' % (self.inputs.start_idx, + self.inputs.stop_idx) + if isdefined(self.inputs.single_idx): + arg += '[%d]' % (self.inputs.single_idx) + return arg + return super(Calc, self)._format_arg(name, trait_spec, value) + + def _parse_inputs(self, skip=None): + """Skip the arguments without argstr metadata + """ + return super( + Calc, self)._parse_inputs(skip=('start_idx', 'stop_idx', 'other')) + + +class CatInputSpec(AFNICommandInputSpec): + in_files = traits.List( + File(exists=True), argstr="%s", mandatory=True, position=-2) + out_file = File( + argstr='> %s', + value='catout.1d', + usedefault=True, + desc='output (concatenated) file name', + position=-1, + mandatory=True) + omitconst = traits.Bool( + desc='Omit columns that are identically constant from output.', + argstr='-nonconst') + keepfree = traits.Bool( + desc='Keep only columns that are marked as \'free\' in the ' + '3dAllineate header from \'-1Dparam_save\'. ' + 'If there is no such header, all columns are kept.', + argstr='-nonfixed') + out_format = traits.Enum( + 'int', + 'nice', + 'double', + 'fint', + 'cint', + argstr='-form %s', + desc='specify data type for output. 
Valid types are \'int\', '
+        '\'nice\', \'double\', \'fint\', and \'cint\'.',
+        xor=['out_int', 'out_nice', 'out_double', 'out_fint', 'out_cint'])
+    stack = traits.Bool(
+        desc='Stack the columns of the resultant matrix in the output.',
+        argstr='-stack')
+    sel = traits.Str(
+        desc='Apply the same column/row selection string to all filenames '
+        'on the command line.',
+        argstr='-sel %s')
+    out_int = traits.Bool(
+        desc='specify int data type for output',
+        argstr='-i',
+        xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_cint'])
+    out_nice = traits.Bool(
+        desc='specify nice data type for output',
+        argstr='-n',
+        xor=['out_format', 'out_int', 'out_double', 'out_fint', 'out_cint'])
+    out_double = traits.Bool(
+        desc='specify double data type for output',
+        argstr='-d',
+        xor=['out_format', 'out_nice', 'out_int', 'out_fint', 'out_cint'])
+    out_fint = traits.Bool(
+        desc='specify int, rounded down, data type for output',
+        argstr='-f',
+        xor=['out_format', 'out_nice', 'out_double', 'out_int', 'out_cint'])
+    out_cint = traits.Bool(
+        desc='specify int, rounded up, data type for output',
+        xor=['out_format', 'out_nice', 'out_double', 'out_fint', 'out_int'])
+
+
+class Cat(AFNICommand):
+    """1dcat takes as input one or more 1D files, and writes out a 1D file
+    containing the side-by-side concatenation of all or a subset of the
+    columns from the input files.
+
+    For complete details, see the `1dcat Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> cat1d = afni.Cat()
+    >>> cat1d.inputs.sel = "'[0,2]'"
+    >>> cat1d.inputs.in_files = ['f1.1D', 'f2.1D']
+    >>> cat1d.inputs.out_file = 'catout.1d'
+    >>> cat1d.cmdline
+    "1dcat -sel '[0,2]' f1.1D f2.1D > catout.1d"
+    >>> res = cat1d.run() # doctest: +SKIP
+
+    """
+
+    _cmd = '1dcat'
+    input_spec = CatInputSpec
+    output_spec = AFNICommandOutputSpec
+
+
+class CatMatvecInputSpec(AFNICommandInputSpec):
+    in_file = traits.List(
+        traits.Tuple(traits.Str(), traits.Str()),
+        desc="list of tuples of mfiles and associated opkeys",
+        mandatory=True,
+        argstr="%s",
+        position=-2)
+    out_file = File(
+        argstr=" > %s",
+        name_template='%s_cat.aff12.1D',
+        name_source='in_file',
+        keep_extension=False,
+        desc="File to write concatenated matvecs to",
+        position=-1,
+        mandatory=True)
+    matrix = traits.Bool(
+        desc="indicates that the resulting matrix will "
+        "be written to outfile in the 'MATRIX(...)' format (FORM 3). "
+        "This feature could be used, with clever scripting, to input "
+        "a matrix directly on the command line to program 3dWarp.",
+        argstr="-MATRIX",
+        xor=['oneline', 'fourxfour'])
+    oneline = traits.Bool(
+        desc="indicates that the resulting matrix "
+        "will simply be written as 12 numbers on one line.",
+        argstr="-ONELINE",
+        xor=['matrix', 'fourxfour'])
+    fourxfour = traits.Bool(
+        desc="Output matrix in augmented form (last row is 0 0 0 1). "
+        "This option does not work with -MATRIX or -ONELINE.",
+        argstr="-4x4",
+        xor=['matrix', 'oneline'])
+
+
+class CatMatvec(AFNICommand):
+    """Catenates 3D rotation+shift matrix+vector transformations.
+
+    For complete details, see the `cat_matvec Documentation.
+    `_
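+
+    Each ``in_file`` tuple pairs a matrix file with an opkey; for
+    instance (an illustrative sketch with a hypothetical filename, not
+    a verified doctest), the tuple ``('mat.aff12.1D', 'I')`` is rendered
+    on the command line as ``mat.aff12.1D -I``, i.e. the inverse of
+    that matrix, per ``_format_arg`` below.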
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> cmv = afni.CatMatvec()
+    >>> cmv.inputs.in_file = [('structural.BRIK::WARP_DATA','I')]
+    >>> cmv.inputs.out_file = 'warp.anat.Xat.1D'
+    >>> cmv.cmdline
+    'cat_matvec structural.BRIK::WARP_DATA -I > warp.anat.Xat.1D'
+    >>> res = cmv.run() # doctest: +SKIP
+
+    """
+
+    _cmd = 'cat_matvec'
+    input_spec = CatMatvecInputSpec
+    output_spec = AFNICommandOutputSpec
+
+    def _format_arg(self, name, spec, value):
+        if name == 'in_file':
+            # render each (mfile, opkey) tuple as "mfile -opkey"
+            return spec.argstr % (' '.join([i[0] + ' -' + i[1]
+                                            for i in value]))
+        return super(CatMatvec, self)._format_arg(name, spec, value)
+
+
+class CenterMassInputSpec(CommandLineInputSpec):
+    in_file = File(
+        desc='input file to 3dCM',
+        argstr='%s',
+        position=-2,
+        mandatory=True,
+        exists=True,
+        copyfile=True)
+    cm_file = File(
+        name_source='in_file',
+        name_template='%s_cm.out',
+        hash_files=False,
+        keep_extension=False,
+        desc="File to write center of mass to",
+        argstr="> %s",
+        position=-1)
+    mask_file = File(
+        desc='Only voxels with nonzero values in the provided mask will be '
+        'averaged.',
+        argstr='-mask %s',
+        exists=True)
+    automask = traits.Bool(
+        desc='Generate the mask automatically', argstr='-automask')
+    set_cm = traits.Tuple(
+        (traits.Float(), traits.Float(), traits.Float()),
+        desc='After computing the center of mass, set the origin fields in '
+        'the header so that the center of mass will be at (x,y,z) in '
+        'DICOM coords.',
+        argstr='-set %f %f %f')
+    local_ijk = traits.Bool(
+        desc='Output values as (i,j,k) in local orientation',
+        argstr='-local_ijk')
+    roi_vals = traits.List(
+        traits.Int,
+        desc='Compute center of mass for each blob with voxel value of v0, '
+        'v1, v2, etc. This option is handy for getting ROI centers of '
+        'mass.',
+        argstr='-roi_vals %s')
+    all_rois = traits.Bool(
+        desc='Don\'t bother listing the values of ROIs you want: The program '
+        'will find all of them and produce a full list',
+        argstr='-all_rois')
+
+
+class CenterMassOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='output file')
+    cm_file = File(desc='file with the center of mass coordinates')
+    cm = traits.List(
+        traits.Tuple(traits.Float(), traits.Float(), traits.Float()),
+        desc='center of mass')
+
+
+class CenterMass(AFNICommandBase):
+    """Computes center of mass using 3dCM command
+
+    .. note::
+
+      By default, the output is (x,y,z) values in DICOM coordinates. But
+      as of Dec, 2016, there are now command line switches for other options.
+
+
+    For complete details, see the `3dCM Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> cm = afni.CenterMass()
+    >>> cm.inputs.in_file = 'structural.nii'
+    >>> cm.inputs.cm_file = 'cm.txt'
+    >>> cm.inputs.roi_vals = [2, 10]
+    >>> cm.cmdline
+    '3dCM -roi_vals 2 10 structural.nii > cm.txt'
+    >>> res = cm.run() # doctest: +SKIP
+    """
+
+    _cmd = '3dCM'
+    input_spec = CenterMassInputSpec
+    output_spec = CenterMassOutputSpec
+
+    def _list_outputs(self):
+        outputs = super(CenterMass, self)._list_outputs()
+        outputs['out_file'] = os.path.abspath(self.inputs.in_file)
+        outputs['cm_file'] = os.path.abspath(self.inputs.cm_file)
+        sout = np.loadtxt(outputs['cm_file'], ndmin=2)
+        outputs['cm'] = [tuple(s) for s in sout]
+        return outputs
+
+
+class ConvertDsetInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file to ConvertDset',
+        argstr='-input %s',
+        position=-2,
+        mandatory=True,
+        exists=True)
+
+    out_file = File(
+        desc='output file for ConvertDset',
+        argstr='-prefix %s',
+        position=-1,
+        mandatory=True)
+
+    out_type = traits.Enum(
+        ('niml', 'niml_asc', 'niml_bi',
+         '1D', '1Dp', '1Dpt',
+         'gii', 'gii_asc', 'gii_b64', 'gii_b64gz'),
+        desc='output type',
+        argstr='-o_%s',
+        mandatory=True,
+        position=0)
+
+
+class ConvertDset(AFNICommandBase):
+    """Converts a surface dataset from one format to another.
+
+    For complete details, see the `ConvertDset Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> convertdset = afni.ConvertDset()
+    >>> convertdset.inputs.in_file = 'lh.pial_converted.gii'
+    >>> convertdset.inputs.out_type = 'niml_asc'
+    >>> convertdset.inputs.out_file = 'lh.pial_converted.niml.dset'
+    >>> convertdset.cmdline
+    'ConvertDset -o_niml_asc -input lh.pial_converted.gii -prefix lh.pial_converted.niml.dset'
+    >>> res = convertdset.run() # doctest: +SKIP
+    """
+
+    _cmd = 'ConvertDset'
+    input_spec = ConvertDsetInputSpec
+    output_spec = AFNICommandOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
+
+
+class CopyInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file to 3dcopy',
+        argstr='%s',
+        position=-2,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    out_file = File(
+        name_template='%s_copy',
+        desc='output image file name',
+        argstr='%s',
+        position=-1,
+        name_source='in_file')
+    verbose = traits.Bool(desc='print progress reports', argstr='-verb')
+
+
+class Copy(AFNICommand):
+    """Copies an image of one type to an image of the same
+    or different type using 3dcopy command
+
+    For complete details, see the `3dcopy Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> copy3d = afni.Copy() + >>> copy3d.inputs.in_file = 'functional.nii' + >>> copy3d.cmdline + '3dcopy functional.nii functional_copy' + >>> res = copy3d.run() # doctest: +SKIP + + >>> from copy import deepcopy + >>> copy3d_2 = deepcopy(copy3d) + >>> copy3d_2.inputs.outputtype = 'NIFTI' + >>> copy3d_2.cmdline + '3dcopy functional.nii functional_copy.nii' + >>> res = copy3d_2.run() # doctest: +SKIP + + >>> copy3d_3 = deepcopy(copy3d) + >>> copy3d_3.inputs.outputtype = 'NIFTI_GZ' + >>> copy3d_3.cmdline + '3dcopy functional.nii functional_copy.nii.gz' + >>> res = copy3d_3.run() # doctest: +SKIP + + >>> copy3d_4 = deepcopy(copy3d) + >>> copy3d_4.inputs.out_file = 'new_func.nii' + >>> copy3d_4.cmdline + '3dcopy functional.nii new_func.nii' + >>> res = copy3d_4.run() # doctest: +SKIP + + """ + + _cmd = '3dcopy' + input_spec = CopyInputSpec + output_spec = AFNICommandOutputSpec + + +class DotInputSpec(AFNICommandInputSpec): + in_files = traits.List( + (File()), + desc="list of input files, possibly with subbrick selectors", + argstr="%s ...", + position=-2) + out_file = File( + desc='collect output to a file', argstr=' |& tee %s', position=-1) + mask = File(desc='Use this dataset as a mask', argstr='-mask %s') + mrange = traits.Tuple( + (traits.Float(), traits.Float()), + desc='Means to further restrict the voxels from \'mset\' so that' + 'only those mask values within this range (inclusive) willbe used.', + argstr='-mrange %s %s') + demean = traits.Bool( + desc= + 'Remove the mean from each volume prior to computing the correlation', + argstr='-demean') + docor = traits.Bool( + desc='Return the correlation coefficient (default).', argstr='-docor') + dodot = traits.Bool( + desc='Return the dot product (unscaled).', argstr='-dodot') + docoef = traits.Bool( + desc= + 'Return the least square fit coefficients {{a,b}} so that dset2 is approximately a + b*dset1', + argstr='-docoef') + dosums = traits.Bool( + desc= + 'Return the 6 numbers xbar= ybar= <(x-xbar)^2> <(y-ybar)^2> <(x-xbar)(y-ybar)> and the correlation coefficient.', + argstr='-dosums') + dodice = traits.Bool( + desc='Return the Dice coefficient (the Sorensen-Dice index).', + argstr='-dodice') + doeta2 = traits.Bool( + desc='Return eta-squared (Cohen, NeuroImage 2008).', argstr='-doeta2') + full = traits.Bool( + desc= + 'Compute the whole matrix. A waste of time, but handy for parsing.', + argstr='-full') + show_labels = traits.Bool( + desc= + 'Print sub-brick labels to help identify what is being correlated. This option is useful when' + 'you have more than 2 sub-bricks at input.', + argstr='-show_labels') + upper = traits.Bool( + desc='Compute upper triangular matrix', argstr='-upper') + + +class Dot(AFNICommand): + """Correlation coefficient between sub-brick pairs. + All datasets in in_files list will be concatenated. + You can use sub-brick selectors in the file specification. + Note: This program is not efficient when more than two subbricks are input. + For complete details, see the `3ddot Documentation. 
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> dot = afni.Dot()
+    >>> dot.inputs.in_files = ['functional.nii[0]', 'structural.nii']
+    >>> dot.inputs.dodice = True
+    >>> dot.inputs.out_file = 'out.mask_ae_dice.txt'
+    >>> dot.cmdline
+    '3dDot -dodice functional.nii[0] structural.nii |& tee out.mask_ae_dice.txt'
+    >>> res = dot.run() # doctest: +SKIP
+
+    """
+    _cmd = '3dDot'
+    input_spec = DotInputSpec
+    output_spec = AFNICommandOutputSpec
+
+
+class Edge3InputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file to 3dedge3',
+        argstr='-input %s',
+        position=0,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    out_file = File(
+        desc='output image file name', position=-1, argstr='-prefix %s')
+    datum = traits.Enum(
+        'byte',
+        'short',
+        'float',
+        argstr='-datum %s',
+        desc='specify data type for output. Valid types are \'byte\', '
+        '\'short\' and \'float\'.')
+    fscale = traits.Bool(
+        desc='Force scaling of the output to the maximum integer range.',
+        argstr='-fscale',
+        xor=['gscale', 'nscale', 'scale_floats'])
+    gscale = traits.Bool(
+        desc='Same as \'-fscale\', but also forces each output sub-brick '
+        'to get the same scaling factor.',
+        argstr='-gscale',
+        xor=['fscale', 'nscale', 'scale_floats'])
+    nscale = traits.Bool(
+        desc='Don\'t do any scaling on output to byte or short datasets.',
+        argstr='-nscale',
+        xor=['fscale', 'gscale', 'scale_floats'])
+    scale_floats = traits.Float(
+        desc='Multiply input by VAL, but only if the input datum is '
+        'float. This is needed when the input dataset '
+        'has a small range, like 0 to 2.0 for instance. '
+        'With such a range, very few edges are detected due to '
+        'what I suspect to be truncation problems. '
+        'Multiplying such a dataset by 10000 fixes the problem '
+        'and the scaling is undone at the output.',
+        argstr='-scale_floats %f',
+        xor=['fscale', 'gscale', 'nscale'])
+    verbose = traits.Bool(
+        desc='Print out some information along the way.', argstr='-verbose')
+
+
+class Edge3(AFNICommand):
+    """Does 3D Edge detection using the library 3DEdge
+    by Gregoire Malandain (gregoire.malandain@sophia.inria.fr).
+
+    For complete details, see the `3dedge3 Documentation.
+    `_
+
+    references_ = [{'entry': BibTeX('@article{Deriche1987,'
+                                    'author={R. Deriche},'
+                                    'title={Optimal edge detection using recursive filtering},'
+                                    'journal={International Journal of Computer Vision},'
+                                    'volume={2},',
+                                    'pages={167-187},'
+                                    'year={1987},'
+                                    '}'),
+                    'tags': ['method'],
+                    },
+                   {'entry': BibTeX('@article{MongaDericheMalandainCocquerez1991,'
+                                    'author={O. Monga, R. Deriche, G. Malandain, J.P. 
Cocquerez},' + 'title={Recursive filtering and edge tracking: two primary tools for 3D edge detection},' + 'journal={Image and vision computing},' + 'volume={9},', + 'pages={203-214},' + 'year={1991},' + '}'), + 'tags': ['method'], + }, + ] + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> edge3 = afni.Edge3() + >>> edge3.inputs.in_file = 'functional.nii' + >>> edge3.inputs.out_file = 'edges.nii' + >>> edge3.inputs.datum = 'byte' + >>> edge3.cmdline + '3dedge3 -input functional.nii -datum byte -prefix edges.nii' + >>> res = edge3.run() # doctest: +SKIP + + """ + + _cmd = '3dedge3' + input_spec = Edge3InputSpec + output_spec = AFNICommandOutputSpec + + +class EvalInputSpec(AFNICommandInputSpec): + in_file_a = File( + desc='input file to 1deval', + argstr='-a %s', + position=0, + mandatory=True, + exists=True) + in_file_b = File( + desc='operand file to 1deval', argstr='-b %s', position=1, exists=True) + in_file_c = File( + desc='operand file to 1deval', argstr='-c %s', position=2, exists=True) + out_file = File( + name_template='%s_calc', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file_a') + out1D = traits.Bool(desc='output in 1D', argstr='-1D') + expr = Str(desc='expr', argstr='-expr "%s"', position=3, mandatory=True) + start_idx = traits.Int( + desc='start index for in_file_a', requires=['stop_idx']) + stop_idx = traits.Int( + desc='stop index for in_file_a', requires=['start_idx']) + single_idx = traits.Int(desc='volume index for in_file_a') + other = File(desc='other options', argstr='') + + +class Eval(AFNICommand): + """Evaluates an expression that may include columns of data from one or + more text files. + + For complete details, see the `1deval Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> eval = afni.Eval() + >>> eval.inputs.in_file_a = 'seed.1D' + >>> eval.inputs.in_file_b = 'resp.1D' + >>> eval.inputs.expr = 'a*b' + >>> eval.inputs.out1D = True + >>> eval.inputs.out_file = 'data_calc.1D' + >>> eval.cmdline + '1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D' + >>> res = eval.run() # doctest: +SKIP + + """ + + _cmd = '1deval' + input_spec = EvalInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'in_file_a': + arg = trait_spec.argstr % value + if isdefined(self.inputs.start_idx): + arg += '[%d..%d]' % (self.inputs.start_idx, + self.inputs.stop_idx) + if isdefined(self.inputs.single_idx): + arg += '[%d]' % (self.inputs.single_idx) + return arg + return super(Eval, self)._format_arg(name, trait_spec, value) + + def _parse_inputs(self, skip=None): + """Skip the arguments without argstr metadata + """ + return super( + Eval, self)._parse_inputs(skip=('start_idx', 'stop_idx', 'other')) + + +class FWHMxInputSpec(CommandLineInputSpec): + in_file = File( + desc='input dataset', argstr='-input %s', mandatory=True, exists=True) + out_file = File( + argstr='> %s', + name_source='in_file', + name_template='%s_fwhmx.out', + position=-1, + keep_extension=False, + desc='output file') + out_subbricks = File( + argstr='-out %s', + name_source='in_file', + name_template='%s_subbricks.out', + keep_extension=False, + desc='output file listing the subbricks FWHM') + mask = File( + desc='use only voxels that are nonzero in mask', + argstr='-mask %s', + exists=True) + automask = traits.Bool( + False, + usedefault=True, + argstr='-automask', + desc='compute a mask from THIS dataset, a la 3dAutomask') + detrend = traits.Either( 
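+        # NOTE: this trait accepts either a bool (enable detrending and
+        # let the program pick the order, q=NT/30) or an int giving an
+        # explicit detrending order; _format_arg() below renders each form.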
+ traits.Bool(), + traits.Int(), + default=False, + argstr='-detrend', + xor=['demed'], + usedefault=True, + desc='instead of demed (0th order detrending), detrend to the ' + 'specified order. If order is not given, the program picks ' + 'q=NT/30. -detrend disables -demed, and includes -unif.') + demed = traits.Bool( + False, + argstr='-demed', + xor=['detrend'], + desc='If the input dataset has more than one sub-brick (e.g., has a ' + 'time axis), then subtract the median of each voxel\'s time ' + 'series before processing FWHM. This will tend to remove ' + 'intrinsic spatial structure and leave behind the noise.') + unif = traits.Bool( + False, + argstr='-unif', + desc='If the input dataset has more than one sub-brick, then ' + 'normalize each voxel\'s time series to have the same MAD before ' + 'processing FWHM.') + out_detrend = File( + argstr='-detprefix %s', + name_source='in_file', + name_template='%s_detrend', + keep_extension=False, + desc='Save the detrended file into a dataset') + geom = traits.Bool( + argstr='-geom', + xor=['arith'], + desc='if in_file has more than one sub-brick, compute the final ' + 'estimate as the geometric mean of the individual sub-brick FWHM ' + 'estimates') + arith = traits.Bool( + argstr='-arith', + xor=['geom'], + desc='if in_file has more than one sub-brick, compute the final ' + 'estimate as the arithmetic mean of the individual sub-brick ' + 'FWHM estimates') + combine = traits.Bool( + argstr='-combine', + desc='combine the final measurements along each axis') + compat = traits.Bool( + argstr='-compat', desc='be compatible with the older 3dFWHM') + acf = traits.Either( + traits.Bool(), + File(), + traits.Tuple(File(exists=True), traits.Float()), + default=False, + usedefault=True, + argstr='-acf', + desc='computes the spatial autocorrelation') + + +class FWHMxOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output file') + out_subbricks = File(exists=True, desc='output file (subbricks)') + out_detrend = File(desc='output file, detrended') + fwhm = traits.Either( + traits.Tuple(traits.Float(), traits.Float(), traits.Float()), + traits.Tuple(traits.Float(), traits.Float(), traits.Float(), + traits.Float()), + desc='FWHM along each axis') + acf_param = traits.Either( + traits.Tuple(traits.Float(), traits.Float(), traits.Float()), + traits.Tuple(traits.Float(), traits.Float(), traits.Float(), + traits.Float()), + desc='fitted ACF model parameters') + out_acf = File(exists=True, desc='output acf file') + + +class FWHMx(AFNICommandBase): + """ + Unlike the older 3dFWHM, this program computes FWHMs for all sub-bricks + in the input dataset, each one separately. The output for each one is + written to the file specified by '-out'. The mean (arithmetic or geometric) + of all the FWHMs along each axis is written to stdout. (A non-positive + output value indicates something bad happened; e.g., FWHM in z is meaningless + for a 2D dataset; the estimation method computed incoherent intermediate results.) + + For complete details, see the `3dFWHMx Documentation. + `_ + + Examples + -------- + + >>> from nipype.interfaces import afni + >>> fwhm = afni.FWHMx() + >>> fwhm.inputs.in_file = 'functional.nii' + >>> fwhm.cmdline + '3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out' + >>> res = fwhm.run() # doctest: +SKIP + + + (Classic) METHOD: + + * Calculate ratio of variance of first differences to data variance. + * Should be the same as 3dFWHM for a 1-brick dataset. 
+      (But the output format is simpler to use in a script.)
+
+
+    .. note:: IMPORTANT NOTE [AFNI > 16]
+
+      A completely new method for estimating and using noise smoothness values is
+      now available in 3dFWHMx and 3dClustSim. This method is implemented in the
+      '-acf' options to both programs. 'ACF' stands for (spatial) AutoCorrelation
+      Function, and it is estimated by calculating moments of differences out to
+      a larger radius than before.
+
+      Notably, real FMRI data does not actually have a Gaussian-shaped ACF, so the
+      estimated ACF is then fit (in 3dFWHMx) to a mixed model (Gaussian plus
+      mono-exponential) of the form
+
+      .. math::
+
+        ACF(r) = a * exp(-r*r/(2*b*b)) + (1-a)*exp(-r/c)
+
+      where :math:`r` is the radius, and :math:`a, b, c` are the fitted parameters.
+      The apparent FWHM from this model is usually somewhat larger in real data
+      than the FWHM estimated from just the nearest-neighbor differences used
+      in the 'classic' analysis.
+
+      The longer tails provided by the mono-exponential are also significant.
+      3dClustSim has also been modified to use the ACF model given above to generate
+      noise random fields.
+
+
+    .. note:: TL;DR or summary
+
+      The take-away message is that the 'classic' 3dFWHMx and
+      3dClustSim analysis, using a pure Gaussian ACF, is not very correct for
+      FMRI data -- I cannot speak for PET or MEG data.
+
+
+    .. warning::
+
+      Do NOT use 3dFWHMx on the statistical results (e.g., '-bucket') from
+      3dDeconvolve or 3dREMLfit!!! The function of 3dFWHMx is to estimate
+      the smoothness of the time series NOISE, not of the statistics. This
+      proscription is especially true if you plan to use 3dClustSim next!!
+
+
+    .. note:: Recommendations
+
+      * For FMRI statistical purposes, you DO NOT want the FWHM to reflect
+        the spatial structure of the underlying anatomy. Rather, you want
+        the FWHM to reflect the spatial structure of the noise. This means
+        that the input dataset should not have anatomical (spatial) structure.
+      * One good form of input is the output of '3dDeconvolve -errts', which is
+        the dataset of residuals left over after the GLM fitted signal model is
+        subtracted out from each voxel's time series.
+      * If you don't want to go to that much trouble, use '-detrend' to approximately
+        subtract out the anatomical spatial structure, OR use the output of 3dDetrend
+        for the same purpose.
+      * If you do not use '-detrend', the program attempts to find non-zero spatial
+        structure in the input, and will print a warning message if it is detected.
+
+
+    .. note:: Notes on -demed
+
+      * I recommend this option, and it is not the default only for historical
+        compatibility reasons. It may become the default someday.
+      * It is already the default in program 3dBlurToFWHM. This is the same detrending
+        as done in 3dDespike; using 2*q+3 basis functions for q > 0.
+      * If you don't use '-detrend', the program now [Aug 2010] checks if a large number
+        of voxels have significant nonzero means. If so, the program will print a
+        warning message suggesting the use of '-detrend', since inherent spatial
+        structure in the image will bias the estimation of the FWHM of the image time
+        series NOISE (which is usually the point of using 3dFWHMx).
+
+
+    """
+    _cmd = '3dFWHMx'
+    input_spec = FWHMxInputSpec
+    output_spec = FWHMxOutputSpec
+
+    references_ = [
+        {
+            'entry':
+            BibTeX('@article{CoxReynoldsTaylor2016,'
+                   'author={R.W. Cox, R.C. Reynolds, and P.A. 
Taylor},' + 'title={AFNI and clustering: false positive rates redux},' + 'journal={bioRxiv},' + 'year={2016},' + '}'), + 'tags': ['method'], + }, + ] + _acf = True + + def _parse_inputs(self, skip=None): + if not self.inputs.detrend: + if skip is None: + skip = [] + skip += ['out_detrend'] + return super(FWHMx, self)._parse_inputs(skip=skip) + + def _format_arg(self, name, trait_spec, value): + if name == 'detrend': + if isinstance(value, bool): + if value: + return trait_spec.argstr + else: + return None + elif isinstance(value, int): + return trait_spec.argstr + ' %d' % value + + if name == 'acf': + if isinstance(value, bool): + if value: + return trait_spec.argstr + else: + self._acf = False + return None + elif isinstance(value, tuple): + return trait_spec.argstr + ' %s %f' % value + elif isinstance(value, (str, bytes)): + return trait_spec.argstr + ' ' + value + return super(FWHMx, self)._format_arg(name, trait_spec, value) + + def _list_outputs(self): + outputs = super(FWHMx, self)._list_outputs() + + if self.inputs.detrend: + fname, ext = op.splitext(self.inputs.in_file) + if '.gz' in ext: + _, ext2 = op.splitext(fname) + ext = ext2 + ext + outputs['out_detrend'] += ext + else: + outputs['out_detrend'] = Undefined + + sout = np.loadtxt(outputs['out_file']) + + # handle newer versions of AFNI + if sout.size == 8: + outputs['fwhm'] = tuple(sout[0, :]) + else: + outputs['fwhm'] = tuple(sout) + + if self._acf: + assert sout.size == 8, "Wrong number of elements in %s" % str(sout) + outputs['acf_param'] = tuple(sout[1]) + + outputs['out_acf'] = op.abspath('3dFWHMx.1D') + if isinstance(self.inputs.acf, (str, bytes)): + outputs['out_acf'] = op.abspath(self.inputs.acf) + + return outputs + + +class LocalBistatInputSpec(AFNICommandInputSpec): + in_file1 = File( + exists=True, + mandatory=True, + argstr='%s', + position=-2, + desc='Filename of the first image') + in_file2 = File( + exists=True, + mandatory=True, + argstr='%s', + position=-1, + desc='Filename of the second image') + neighborhood = traits.Either( + traits.Tuple(traits.Enum('SPHERE', 'RHDD', 'TOHD'), traits.Float()), + traits.Tuple(traits.Enum('RECT'), traits.Tuple(traits.Float(), + traits.Float(), + traits.Float())), + mandatory=True, + desc='The region around each voxel that will be extracted for ' + 'the statistics calculation. Possible regions are: ' + '\'SPHERE\', \'RHDD\' (rhombic dodecahedron), \'TOHD\' ' + '(truncated octahedron) with a given radius in mm or ' + '\'RECT\' (rectangular block) with dimensions to specify in mm.', + argstr="-nbhd '%s(%s)'") + _stat_names = ['pearson', 'spearman', 'quadrant', 'mutinfo', 'normuti', + 'jointent', 'hellinger', 'crU', 'crM', 'crA', 'L2slope', + 'L1slope', 'num', 'ALL'] + stat = InputMultiPath( + traits.Enum(_stat_names), + mandatory=True, + desc='statistics to compute. Possible names are :' + ' * pearson = Pearson correlation coefficient' + ' * spearman = Spearman correlation coefficient' + ' * quadrant = Quadrant correlation coefficient' + ' * mutinfo = Mutual Information' + ' * normuti = Normalized Mutual Information' + ' * jointent = Joint entropy' + ' * hellinger= Hellinger metric' + ' * crU = Correlation ratio (Unsymmetric)' + ' * crM = Correlation ratio (symmetrized by Multiplication)' + ' * crA = Correlation ratio (symmetrized by Addition)' + ' * L2slope = slope of least-squares (L2) linear regression of ' + ' the data from dataset1 vs. 
the dataset2 '
+        '             (i.e., d2 = a + b*d1 ==> this is \'b\')'
+        ' * L1slope  = slope of least-absolute-sum (L1) linear '
+        '             regression of the data from dataset1 vs. '
+        '             the dataset2'
+        ' * num      = number of the values in the region: '
+        '             with the use of -mask or -automask, '
+        '             the size of the region around any given '
+        '             voxel will vary; this option lets you '
+        '             map that size.'
+        ' * ALL      = all of the above, in that order. '
+        'More than one option can be used.',
+        argstr='-stat %s...')
+    mask_file = traits.File(
+        exists=True,
+        desc='mask image file name. Voxels NOT in the mask will not be used '
+        'in the neighborhood of any voxel. Also, a voxel NOT in the mask '
+        'will have its statistic(s) computed as zero (0).',
+        argstr='-mask %s')
+    automask = traits.Bool(
+        desc='Compute the mask as in program 3dAutomask.',
+        argstr='-automask',
+        xor=['weight_file'])
+    weight_file = traits.File(
+        exists=True,
+        desc='File name of an image to use as a weight. Only applies to '
+        '\'pearson\' statistics.',
+        argstr='-weight %s',
+        xor=['automask'])
+    out_file = traits.File(
+        desc='Output dataset.',
+        argstr='-prefix %s',
+        name_source='in_file1',
+        name_template='%s_bistat',
+        keep_extension=True,
+        position=0)
+
+
+class LocalBistat(AFNICommand):
+    """3dLocalBistat - computes statistics between 2 datasets, at each voxel,
+    based on a local neighborhood of that voxel.
+
+    For complete details, see the `3dLocalBistat Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> bistat = afni.LocalBistat()
+    >>> bistat.inputs.in_file1 = 'functional.nii'
+    >>> bistat.inputs.in_file2 = 'structural.nii'
+    >>> bistat.inputs.neighborhood = ('SPHERE', 1.2)
+    >>> bistat.inputs.stat = 'pearson'
+    >>> bistat.inputs.outputtype = 'NIFTI'
+    >>> bistat.cmdline
+    "3dLocalBistat -prefix functional_bistat.nii -nbhd 'SPHERE(1.2)' -stat pearson functional.nii structural.nii"
+    >>> res = bistat.run() # doctest: +SKIP
+
+    """
+
+    _cmd = '3dLocalBistat'
+    input_spec = LocalBistatInputSpec
+    output_spec = AFNICommandOutputSpec
+
+    def _format_arg(self, name, spec, value):
+        if name == 'neighborhood' and value[0] == 'RECT':
+            # 'RECT' takes three dimensions, rendered as 'RECT(a,b,c)'
+            value = ('RECT', '%s,%s,%s' % value[1])
+
+        return super(LocalBistat, self)._format_arg(name, spec, value)
+
+
+class MaskToolInputSpec(AFNICommandInputSpec):
+    in_file = File(
+        desc='input file or files to 3dmask_tool',
+        argstr='-input %s',
+        position=-1,
+        mandatory=True,
+        exists=True,
+        copyfile=False)
+    out_file = File(
+        name_template='%s_mask',
+        desc='output image file name',
+        argstr='-prefix %s',
+        name_source='in_file')
+    count = traits.Bool(
+        desc='Instead of creating a binary 0/1 mask dataset, create one with '
+        'counts of voxel overlap, i.e., each voxel will contain the '
+        'number of masks that it is set in.',
+        argstr='-count',
+        position=2)
+    datum = traits.Enum(
+        'byte',
+        'short',
+        'float',
+        argstr='-datum %s',
+        desc='specify data type for output. Valid types are \'byte\', '
+        '\'short\' and \'float\'.')
+    dilate_inputs = Str(
+        desc='Use this option to dilate and/or erode datasets as they are '
+        'read, e.g. \'5 -5\' to dilate and erode 5 times',
+        argstr='-dilate_inputs %s')
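+    # NOTE: in dilate_inputs, positive counts request dilations and
+    # negative counts request erosions, applied in the order given
+    # (so '5 -5' above means 5 dilations followed by 5 erosions).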
    dilate_results = Str(
+        desc='dilate and/or erode combined mask at the given levels.',
+        argstr='-dilate_results %s')
+    frac = traits.Float(
+        desc='When combining masks (across datasets and sub-bricks), use '
+        'this option to restrict the result to a certain fraction of the '
+        'set of volumes',
+        argstr='-frac %s')
+    inter = traits.Bool(
+        desc='intersection, this means -frac 1.0', argstr='-inter')
+    union = traits.Bool(desc='union, this means -frac 0', argstr='-union')
+    fill_holes = traits.Bool(
+        desc='This option can be used to fill holes in the resulting mask, '
+        'i.e. after all other processing has been done.',
+        argstr='-fill_holes')
+    fill_dirs = Str(
+        desc='fill holes only in the given directions. This option is for use '
+        'with -fill_holes. Should be a single string that specifies '
+        '1-3 of the axes using {x,y,z} labels (i.e. dataset axis order), '
+        'or using the labels in {R,L,A,P,I,S}.',
+        argstr='-fill_dirs %s',
+        requires=['fill_holes'])
+    verbose = traits.Int(
+        desc='specify verbosity level, from 0 to 3', argstr='-verb %s')
+
+
+class MaskToolOutputSpec(TraitedSpec):
+    out_file = File(desc='mask file', exists=True)
+
+
+class MaskTool(AFNICommand):
+    """3dmask_tool - for combining/dilating/eroding/filling masks
+
+    For complete details, see the `3dmask_tool Documentation.
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> masktool = afni.MaskTool()
+    >>> masktool.inputs.in_file = 'functional.nii'
+    >>> masktool.inputs.outputtype = 'NIFTI'
+    >>> masktool.cmdline
+    '3dmask_tool -prefix functional_mask.nii -input functional.nii'
+    >>> res = masktool.run() # doctest: +SKIP
+
+    """
+
+    _cmd = '3dmask_tool'
+    input_spec = MaskToolInputSpec
+    output_spec = MaskToolOutputSpec
+
+
+class MergeInputSpec(AFNICommandInputSpec):
+    in_files = InputMultiPath(
+        File(desc='input file to 3dmerge', exists=True),
+        argstr='%s',
+        position=-1,
+        mandatory=True,
+        copyfile=False)
+    out_file = File(
+        name_template='%s_merge',
+        desc='output image file name',
+        argstr='-prefix %s',
+        name_source='in_files')
+    doall = traits.Bool(
+        desc='apply options to all sub-bricks in dataset', argstr='-doall')
+    blurfwhm = traits.Int(
+        desc='FWHM blur value (mm)', argstr='-1blur_fwhm %d', units='mm')
+
+
+class Merge(AFNICommand):
+    """Merge or edit volumes using AFNI 3dmerge command
+
+    For complete details, see the `3dmerge Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> merge = afni.Merge() + >>> merge.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> merge.inputs.blurfwhm = 4 + >>> merge.inputs.doall = True + >>> merge.inputs.out_file = 'e7.nii' + >>> merge.cmdline + '3dmerge -1blur_fwhm 4 -doall -prefix e7.nii functional.nii functional2.nii' + >>> res = merge.run() # doctest: +SKIP + + """ + + _cmd = '3dmerge' + input_spec = MergeInputSpec + output_spec = AFNICommandOutputSpec + + +class NotesInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dNotes', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + add = Str(desc='note to add', argstr='-a "%s"') + add_history = Str( + desc='note to add to history', argstr='-h "%s"', xor=['rep_history']) + rep_history = Str( + desc='note with which to replace history', + argstr='-HH "%s"', + xor=['add_history']) + delete = traits.Int(desc='delete note number num', argstr='-d %d') + ses = traits.Bool(desc='print to stdout the expanded notes', argstr='-ses') + out_file = File(desc='output image file name', argstr='%s') + + +class Notes(CommandLine): + """A program to add, delete, and show notes for AFNI datasets. + + For complete details, see the `3dNotes Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> notes = afni.Notes() + >>> notes.inputs.in_file = 'functional.HEAD' + >>> notes.inputs.add = 'This note is added.' + >>> notes.inputs.add_history = 'This note is added to history.' + >>> notes.cmdline + '3dNotes -a "This note is added." -h "This note is added to history." functional.HEAD' + >>> res = notes.run() # doctest: +SKIP + """ + + _cmd = '3dNotes' + input_spec = NotesInputSpec + output_spec = AFNICommandOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.in_file) + return outputs + + +class NwarpAdjustInputSpec(AFNICommandInputSpec): + warps = InputMultiPath( + File(exists=True), + minlen=5, + mandatory=True, + argstr='-nwarp %s', + desc='List of input 3D warp datasets') + in_files = InputMultiPath( + File(exists=True), + minlen=5, + argstr='-source %s', + desc='List of input 3D datasets to be warped by the adjusted warp ' + 'datasets. There must be exactly as many of these datasets as ' + 'there are input warps.') + out_file = File( + desc='Output mean dataset, only needed if in_files are also given. ' + 'The output dataset will be on the common grid shared by the ' + 'source datasets.', + argstr='-prefix %s', + name_source='in_files', + name_template='%s_NwarpAdjust', + keep_extension=True, + requires=['in_files']) + + +class NwarpAdjust(AFNICommandBase): + """This program takes as input a bunch of 3D warps, averages them, + and computes the inverse of this average warp. It then composes + each input warp with this inverse average to 'adjust' the set of + warps. Optionally, it can also read in a set of 1-brick datasets + corresponding to the input warps, and warp each of them, and average + those. + + For complete details, see the `3dNwarpAdjust Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> adjust = afni.NwarpAdjust() + >>> adjust.inputs.warps = ['func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz'] + >>> adjust.cmdline + '3dNwarpAdjust -nwarp func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz' + >>> res = adjust.run() # doctest: +SKIP + + """ + _cmd = '3dNwarpAdjust' + input_spec = NwarpAdjustInputSpec + output_spec = AFNICommandOutputSpec + + def _parse_inputs(self, skip=None): + if not self.inputs.in_files: + if skip is None: + skip = [] + skip += ['out_file'] + return super(NwarpAdjust, self)._parse_inputs(skip=skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + + if self.inputs.in_files: + if self.inputs.out_file: + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + else: + basename = os.path.basename(self.inputs.in_files[0]) + basename_noext, ext = op.splitext(basename) + if '.gz' in ext: + basename_noext, ext2 = op.splitext(basename_noext) + ext = ext2 + ext + outputs['out_file'] = os.path.abspath( + basename_noext + '_NwarpAdjust' + ext) + return outputs + + +class NwarpApplyInputSpec(CommandLineInputSpec): + in_file = traits.Either( + File(exists=True), + traits.List(File(exists=True)), + mandatory=True, + argstr='-source %s', + desc='the name of the dataset to be warped ' + 'can be multiple datasets') + warp = traits.String( + desc='the name of the warp dataset. ' + 'multiple warps can be concatenated (make sure they exist)', + argstr='-nwarp %s', + mandatory=True) + inv_warp = traits.Bool( + desc='After the warp specified in \'-nwarp\' is computed, invert it', + argstr='-iwarp') + master = traits.File( + exists=True, + desc='the name of the master dataset, which defines the output grid', + argstr='-master %s') + interp = traits.Enum( + 'wsinc5', + 'NN', + 'nearestneighbour', + 'nearestneighbor', + 'linear', + 'trilinear', + 'cubic', + 'tricubic', + 'quintic', + 'triquintic', + desc='defines interpolation method to use during warp', + argstr='-interp %s', + usedefault=True) + ainterp = traits.Enum( + 'NN', + 'nearestneighbour', + 'nearestneighbor', + 'linear', + 'trilinear', + 'cubic', + 'tricubic', + 'quintic', + 'triquintic', + 'wsinc5', + desc='specify a different interpolation method than might ' + 'be used for the warp', + argstr='-ainterp %s') + out_file = File( + name_template='%s_Nwarp', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + short = traits.Bool( + desc='Write output dataset using 16-bit short integers, rather than ' + 'the usual 32-bit floats.', + argstr='-short') + quiet = traits.Bool( + desc='don\'t be verbose :(', argstr='-quiet', xor=['verb']) + verb = traits.Bool( + desc='be extra verbose :)', argstr='-verb', xor=['quiet']) + + +class NwarpApply(AFNICommandBase): + """Program to apply a nonlinear 3D warp saved from 3dQwarp + (or 3dNwarpCat, etc.) to a 3D dataset, to produce a warped + version of the source dataset. + + For complete details, see the `3dNwarpApply Documentation. 
+    `_
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces import afni
+    >>> nwarp = afni.NwarpApply()
+    >>> nwarp.inputs.in_file = 'Fred+orig'
+    >>> nwarp.inputs.master = 'NWARP'
+    >>> nwarp.inputs.warp = "'Fred_WARP+tlrc Fred.Xaff12.1D'"
+    >>> nwarp.cmdline
+    "3dNwarpApply -source Fred+orig -interp wsinc5 -master NWARP -prefix Fred+orig_Nwarp -nwarp \'Fred_WARP+tlrc Fred.Xaff12.1D\'"
+    >>> res = nwarp.run() # doctest: +SKIP
+
+    """
+    _cmd = '3dNwarpApply'
+    input_spec = NwarpApplyInputSpec
+    output_spec = AFNICommandOutputSpec
+
+
+class NwarpCatInputSpec(AFNICommandInputSpec):
+    in_files = traits.List(
+        traits.Either(traits.File(),
+                      traits.Tuple(
+                          traits.Enum('IDENT', 'INV', 'SQRT', 'SQRTINV'),
+                          traits.File())),
+        desc="list of tuples of 3D warps and associated functions",
+        mandatory=True,
+        argstr="%s",
+        position=-1)
+    space = traits.String(
+        desc='string to attach to the output dataset as its atlas space '
+        'marker.',
+        argstr='-space %s')
+    inv_warp = traits.Bool(
+        desc='invert the final warp before output', argstr='-iwarp')
+    interp = traits.Enum(
+        'wsinc5',
+        'linear',
+        'quintic',
+        desc='specify a different interpolation method than might '
+        'be used for the warp',
+        argstr='-interp %s',
+        usedefault=True)
+    expad = traits.Int(
+        desc='Pad the nonlinear warps by the given number of voxels in '
+        'all directions. The warp displacements are extended by linear '
+        'extrapolation from the faces of the input grid.',
+        argstr='-expad %d')
+    out_file = File(
+        name_template='%s_NwarpCat',
+        desc='output image file name',
+        argstr='-prefix %s',
+        name_source='in_files')
+    verb = traits.Bool(desc='be verbose', argstr='-verb')
+
+
+class NwarpCat(AFNICommand):
+    """Catenates (composes) 3D warps defined on a grid, OR via a matrix.
+
+    .. note::
+
+      * All transformations are from DICOM xyz (in mm) to DICOM xyz.
+
+      * Matrix warps are in files that end in '.1D' or in '.txt'. A matrix
+        warp file should have 12 numbers in it, as output (for example) by
+        '3dAllineate -1Dmatrix_save'.
+
+      * Nonlinear warps are in dataset files (AFNI .HEAD/.BRIK or NIfTI .nii)
+        with 3 sub-bricks giving the DICOM order xyz grid displacements in mm.
+
+      * If all the input warps are matrices, then the output is a matrix
+        and will be written to the file 'prefix.aff12.1D'.
+        Unless the prefix already contains the string '.1D', in which case
+        the filename is just the prefix.
+
+      * If 'prefix' is just 'stdout', then the output matrix is written
+        to standard output.
+        In any of these cases, the output format is 12 numbers in one row.
+
+      * If any of the input warps are datasets, they must all be defined on
+        the same 3D grid!
+        And of course, then the output will be a dataset on the same grid.
+        However, you can expand the grid using the '-expad' option.
+
+      * The order of operations in the final (output) warp is, for the
+        case of 3 input warps:
+
+          OUTPUT(x) = warp3( warp2( warp1(x) ) )
+
+        That is, warp1 is applied first, then warp2, et cetera.
+        The 3D x coordinates are taken from each grid location in the
+        first dataset defined on a grid.
+
+    For complete details, see the `3dNwarpCat Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> nwarpcat = afni.NwarpCat() + >>> nwarpcat.inputs.in_files = ['Q25_warp+tlrc.HEAD', ('IDENT', 'structural.nii')] + >>> nwarpcat.inputs.out_file = 'Fred_total_WARP' + >>> nwarpcat.cmdline + "3dNwarpCat -interp wsinc5 -prefix Fred_total_WARP Q25_warp+tlrc.HEAD 'IDENT(structural.nii)'" + >>> res = nwarpcat.run() # doctest: +SKIP + + """ + _cmd = '3dNwarpCat' + input_spec = NwarpCatInputSpec + output_spec = AFNICommandOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_files': + return spec.argstr % (' '.join([ + "'" + v[0] + "(" + v[1] + ")'" if isinstance(v, tuple) else v + for v in value + ])) + return super(NwarpCat, self)._format_arg(name, spec, value) + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname( + self.inputs.in_files[0][0], suffix='_NwarpCat') + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_file): + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + else: + outputs['out_file'] = os.path.abspath( + self._gen_fname( + self.inputs.in_files[0], + suffix='_NwarpCat+tlrc', + ext='.HEAD')) + return outputs + + +class OneDToolPyInputSpec(AFNIPythonCommandInputSpec): + in_file = File( + desc='input file to OneDTool', + argstr='-infile %s', + mandatory=True, + exists=True) + set_nruns = traits.Int( + desc='treat the input data as if it has nruns', argstr='-set_nruns %d') + derivative = traits.Bool( + desc= + 'take the temporal derivative of each vector (done as first backward difference)', + argstr='-derivative') + demean = traits.Bool( + desc='demean each run (new mean of each run = 0.0)', argstr='-demean') + out_file = File( + desc='write the current 1D data to FILE', + argstr='-write %s', + xor=['show_cormat_warnings']) + show_censor_count = traits.Bool( + desc= + 'display the total number of censored TRs Note : if input is a valid xmat.1D dataset, ' + 'then the count will come from the header. Otherwise the input is assumed to be a binary censor' + 'file, and zeros are simply counted.', + argstr="-show_censor_count") + censor_motion = traits.Tuple( + (traits.Float(), File()), + desc= + 'Tuple of motion limit and outfile prefix. need to also set set_nruns -r set_run_lengths', + argstr="-censor_motion %f %s") + censor_prev_TR = traits.Bool( + desc='for each censored TR, also censor previous', + argstr='-censor_prev_TR') + show_trs_uncensored = traits.Enum( + 'comma', + 'space', + 'encoded', + 'verbose', + desc= + 'display a list of TRs which were not censored in the specified style', + argstr='-show_trs_uncensored %s') + show_cormat_warnings = traits.File( + desc='Write cormat warnings to a file', + argstr="-show_cormat_warnings |& tee %s", + position=-1, + xor=['out_file']) + show_indices_interest = traits.Bool( + desc="display column indices for regs of interest", + argstr="-show_indices_interest") + show_trs_run = traits.Int( + desc="restrict -show_trs_[un]censored to the given 1-based run", + argstr="-show_trs_run %d") + + +class OneDToolPyOutputSpec(AFNICommandOutputSpec): + out_file = File(desc='output of 1D_tool.py') + + +class OneDToolPy(AFNIPythonCommand): + """This program is meant to read/manipulate/write/diagnose 1D datasets. + Input can be specified using AFNI sub-brick[]/time{} selectors. 
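+
+    For instance (an illustrative sketch, not a verified doctest), a
+    selector such as 'motion.1D{0..9}' would restrict processing to the
+    first ten time points of a hypothetical motion.1D file, following
+    the standard AFNI row-selection syntax.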
+ + >>> from nipype.interfaces import afni + >>> odt = afni.OneDToolPy() + >>> odt.inputs.in_file = 'f1.1D' + >>> odt.inputs.set_nruns = 3 + >>> odt.inputs.demean = True + >>> odt.inputs.out_file = 'motion_dmean.1D' + >>> odt.cmdline # doctest: +ELLIPSIS + 'python2 ...1d_tool.py -demean -infile f1.1D -write motion_dmean.1D -set_nruns 3' + >>> res = odt.run() # doctest: +SKIP +""" + + _cmd = '1d_tool.py' + + input_spec = OneDToolPyInputSpec + output_spec = OneDToolPyOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + if isdefined(self.inputs.out_file): + outputs['out_file'] = os.path.join(os.getcwd(), + self.inputs.out_file) + if isdefined(self.inputs.show_cormat_warnings): + outputs['out_file'] = os.path.join( + os.getcwd(), self.inputs.show_cormat_warnings) + if isdefined(self.inputs.censor_motion): + outputs['out_file'] = os.path.join(os.getcwd(), + self.inputs.censor_motion[1] + + '_censor.1D') + return outputs + + +class RefitInputSpec(CommandLineInputSpec): + in_file = File( + desc='input file to 3drefit', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=True) + deoblique = traits.Bool( + desc='replace current transformation matrix with cardinal matrix', + argstr='-deoblique') + xorigin = Str( + desc='x distance for edge voxel offset', argstr='-xorigin %s') + yorigin = Str( + desc='y distance for edge voxel offset', argstr='-yorigin %s') + zorigin = Str( + desc='z distance for edge voxel offset', argstr='-zorigin %s') + duporigin_file = File( + argstr='-duporigin %s', + exists=True, + desc='Copies the xorigin, yorigin, and zorigin values from the header ' + 'of the given dataset') + xdel = traits.Float(desc='new x voxel dimension in mm', argstr='-xdel %f') + ydel = traits.Float(desc='new y voxel dimension in mm', argstr='-ydel %f') + zdel = traits.Float(desc='new z voxel dimension in mm', argstr='-zdel %f') + xyzscale = traits.Float( + desc='Scale the size of the dataset voxels by the given factor', + argstr='-xyzscale %f') + space = traits.Enum( + 'TLRC', + 'MNI', + 'ORIG', + argstr='-space %s', + desc='Associates the dataset with a specific template type, e.g. ' + 'TLRC, MNI, ORIG') + atrcopy = traits.Tuple( + traits.File(exists=True), + traits.Str(), + argstr='-atrcopy %s %s', + desc='Copy AFNI header attribute from the given file into the header ' + 'of the dataset(s) being modified. For more information on AFNI ' + 'header attributes, see documentation file README.attributes. ' + 'More than one \'-atrcopy\' option can be used. For AFNI ' + 'advanced users only. Do NOT use -atrcopy or -atrstring with ' + 'other modification options. See also -copyaux.') + atrstring = traits.Tuple( + traits.Str(), + traits.Str(), + argstr='-atrstring %s %s', + desc='Copy the last given string into the dataset(s) being modified, ' + 'giving it the attribute name given by the last string.' + 'To be safe, the last string should be in quotes.') + atrfloat = traits.Tuple( + traits.Str(), + traits.Str(), + argstr='-atrfloat %s %s', + desc='Create or modify floating point attributes. ' + 'The input values may be specified as a single string in quotes ' + 'or as a 1D filename or string, example ' + '\'1 0.2 0 0 -0.2 1 0 0 0 0 1 0\' or ' + 'flipZ.1D or \'1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0\'') + atrint = traits.Tuple( + traits.Str(), + traits.Str(), + argstr='-atrint %s %s', + desc='Create or modify integer attributes. 
' + 'The input values may be specified as a single string in quotes ' + 'or as a 1D filename or string, example ' + '\'1 0 0 0 0 1 0 0 0 0 1 0\' or ' + 'flipZ.1D or \'1D:1,0,2@0,-0,1,2@0,2@0,1,0\'') + saveatr = traits.Bool( + argstr='-saveatr', + desc='(default) Copy the attributes that are known to AFNI into ' + 'the dset->dblk structure thereby forcing changes to known ' + 'attributes to be present in the output. This option only makes ' + 'sense with -atrcopy.') + nosaveatr = traits.Bool(argstr='-nosaveatr', desc='Opposite of -saveatr') + + +class Refit(AFNICommandBase): + """Changes some of the information inside a 3D dataset's header + + For complete details, see the `3drefit Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> refit = afni.Refit() + >>> refit.inputs.in_file = 'structural.nii' + >>> refit.inputs.deoblique = True + >>> refit.cmdline + '3drefit -deoblique structural.nii' + >>> res = refit.run() # doctest: +SKIP + + >>> refit_2 = afni.Refit() + >>> refit_2.inputs.in_file = 'structural.nii' + >>> refit_2.inputs.atrfloat = ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0'") + >>> refit_2.cmdline + "3drefit -atrfloat IJK_TO_DICOM_REAL '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' structural.nii" + >>> res = refit_2.run() # doctest: +SKIP + """ + _cmd = '3drefit' + input_spec = RefitInputSpec + output_spec = AFNICommandOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.in_file) + return outputs + + +class ResampleInputSpec(AFNICommandInputSpec): + + in_file = File( + desc='input file to 3dresample', + argstr='-inset %s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_resample', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + orientation = Str(desc='new orientation code', argstr='-orient %s') + resample_mode = traits.Enum( + 'NN', + 'Li', + 'Cu', + 'Bk', + argstr='-rmode %s', + desc='resampling method from set {"NN", "Li", "Cu", "Bk"}. These are ' + 'for "Nearest Neighbor", "Linear", "Cubic" and "Blocky"' + 'interpolation, respectively. Default is NN.') + voxel_size = traits.Tuple( + *[traits.Float()] * 3, + argstr='-dxyz %f %f %f', + desc='resample to new dx, dy and dz') + master = traits.File( + argstr='-master %s', desc='align dataset grid to a reference file') + + +class Resample(AFNICommand): + """Resample or reorient an image using AFNI 3dresample command + + For complete details, see the `3dresample Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> resample = afni.Resample() + >>> resample.inputs.in_file = 'functional.nii' + >>> resample.inputs.orientation= 'RPI' + >>> resample.inputs.outputtype = 'NIFTI' + >>> resample.cmdline + '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii' + >>> res = resample.run() # doctest: +SKIP + + """ + + _cmd = '3dresample' + input_spec = ResampleInputSpec + output_spec = AFNICommandOutputSpec + + +class TCatInputSpec(AFNICommandInputSpec): + in_files = InputMultiPath( + File(exists=True), + desc='input file to 3dTcat', + argstr=' %s', + position=-1, + mandatory=True, + copyfile=False) + out_file = File( + name_template='%s_tcat', + desc='output image file name', + argstr='-prefix %s', + name_source='in_files') + rlt = traits.Enum( + '', + '+', + '++', + argstr='-rlt%s', + desc='Remove linear trends in each voxel time series loaded from each ' + 'input dataset, SEPARATELY. 
Option -rlt removes the least squares ' + 'fit of \'a+b*t\' to each voxel time series. Option -rlt+ adds ' + 'dataset mean back in. Option -rlt++ adds overall mean of all ' + 'dataset timeseries back in.', + position=1) + verbose = traits.Bool( + desc='Print out some verbose output as the program', argstr='-verb') + + +class TCat(AFNICommand): + """Concatenate sub-bricks from input datasets into one big 3D+time dataset. + + TODO Replace InputMultiPath in_files with Traits.List, if possible. Current + version adds extra whitespace. + + For complete details, see the `3dTcat Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tcat = afni.TCat() + >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii'] + >>> tcat.inputs.out_file= 'functional_tcat.nii' + >>> tcat.inputs.rlt = '+' + >>> tcat.cmdline + '3dTcat -rlt+ -prefix functional_tcat.nii functional.nii functional2.nii' + >>> res = tcat.run() # doctest: +SKIP + + """ + + _cmd = '3dTcat' + input_spec = TCatInputSpec + output_spec = AFNICommandOutputSpec + + +class TCatSBInputSpec(AFNICommandInputSpec): + in_files = traits.List( + traits.Tuple(File(exists=True), Str()), + desc='List of tuples of file names and subbrick selectors as strings.' + 'Don\'t forget to protect the single quotes in the subbrick selector' + 'so the contents are protected from the command line interpreter.', + argstr='%s%s ...', + position=-1, + mandatory=True, + copyfile=False) + out_file = File( + desc='output image file name', argstr='-prefix %s', genfile=True) + rlt = traits.Enum( + '', + '+', + '++', + argstr='-rlt%s', + desc='Remove linear trends in each voxel time series loaded from each ' + 'input dataset, SEPARATELY. Option -rlt removes the least squares ' + 'fit of \'a+b*t\' to each voxel time series. Option -rlt+ adds ' + 'dataset mean back in. Option -rlt++ adds overall mean of all ' + 'dataset timeseries back in.', + position=1) + + +class TCatSubBrick(AFNICommand): + """Hopefully a temporary function to allow sub-brick selection until + afni file managment is improved. + + For complete details, see the `3dTcat Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tcsb = afni.TCatSubBrick() + >>> tcsb.inputs.in_files = [('functional.nii', "'{2..$}'"), ('functional2.nii', "'{2..$}'")] + >>> tcsb.inputs.out_file= 'functional_tcat.nii' + >>> tcsb.inputs.rlt = '+' + >>> tcsb.cmdline + "3dTcat -rlt+ -prefix functional_tcat.nii functional.nii'{2..$}' functional2.nii'{2..$}' " + >>> res = tcsb.run() # doctest: +SKIP + + """ + + _cmd = '3dTcat' + input_spec = TCatSBInputSpec + output_spec = AFNICommandOutputSpec + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.in_files[0][0], suffix='_tcat') + + +class TStatInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dTstat', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_tstat', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + mask = File(desc='mask file', argstr='-mask %s', exists=True) + options = Str(desc='selected statistical output', argstr='%s') + + +class TStat(AFNICommand): + """Compute voxel-wise statistics using AFNI 3dTstat command + + For complete details, see the `3dTstat Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> tstat = afni.TStat() + >>> tstat.inputs.in_file = 'functional.nii' + >>> tstat.inputs.args = '-mean' + >>> tstat.inputs.out_file = 'stats' + >>> tstat.cmdline + '3dTstat -mean -prefix stats functional.nii' + >>> res = tstat.run() # doctest: +SKIP + + """ + + _cmd = '3dTstat' + input_spec = TStatInputSpec + output_spec = AFNICommandOutputSpec + + +class To3DInputSpec(AFNICommandInputSpec): + out_file = File( + name_template='%s', + desc='output image file name', + argstr='-prefix %s', + name_source=['in_folder']) + in_folder = Directory( + desc='folder with DICOM images to convert', + argstr='%s/*.dcm', + position=-1, + mandatory=True, + exists=True) + filetype = traits.Enum( + 'spgr', + 'fse', + 'epan', + 'anat', + 'ct', + 'spct', + 'pet', + 'mra', + 'bmap', + 'diff', + 'omri', + 'abuc', + 'fim', + 'fith', + 'fico', + 'fitt', + 'fift', + 'fizt', + 'fict', + 'fibt', + 'fibn', + 'figt', + 'fipt', + 'fbuc', + argstr='-%s', + desc='type of datafile being converted') + skipoutliers = traits.Bool( + desc='skip the outliers check', argstr='-skip_outliers') + assumemosaic = traits.Bool( + desc='assume that Siemens image is mosaic', + argstr='-assume_dicom_mosaic') + datatype = traits.Enum( + 'short', + 'float', + 'byte', + 'complex', + desc='set output file datatype', + argstr='-datum %s') + funcparams = Str( + desc='parameters for functional data', argstr='-time:zt %s alt+z2') + + +class To3D(AFNICommand): + """Create a 3D dataset from 2D image files using AFNI to3d command + + For complete details, see the `to3d Documentation + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> to3d = afni.To3D() + >>> to3d.inputs.datatype = 'float' + >>> to3d.inputs.in_folder = '.' + >>> to3d.inputs.out_file = 'dicomdir.nii' + >>> to3d.inputs.filetype = 'anat' + >>> to3d.cmdline # doctest: +ELLIPSIS + 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' + >>> res = to3d.run() # doctest: +SKIP + + """ + + _cmd = 'to3d' + input_spec = To3DInputSpec + output_spec = AFNICommandOutputSpec + + +class UndumpInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dUndump, whose geometry will determine' + 'the geometry of the output', + argstr='-master %s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + mask_file = File( + desc='mask image file name. Only voxels that are nonzero in the mask ' + 'can be set.', + argstr='-mask %s') + datatype = traits.Enum( + 'short', + 'float', + 'byte', + desc='set output file datatype', + argstr='-datum %s') + default_value = traits.Float( + desc='default value stored in each input voxel that does not have ' + 'a value supplied in the input file', + argstr='-dval %f') + fill_value = traits.Float( + desc='value, used for each voxel in the output dataset that is NOT ' + 'listed in the input file', + argstr='-fval %f') + coordinates_specification = traits.Enum( + 'ijk', + 'xyz', + desc='Coordinates in the input file as index triples (i, j, k) ' + 'or spatial coordinates (x, y, z) in mm', + argstr='-%s') + srad = traits.Float( + desc='radius in mm of the sphere that will be filled about each input ' + '(x,y,z) or (i,j,k) voxel. 
If the radius is not given, or is 0, ' + 'then each input data line sets the value in only one voxel.', + argstr='-srad %f') + orient = traits.Tuple( + traits.Enum('R', 'L'), + traits.Enum('A', 'P'), + traits.Enum('I', 'S'), + desc='Specifies the coordinate order used by -xyz. ' + 'The code must be 3 letters, one each from the pairs ' + '{R,L} {A,P} {I,S}. The first letter gives the ' + 'orientation of the x-axis, the second the orientation ' + 'of the y-axis, the third the z-axis: ' + 'R = right-to-left L = left-to-right ' + 'A = anterior-to-posterior P = posterior-to-anterior ' + 'I = inferior-to-superior S = superior-to-inferior ' + 'If -orient isn\'t used, then the coordinate order of the ' + '-master (in_file) dataset is used to interpret (x,y,z) inputs.', + argstr='-orient %s') + head_only = traits.Bool( + desc='create only the .HEAD file which gets exploited by ' + 'the AFNI matlab library function New_HEAD.m', + argstr='-head_only') + + +class UndumpOutputSpec(TraitedSpec): + out_file = File(desc='assembled file', exists=True) + + +class Undump(AFNICommand): + """3dUndump - Assembles a 3D dataset from an ASCII list of coordinates and + (optionally) values. + + The input file(s) are ASCII files, with one voxel specification per + line. A voxel specification is 3 numbers (-ijk or -xyz coordinates), + with an optional 4th number giving the voxel value. For example: + + 1 2 3 + 3 2 1 5 + 5.3 6.2 3.7 + // this line illustrates a comment + + The first line puts a voxel (with value given by '-dval') at point + (1,2,3). The second line puts a voxel (with value 5) at point (3,2,1). + The third line puts a voxel (with value given by '-dval') at point + (5.3,6.2,3.7). If -ijk is in effect, and fractional coordinates + are given, they will be rounded to the nearest integers; for example, + the third line would be equivalent to (i,j,k) = (5,6,4). + + + For complete details, see the `3dUndump Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> unndump = afni.Undump() + >>> unndump.inputs.in_file = 'structural.nii' + >>> unndump.inputs.out_file = 'structural_undumped.nii' + >>> unndump.cmdline + '3dUndump -prefix structural_undumped.nii -master structural.nii' + >>> res = unndump.run() # doctest: +SKIP + + """ + + _cmd = '3dUndump' + input_spec = UndumpInputSpec + output_spec = UndumpOutputSpec + + +class UnifizeInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dUnifize', + argstr='-input %s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_unifized', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + t2 = traits.Bool( + desc='Treat the input as if it were T2-weighted, rather than ' + 'T1-weighted. This processing is done simply by inverting ' + 'the image contrast, processing it as if that result were ' + 'T1-weighted, and then re-inverting the results ' + 'counts of voxel overlap, i.e., each voxel will contain the ' + 'number of masks that it is set in.', + argstr='-T2') + gm = traits.Bool( + desc='Also scale to unifize \'gray matter\' = lower intensity voxels ' + '(to aid in registering images from different scanners).', + argstr='-GM') + urad = traits.Float( + desc='Sets the radius (in voxels) of the ball used for the sneaky ' + 'trick. 
Default value is 18.3, and should be changed ' + 'proportionally if the dataset voxel size differs significantly ' + 'from 1 mm.', + argstr='-Urad %s') + scale_file = File( + desc='output file name to save the scale factor used at each voxel ', + argstr='-ssave %s') + no_duplo = traits.Bool( + desc='Do NOT use the \'duplo down\' step; this can be useful for ' + 'lower resolution datasets.', + argstr='-noduplo') + epi = traits.Bool( + desc='Assume the input dataset is a T2 (or T2*) weighted EPI time ' + 'series. After computing the scaling, apply it to ALL volumes ' + '(TRs) in the input dataset. That is, a given voxel will be ' + 'scaled by the same factor at each TR. ' + 'This option also implies \'-noduplo\' and \'-T2\'.' + 'This option turns off \'-GM\' if you turned it on.', + argstr='-EPI', + requires=['no_duplo', 't2'], + xor=['gm']) + rbt = traits.Tuple( + traits.Float(), + traits.Float(), + traits.Float(), + desc='Option for AFNI experts only.' + 'Specify the 3 parameters for the algorithm:\n' + 'R = radius; same as given by option \'-Urad\', [default=18.3]\n' + 'b = bottom percentile of normalizing data range, [default=70.0]\n' + 'r = top percentile of normalizing data range, [default=80.0]\n', + argstr='-rbt %f %f %f') + t2_up = traits.Float( + desc='Option for AFNI experts only.' + 'Set the upper percentile point used for T2-T1 inversion. ' + 'Allowed to be anything between 90 and 100 (inclusive), with ' + 'default to 98.5 (for no good reason).', + argstr='-T2up %f') + cl_frac = traits.Float( + desc='Option for AFNI experts only.' + 'Set the automask \'clip level fraction\'. Must be between ' + '0.1 and 0.9. A small fraction means to make the initial ' + 'threshold for clipping (a la 3dClipLevel) smaller, which ' + 'will tend to make the mask larger. [default=0.1]', + argstr='-clfrac %f') + quiet = traits.Bool( + desc='Don\'t print the progress messages.', argstr='-quiet') + + +class UnifizeOutputSpec(TraitedSpec): + scale_file = File(desc='scale factor file') + out_file = File(desc='unifized file', exists=True) + + +class Unifize(AFNICommand): + """3dUnifize - for uniformizing image intensity + + * The input dataset is supposed to be a T1-weighted volume, + possibly already skull-stripped (e.g., via 3dSkullStrip). + However, this program can be a useful step to take BEFORE + 3dSkullStrip, since the latter program can fail if the input + volume is strongly shaded -- 3dUnifize will (mostly) remove + such shading artifacts. + + * The output dataset has the white matter (WM) intensity approximately + uniformized across space, and scaled to peak at about 1000. + + * The output dataset is always stored in float format! + + * If the input dataset has more than 1 sub-brick, only sub-brick + #0 will be processed! + + * Want to correct EPI datasets for nonuniformity? + You can try the new and experimental [Mar 2017] '-EPI' option. + + * The principal motive for this program is for use in an image + registration script, and it may or may not be useful otherwise. + + * This program replaces the older (and very different) 3dUniformize, + which is no longer maintained and may sublimate at any moment. + (In other words, we do not recommend the use of 3dUniformize.) + + For complete details, see the `3dUnifize Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> unifize = afni.Unifize() + >>> unifize.inputs.in_file = 'structural.nii' + >>> unifize.inputs.out_file = 'structural_unifized.nii' + >>> unifize.cmdline + '3dUnifize -prefix structural_unifized.nii -input structural.nii' + >>> res = unifize.run() # doctest: +SKIP + + """ + + _cmd = '3dUnifize' + input_spec = UnifizeInputSpec + output_spec = UnifizeOutputSpec + + +class ZCutUpInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3dZcutup', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_zcutup', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + keep = Str(desc='slice range to keep in output', argstr='-keep %s') + + +class ZCutUp(AFNICommand): + """Cut z-slices from a volume using AFNI 3dZcutup command + + For complete details, see the `3dZcutup Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> zcutup = afni.ZCutUp() + >>> zcutup.inputs.in_file = 'functional.nii' + >>> zcutup.inputs.out_file = 'functional_zcutup.nii' + >>> zcutup.inputs.keep= '0 10' + >>> zcutup.cmdline + '3dZcutup -keep 0 10 -prefix functional_zcutup.nii functional.nii' + >>> res = zcutup.run() # doctest: +SKIP + + """ + + _cmd = '3dZcutup' + input_spec = ZCutUpInputSpec + output_spec = AFNICommandOutputSpec + + +class GCORInputSpec(CommandLineInputSpec): + in_file = File( + desc='input dataset to compute the GCOR over', + argstr='-input %s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + + mask = File( + desc='mask dataset, for restricting the computation', + argstr='-mask %s', + exists=True, + copyfile=False) + + nfirst = traits.Int( + 0, argstr='-nfirst %d', desc='specify number of initial TRs to ignore') + no_demean = traits.Bool( + False, + argstr='-no_demean', + desc='do not (need to) demean as first step') + + +class GCOROutputSpec(TraitedSpec): + out = traits.Float(desc='global correlation value') + + +class GCOR(CommandLine): + """ + Computes the average correlation between every voxel + and every other voxel, over any given mask. + + + For complete details, see the `@compute_gcor Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> gcor = afni.GCOR() + >>> gcor.inputs.in_file = 'structural.nii' + >>> gcor.inputs.nfirst = 4 + >>> gcor.cmdline + '@compute_gcor -nfirst 4 -input structural.nii' + >>> res = gcor.run() # doctest: +SKIP + + """ + + _cmd = '@compute_gcor' + input_spec = GCORInputSpec + output_spec = GCOROutputSpec + + def _run_interface(self, runtime): + runtime = super(GCOR, self)._run_interface(runtime) + + gcor_line = [ + line.strip() for line in runtime.stdout.split('\n') + if line.strip().startswith('GCOR = ') + ][-1] + setattr(self, '_gcor', float(gcor_line[len('GCOR = '):])) + return runtime + + def _list_outputs(self): + return {'out': getattr(self, '_gcor')} + + +class AxializeInputSpec(AFNICommandInputSpec): + in_file = File( + desc='input file to 3daxialize', + argstr='%s', + position=-2, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='%s_axialize', + desc='output image file name', + argstr='-prefix %s', + name_source='in_file') + verb = traits.Bool(desc='Print out a progress report', argstr='-verb') + sagittal = traits.Bool( + desc='Do sagittal slice order [-orient ASL]', + argstr='-sagittal', + xor=['coronal', 'axial']) + coronal = traits.Bool( + desc='Do coronal slice order [-orient RSA]', + argstr='-coronal', + xor=['sagittal', 'axial']) + axial = traits.Bool( + desc='Do axial slice order [-orient RAI]. ' + 'This is the default AFNI axial order, and ' + 'is the one currently required by the ' + 'volume rendering plugin; this is also ' + 'the default orientation output by this ' + "program (hence the program's name).", + argstr='-axial', + xor=['coronal', 'sagittal']) + orientation = Str(desc='new orientation code', argstr='-orient %s') + + +class Axialize(AFNICommand): + """Read in a dataset and write it out as a new dataset + with the data brick oriented as axial slices. + + For complete details, see the `3daxialize Documentation. + `_ + + Examples + ======== + >>> from nipype.interfaces import afni + >>> axial3d = afni.Axialize() + >>> axial3d.inputs.in_file = 'functional.nii' + >>> axial3d.inputs.out_file = 'axialized.nii' + >>> axial3d.cmdline + '3daxialize -prefix axialized.nii functional.nii' + >>> res = axial3d.run() # doctest: +SKIP + + """ + + _cmd = '3daxialize' + input_spec = AxializeInputSpec + output_spec = AFNICommandOutputSpec + + +class ZcatInputSpec(AFNICommandInputSpec): + in_files = InputMultiPath( + File(desc='input files to 3dZcat', exists=True), + argstr='%s', + position=-1, + mandatory=True, + copyfile=False) + out_file = File( + name_template='%s_zcat', + desc='output dataset prefix name (default \'zcat\')', + argstr='-prefix %s', + name_source='in_files') + datum = traits.Enum( + 'byte', + 'short', + 'float', + argstr='-datum %s', + desc='specify data type for output. Valid types are \'byte\', ' + '\'short\' and \'float\'.') + verb = traits.Bool( + desc='print out some verbose output as the program proceeds.', + argstr='-verb') + fscale = traits.Bool( + desc='Force scaling of the output to the maximum integer ' + 'range. This only has effect if the output datum is ' + 'byte or short (either forced or defaulted). This ' + 'option is sometimes necessary to eliminate ' + 'unpleasant truncation artifacts.', + argstr='-fscale', + xor=['nscale']) + nscale = traits.Bool( + desc='Don\'t do any scaling on output to byte or short ' + 'datasets. 
This may be especially useful when ' + 'operating on mask datasets whose output values ' + 'are only 0\'s and 1\'s.', + argstr='-nscale', + xor=['fscale']) + + +class Zcat(AFNICommand): + """Copies an image of one type to an image of the same + or different type using 3dZcat command + + For complete details, see the `3dZcat Documentation. + `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> zcat = afni.Zcat() + >>> zcat.inputs.in_files = ['functional2.nii', 'functional3.nii'] + >>> zcat.inputs.out_file = 'cat_functional.nii' + >>> zcat.cmdline + '3dZcat -prefix cat_functional.nii functional2.nii functional3.nii' + >>> res = zcat.run() # doctest: +SKIP + """ + + _cmd = '3dZcat' + input_spec = ZcatInputSpec + output_spec = AFNICommandOutputSpec + + +class ZeropadInputSpec(AFNICommandInputSpec): + in_files = File( + desc='input dataset', + argstr='%s', + position=-1, + mandatory=True, + exists=True, + copyfile=False) + out_file = File( + name_template='zeropad', + desc='output dataset prefix name (default \'zeropad\')', + argstr='-prefix %s') + I = traits.Int( + desc='adds \'n\' planes of zero at the Inferior edge', + argstr='-I %i', + xor=['master']) + S = traits.Int( + desc='adds \'n\' planes of zero at the Superior edge', + argstr='-S %i', + xor=['master']) + A = traits.Int( + desc='adds \'n\' planes of zero at the Anterior edge', + argstr='-A %i', + xor=['master']) + P = traits.Int( + desc='adds \'n\' planes of zero at the Posterior edge', + argstr='-P %i', + xor=['master']) + L = traits.Int( + desc='adds \'n\' planes of zero at the Left edge', + argstr='-L %i', + xor=['master']) + R = traits.Int( + desc='adds \'n\' planes of zero at the Right edge', + argstr='-R %i', + xor=['master']) + z = traits.Int( + desc='adds \'n\' planes of zero on EACH of the ' + 'dataset z-axis (slice-direction) faces', + argstr='-z %i', + xor=['master']) + RL = traits.Int( + desc='specify that planes should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the right-left direction', + argstr='-RL %i', + xor=['master']) + AP = traits.Int( + desc='specify that planes should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the anterior-posterior direction', + argstr='-AP %i', + xor=['master']) + IS = traits.Int( + desc='specify that planes should be added or cut ' + 'symmetrically to make the resulting volume have' + 'N slices in the inferior-superior direction', + argstr='-IS %i', + xor=['master']) + mm = traits.Bool( + desc='pad counts \'n\' are in mm instead of slices, ' + 'where each \'n\' is an integer and at least \'n\' ' + 'mm of slices will be added/removed; e.g., n = 3 ' + 'and slice thickness = 2.5 mm ==> 2 slices added', + argstr='-mm', + xor=['master']) + master = traits.File( + desc='match the volume described in dataset ' + '\'mset\', where mset must have the same ' + 'orientation and grid spacing as dataset to be ' + 'padded. the goal of -master is to make the ' + 'output dataset from 3dZeropad match the ' + 'spatial \'extents\' of mset by adding or ' + 'subtracting slices as needed. You can\'t use ' + '-I,-S,..., or -mm with -master', + argstr='-master %s', + xor=['I', 'S', 'A', 'P', 'L', 'R', 'z', 'RL', 'AP', 'IS', 'mm']) + + +class Zeropad(AFNICommand): + """Adds planes of zeros to a dataset (i.e., pads it out). + + For complete details, see the `3dZeropad Documentation. 
+ `_ + + Examples + ======== + + >>> from nipype.interfaces import afni + >>> zeropad = afni.Zeropad() + >>> zeropad.inputs.in_files = 'functional.nii' + >>> zeropad.inputs.out_file = 'pad_functional.nii' + >>> zeropad.inputs.I = 10 + >>> zeropad.inputs.S = 10 + >>> zeropad.inputs.A = 10 + >>> zeropad.inputs.P = 10 + >>> zeropad.inputs.R = 10 + >>> zeropad.inputs.L = 10 + >>> zeropad.cmdline + '3dZeropad -A 10 -I 10 -L 10 -P 10 -R 10 -S 10 -prefix pad_functional.nii functional.nii' + >>> res = zeropad.run() # doctest: +SKIP + """ + + _cmd = '3dZeropad' + input_spec = ZeropadInputSpec + output_spec = AFNICommandOutputSpec diff --git a/nipype/interfaces/ants/__init__.py b/nipype/interfaces/ants/__init__.py new file mode 100644 index 0000000000..e1970912cf --- /dev/null +++ b/nipype/interfaces/ants/__init__.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Top-level namespace for ants.""" + +# Registration programs +from .registration import (ANTS, Registration, RegistrationSynQuick, + MeasureImageSimilarity) + +# Resampling Programs +from .resampling import (ApplyTransforms, ApplyTransformsToPoints, + WarpImageMultiTransform, + WarpTimeSeriesImageMultiTransform) + +# Segmentation Programs +from .segmentation import (Atropos, LaplacianThickness, N4BiasFieldCorrection, + JointFusion, CorticalThickness, BrainExtraction, + DenoiseImage, AntsJointFusion) + +# Visualization Programs +from .visualization import ConvertScalarImageToRGB, CreateTiledMosaic + +# Utility Programs +from .utils import (AverageAffineTransform, AverageImages, MultiplyImages, + CreateJacobianDeterminantImage, AffineInitializer, + ComposeMultiTransform, LabelGeometry) diff --git a/nipype/interfaces/ants/base.py b/nipype/interfaces/ants/base.py new file mode 100644 index 0000000000..34b64a0ec1 --- /dev/null +++ b/nipype/interfaces/ants/base.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The ants module provides basic functions for interfacing with ANTS tools.""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str + +import os + +# Local imports +from ... import logging, LooseVersion +from ..base import (CommandLine, CommandLineInputSpec, traits, isdefined, + PackageInfo) +iflogger = logging.getLogger('nipype.interface') + +# -Using -1 gives primary responsibility to ITKv4 to do the correct +# thread limiting. +# -Using 1 takes a very conservative approach to avoid overloading +# the computer (when running MultiProc) by forcing everything to +# single threaded. This can be a severe penalty for registration +# performance. +LOCAL_DEFAULT_NUMBER_OF_THREADS = 1 +# -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS +# as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise +# ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precedence. +# In other words, if the user explicitly specifies num_threads, that value +# is respected no matter what SGE tries to limit. 
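(Editor's note: a minimal doctest-style sketch of the thread rules described in the comments above; it assumes ANTs is installed and borrows the Registration interface added later in this same patch, so treat it as illustrative rather than shipped documentation:)

>>> from nipype.interfaces.ants import Registration  # doctest: +SKIP
>>> reg = Registration()  # doctest: +SKIP
>>> reg.inputs.num_threads = 4   # an explicit value is exported to the child process via NSLOTS
>>> reg.inputs.num_threads = -1  # -1 defers to the ITKv4/SGE defaults, as described above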
+PREFERED_ITKv4_THREAD_LIMIT_VARIABLE = 'NSLOTS' +ALT_ITKv4_THREAD_LIMIT_VARIABLE = 'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS' + + +class Info(PackageInfo): + version_cmd = os.path.join(os.getenv('ANTSPATH', ''), + 'antsRegistration') + ' --version' + + @staticmethod + def parse_version(raw_info): + for line in raw_info.splitlines(): + if line.startswith('ANTs Version: '): + v_string = line.split()[2] + break + else: + return None + + # -githash may or may not be appended + v_string = v_string.split('-')[0] + + # 2.2.0-equivalent version string + if 'post' in v_string and \ + LooseVersion(v_string) >= LooseVersion('2.1.0.post789'): + return '2.2.0' + else: + return '.'.join(v_string.split('.')[:3]) + + +class ANTSCommandInputSpec(CommandLineInputSpec): + """Base Input Specification for all ANTS Commands + """ + + num_threads = traits.Int( + LOCAL_DEFAULT_NUMBER_OF_THREADS, + usedefault=True, + nohash=True, + desc="Number of ITK threads to use") + + +class ANTSCommand(CommandLine): + """Base class for ANTS interfaces + """ + + input_spec = ANTSCommandInputSpec + _num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS + + def __init__(self, **inputs): + super(ANTSCommand, self).__init__(**inputs) + self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + + if not isdefined(self.inputs.num_threads): + self.inputs.num_threads = self._num_threads + else: + self._num_threads_update() + + def _num_threads_update(self): + self._num_threads = self.inputs.num_threads + # ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested + # by the end user. The default setting did not allow for + # overwriting the default values. + # In ITKv4 (the version used for all ANTS programs), ITK respects + # the SGE controlled $NSLOTS environmental variable. + # If the user specifies -1, then that indicates that the system + # default behavior should be the one specified by ITKv4 rules + # (i.e. respect SGE $NSLOTS or environmental variables of threads, or + # user environmental settings) + if (self.inputs.num_threads == -1): + if (ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ): + del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE] + if (PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ): + del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE] + else: + self.inputs.environ.update({ + PREFERED_ITKv4_THREAD_LIMIT_VARIABLE: + '%s' % self.inputs.num_threads + }) + + @staticmethod + def _format_xarray(val): + """ Convenience method for converting input arrays [1,2,3] to + commandline format '1x2x3' """ + return 'x'.join([str(x) for x in val]) + + @classmethod + def set_default_num_threads(cls, num_threads): + """Set the default number of threads for ITK calls + + This method is used to set the default number of ITK threads for all + the ANTS interfaces. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.num_threads + """ + cls._num_threads = num_threads + + @property + def version(self): + return Info.version() diff --git a/nipype/interfaces/ants/legacy.py b/nipype/interfaces/ants/legacy.py new file mode 100644 index 0000000000..40f2def728 --- /dev/null +++ b/nipype/interfaces/ants/legacy.py @@ -0,0 +1,325 @@ +# -*- coding: utf-8 -*- +# NOTE: This implementation has been superseded by the antsApplyTransform +# implementation that more closely follows the structure and capabilities +# of the antsApplyTransform program. This implementation is here +# for backwards compatibility. 
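(Editor's note: since the header above flags this module as legacy, new code is usually better served by the modern ApplyTransforms interface from nipype.interfaces.ants; a minimal sketch, assuming ANTs is installed and that 'moving1.nii', 'fixed1.nii' and 'trans.mat' exist in the working directory:)

>>> from nipype.interfaces.ants import ApplyTransforms  # doctest: +SKIP
>>> at = ApplyTransforms()  # doctest: +SKIP
>>> at.inputs.input_image = 'moving1.nii'     # image to be resampled
>>> at.inputs.reference_image = 'fixed1.nii'  # defines the output grid
>>> at.inputs.transforms = ['trans.mat']      # transform(s) to apply
>>> res = at.run()  # doctest: +SKIP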
+"""ANTS Apply Transforms interface +""" + +from builtins import range + +import os +from glob import glob + +from .base import ANTSCommand, ANTSCommandInputSpec +from ..base import TraitedSpec, File, traits, isdefined, OutputMultiPath +from ...utils.filemanip import split_filename + + +class antsIntroductionInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='-d %d', + usedefault=True, + desc='image dimension (2 or 3)', + position=1) + reference_image = File( + exists=True, + argstr='-r %s', + desc='template file to warp to', + mandatory=True, + copyfile=True) + input_image = File( + exists=True, + argstr='-i %s', + desc='input image to warp to template', + mandatory=True, + copyfile=False) + force_proceed = traits.Bool( + argstr='-f 1', + desc=('force script to proceed even if headers ' + 'may be incompatible')) + inverse_warp_template_labels = traits.Bool( + argstr='-l', + desc=('Applies inverse warp to the template labels ' + 'to estimate label positions in target space (use ' + 'for template-based segmentation)')) + max_iterations = traits.List( + traits.Int, + argstr='-m %s', + sep='x', + desc=('maximum number of iterations (must be ' + 'list of integers in the form [J,K,L...]: ' + 'J = coarsest resolution iterations, K = ' + 'middle resolution interations, L = fine ' + 'resolution iterations')) + bias_field_correction = traits.Bool( + argstr='-n 1', + desc=('Applies bias field correction to moving ' + 'image')) + similarity_metric = traits.Enum( + 'PR', + 'CC', + 'MI', + 'MSQ', + argstr='-s %s', + desc=('Type of similartiy metric used for registration ' + '(CC = cross correlation, MI = mutual information, ' + 'PR = probability mapping, MSQ = mean square difference)')) + transformation_model = traits.Enum( + 'GR', + 'EL', + 'SY', + 'S2', + 'EX', + 'DD', + 'RI', + 'RA', + argstr='-t %s', + usedefault=True, + desc=('Type of transofmration model used for registration ' + '(EL = elastic transformation model, SY = SyN with time, ' + 'arbitrary number of time points, S2 = SyN with time ' + 'optimized for 2 time points, GR = greedy SyN, EX = ' + 'exponential, DD = diffeomorphic demons style exponential ' + 'mapping, RI = purely rigid, RA = affine rigid')) + out_prefix = traits.Str( + 'ants_', + argstr='-o %s', + usedefault=True, + desc=('Prefix that is prepended to all output ' + 'files (default = ants_)')) + quality_check = traits.Bool( + argstr='-q 1', desc='Perform a quality check of the result') + + +class antsIntroductionOutputSpec(TraitedSpec): + affine_transformation = File( + exists=True, desc='affine (prefix_Affine.txt)') + warp_field = File(exists=True, desc='warp field (prefix_Warp.nii)') + inverse_warp_field = File( + exists=True, desc='inverse warp field (prefix_InverseWarp.nii)') + input_file = File(exists=True, desc='input image (prefix_repaired.nii)') + output_file = File(exists=True, desc='output image (prefix_deformed.nii)') + + +class antsIntroduction(ANTSCommand): + """Uses ANTS to generate matrices to warp data from one space to another. 
+ + Examples + -------- + + >>> from nipype.interfaces.ants.legacy import antsIntroduction + >>> warp = antsIntroduction() + >>> warp.inputs.reference_image = 'Template_6.nii' + >>> warp.inputs.input_image = 'structural.nii' + >>> warp.inputs.max_iterations = [30,90,20] + >>> warp.cmdline + 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' + + """ + + _cmd = 'antsIntroduction.sh' + input_spec = antsIntroductionInputSpec + output_spec = antsIntroductionOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + transmodel = self.inputs.transformation_model + + # When transform is set as 'RI'/'RA', wrap fields should not be expected + # The default transformation is GR, which outputs the wrap fields + if not isdefined(transmodel) or (isdefined(transmodel) + and transmodel not in ['RI', 'RA']): + outputs['warp_field'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'Warp.nii.gz') + outputs['inverse_warp_field'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'InverseWarp.nii.gz') + + outputs['affine_transformation'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'Affine.txt') + outputs['input_file'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'repaired.nii.gz') + outputs['output_file'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'deformed.nii.gz') + + return outputs + + +# How do we make a pass through so that GenWarpFields is just an alias for antsIntroduction ? + + +class GenWarpFields(antsIntroduction): + pass + + +class buildtemplateparallelInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + 4, + argstr='-d %d', + usedefault=True, + desc='image dimension (2, 3 or 4)', + position=1) + out_prefix = traits.Str( + 'antsTMPL_', + argstr='-o %s', + usedefault=True, + desc=('Prefix that is prepended to all output ' + 'files (default = antsTMPL_)')) + in_files = traits.List( + File(exists=True), + mandatory=True, + desc='list of images to generate template from', + argstr='%s', + position=-1) + parallelization = traits.Enum( + 0, + 1, + 2, + argstr='-c %d', + usedefault=True, + desc=('control for parallel processing (0 = ' + 'serial, 1 = use PBS, 2 = use PEXEC, 3 = ' + 'use Apple XGrid')) + gradient_step_size = traits.Float( + argstr='-g %f', + desc=('smaller magnitude results in ' + 'more cautious steps (default = ' + '.25)')) + iteration_limit = traits.Int( + 4, + argstr='-i %d', + usedefault=True, + desc='iterations of template construction') + num_cores = traits.Int( + argstr='-j %d', + requires=['parallelization'], + desc=('Requires parallelization = 2 (PEXEC). 
' + 'Sets number of cpu cores to use')) + max_iterations = traits.List( + traits.Int, + argstr='-m %s', + sep='x', + desc=('maximum number of iterations (must be ' + 'a list of integers in the form [J,K,L...]: ' + 'J = coarsest resolution iterations, K = ' + 'middle resolution iterations, L = fine ' + 'resolution iterations)')) + bias_field_correction = traits.Bool( + argstr='-n 1', + desc=('Applies bias field correction to moving ' + 'image')) + rigid_body_registration = traits.Bool( + argstr='-r 1', + desc=('registers inputs before creating template ' + '(useful if no initial template available)')) + similarity_metric = traits.Enum( + 'PR', + 'CC', + 'MI', + 'MSQ', + argstr='-s %s', + desc=('Type of similarity metric used for registration ' + '(CC = cross correlation, MI = mutual information, ' + 'PR = probability mapping, MSQ = mean square difference)')) + transformation_model = traits.Enum( + 'GR', + 'EL', + 'SY', + 'S2', + 'EX', + 'DD', + argstr='-t %s', + usedefault=True, + desc=('Type of transformation model used for registration ' + '(EL = elastic transformation model, SY = SyN with time, ' + 'arbitrary number of time points, S2 = SyN with time ' + 'optimized for 2 time points, GR = greedy SyN, EX = ' + 'exponential, DD = diffeomorphic demons style exponential ' + 'mapping)')) + use_first_as_target = traits.Bool( + desc=('uses first volume as target of ' + 'all inputs. When not used, an ' + 'unbiased average image is used ' + 'to start.')) + + +class buildtemplateparallelOutputSpec(TraitedSpec): + final_template_file = File(exists=True, desc='final ANTS template') + template_files = OutputMultiPath( + File(exists=True), desc='Templates from different stages of iteration') + subject_outfiles = OutputMultiPath( + File(exists=True), + desc=('Outputs for each input image. Includes warp ' + 'field, inverse warp, Affine, original image ' + '(repaired) and warped image (deformed)')) + + +class buildtemplateparallel(ANTSCommand): + """Generate an optimal average template + + .. 
warning:: + + This can take a VERY long time to complete + + Examples + -------- + + >>> from nipype.interfaces.ants.legacy import buildtemplateparallel + >>> tmpl = buildtemplateparallel() + >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] + >>> tmpl.inputs.max_iterations = [30, 90, 20] + >>> tmpl.cmdline + 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' + + """ + + _cmd = 'buildtemplateparallel.sh' + input_spec = buildtemplateparallelInputSpec + output_spec = buildtemplateparallelOutputSpec + + def _format_arg(self, opt, spec, val): + if opt == 'num_cores': + if self.inputs.parallelization == 2: + return '-j ' + str(val) + else: + return '' + if opt == 'in_files': + if self.inputs.use_first_as_target: + start = '-z ' + else: + start = '' + return start + ' '.join(name for name in val) + return super(buildtemplateparallel, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['template_files'] = [] + for i in range(len(glob(os.path.realpath('*iteration*')))): + temp = os.path.realpath('%s_iteration_%d/%stemplate.nii.gz' % + (self.inputs.transformation_model, i, + self.inputs.out_prefix)) + os.rename( + temp, + os.path.realpath('%s_iteration_%d/%stemplate_i%d.nii.gz' % + (self.inputs.transformation_model, i, + self.inputs.out_prefix, i))) + file_ = ('%s_iteration_%d/%stemplate_i%d.nii.gz' % + (self.inputs.transformation_model, i, + self.inputs.out_prefix, i)) + + outputs['template_files'].append(os.path.realpath(file_)) + outputs['final_template_file'] = \ + os.path.realpath('%stemplate.nii.gz' % + self.inputs.out_prefix) + outputs['subject_outfiles'] = [] + for filename in self.inputs.in_files: + _, base, _ = split_filename(filename) + temp = glob( + os.path.realpath('%s%s*' % (self.inputs.out_prefix, base))) + for file_ in temp: + outputs['subject_outfiles'].append(file_) + return outputs diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py new file mode 100644 index 0000000000..7190816cf3 --- /dev/null +++ b/nipype/interfaces/ants/registration.py @@ -0,0 +1,1598 @@ +# -*- coding: utf-8 -*- +"""The ants module provides basic functions for interfacing with ants + functions. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, str +import os + +from ...utils.filemanip import ensure_list +from ..base import TraitedSpec, File, Str, traits, InputMultiPath, isdefined +from .base import ANTSCommand, ANTSCommandInputSpec, LOCAL_DEFAULT_NUMBER_OF_THREADS + + +class ANTSInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + position=1, + desc='image dimension (2 or 3)') + fixed_image = InputMultiPath( + File(exists=True), + mandatory=True, + desc=('image to which the moving image is ' + 'warped')) + moving_image = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + desc=('image to apply transformation to ' + '(generally a coregistered' + 'functional)')) + + # Not all metrics are appropriate for all modalities. Also, not all metrics + # are efficeint or appropriate at all resolution levels, Some metrics + # perform well for gross global registraiton, but do poorly for small + # changes (i.e. Mattes), and some metrics do well for small changes but + # don't work well for gross level changes (i.e. 'CC'). + # + # This is a two stage registration. in the first stage + # [ 'Mattes', .................] 
+ # ^^^^^^ <- First stage + # Do a unimodal registration of the first elements of the fixed/moving input + # list use the"CC" as the metric. + # + # In the second stage + # [ ....., ['Mattes','CC'] ] + # ^^^^^^^^^^^^^^^ <- Second stage + # Do a multi-modal registration where the first elements of fixed/moving + # input list use 'CC' metric and that is added to 'Mattes' metric result of + # the second elements of the fixed/moving input. + # + # Cost = Sum_i ( metricweight[i] Metric_i ( fixedimage[i], movingimage[i]) ) + metric = traits.List( + traits.Enum('CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ', 'PSE'), + mandatory=True, + desc='') + + metric_weight = traits.List( + traits.Float(), + value=[1.0], + usedefault=True, + requires=['metric'], + mandatory=True, + desc='the metric weight(s) for each stage. ' + 'The weights must sum to 1 per stage.') + + radius = traits.List( + traits.Int(), + requires=['metric'], + mandatory=True, + desc='radius of the region (i.e. number of layers around a voxel/pixel)' + ' that is used for computing cross correlation') + + output_transform_prefix = Str( + 'out', + usedefault=True, + argstr='--output-naming %s', + mandatory=True, + desc='') + transformation_model = traits.Enum( + 'Diff', + 'Elast', + 'Exp', + 'Greedy Exp', + 'SyN', + argstr='%s', + mandatory=True, + desc='') + gradient_step_length = traits.Float( + requires=['transformation_model'], desc='') + number_of_time_steps = traits.Float( + requires=['gradient_step_length'], desc='') + delta_time = traits.Float(requires=['number_of_time_steps'], desc='') + symmetry_type = traits.Float(requires=['delta_time'], desc='') + + use_histogram_matching = traits.Bool( + argstr='%s', default_value=True, usedefault=True) + number_of_iterations = traits.List( + traits.Int(), argstr='--number-of-iterations %s', sep='x') + smoothing_sigmas = traits.List( + traits.Int(), argstr='--gaussian-smoothing-sigmas %s', sep='x') + subsampling_factors = traits.List( + traits.Int(), argstr='--subsampling-factors %s', sep='x') + affine_gradient_descent_option = traits.List(traits.Float(), argstr='%s') + + mi_option = traits.List(traits.Int(), argstr='--MI-option %s', sep='x') + regularization = traits.Enum('Gauss', 'DMFFD', argstr='%s', desc='') + regularization_gradient_field_sigma = traits.Float( + requires=['regularization'], desc='') + regularization_deformation_field_sigma = traits.Float( + requires=['regularization'], desc='') + number_of_affine_iterations = traits.List( + traits.Int(), argstr='--number-of-affine-iterations %s', sep='x') + + +class ANTSOutputSpec(TraitedSpec): + affine_transform = File(exists=True, desc='Affine transform file') + warp_transform = File(exists=True, desc='Warping deformation field') + inverse_warp_transform = File( + exists=True, desc='Inverse warping deformation field') + metaheader = File(exists=True, desc='VTK metaheader .mhd file') + metaheader_raw = File(exists=True, desc='VTK metaheader .raw file') + + +class ANTS(ANTSCommand): + """ANTS wrapper for registration of images + (old, use Registration instead) + + Examples + -------- + + >>> from nipype.interfaces.ants import ANTS + >>> ants = ANTS() + >>> ants.inputs.dimension = 3 + >>> ants.inputs.output_transform_prefix = 'MY' + >>> ants.inputs.metric = ['CC'] + >>> ants.inputs.fixed_image = ['T1.nii'] + >>> ants.inputs.moving_image = ['resting.nii'] + >>> ants.inputs.metric_weight = [1.0] + >>> ants.inputs.radius = [5] + >>> ants.inputs.transformation_model = 'SyN' + >>> ants.inputs.gradient_step_length = 0.25 + >>> 
ants.inputs.number_of_iterations = [50, 35, 15] + >>> ants.inputs.use_histogram_matching = True + >>> ants.inputs.mi_option = [32, 16000] + >>> ants.inputs.regularization = 'Gauss' + >>> ants.inputs.regularization_gradient_field_sigma = 3 + >>> ants.inputs.regularization_deformation_field_sigma = 0 + >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] + >>> ants.cmdline + 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations \ +10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] \ +--transformation-model SyN[0.25] --use-Histogram-Matching 1' + """ + _cmd = 'ANTS' + input_spec = ANTSInputSpec + output_spec = ANTSOutputSpec + + def _image_metric_constructor(self): + retval = [] + intensity_based = ['CC', 'MI', 'SMI', 'PR', 'SSD', 'MSQ'] + point_set_based = ['PSE', 'JTB'] + for ii in range(len(self.inputs.moving_image)): + if self.inputs.metric[ii] in intensity_based: + retval.append( + '--image-metric %s[ %s, %s, %g, %d ]' % + (self.inputs.metric[ii], self.inputs.fixed_image[ii], + self.inputs.moving_image[ii], + self.inputs.metric_weight[ii], self.inputs.radius[ii])) + elif self.inputs.metric[ii] == point_set_based: + pass + # retval.append('--image-metric %s[%s, %s, ...'.format(self.inputs.metric[ii], + # self.inputs.fixed_image[ii], self.inputs.moving_image[ii], ...)) + return ' '.join(retval) + + def _transformation_constructor(self): + model = self.inputs.transformation_model + step_length = self.inputs.gradient_step_length + time_step = self.inputs.number_of_time_steps + delta_time = self.inputs.delta_time + symmetry_type = self.inputs.symmetry_type + retval = ['--transformation-model %s' % model] + parameters = [] + for elem in (step_length, time_step, delta_time, symmetry_type): + if elem is not traits.Undefined: + parameters.append('%#.2g' % elem) + if len(parameters) > 0: + if len(parameters) > 1: + parameters = ','.join(parameters) + else: + parameters = ''.join(parameters) + retval.append('[%s]' % parameters) + return ''.join(retval) + + def _regularization_constructor(self): + return '--regularization {0}[{1},{2}]'.format( + self.inputs.regularization, + self.inputs.regularization_gradient_field_sigma, + self.inputs.regularization_deformation_field_sigma) + + def _affine_gradient_descent_option_constructor(self): + values = self.inputs.affine_gradient_descent_option + defaults = [0.1, 0.5, 1.e-4, 1.e-4] + for ii in range(len(defaults)): + try: + defaults[ii] = values[ii] + except IndexError: + break + parameters = self._format_xarray( + [('%g' % defaults[index]) for index in range(4)]) + retval = ['--affine-gradient-descent-option', parameters] + return ' '.join(retval) + + def _format_arg(self, opt, spec, val): + if opt == 'moving_image': + return self._image_metric_constructor() + elif opt == 'transformation_model': + return self._transformation_constructor() + elif opt == 'regularization': + return self._regularization_constructor() + elif opt == 'affine_gradient_descent_option': + return self._affine_gradient_descent_option_constructor() + elif opt == 'use_histogram_matching': + if self.inputs.use_histogram_matching: + return '--use-Histogram-Matching 1' + else: + return '--use-Histogram-Matching 0' + return super(ANTS, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['affine_transform'] = os.path.abspath( + self.inputs.output_transform_prefix + 'Affine.txt') + 
outputs['warp_transform'] = os.path.abspath( + self.inputs.output_transform_prefix + 'Warp.nii.gz') + outputs['inverse_warp_transform'] = os.path.abspath( + self.inputs.output_transform_prefix + 'InverseWarp.nii.gz') + # outputs['metaheader'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.mhd') + # outputs['metaheader_raw'] = os.path.abspath(self.inputs.output_transform_prefix + 'velocity.raw') + return outputs + + +class RegistrationInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='--dimensionality %d', + usedefault=True, + desc='image dimension (2 or 3)') + fixed_image = InputMultiPath( + File(exists=True), + mandatory=True, + desc='Image to which the moving_image should be transformed' + '(usually a structural image)') + fixed_image_mask = File( + exists=True, + argstr='%s', + max_ver='2.1.0', + xor=['fixed_image_masks'], + desc='Mask used to limit metric sampling region of the fixed image' + 'in all stages') + fixed_image_masks = InputMultiPath( + traits.Either('NULL', File(exists=True)), + min_ver='2.2.0', + xor=['fixed_image_mask'], + desc= + 'Masks used to limit metric sampling region of the fixed image, defined per registration stage' + '(Use "NULL" to omit a mask at a given stage)') + moving_image = InputMultiPath( + File(exists=True), + mandatory=True, + desc= + 'Image that will be registered to the space of fixed_image. This is the' + 'image on which the transformations will be applied to') + moving_image_mask = File( + exists=True, + requires=['fixed_image_mask'], + max_ver='2.1.0', + xor=['moving_image_masks'], + desc='mask used to limit metric sampling region of the moving image' + 'in all stages') + moving_image_masks = InputMultiPath( + traits.Either('NULL', File(exists=True)), + min_ver='2.2.0', + xor=['moving_image_mask'], + desc= + 'Masks used to limit metric sampling region of the moving image, defined per registration stage' + '(Use "NULL" to omit a mask at a given stage)') + + save_state = File( + argstr='--save-state %s', + exists=False, + desc= + 'Filename for saving the internal restorable state of the registration' + ) + restore_state = File( + argstr='--restore-state %s', + exists=True, + desc= + 'Filename for restoring the internal restorable state of the registration' + ) + + initial_moving_transform = InputMultiPath( + File(exists=True), + argstr='%s', + desc='A transform or a list of transforms that should be applied' + 'before the registration begins. Note that, when a list is given,' + 'the transformations are applied in reverse order.', + xor=['initial_moving_transform_com']) + invert_initial_moving_transform = InputMultiPath( + traits.Bool(), + requires=["initial_moving_transform"], + desc='One boolean or a list of booleans that indicate' + 'whether the inverse(s) of the transform(s) defined' + 'in initial_moving_transform should be used.', + xor=['initial_moving_transform_com']) + + initial_moving_transform_com = traits.Enum( + 0, + 1, + 2, + argstr='%s', + xor=['initial_moving_transform'], + desc="Align the moving_image nad fixed_image befor registration using" + "the geometric center of the images (=0), the image intensities (=1)," + "or the origin of the images (=2)") + metric_item_trait = traits.Enum("CC", "MeanSquares", "Demons", "GC", "MI", + "Mattes") + metric_stage_trait = traits.Either(metric_item_trait, + traits.List(metric_item_trait)) + metric = traits.List( + metric_stage_trait, + mandatory=True, + desc='the metric(s) to use for each stage. 
' + 'Note that multiple metrics per stage are not supported ' + 'in ANTS 1.9.1 and earlier.') + metric_weight_item_trait = traits.Float(1.0, usedefault=True) + metric_weight_stage_trait = traits.Either( + metric_weight_item_trait, traits.List(metric_weight_item_trait)) + metric_weight = traits.List( + metric_weight_stage_trait, + value=[1.0], + usedefault=True, + requires=['metric'], + mandatory=True, + desc='the metric weight(s) for each stage. ' + 'The weights must sum to 1 per stage.') + radius_bins_item_trait = traits.Int(5, usedefault=True) + radius_bins_stage_trait = traits.Either( + radius_bins_item_trait, traits.List(radius_bins_item_trait)) + radius_or_number_of_bins = traits.List( + radius_bins_stage_trait, + value=[5], + usedefault=True, + requires=['metric_weight'], + desc='the number of bins in each stage for the MI and Mattes metric, ' + 'the radius for other metrics') + sampling_strategy_item_trait = traits.Enum("None", "Regular", "Random", + None) + sampling_strategy_stage_trait = traits.Either( + sampling_strategy_item_trait, + traits.List(sampling_strategy_item_trait)) + sampling_strategy = traits.List( + trait=sampling_strategy_stage_trait, + requires=['metric_weight'], + desc='the metric sampling strategy (strategies) for each stage') + sampling_percentage_item_trait = traits.Either( + traits.Range(low=0.0, high=1.0), None) + sampling_percentage_stage_trait = traits.Either( + sampling_percentage_item_trait, + traits.List(sampling_percentage_item_trait)) + sampling_percentage = traits.List( + trait=sampling_percentage_stage_trait, + requires=['sampling_strategy'], + desc="the metric sampling percentage(s) to use for each stage") + use_estimate_learning_rate_once = traits.List(traits.Bool(), desc='') + use_histogram_matching = traits.Either( + traits.Bool, + traits.List(traits.Bool(argstr='%s')), + default=True, + usedefault=True, + desc='Histogram match the images before registration.') + interpolation = traits.Enum( + 'Linear', + 'NearestNeighbor', + 'CosineWindowedSinc', + 'WelchWindowedSinc', + 'HammingWindowedSinc', + 'LanczosWindowedSinc', + 'BSpline', + 'MultiLabel', + 'Gaussian', + argstr='%s', + usedefault=True) + interpolation_parameters = traits.Either( + traits.Tuple(traits.Int()), # BSpline (order) + traits.Tuple( + traits.Float(), # Gaussian/MultiLabel (sigma, alpha) + traits.Float())) + + write_composite_transform = traits.Bool( + argstr='--write-composite-transform %d', + default_value=False, + usedefault=True, + desc='') + collapse_output_transforms = traits.Bool( + argstr='--collapse-output-transforms %d', + default_value=True, + usedefault=True, # This should be true for explicit completeness + desc=('Collapse output transforms. Specifically, enabling this option ' + 'combines all adjacent linear transforms and composes all ' + 'adjacent displacement field transforms before writing the ' + 'results to disk.')) + initialize_transforms_per_stage = traits.Bool( + argstr='--initialize-transforms-per-stage %d', + default_value=False, + usedefault=True, # This should be true for explicit completeness + desc= + ('Initialize linear transforms from the previous stage. By enabling this option, ' + 'the current linear stage transform is directly intialized from the previous ' + 'stages linear transform; this allows multiple linear stages to be run where ' + 'each stage directly updates the estimated linear transform from the previous ' + 'stage. (e.g. Translation -> Rigid -> Affine). 
')) + # NOTE: Even though only 0=False and 1=True are allowed, ants uses integer + # values instead of booleans + float = traits.Bool( + argstr='--float %d', + default_value=False, + desc='Use float instead of double for computations.') + + transforms = traits.List( + traits.Enum('Rigid', 'Affine', 'CompositeAffine', 'Similarity', + 'Translation', 'BSpline', 'GaussianDisplacementField', + 'TimeVaryingVelocityField', + 'TimeVaryingBSplineVelocityField', 'SyN', 'BSplineSyN', + 'Exponential', 'BSplineExponential'), + argstr='%s', + mandatory=True) + # TODO: input checking and allow defaults + # All parameters must be specified for BSplineDisplacementField, TimeVaryingBSplineVelocityField, BSplineSyN, + # Exponential, and BSplineExponential. EVEN DEFAULTS! + transform_parameters = traits.List( + traits.Either( + traits.Tuple(traits.Float()), # Translation, Rigid, Affine, + # CompositeAffine, Similarity + traits.Tuple( + traits.Float(), # GaussianDisplacementField, SyN + traits.Float(), + traits.Float()), + traits.Tuple( + traits.Float(), # BSplineSyn, + traits.Int(), # BSplineDisplacementField, + traits.Int(), # TimeVaryingBSplineVelocityField + traits.Int()), + traits.Tuple( + traits.Float(), # TimeVaryingVelocityField + traits.Int(), + traits.Float(), + traits.Float(), + traits.Float(), + traits.Float()), + traits.Tuple( + traits.Float(), # Exponential + traits.Float(), + traits.Float(), + traits.Int()), + traits.Tuple( + traits.Float(), # BSplineExponential + traits.Int(), + traits.Int(), + traits.Int(), + traits.Int()), + )) + restrict_deformation = traits.List( + traits.List(traits.Enum(0, 1)), + desc=("This option allows the user to restrict the optimization of " + "the displacement field, translation, rigid or affine transform " + "on a per-component basis. For example, if one wants to limit " + "the deformation or rotation of 3-D volume to the first two " + "dimensions, this is possible by specifying a weight vector of " + "'1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid " + "transformation. 
Low-dimensional restriction only works if " + "there are no preceding transformations.")) + # Convergence flags + number_of_iterations = traits.List(traits.List(traits.Int())) + smoothing_sigmas = traits.List(traits.List(traits.Float()), mandatory=True) + sigma_units = traits.List( + traits.Enum('mm', 'vox'), + requires=['smoothing_sigmas'], + desc="units for smoothing sigmas") + shrink_factors = traits.List(traits.List(traits.Int()), mandatory=True) + convergence_threshold = traits.List( + trait=traits.Float(), + value=[1e-6], + minlen=1, + requires=['number_of_iterations'], + usedefault=True) + convergence_window_size = traits.List( + trait=traits.Int(), + value=[10], + minlen=1, + requires=['convergence_threshold'], + usedefault=True) + # Output flags + output_transform_prefix = Str( + "transform", usedefault=True, argstr="%s", desc="") + output_warped_image = traits.Either( + traits.Bool, File(), hash_files=False, desc="") + output_inverse_warped_image = traits.Either( + traits.Bool, + File(), + hash_files=False, + requires=['output_warped_image'], + desc="") + winsorize_upper_quantile = traits.Range( + low=0.0, + high=1.0, + value=1.0, + argstr='%s', + usedefault=True, + desc="The Upper quantile to clip image ranges") + winsorize_lower_quantile = traits.Range( + low=0.0, + high=1.0, + value=0.0, + argstr='%s', + usedefault=True, + desc="The Lower quantile to clip image ranges") + + verbose = traits.Bool(argstr='-v', default_value=False, usedefault=True) + + +class RegistrationOutputSpec(TraitedSpec): + forward_transforms = traits.List( + File(exists=True), + desc='List of output transforms for forward registration') + reverse_transforms = traits.List( + File(exists=True), + desc='List of output transforms for reverse registration') + forward_invert_flags = traits.List( + traits.Bool(), + desc='List of flags corresponding to the forward transforms') + reverse_invert_flags = traits.List( + traits.Bool(), + desc='List of flags corresponding to the reverse transforms') + composite_transform = File(exists=True, desc='Composite transform file') + inverse_composite_transform = File(desc='Inverse composite transform file') + warped_image = File(desc="Outputs warped image") + inverse_warped_image = File(desc="Outputs the inverse of the warped image") + save_state = File(desc="The saved registration state to be restored") + metric_value = traits.Float(desc='the final value of metric') + elapsed_time = traits.Float( + desc='the total elapsed time as reported by ANTs') + + +class Registration(ANTSCommand): + """ANTs Registration command for registration of images + + `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``, + using a predefined (sequence of) cost function(s) and transformation operations. + The cost function is defined using one or more 'metrics', specifically + local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``), + global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``). + + ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``, + or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``, + ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``, + ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple + *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear + (Syn)-transformation. 
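(Editor's note: a compact sketch of such a staged specification; the parameter values are hypothetical and the full shipped examples follow below:)

>>> from nipype.interfaces.ants import Registration  # doctest: +SKIP
>>> reg = Registration()  # doctest: +SKIP
>>> reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']  # one entry per stage
>>> reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)]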
+ + antsRegistration can be initialized using one or more transforms from moving_image + to fixed_image with the ``initial_moving_transform`` input. For example, if you + already have a warpfield that corrects for geometric distortions in an EPI (functional) image + and want to apply it before an Affine registration to a structural image, + you can pass this transform as ``initial_moving_transform``. + + The Registration-interface can output the resulting transform(s) that map moving_image to + fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform`` + is set to ``True``), or a list of transforms as ``forward_transforms``. It can also output + inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using + ``inverse_composite_transform``. Note that ``forward_transforms`` is in 'natural' + order: the first element should be applied first, the last element should be applied last. + + Note, however, that ANTS tools always apply lists of transformations in reverse order (the last + transformation in the list is applied first). Therefore, if the output forward_transforms + is a list, one cannot directly feed it into, for example, ``ants.ApplyTransforms``. To + make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``, + you have to provide the list of transformations in reverse order from ``forward_transforms``. + ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for + this purpose. Note also that, because ``composite_transform`` is always a single file, this + output is preferred for most use-cases. + + More information can be found in the `ANTS + manual `_. + + See below for some useful examples. + + Examples + -------- + + Set up a Registration node with some default settings. This Node registers + 'moving1.nii' to 'fixed1.nii' by first fitting a linear 'Affine' transformation, and + then a non-linear 'SyN' transformation, both using the Mutual Information cost + metric. + + The registration is initialized by first applying the (linear) transform + trans.mat.
+ + >>> import copy, pprint + >>> from nipype.interfaces.ants import Registration + >>> reg = Registration() + >>> reg.inputs.fixed_image = 'fixed1.nii' + >>> reg.inputs.moving_image = 'moving1.nii' + >>> reg.inputs.output_transform_prefix = "output_" + >>> reg.inputs.initial_moving_transform = 'trans.mat' + >>> reg.inputs.transforms = ['Affine', 'SyN'] + >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] + >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] + >>> reg.inputs.dimension = 3 + >>> reg.inputs.write_composite_transform = True + >>> reg.inputs.collapse_output_transforms = False + >>> reg.inputs.initialize_transforms_per_stage = False + >>> reg.inputs.metric = ['Mattes']*2 + >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) + >>> reg.inputs.radius_or_number_of_bins = [32]*2 + >>> reg.inputs.sampling_strategy = ['Random', None] + >>> reg.inputs.sampling_percentage = [0.05, None] + >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] + >>> reg.inputs.convergence_window_size = [20]*2 + >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]] + >>> reg.inputs.sigma_units = ['vox'] * 2 + >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] + >>> reg.inputs.use_estimate_learning_rate_once = [True, True] + >>> reg.inputs.use_histogram_matching = [True, True] # This is the default + >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' + >>> reg.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + >>> reg.run() # doctest: +SKIP + + Same as ``reg``, but first invert the initial transform ('trans.mat') before applying it, + and clip the lower winsorize quantile at 0.025. + + >>> reg.inputs.invert_initial_moving_transform = True + >>> reg1 = copy.deepcopy(reg) + >>> reg1.inputs.winsorize_lower_quantile = 0.025 + >>> reg1.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' + >>> reg1.run() # doctest: +SKIP + + Clip extremely high intensity data points using winsorize_upper_quantile. All data points + higher than the 0.975 quantile are set to the value of the 0.975 quantile.
+ + >>> reg2 = copy.deepcopy(reg) + >>> reg2.inputs.winsorize_upper_quantile = 0.975 + >>> reg2.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' + + Clip extremely low intensity data points using winsorize_lower_quantile. All data points + lower than the 0.025 quantile are set to the value of the 0.025 quantile. + + + >>> reg3 = copy.deepcopy(reg) + >>> reg3.inputs.winsorize_lower_quantile = 0.025 + >>> reg3.inputs.winsorize_upper_quantile = 0.975 + >>> reg3.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' + + Use float instead of double for computations (reduces memory usage). + + >>> reg3a = copy.deepcopy(reg) + >>> reg3a.inputs.float = True + >>> reg3a.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 \ +--initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \ +--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' + + Force the use of double instead of float for computations (more precision, at the cost of memory usage).
+ + >>> reg3b = copy.deepcopy(reg) + >>> reg3b.inputs.float = False + >>> reg3b.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 \ +--initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear \ +--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' + + 'collapse_output_transforms' can be used to put all transformations into a single 'composite_transform' + file. Note that forward_transforms will then be an empty list. + + >>> # Test collapse transforms flag + >>> reg4 = copy.deepcopy(reg) + >>> reg4.inputs.save_state = 'trans.mat' + >>> reg4.inputs.restore_state = 'trans.mat' + >>> reg4.inputs.initialize_transforms_per_stage = True + >>> reg4.inputs.collapse_output_transforms = True + >>> outputs = reg4._list_outputs() + >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, + {'composite_transform': '.../nipype/testing/data/output_Composite.h5', + 'elapsed_time': <undefined>, + 'forward_invert_flags': [], + 'forward_transforms': [], + 'inverse_composite_transform': '.../nipype/testing/data/output_InverseComposite.h5', + 'inverse_warped_image': <undefined>, + 'metric_value': <undefined>, + 'reverse_invert_flags': [], + 'reverse_transforms': [], + 'save_state': '.../nipype/testing/data/trans.mat', + 'warped_image': '.../nipype/testing/data/output_warped_image.nii.gz'} + >>> reg4.cmdline + 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' + + + >>> # Test collapse transforms flag + >>> reg4b = copy.deepcopy(reg4) + >>> reg4b.inputs.write_composite_transform = False + >>> outputs = reg4b._list_outputs() + >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, + {'composite_transform': <undefined>, + 'elapsed_time': <undefined>, + 'forward_invert_flags': [False, False], + 'forward_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', + '.../nipype/testing/data/output_1Warp.nii.gz'], + 'inverse_composite_transform': <undefined>, + 'inverse_warped_image': <undefined>, + 'metric_value': <undefined>, + 'reverse_invert_flags': [True, False], + 'reverse_transforms': ['.../nipype/testing/data/output_0GenericAffine.mat', \ + '.../nipype/testing/data/output_1InverseWarp.nii.gz'], + 'save_state': '.../nipype/testing/data/trans.mat', + 'warped_image': 
'.../nipype/testing/data/output_warped_image.nii.gz'} + >>> reg4b.aggregate_outputs() # doctest: +SKIP + >>> reg4b.cmdline + 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 0' + + One can use multiple similarity metrics in a single registration stage. The Node below first + performs a linear registration using only the Mutual Information ('Mattes') metric. + In a second stage, it performs a non-linear registration ('SyN') using both a + Mutual Information and a local cross-correlation ('CC') metric. Both metrics are weighted + equally ('metric_weight' is .5 for both). The Mutual Information metric uses 32 bins. + The local cross-correlation (correlation between every voxel's neighborhood) is computed + with a radius of 4. + + >>> # Test multiple metrics per stage + >>> reg5 = copy.deepcopy(reg) + >>> reg5.inputs.fixed_image = 'fixed1.nii' + >>> reg5.inputs.moving_image = 'moving1.nii' + >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']] + >>> reg5.inputs.metric_weight = [1, [.5,.5]] + >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] + >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage + >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] + >>> reg5.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \ +--metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed + that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and + moving2.nii. First, a linear registration is performed matching moving1.nii to fixed1.nii, + then a non-linear registration is performed to match moving2.nii to fixed2.nii, starting from + the transformation of the first step. 
+ + >>> # Test multiple inputs + >>> reg6 = copy.deepcopy(reg5) + >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] + >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] + >>> reg6.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] \ +--metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + Different methods can be used for the interpolation when applying transformations. + + >>> # Test Interpolation Parameters (BSpline) + >>> reg7a = copy.deepcopy(reg) + >>> reg7a.inputs.interpolation = 'BSpline' + >>> reg7a.inputs.interpolation_parameters = (3,) + >>> reg7a.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + >>> # Test Interpolation Parameters (MultiLabel/Gaussian) + >>> reg7b = copy.deepcopy(reg) + >>> reg7b.inputs.interpolation = 'Gaussian' + >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) + >>> reg7b.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] \ +--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' + + BSplineSyN non-linear registration with custom parameters. 
+ + >>> # Test Extended Transform Parameters + >>> reg8 = copy.deepcopy(reg) + >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] + >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] + >>> reg8.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] \ +--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 \ +--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + Mask the fixed image in the second stage of the registration (but not the first). + + >>> # Test masking + >>> reg9 = copy.deepcopy(reg) + >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] + >>> reg9.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] \ +--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] \ +--transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] \ +--convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] \ +--winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' + + Here we use both a warpfield and a linear transformation, before registration commences. Note that + the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of + 'initial_moving_transform'. 
+ + >>> # Test initialization with multiple transform matrices (e.g., unwarp and affine transform) + >>> reg10 = copy.deepcopy(reg) + >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] + >>> reg10.inputs.invert_initial_moving_transform = [False, False] + >>> reg10.cmdline + 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform \ +[ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear \ +--output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] \ +--metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] \ +--smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 \ +--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] \ +--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 \ +--use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] \ +--write-composite-transform 1' + """ + DEF_SAMPLING_STRATEGY = 'None' + """The default sampling strategy argument.""" + + _cmd = 'antsRegistration' + input_spec = RegistrationInputSpec + output_spec = RegistrationOutputSpec + _quantilesDone = False + _linear_transform_names = [ + 'Rigid', 'Affine', 'Translation', 'CompositeAffine', 'Similarity' + ] + + def __init__(self, **inputs): + super(Registration, self).__init__(**inputs) + self._elapsed_time = None + self._metric_value = None + + def _run_interface(self, runtime, correct_return_codes=(0, )): + runtime = super(Registration, self)._run_interface(runtime) + + # Parse some profiling info + output = runtime.stdout or runtime.merged + if output: + lines = output.split('\n') + for l in lines[::-1]: + # This should be the last line + if l.strip().startswith('Total elapsed time:'): + self._elapsed_time = float(l.strip().replace( + 'Total elapsed time: ', '')) + elif 'DIAGNOSTIC' in l: + self._metric_value = float(l.split(',')[2]) + break + + return runtime + + def _format_metric(self, index): + """ + Format the antsRegistration -m metric argument(s). + + Parameters + ---------- + index: the stage index + """ + # The metric name input for the current stage. + name_input = self.inputs.metric[index] + # The stage-specific input dictionary. + stage_inputs = dict( + fixed_image=self.inputs.fixed_image[0], + moving_image=self.inputs.moving_image[0], + metric=name_input, + weight=self.inputs.metric_weight[index], + radius_or_bins=self.inputs.radius_or_number_of_bins[index], + optional=self.inputs.radius_or_number_of_bins[index]) + # The optional sampling strategy and percentage. + if isdefined(self.inputs.sampling_strategy + ) and self.inputs.sampling_strategy: + sampling_strategy = self.inputs.sampling_strategy[index] + if sampling_strategy: + stage_inputs['sampling_strategy'] = sampling_strategy + if isdefined(self.inputs.sampling_percentage + ) and self.inputs.sampling_percentage: + sampling_percentage = self.inputs.sampling_percentage[index] + if sampling_percentage: + stage_inputs['sampling_percentage'] = sampling_percentage + + # Make a list of metric specifications, one per -m command line + # argument for the current stage. + # If there are multiple inputs for this stage, then convert the + # dictionary of list inputs into a list of metric specifications. 
+ # Otherwise, make a singleton list of the metric specification + # from the non-list inputs. + if isinstance(name_input, list): + items = list(stage_inputs.items()) + indexes = list(range(0, len(name_input))) + specs = list() + for i in indexes: + temp = dict([(k, v[i]) for k, v in items]) + if len(self.inputs.fixed_image) == 1: + temp["fixed_image"] = self.inputs.fixed_image[0] + else: + temp["fixed_image"] = self.inputs.fixed_image[i] + + if len(self.inputs.moving_image) == 1: + temp["moving_image"] = self.inputs.moving_image[0] + else: + temp["moving_image"] = self.inputs.moving_image[i] + + specs.append(temp) + else: + specs = [stage_inputs] + + # Format the --metric command line metric arguments, one per + # specification. + return [self._format_metric_argument(**spec) for spec in specs] + + @staticmethod + def _format_metric_argument(**kwargs): + retval = '%s[ %s, %s, %g, %d' % (kwargs['metric'], + kwargs['fixed_image'], + kwargs['moving_image'], + kwargs['weight'], + kwargs['radius_or_bins']) + + # The optional sampling strategy. + if 'sampling_strategy' in kwargs: + sampling_strategy = kwargs['sampling_strategy'] + elif 'sampling_percentage' in kwargs: + # The sampling percentage is specified but not the + # sampling strategy. Use the default strategy. + sampling_strategy = Registration.DEF_SAMPLING_STRATEGY + else: + sampling_strategy = None + # Format the optional sampling arguments. + if sampling_strategy: + retval += ', %s' % sampling_strategy + if 'sampling_percentage' in kwargs: + retval += ', %g' % kwargs['sampling_percentage'] + + retval += ' ]' + + return retval + + def _format_transform(self, index): + retval = [] + retval.append('%s[ ' % self.inputs.transforms[index]) + parameters = ', '.join([ + str(element) for element in self.inputs.transform_parameters[index] + ]) + retval.append('%s' % parameters) + retval.append(' ]') + return "".join(retval) + + def _format_registration(self): + retval = [] + for ii in range(len(self.inputs.transforms)): + retval.append('--transform %s' % (self._format_transform(ii))) + for metric in self._format_metric(ii): + retval.append('--metric %s' % metric) + retval.append('--convergence %s' % self._format_convergence(ii)) + if isdefined(self.inputs.sigma_units): + retval.append( + '--smoothing-sigmas %s%s' % + (self._format_xarray(self.inputs.smoothing_sigmas[ii]), + self.inputs.sigma_units[ii])) + else: + retval.append('--smoothing-sigmas %s' % self._format_xarray( + self.inputs.smoothing_sigmas[ii])) + retval.append('--shrink-factors %s' % self._format_xarray( + self.inputs.shrink_factors[ii])) + if isdefined(self.inputs.use_estimate_learning_rate_once): + retval.append('--use-estimate-learning-rate-once %d' % + self.inputs.use_estimate_learning_rate_once[ii]) + if isdefined(self.inputs.use_histogram_matching): + # use_histogram_matching is either a common flag for all transforms + # or a list of transform-specific flags + if isinstance(self.inputs.use_histogram_matching, bool): + histval = self.inputs.use_histogram_matching + else: + histval = self.inputs.use_histogram_matching[ii] + retval.append('--use-histogram-matching %d' % histval) + if isdefined(self.inputs.restrict_deformation): + retval.append( + '--restrict-deformation %s' % self._format_xarray( + self.inputs.restrict_deformation[ii])) + if any((isdefined(self.inputs.fixed_image_masks), + isdefined(self.inputs.moving_image_masks))): + if isdefined(self.inputs.fixed_image_masks): + fixed_masks = ensure_list( + self.inputs.fixed_image_masks) + fixed_mask = fixed_masks[ii if 
len(fixed_masks) > 1 else 0] + else: + fixed_mask = 'NULL' + + if isdefined(self.inputs.moving_image_masks): + moving_masks = ensure_list( + self.inputs.moving_image_masks) + moving_mask = moving_masks[ii + if len(moving_masks) > 1 else 0] + else: + moving_mask = 'NULL' + retval.append('--masks [ %s, %s ]' % (fixed_mask, moving_mask)) + return " ".join(retval) + + def _get_outputfilenames(self, inverse=False): + output_filename = None + if not inverse: + if isdefined(self.inputs.output_warped_image) and \ + self.inputs.output_warped_image: + output_filename = self.inputs.output_warped_image + if isinstance(output_filename, bool): + output_filename = '%s_Warped.nii.gz' % self.inputs.output_transform_prefix + return output_filename + inv_output_filename = None + if isdefined(self.inputs.output_inverse_warped_image) and \ + self.inputs.output_inverse_warped_image: + inv_output_filename = self.inputs.output_inverse_warped_image + if isinstance(inv_output_filename, bool): + inv_output_filename = '%s_InverseWarped.nii.gz' % self.inputs.output_transform_prefix + return inv_output_filename + + def _format_convergence(self, ii): + convergence_iter = self._format_xarray( + self.inputs.number_of_iterations[ii]) + if len(self.inputs.convergence_threshold) > ii: + convergence_value = self.inputs.convergence_threshold[ii] + else: + convergence_value = self.inputs.convergence_threshold[0] + if len(self.inputs.convergence_window_size) > ii: + convergence_ws = self.inputs.convergence_window_size[ii] + else: + convergence_ws = self.inputs.convergence_window_size[0] + return '[ %s, %g, %d ]' % (convergence_iter, convergence_value, + convergence_ws) + + def _format_winsorize_image_intensities(self): + if not self.inputs.winsorize_upper_quantile > self.inputs.winsorize_lower_quantile: + raise RuntimeError( + "Upper bound MUST be more than lower bound: %g > %g" % + (self.inputs.winsorize_upper_quantile, + self.inputs.winsorize_lower_quantile)) + self._quantilesDone = True + return '--winsorize-image-intensities [ %s, %s ]' % ( + self.inputs.winsorize_lower_quantile, + self.inputs.winsorize_upper_quantile) + + def _get_initial_transform_filenames(self): + n_transforms = len(self.inputs.initial_moving_transform) + + # Assume transforms should not be inverted by default + invert_flags = [0] * n_transforms + if isdefined(self.inputs.invert_initial_moving_transform): + if len(self.inputs.invert_initial_moving_transform) != n_transforms: + raise Exception( + 'Inputs "initial_moving_transform" and "invert_initial_moving_transform" ' + 'should have the same length.') + invert_flags = self.inputs.invert_initial_moving_transform + + retval = [ + "[ %s, %d ]" % (xfm, int(flag)) for xfm, flag in zip( + self.inputs.initial_moving_transform, invert_flags) + ] + return " ".join(['--initial-moving-transform'] + retval) + + def _format_arg(self, opt, spec, val): + if opt == 'fixed_image_mask': + if isdefined(self.inputs.moving_image_mask): + return '--masks [ %s, %s ]' % (self.inputs.fixed_image_mask, + self.inputs.moving_image_mask) + else: + return '--masks %s' % self.inputs.fixed_image_mask + elif opt == 'transforms': + return self._format_registration() + elif opt == 'initial_moving_transform': + return self._get_initial_transform_filenames() + elif opt == 'initial_moving_transform_com': + do_center_of_mass_init = self.inputs.initial_moving_transform_com \ + if isdefined(self.inputs.initial_moving_transform_com) else 0 # Just do the default behavior + return '--initial-moving-transform [ %s, %s, %d ]' % ( + 
self.inputs.fixed_image[0], self.inputs.moving_image[0], + do_center_of_mass_init) + elif opt == 'interpolation': + if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \ + isdefined(self.inputs.interpolation_parameters): + return '--interpolation %s[ %s ]' % ( + self.inputs.interpolation, ', '.join([ + str(param) + for param in self.inputs.interpolation_parameters + ])) + else: + return '--interpolation %s' % self.inputs.interpolation + elif opt == 'output_transform_prefix': + out_filename = self._get_outputfilenames(inverse=False) + inv_out_filename = self._get_outputfilenames(inverse=True) + if out_filename and inv_out_filename: + return '--output [ %s, %s, %s ]' % ( + self.inputs.output_transform_prefix, out_filename, + inv_out_filename) + elif out_filename: + return '--output [ %s, %s ]' % ( + self.inputs.output_transform_prefix, out_filename) + else: + return '--output %s' % self.inputs.output_transform_prefix + elif opt == 'winsorize_upper_quantile' or opt == 'winsorize_lower_quantile': + if not self._quantilesDone: + return self._format_winsorize_image_intensities() + else: + self._quantilesDone = False + return '' # Must return something for argstr! + # This feature was removed from recent versions of antsRegistration due to corrupt outputs. + # elif opt == 'collapse_linear_transforms_to_fixed_image_header': + # return self._formatCollapseLinearTransformsToFixedImageHeader() + return super(Registration, self)._format_arg(opt, spec, val) + + def _output_filenames(self, prefix, count, transform, inverse=False): + self.low_dimensional_transform_map = { + 'Rigid': 'Rigid.mat', + 'Affine': 'Affine.mat', + 'GenericAffine': 'GenericAffine.mat', + 'CompositeAffine': 'Affine.mat', + 'Similarity': 'Similarity.mat', + 'Translation': 'Translation.mat', + 'BSpline': 'BSpline.txt', + 'Initial': 'DerivedInitialMovingTranslation.mat' + } + if transform in list(self.low_dimensional_transform_map.keys()): + suffix = self.low_dimensional_transform_map[transform] + inverse_mode = inverse + else: + inverse_mode = False # These are not analytically invertible + if inverse: + suffix = 'InverseWarp.nii.gz' + else: + suffix = 'Warp.nii.gz' + return '%s%d%s' % (prefix, count, suffix), inverse_mode + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['forward_transforms'] = [] + outputs['forward_invert_flags'] = [] + outputs['reverse_transforms'] = [] + outputs['reverse_invert_flags'] = [] + + # invert_initial_moving_transform should always be defined, even if + # there's no initial transform + invert_initial_moving_transform = [False] * len( + self.inputs.initial_moving_transform) + if isdefined(self.inputs.invert_initial_moving_transform): + invert_initial_moving_transform = self.inputs.invert_initial_moving_transform + + if self.inputs.write_composite_transform: + filename = self.inputs.output_transform_prefix + 'Composite.h5' + outputs['composite_transform'] = os.path.abspath(filename) + filename = self.inputs.output_transform_prefix + \ + 'InverseComposite.h5' + outputs['inverse_composite_transform'] = os.path.abspath(filename) + # If composite transforms are written, then individuals are not written (as of 2014-10-26) + else: + if not self.inputs.collapse_output_transforms: + transform_count = 0 + if isdefined(self.inputs.initial_moving_transform): + outputs[ + 'forward_transforms'] += self.inputs.initial_moving_transform + outputs[ + 'forward_invert_flags'] += invert_initial_moving_transform + outputs['reverse_transforms'] = self.inputs.initial_moving_transform 
+ \ + outputs['reverse_transforms'] + outputs['reverse_invert_flags'] = [ + not e for e in invert_initial_moving_transform + ] + outputs['reverse_invert_flags'] # Prepend + transform_count += len( + self.inputs.initial_moving_transform) + elif isdefined(self.inputs.initial_moving_transform_com): + forward_filename, forward_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, transform_count, + 'Initial') + reverse_filename, reverse_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, transform_count, + 'Initial', True) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) + outputs['forward_invert_flags'].append(False) + outputs['reverse_transforms'].insert( + 0, os.path.abspath(reverse_filename)) + outputs['reverse_invert_flags'].insert(0, True) + transform_count += 1 + + for count in range(len(self.inputs.transforms)): + forward_filename, forward_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, transform_count, + self.inputs.transforms[count]) + reverse_filename, reverse_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, transform_count, + self.inputs.transforms[count], True) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) + outputs['forward_invert_flags'].append(forward_inversemode) + outputs['reverse_transforms'].insert( + 0, os.path.abspath(reverse_filename)) + outputs['reverse_invert_flags'].insert( + 0, reverse_inversemode) + transform_count += 1 + else: + transform_count = 0 + is_linear = [ + t in self._linear_transform_names + for t in self.inputs.transforms + ] + collapse_list = [] + + if isdefined(self.inputs.initial_moving_transform) or \ + isdefined(self.inputs.initial_moving_transform_com): + is_linear.insert(0, True) + + # Only files returned by collapse_output_transforms + if any(is_linear): + collapse_list.append('GenericAffine') + if not all(is_linear): + collapse_list.append('SyN') + + for transform in collapse_list: + forward_filename, forward_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, + transform_count, + transform, + inverse=False) + reverse_filename, reverse_inversemode = self._output_filenames( + self.inputs.output_transform_prefix, + transform_count, + transform, + inverse=True) + outputs['forward_transforms'].append( + os.path.abspath(forward_filename)) + outputs['forward_invert_flags'].append(forward_inversemode) + outputs['reverse_transforms'].append( + os.path.abspath(reverse_filename)) + outputs['reverse_invert_flags'].append(reverse_inversemode) + transform_count += 1 + + out_filename = self._get_outputfilenames(inverse=False) + inv_out_filename = self._get_outputfilenames(inverse=True) + if out_filename: + outputs['warped_image'] = os.path.abspath(out_filename) + if inv_out_filename: + outputs['inverse_warped_image'] = os.path.abspath(inv_out_filename) + if len(self.inputs.save_state): + outputs['save_state'] = os.path.abspath(self.inputs.save_state) + if self._metric_value: + outputs['metric_value'] = self._metric_value + if self._elapsed_time: + outputs['elapsed_time'] = self._elapsed_time + return outputs + + +class MeasureImageSimilarityInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 2, + 3, + 4, + argstr='--dimensionality %d', + position=1, + desc='Dimensionality of the fixed/moving image pair', + ) + fixed_image = File( + exists=True, + mandatory=True, + desc='Image to which the moving image is warped', + ) + moving_image = File( + exists=True, + 
mandatory=True, + desc= + 'Image to apply transformation to (generally a coregistered functional)', + ) + metric = traits.Enum( + "CC", + "MI", + "Mattes", + "MeanSquares", + "Demons", + "GC", + argstr="%s", + mandatory=True, + ) + metric_weight = traits.Float( + requires=['metric'], + default_value=1.0, + usedefault=True, + desc='The "metricWeight" variable is not used.', + ) + radius_or_number_of_bins = traits.Int( + requires=['metric'], + mandatory=True, + desc='The number of bins in each stage for the MI and Mattes metric, ' + 'or the radius for other metrics', + ) + sampling_strategy = traits.Enum( + "None", + "Regular", + "Random", + requires=['metric'], + usedefault=True, + desc='Manner of choosing point set over which to optimize the metric. ' + 'Defaults to "None" (i.e. a dense sampling of one sample per voxel).') + sampling_percentage = traits.Either( + traits.Range(low=0.0, high=1.0), + requires=['metric'], + mandatory=True, + desc= + 'Percentage of points accessible to the sampling strategy over which ' + 'to optimize the metric.') + fixed_image_mask = File( + exists=True, + argstr='%s', + desc='mask used to limit metric sampling region of the fixed image', + ) + moving_image_mask = File( + exists=True, + requires=['fixed_image_mask'], + desc='mask used to limit metric sampling region of the moving image', + ) + + +class MeasureImageSimilarityOutputSpec(TraitedSpec): + similarity = traits.Float() + + +class MeasureImageSimilarity(ANTSCommand): + """Compute the similarity between two images, using one of the ANTs metrics. + + Examples + -------- + + >>> from nipype.interfaces.ants import MeasureImageSimilarity + >>> sim = MeasureImageSimilarity() + >>> sim.inputs.dimension = 3 + >>> sim.inputs.metric = 'MI' + >>> sim.inputs.fixed_image = 'T1.nii' + >>> sim.inputs.moving_image = 'resting.nii' + >>> sim.inputs.metric_weight = 1.0 + >>> sim.inputs.radius_or_number_of_bins = 5 + >>> sim.inputs.sampling_strategy = 'Regular' + >>> sim.inputs.sampling_percentage = 1.0 + >>> sim.inputs.fixed_image_mask = 'mask.nii' + >>> sim.inputs.moving_image_mask = 'mask.nii.gz' + >>> sim.cmdline + 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] \ +--metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' + """ + _cmd = 'MeasureImageSimilarity' + input_spec = MeasureImageSimilarityInputSpec + output_spec = MeasureImageSimilarityOutputSpec + + def _metric_constructor(self): + retval = '--metric {metric}["{fixed_image}","{moving_image}",{metric_weight},'\ + '{radius_or_number_of_bins},{sampling_strategy},{sampling_percentage}]'\ + .format( + metric=self.inputs.metric, + fixed_image=self.inputs.fixed_image, + moving_image=self.inputs.moving_image, + metric_weight=self.inputs.metric_weight, + radius_or_number_of_bins=self.inputs.radius_or_number_of_bins, + sampling_strategy=self.inputs.sampling_strategy, + sampling_percentage=self.inputs.sampling_percentage, + ) + return retval + + def _mask_constructor(self): + if self.inputs.moving_image_mask: + retval = '--masks ["{fixed_image_mask}","{moving_image_mask}"]'\ + .format( + fixed_image_mask=self.inputs.fixed_image_mask, + moving_image_mask=self.inputs.moving_image_mask, + ) + else: + retval = '--masks "{fixed_image_mask}"'\ + .format( + fixed_image_mask=self.inputs.fixed_image_mask, + ) + return retval + + def _format_arg(self, opt, spec, val): + if opt == 'metric': + return self._metric_constructor() + elif opt == 'fixed_image_mask': + return self._mask_constructor() + return super(MeasureImageSimilarity, self)._format_arg(opt, spec, val) + + def aggregate_outputs(self, runtime=None, 
needed_outputs=None): + outputs = self._outputs() + stdout = runtime.stdout.split('\n') + outputs.similarity = float(stdout[0]) + return outputs + + +class RegistrationSynQuickInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum(3, 2, argstr='-d %d', + usedefault=True, desc='image dimension (2 or 3)') + fixed_image = InputMultiPath(File(exists=True), mandatory=True, argstr='-f %s...', + desc='Fixed image or source image or reference image') + moving_image = InputMultiPath(File(exists=True), mandatory=True, argstr='-m %s...', + desc='Moving image or target image') + output_prefix = Str("transform", usedefault=True, argstr='-o %s', + desc="A prefix that is prepended to all output files") + num_threads = traits.Int(default_value=LOCAL_DEFAULT_NUMBER_OF_THREADS, usedefault=True, + desc='Number of threads (default = 1)', argstr='-n %d') + + transform_type = traits.Enum('s', 't', 'r', 'a', 'sr', 'b', 'br', argstr='-t %s', + desc=""" + transform type + t: translation + r: rigid + a: rigid + affine + s: rigid + affine + deformable syn (default) + sr: rigid + deformable syn + b: rigid + affine + deformable b-spline syn + br: rigid + deformable b-spline syn""", + usedefault=True) + + use_histogram_matching = traits.Bool(False, argstr='-j %d', + desc='use histogram matching') + histogram_bins = traits.Int(default_value=32, usedefault=True, argstr='-r %d', + desc='histogram bins for mutual information in SyN stage \ + (default = 32)') + spline_distance = traits.Int(default_value=26, usedefault=True, argstr='-s %d', + desc='spline distance for deformable B-spline SyN transform \ + (default = 26)') + precision_type = traits.Enum('double', 'float', argstr='-p %s', + desc='precision type (default = double)', usedefault=True) + + +class RegistrationSynQuickOutputSpec(TraitedSpec): + warped_image = File(exists=True, desc="Warped image") + inverse_warped_image = File(exists=True, desc="Inverse warped image") + out_matrix = File(exists=True, desc='Affine matrix') + forward_warp_field = File(exists=True, desc='Forward warp field') + inverse_warp_field = File(exists=True, desc='Inverse warp field') + + +class RegistrationSynQuick(ANTSCommand): + """ + Registration using a symmetric image normalization method (SyN). + You can read more in Avants et al.; Med Image Anal., 2008 + (https://www.ncbi.nlm.nih.gov/pubmed/17659998). 
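+ + With a purely linear ``transform_type`` ('t', 'r', or 'a') no warp fields are produced; + only ``out_matrix`` and the warped images are listed as outputs. A minimal sketch + (not executed here):: + + reg = RegistrationSynQuick(transform_type='r')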
+ + Examples + -------- + + >>> from nipype.interfaces.ants import RegistrationSynQuick + >>> reg = RegistrationSynQuick() + >>> reg.inputs.fixed_image = 'fixed1.nii' + >>> reg.inputs.moving_image = 'moving1.nii' + >>> reg.inputs.num_threads = 2 + >>> reg.cmdline + 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -r 32 -m moving1.nii -n 2 -o transform -p d -s 26 -t s' + >>> reg.run() # doctest: +SKIP + + Example with multiple images: + + >>> from nipype.interfaces.ants import RegistrationSynQuick + >>> reg = RegistrationSynQuick() + >>> reg.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] + >>> reg.inputs.moving_image = ['moving1.nii', 'moving2.nii'] + >>> reg.inputs.num_threads = 2 + >>> reg.cmdline + 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -f fixed2.nii -r 32 -m moving1.nii -m moving2.nii \ +-n 2 -o transform -p d -s 26 -t s' + >>> reg.run() # doctest: +SKIP + """ + + _cmd = 'antsRegistrationSyNQuick.sh' + input_spec = RegistrationSynQuickInputSpec + output_spec = RegistrationSynQuickOutputSpec + + def _num_threads_update(self): + """ + antsRegistrationSyNQuick.sh ignores environment variables, + so override environment update from ANTSCommand class + """ + pass + + def _format_arg(self, name, spec, value): + if name == 'precision_type': + return spec.argstr % value[0] + return super(RegistrationSynQuick, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + out_base = os.path.abspath(self.inputs.output_prefix) + outputs['warped_image'] = out_base + 'Warped.nii.gz' + outputs['inverse_warped_image'] = out_base + 'InverseWarped.nii.gz' + outputs['out_matrix'] = out_base + '0GenericAffine.mat' + + if self.inputs.transform_type not in ('t', 'r', 'a'): + outputs['forward_warp_field'] = out_base + '1Warp.nii.gz' + outputs['inverse_warp_field'] = out_base + '1InverseWarp.nii.gz' + return outputs diff --git a/nipype/interfaces/ants/resampling.py b/nipype/interfaces/ants/resampling.py new file mode 100644 index 0000000000..e26a48ed6a --- /dev/null +++ b/nipype/interfaces/ants/resampling.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +"""ANTS Apply Transforms interface +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, str +import os + +from .base import ANTSCommand, ANTSCommandInputSpec +from ..base import TraitedSpec, File, traits, isdefined, InputMultiPath +from ...utils.filemanip import split_filename + + +class WarpTimeSeriesImageMultiTransformInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 4, + 3, + argstr='%d', + usedefault=True, + desc='image dimension (3 or 4)', + position=1) + input_image = File( + argstr='%s', + mandatory=True, + copyfile=True, + desc=('image to apply transformation to (generally a ' + 'coregistered functional)')) + out_postfix = traits.Str( + '_wtsimt', + argstr='%s', + usedefault=True, + desc=('Postfix that is appended to all output ' + 'files (default = _wtsimt)')) + reference_image = File( + argstr='-R %s', + xor=['tightest_box'], + desc='reference image space that you wish to warp INTO') + tightest_box = traits.Bool( + argstr='--tightest-bounding-box', + desc=('computes tightest bounding box (overridden by ' + 'reference_image if given)'), + xor=['reference_image']) + reslice_by_header = traits.Bool( + argstr='--reslice-by-header', + desc=('Uses orientation matrix and origin encoded in ' + 'reference image file header. 
Not typically used ' + 'with additional transforms')) + use_nearest = traits.Bool( + argstr='--use-NN', desc='Use nearest neighbor interpolation') + use_bspline = traits.Bool( + argstr='--use-Bspline', desc='Use 3rd order B-Spline interpolation') + transformation_series = InputMultiPath( + File(exists=True), + argstr='%s', + desc='transformation file(s) to be applied', + mandatory=True, + copyfile=False) + invert_affine = traits.List( + traits.Int, + desc=( + 'List of Affine transformations to invert.' + 'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' + 'found in transformation_series. Note that indexing ' + 'starts with 1 and does not include warp fields. Affine ' + 'transformations are distinguished ' + 'from warp fields by the word "affine" included in their filenames.' + )) + + +class WarpTimeSeriesImageMultiTransformOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='Warped image') + + +class WarpTimeSeriesImageMultiTransform(ANTSCommand): + """Warps a time-series from one space to another + + Examples + -------- + + >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform + >>> wtsimt = WarpTimeSeriesImageMultiTransform() + >>> wtsimt.inputs.input_image = 'resting.nii' + >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' + >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] + >>> wtsimt.cmdline + 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ +ants_Affine.txt' + + >>> wtsimt = WarpTimeSeriesImageMultiTransform() + >>> wtsimt.inputs.input_image = 'resting.nii' + >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' + >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] + >>> wtsimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: ants_Affine.txt + >>> wtsimt.cmdline + 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ +-i ants_Affine.txt' + """ + + _cmd = 'WarpTimeSeriesImageMultiTransform' + input_spec = WarpTimeSeriesImageMultiTransformInputSpec + output_spec = WarpTimeSeriesImageMultiTransformOutputSpec + + def _format_arg(self, opt, spec, val): + if opt == 'out_postfix': + _, name, ext = split_filename( + os.path.abspath(self.inputs.input_image)) + return name + val + ext + if opt == 'transformation_series': + series = [] + affine_counter = 0 + affine_invert = [] + for transformation in val: + if 'Affine' in transformation and \ + isdefined(self.inputs.invert_affine): + affine_counter += 1 + if affine_counter in self.inputs.invert_affine: + series += ['-i'] + affine_invert.append(affine_counter) + series += [transformation] + + if isdefined(self.inputs.invert_affine): + diff_inv = set(self.inputs.invert_affine) - set(affine_invert) + if diff_inv: + raise Exception( + "Review invert_affine, not all indexes from invert_affine were used, " + "check the description for the full definition") + + return ' '.join(series) + return super(WarpTimeSeriesImageMultiTransform, self)._format_arg( + opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + _, name, ext = split_filename(os.path.abspath(self.inputs.input_image)) + outputs['output_image'] = os.path.join(os.getcwd(), ''.join( + (name, self.inputs.out_postfix, ext))) + return outputs + + def _run_interface(self, runtime, correct_return_codes=[0]): + runtime = super(WarpTimeSeriesImageMultiTransform, + self)._run_interface( + runtime, correct_return_codes=[0, 1]) + 
if "100 % complete" not in runtime.stdout: + self.raise_exception(runtime) + return runtime + + +class WarpImageMultiTransformInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + usedefault=True, + desc='image dimension (2 or 3)', + position=1) + input_image = File( + argstr='%s', + mandatory=True, + desc=('image to apply transformation to (generally a ' + 'coregistered functional)'), + position=2) + output_image = File( + genfile=True, + hash_files=False, + argstr='%s', + desc='name of the output warped image', + position=3, + xor=['out_postfix']) + out_postfix = File( + "_wimt", + usedefault=True, + hash_files=False, + desc=('Postfix that is prepended to all output ' + 'files (default = _wimt)'), + xor=['output_image']) + reference_image = File( + argstr='-R %s', + xor=['tightest_box'], + desc='reference image space that you wish to warp INTO') + tightest_box = traits.Bool( + argstr='--tightest-bounding-box', + desc=('computes tightest bounding box (overrided by ' + 'reference_image if given)'), + xor=['reference_image']) + reslice_by_header = traits.Bool( + argstr='--reslice-by-header', + desc=('Uses orientation matrix and origin encoded in ' + 'reference image file header. Not typically used ' + 'with additional transforms')) + use_nearest = traits.Bool( + argstr='--use-NN', desc='Use nearest neighbor interpolation') + use_bspline = traits.Bool( + argstr='--use-BSpline', desc='Use 3rd order B-Spline interpolation') + transformation_series = InputMultiPath( + File(exists=True), + argstr='%s', + desc='transformation file(s) to be applied', + mandatory=True, + position=-1) + invert_affine = traits.List( + traits.Int, + desc=( + 'List of Affine transformations to invert.' + 'E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines ' + 'found in transformation_series. Note that indexing ' + 'starts with 1 and does not include warp fields. Affine ' + 'transformations are distinguished ' + 'from warp fields by the word "affine" included in their filenames.' 
+ )) + + +class WarpImageMultiTransformOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='Warped image') + + +class WarpImageMultiTransform(ANTSCommand): + """Warps an image from one space to another + + Examples + -------- + + >>> from nipype.interfaces.ants import WarpImageMultiTransform + >>> wimt = WarpImageMultiTransform() + >>> wimt.inputs.input_image = 'structural.nii' + >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz' + >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] + >>> wimt.cmdline + 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz \ +ants_Affine.txt' + + >>> wimt = WarpImageMultiTransform() + >>> wimt.inputs.input_image = 'diffusion_weighted.nii' + >>> wimt.inputs.reference_image = 'functional.nii' + >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', \ + 'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] + >>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt' + >>> wimt.cmdline + 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + + """ + + _cmd = 'WarpImageMultiTransform' + input_spec = WarpImageMultiTransformInputSpec + output_spec = WarpImageMultiTransformOutputSpec + + def _gen_filename(self, name): + if name == 'output_image': + _, name, ext = split_filename( + os.path.abspath(self.inputs.input_image)) + return ''.join((name, self.inputs.out_postfix, ext)) + return None + + def _format_arg(self, opt, spec, val): + if opt == 'transformation_series': + series = [] + affine_counter = 0 + affine_invert = [] + for transformation in val: + if "affine" in transformation.lower() and \ + isdefined(self.inputs.invert_affine): + affine_counter += 1 + if affine_counter in self.inputs.invert_affine: + series += ['-i'] + affine_invert.append(affine_counter) + series += [transformation] + + if isdefined(self.inputs.invert_affine): + diff_inv = set(self.inputs.invert_affine) - set(affine_invert) + if diff_inv: + raise Exception( + "Review invert_affine, not all indexes from invert_affine were used, " + "check the description for the full definition") + + return ' '.join(series) + + return super(WarpImageMultiTransform, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.output_image): + outputs['output_image'] = os.path.abspath(self.inputs.output_image) + else: + outputs['output_image'] = os.path.abspath( + self._gen_filename('output_image')) + return outputs + + +class ApplyTransformsInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 2, + 3, + 4, + argstr='--dimensionality %d', + desc=('This option forces the image to be treated ' + 'as a specified-dimensional image. 
If not ' + 'specified, antsWarp tries to infer the ' + 'dimensionality from the input image.')) + input_image_type = traits.Enum( + 0, + 1, + 2, + 3, + argstr='--input-image-type %d', + desc=('Option specifying the input image ' + 'type of scalar (default), vector, ' + 'tensor, or time series.')) + input_image = File( + argstr='--input %s', + mandatory=True, + desc=('image to apply transformation to (generally a ' + 'coregistered functional)'), + exists=True) + output_image = traits.Str( + argstr='--output %s', + desc='output file name', + genfile=True, + hash_files=False) + out_postfix = traits.Str( + "_trans", + usedefault=True, + desc=('Postfix that is appended to all output ' + 'files (default = _trans)')) + reference_image = File( + argstr='--reference-image %s', + mandatory=True, + desc='reference image space that you wish to warp INTO', + exists=True) + interpolation = traits.Enum( + 'Linear', + 'NearestNeighbor', + 'CosineWindowedSinc', + 'WelchWindowedSinc', + 'HammingWindowedSinc', + 'LanczosWindowedSinc', + 'MultiLabel', + 'Gaussian', + 'BSpline', + argstr='%s', + usedefault=True) + interpolation_parameters = traits.Either( + traits.Tuple(traits.Int()), # BSpline (order) + traits.Tuple( + traits.Float(), # Gaussian/MultiLabel (sigma, alpha) + traits.Float())) + transforms = traits.Either( + InputMultiPath(File(exists=True)), + 'identity', + argstr='%s', + mandatory=True, + desc='transform files: will be applied in reverse order. For ' + 'example, the last specified transform will be applied first.') + invert_transform_flags = InputMultiPath(traits.Bool()) + default_value = traits.Float( + 0.0, argstr='--default-value %g', usedefault=True) + print_out_composite_warp_file = traits.Bool( + False, + requires=["output_image"], + desc='output a composite warp file instead of a transformed image') + float = traits.Bool( + argstr='--float %d', + default_value=False, + usedefault=True, + desc='Use float instead of double for computations.') + + +class ApplyTransformsOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='Warped image') + + +class ApplyTransforms(ANTSCommand): + """ApplyTransforms, applied to an input image, transforms it according to a + reference image and a transform (or a set of transforms). 
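+ + Because the transforms are applied in reverse order, the ``forward_transforms`` output + of a preceding ``Registration`` run must be reversed before it is passed in here. A + sketch, assuming ``result`` holds the outputs of such a run (hypothetical variable, not + executed here):: + + at = ApplyTransforms() + at.inputs.transforms = list(reversed(result.outputs.forward_transforms)) + at.inputs.invert_transform_flags = list( + reversed(result.outputs.forward_invert_flags))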
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.ants import ApplyTransforms
+    >>> at = ApplyTransforms()
+    >>> at.inputs.input_image = 'moving1.nii'
+    >>> at.inputs.reference_image = 'fixed1.nii'
+    >>> at.inputs.transforms = 'identity'
+    >>> at.cmdline
+    'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii \
+--interpolation Linear --output moving1_trans.nii \
+--reference-image fixed1.nii -t identity'
+
+    >>> at = ApplyTransforms()
+    >>> at.inputs.dimension = 3
+    >>> at.inputs.input_image = 'moving1.nii'
+    >>> at.inputs.reference_image = 'fixed1.nii'
+    >>> at.inputs.output_image = 'deformed_moving1.nii'
+    >>> at.inputs.interpolation = 'Linear'
+    >>> at.inputs.default_value = 0
+    >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
+    >>> at.inputs.invert_transform_flags = [False, False]
+    >>> at.cmdline
+    'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \
+--interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii \
+--transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]'
+
+    >>> at1 = ApplyTransforms()
+    >>> at1.inputs.dimension = 3
+    >>> at1.inputs.input_image = 'moving1.nii'
+    >>> at1.inputs.reference_image = 'fixed1.nii'
+    >>> at1.inputs.output_image = 'deformed_moving1.nii'
+    >>> at1.inputs.interpolation = 'BSpline'
+    >>> at1.inputs.interpolation_parameters = (5,)
+    >>> at1.inputs.default_value = 0
+    >>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat']
+    >>> at1.inputs.invert_transform_flags = [False, False]
+    >>> at1.cmdline
+    'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii \
+--interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii \
+--transform [ ants_Warp.nii.gz, 0 ] --transform [ trans.mat, 0 ]'
+    """
+    _cmd = 'antsApplyTransforms'
+    input_spec = ApplyTransformsInputSpec
+    output_spec = ApplyTransformsOutputSpec
+
+    def _gen_filename(self, name):
+        if name == 'output_image':
+            output = self.inputs.output_image
+            if not isdefined(output):
+                _, name, ext = split_filename(self.inputs.input_image)
+                output = name + self.inputs.out_postfix + ext
+            return output
+        return None
+
+    def _get_transform_filenames(self):
+        retval = []
+        for ii in range(len(self.inputs.transforms)):
+            if isdefined(self.inputs.invert_transform_flags):
+                if len(self.inputs.transforms) == len(
+                        self.inputs.invert_transform_flags):
+                    invert_code = 1 if self.inputs.invert_transform_flags[
+                        ii] else 0
+                    retval.append("--transform [ %s, %d ]" %
+                                  (self.inputs.transforms[ii], invert_code))
+                else:
+                    raise Exception(
+                        "The invert_transform_flags list must have the same "
+                        "number of entries as the transforms list.")
+            else:
+                retval.append("--transform %s" % self.inputs.transforms[ii])
+        return " ".join(retval)
+
+    def _get_output_warped_filename(self):
+        if isdefined(self.inputs.print_out_composite_warp_file):
+            return "--output [ %s, %d ]" % (
+                self._gen_filename("output_image"),
+                int(self.inputs.print_out_composite_warp_file))
+        else:
+            return "--output %s" % (self._gen_filename("output_image"))
+
+    def _format_arg(self, opt, spec, val):
+        if opt == "output_image":
+            return self._get_output_warped_filename()
+        elif opt == "transforms":
+            if val == 'identity':
+                return '-t identity'
+            return self._get_transform_filenames()
+        elif opt == 'interpolation':
+            if self.inputs.interpolation in ['BSpline', 'MultiLabel', 'Gaussian'] and \
+                    isdefined(self.inputs.interpolation_parameters):
+                return '--interpolation %s[ %s ]' % (
+ 
self.inputs.interpolation, ', '.join([
+                        str(param)
+                        for param in self.inputs.interpolation_parameters
+                    ]))
+            else:
+                return '--interpolation %s' % self.inputs.interpolation
+        return super(ApplyTransforms, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['output_image'] = os.path.abspath(
+            self._gen_filename('output_image'))
+        return outputs
+
+
+class ApplyTransformsToPointsInputSpec(ANTSCommandInputSpec):
+    dimension = traits.Enum(
+        2,
+        3,
+        4,
+        argstr='--dimensionality %d',
+        desc=('This option forces the image to be treated '
+              'as a specified-dimensional image. If not '
+              'specified, antsWarp tries to infer the '
+              'dimensionality from the input image.'))
+    input_file = File(
+        argstr='--input %s',
+        mandatory=True,
+        desc=
+        ("Currently, the only input supported is a csv file with"
+         " columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers."
+         " The points should be defined in physical space."
+         " If in doubt how to convert coordinates from your files to the space"
+         " required by antsApplyTransformsToPoints, try creating/drawing a simple"
+         " label volume with only one voxel set to 1 and all others set to 0."
+         " Write down the voxel coordinates. Then use ImageMaths LabelStats to find"
+         " out what coordinates for this voxel antsApplyTransformsToPoints is"
+         " expecting."),
+        exists=True)
+    output_file = traits.Str(
+        argstr='--output %s',
+        desc='Name of the output CSV file',
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_transformed.csv')
+    transforms = traits.List(
+        File(exists=True),
+        argstr='%s',
+        mandatory=True,
+        desc='transforms that will be applied to the points')
+    invert_transform_flags = traits.List(
+        traits.Bool(),
+        desc='list indicating if a transform should be reversed')
+
+
+class ApplyTransformsToPointsOutputSpec(TraitedSpec):
+    output_file = File(
+        exists=True, desc='csv file with transformed coordinates')
+
+
+class ApplyTransformsToPoints(ANTSCommand):
+    """ApplyTransformsToPoints, applied to a CSV file, transforms coordinates
+    using the provided transform (or a set of transforms). 
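interpolation_parameters only takes effect for the BSpline, MultiLabel and Gaussian interpolators, as the _format_arg branch above shows: BSpline takes a one-element tuple with the spline order, Gaussian and MultiLabel take a (sigma, alpha) pair. A small sketch, assuming the placeholder files exist locally as they do for the doctests (exists=True traits are validated at assignment):

    # Sketch: Gaussian interpolation with a (sigma, alpha) pair.
    # File names are placeholders and must exist in the working directory.
    from nipype.interfaces.ants import ApplyTransforms

    at = ApplyTransforms()
    at.inputs.input_image = 'moving1.nii'
    at.inputs.reference_image = 'fixed1.nii'
    at.inputs.transforms = ['ants_Warp.nii.gz']
    at.inputs.invert_transform_flags = [False]
    at.inputs.interpolation = 'Gaussian'
    at.inputs.interpolation_parameters = (1.0, 1.0)
    print(at.cmdline)  # ... --interpolation Gaussian[ 1.0, 1.0 ] ...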
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.ants import ApplyTransformsToPoints
+    >>> at = ApplyTransformsToPoints()
+    >>> at.inputs.dimension = 3
+    >>> at.inputs.input_file = 'moving.csv'
+    >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz']
+    >>> at.inputs.invert_transform_flags = [False, False]
+    >>> at.cmdline
+    'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv \
+--transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]'
+
+
+    """
+    _cmd = 'antsApplyTransformsToPoints'
+    input_spec = ApplyTransformsToPointsInputSpec
+    output_spec = ApplyTransformsToPointsOutputSpec
+
+    def _get_transform_filenames(self):
+        retval = []
+        for ii in range(len(self.inputs.transforms)):
+            if isdefined(self.inputs.invert_transform_flags):
+                if len(self.inputs.transforms) == len(
+                        self.inputs.invert_transform_flags):
+                    invert_code = 1 if self.inputs.invert_transform_flags[
+                        ii] else 0
+                    retval.append("--transform [ %s, %d ]" %
+                                  (self.inputs.transforms[ii], invert_code))
+                else:
+                    raise Exception(
+                        "The invert_transform_flags list must have the same "
+                        "number of entries as the transforms list.")
+            else:
+                retval.append("--transform %s" % self.inputs.transforms[ii])
+        return " ".join(retval)
+
+    def _format_arg(self, opt, spec, val):
+        if opt == "transforms":
+            return self._get_transform_filenames()
+        return super(ApplyTransformsToPoints, self)._format_arg(opt, spec, val)
diff --git a/nipype/interfaces/ants/segmentation.py b/nipype/interfaces/ants/segmentation.py
new file mode 100644
index 0000000000..6ccfb28ba2
--- /dev/null
+++ b/nipype/interfaces/ants/segmentation.py
@@ -0,0 +1,1642 @@
+# -*- coding: utf-8 -*-
+"""The ants module provides basic functions for interfacing with ants functions. 
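Per the input_file description above, antsApplyTransformsToPoints expects a CSV with x,y (or x,y,z, or x,y,z,t,label) headers and coordinates given in physical space, not voxel indices. A sketch of preparing such a file; 'moving.csv' is a placeholder path:

    # Sketch: write a 3D point set in the CSV layout described above.
    # Coordinates must be in physical space; the path is a placeholder.
    import csv

    points = [(12.5, -4.0, 33.1), (0.0, 0.0, 0.0)]
    with open('moving.csv', 'w') as f:
        writer = csv.writer(f)
        writer.writerow(['x', 'y', 'z'])
        writer.writerows(points)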
+
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import range, str
+
+import os
+from ...external.due import BibTeX
+from ...utils.filemanip import split_filename, copyfile, which
+from ..base import TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, isdefined
+from .base import ANTSCommand, ANTSCommandInputSpec
+
+
+class AtroposInputSpec(ANTSCommandInputSpec):
+    dimension = traits.Enum(
+        3,
+        2,
+        4,
+        argstr='--image-dimensionality %d',
+        usedefault=True,
+        desc='image dimension (2, 3, or 4)')
+    intensity_images = InputMultiPath(
+        File(exists=True), argstr="--intensity-image %s...", mandatory=True)
+    mask_image = File(exists=True, argstr='--mask-image %s', mandatory=True)
+    initialization = traits.Enum(
+        'Random',
+        'Otsu',
+        'KMeans',
+        'PriorProbabilityImages',
+        'PriorLabelImage',
+        argstr="%s",
+        requires=['number_of_tissue_classes'],
+        mandatory=True)
+    prior_probability_images = InputMultiPath(File(exists=True))
+    number_of_tissue_classes = traits.Int(mandatory=True)
+    prior_weighting = traits.Float()
+    prior_probability_threshold = traits.Float(requires=['prior_weighting'])
+    likelihood_model = traits.Str(argstr="--likelihood-model %s")
+    mrf_smoothing_factor = traits.Float(argstr="%s")
+    mrf_radius = traits.List(traits.Int(), requires=['mrf_smoothing_factor'])
+    icm_use_synchronous_update = traits.Bool(argstr="%s")
+    maximum_number_of_icm_terations = traits.Int(
+        requires=['icm_use_synchronous_update'])
+    n_iterations = traits.Int(argstr="%s")
+    convergence_threshold = traits.Float(requires=['n_iterations'])
+    posterior_formulation = traits.Str(argstr="%s")
+    use_random_seed = traits.Bool(
+        True,
+        argstr='--use-random-seed %d',
+        desc='use random seed value over constant',
+        usedefault=True)
+    use_mixture_model_proportions = traits.Bool(
+        requires=['posterior_formulation'])
+    out_classified_image_name = File(
+        argstr="%s", genfile=True, hash_files=False)
+    save_posteriors = traits.Bool()
+    output_posteriors_name_template = traits.Str(
+        'POSTERIOR_%02d.nii.gz', usedefault=True)
+
+
+class AtroposOutputSpec(TraitedSpec):
+    classified_image = File(exists=True)
+    posteriors = OutputMultiPath(File(exists=True))
+
+
+class Atropos(ANTSCommand):
+    """A finite mixture modeling (FMM) segmentation approach with possibilities for
+    specifying prior constraints. These prior constraints include the specification
+    of a prior label image, prior probability images (one for each class), and/or an
+    MRF prior to enforce spatial smoothing of the labels. Similar algorithms include
+    FAST and SPM. 
+ + Examples + -------- + + >>> from nipype.interfaces.ants import Atropos + >>> at = Atropos() + >>> at.inputs.dimension = 3 + >>> at.inputs.intensity_images = 'structural.nii' + >>> at.inputs.mask_image = 'mask.nii' + >>> at.inputs.initialization = 'PriorProbabilityImages' + >>> at.inputs.prior_probability_images = ['rc1s1.nii', 'rc1s2.nii'] + >>> at.inputs.number_of_tissue_classes = 2 + >>> at.inputs.prior_weighting = 0.8 + >>> at.inputs.prior_probability_threshold = 0.0000001 + >>> at.inputs.likelihood_model = 'Gaussian' + >>> at.inputs.mrf_smoothing_factor = 0.2 + >>> at.inputs.mrf_radius = [1, 1, 1] + >>> at.inputs.icm_use_synchronous_update = True + >>> at.inputs.maximum_number_of_icm_terations = 1 + >>> at.inputs.n_iterations = 5 + >>> at.inputs.convergence_threshold = 0.000001 + >>> at.inputs.posterior_formulation = 'Socrates' + >>> at.inputs.use_mixture_model_proportions = True + >>> at.inputs.save_posteriors = True + >>> at.cmdline + 'Atropos --image-dimensionality 3 --icm [1,1] \ +--initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07] --intensity-image structural.nii \ +--likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] \ +--output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1' + + """ + input_spec = AtroposInputSpec + output_spec = AtroposOutputSpec + _cmd = 'Atropos' + + def _format_arg(self, opt, spec, val): + if opt == 'initialization': + retval = "--initialization %s[%d" % ( + val, self.inputs.number_of_tissue_classes) + if val == "PriorProbabilityImages": + _, _, ext = split_filename( + self.inputs.prior_probability_images[0]) + retval += ",priors/priorProbImages%02d" + \ + ext + ",%g" % self.inputs.prior_weighting + if isdefined(self.inputs.prior_probability_threshold): + retval += ",%g" % self.inputs.prior_probability_threshold + return retval + "]" + if opt == 'mrf_smoothing_factor': + retval = "--mrf [%g" % val + if isdefined(self.inputs.mrf_radius): + retval += ",%s" % self._format_xarray( + [str(s) for s in self.inputs.mrf_radius]) + return retval + "]" + if opt == "icm_use_synchronous_update": + retval = "--icm [%d" % val + if isdefined(self.inputs.maximum_number_of_icm_terations): + retval += ",%g" % self.inputs.maximum_number_of_icm_terations + return retval + "]" + if opt == "n_iterations": + retval = "--convergence [%d" % val + if isdefined(self.inputs.convergence_threshold): + retval += ",%g" % self.inputs.convergence_threshold + return retval + "]" + if opt == "posterior_formulation": + retval = "--posterior-formulation %s" % val + if isdefined(self.inputs.use_mixture_model_proportions): + retval += "[%d]" % self.inputs.use_mixture_model_proportions + return retval + if opt == "out_classified_image_name": + retval = "--output [%s" % val + if isdefined(self.inputs.save_posteriors): + retval += ",%s" % self.inputs.output_posteriors_name_template + return retval + "]" + return super(Atropos, self)._format_arg(opt, spec, val) + + def _run_interface(self, runtime, correct_return_codes=[0]): + if self.inputs.initialization == "PriorProbabilityImages": + priors_directory = os.path.join(os.getcwd(), "priors") + if not os.path.exists(priors_directory): + os.makedirs(priors_directory) + _, _, ext = split_filename(self.inputs.prior_probability_images[0]) + for i, f in enumerate(self.inputs.prior_probability_images): + target = os.path.join(priors_directory, + 'priorProbImages%02d' % (i + 1) + ext) + if not (os.path.exists(target) + and 
os.path.realpath(target) == os.path.abspath(f)):
+                    copyfile(
+                        os.path.abspath(f),
+                        os.path.join(priors_directory,
+                                     'priorProbImages%02d' % (i + 1) + ext))
+        runtime = super(Atropos, self)._run_interface(runtime)
+        return runtime
+
+    def _gen_filename(self, name):
+        if name == 'out_classified_image_name':
+            output = self.inputs.out_classified_image_name
+            if not isdefined(output):
+                _, name, ext = split_filename(self.inputs.intensity_images[0])
+                output = name + '_labeled' + ext
+            return output
+        return None
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['classified_image'] = os.path.abspath(
+            self._gen_filename('out_classified_image_name'))
+        if isdefined(
+                self.inputs.save_posteriors) and self.inputs.save_posteriors:
+            outputs['posteriors'] = []
+            for i in range(self.inputs.number_of_tissue_classes):
+                outputs['posteriors'].append(
+                    os.path.abspath(
+                        self.inputs.output_posteriors_name_template % (i + 1)))
+        return outputs
+
+
+class LaplacianThicknessInputSpec(ANTSCommandInputSpec):
+    input_wm = File(
+        argstr='%s',
+        mandatory=True,
+        copyfile=True,
+        desc='white matter segmentation image',
+        position=1)
+    input_gm = File(
+        argstr='%s',
+        mandatory=True,
+        copyfile=True,
+        desc='gray matter segmentation image',
+        position=2)
+    output_image = File(
+        desc='name of output file',
+        argstr='%s',
+        position=3,
+        genfile=True,
+        hash_files=False)
+    smooth_param = traits.Float(argstr='smoothparam=%f', desc='', position=4)
+    prior_thickness = traits.Float(
+        argstr='priorthickval=%f', desc='', position=5)
+    dT = traits.Float(argstr='dT=%f', desc='', position=6)
+    sulcus_prior = traits.Bool(argstr='use-sulcus-prior', desc='', position=7)
+    opt_tolerance = traits.Float(
+        argstr='optional-laplacian-tolerance=%f', desc='', position=8)
+
+
+class LaplacianThicknessOutputSpec(TraitedSpec):
+    output_image = File(exists=True, desc='Cortical thickness')
+
+
+class LaplacianThickness(ANTSCommand):
+    """Calculates the cortical thickness from an anatomical image
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.ants import LaplacianThickness
+    >>> cort_thick = LaplacianThickness()
+    >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz'
+    >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz'
+    >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz'
+    >>> cort_thick.cmdline
+    'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz'
+
+    """
+
+    _cmd = 'LaplacianThickness'
+    input_spec = LaplacianThicknessInputSpec
+    output_spec = LaplacianThicknessOutputSpec
+
+    def _gen_filename(self, name):
+        if name == 'output_image':
+            output = self.inputs.output_image
+            if not isdefined(output):
+                _, name, ext = split_filename(self.inputs.input_wm)
+                output = name + '_thickness' + ext
+            return output
+        return None
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['output_image'] = os.path.abspath(
+            self._gen_filename('output_image'))
+        return outputs
+
+
+class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
+    dimension = traits.Enum(
+        3,
+        2,
+        4,
+        argstr='-d %d',
+        usedefault=True,
+        desc='image dimension (2, 3 or 4)')
+    input_image = File(
+        argstr='--input-image %s',
+        mandatory=True,
+        desc=('input for bias correction. 
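Atropos stages the prior images into a local priors/ directory (see _run_interface above) precisely so that --initialization can reference them with a printf-style pattern. A standalone sketch of the string _format_arg assembles for PriorProbabilityImages, reusing the values from the doctest above:

    # Sketch of the --initialization argument assembled by Atropos._format_arg
    # for PriorProbabilityImages (values mirror the doctest above).
    number_of_tissue_classes = 2
    prior_weighting = 0.8
    prior_probability_threshold = 1e-07

    retval = '--initialization PriorProbabilityImages[%d' % number_of_tissue_classes
    retval += ',priors/priorProbImages%02d.nii'  # literal pattern, filled by Atropos
    retval += ',%g' % prior_weighting
    retval += ',%g' % prior_probability_threshold
    print(retval + ']')
    # --initialization PriorProbabilityImages[2,priors/priorProbImages%02d.nii,0.8,1e-07]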
Negative values or values close to '
+              'zero should be processed prior to correction'))
+    mask_image = File(
+        argstr='--mask-image %s',
+        desc=('image to specify region to perform final bias correction in'))
+    weight_image = File(
+        argstr='--weight-image %s',
+        desc=('image for relative weighting (e.g. probability map of the white '
+              'matter) of voxels during the B-spline fitting. '))
+    output_image = traits.Str(
+        argstr='--output %s',
+        desc='output file name',
+        genfile=True,
+        hash_files=False)
+    bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
+    bspline_order = traits.Int(requires=['bspline_fitting_distance'])
+    shrink_factor = traits.Int(argstr="--shrink-factor %d")
+    n_iterations = traits.List(traits.Int(), argstr="--convergence %s")
+    convergence_threshold = traits.Float(requires=['n_iterations'])
+    save_bias = traits.Bool(
+        False,
+        mandatory=True,
+        usedefault=True,
+        desc=('True if the estimated bias should be saved to file.'),
+        xor=['bias_image'])
+    bias_image = File(
+        desc='Filename for the estimated bias.', hash_files=False)
+    copy_header = traits.Bool(
+        False,
+        mandatory=True,
+        usedefault=True,
+        desc='copy headers of the original image into the '
+        'output (corrected) file')
+
+
+class N4BiasFieldCorrectionOutputSpec(TraitedSpec):
+    output_image = File(exists=True, desc='Bias-corrected image')
+    bias_image = File(exists=True, desc='Estimated bias')
+
+
+class N4BiasFieldCorrection(ANTSCommand):
+    """N4 is a variant of the popular N3 (nonparametric nonuniform normalization)
+    retrospective bias correction algorithm. Based on the assumption that the
+    corruption of the low frequency bias field can be modeled as a convolution of
+    the intensity histogram by a Gaussian, the basic algorithmic protocol is to
+    iterate between deconvolving the intensity histogram by a Gaussian, remapping
+    the intensities, and then spatially smoothing this result by a B-spline modeling
+    of the bias field itself. The modifications from, and improvements over, the
+    original N3 algorithm are described in [Tustison2010]_.
+
+    .. [Tustison2010] N. Tustison et al.,
+      N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
+      29(6):1310-1320, June 2010. 
+ + Examples + -------- + + >>> import copy + >>> from nipype.interfaces.ants import N4BiasFieldCorrection + >>> n4 = N4BiasFieldCorrection() + >>> n4.inputs.dimension = 3 + >>> n4.inputs.input_image = 'structural.nii' + >>> n4.inputs.bspline_fitting_distance = 300 + >>> n4.inputs.shrink_factor = 3 + >>> n4.inputs.n_iterations = [50,50,30,20] + >>> n4.cmdline + 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ +-d 3 --input-image structural.nii \ +--convergence [ 50x50x30x20 ] --output structural_corrected.nii \ +--shrink-factor 3' + + >>> n4_2 = copy.deepcopy(n4) + >>> n4_2.inputs.convergence_threshold = 1e-6 + >>> n4_2.cmdline + 'N4BiasFieldCorrection --bspline-fitting [ 300 ] \ +-d 3 --input-image structural.nii \ +--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ +--shrink-factor 3' + + >>> n4_3 = copy.deepcopy(n4_2) + >>> n4_3.inputs.bspline_order = 5 + >>> n4_3.cmdline + 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \ +-d 3 --input-image structural.nii \ +--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \ +--shrink-factor 3' + + >>> n4_4 = N4BiasFieldCorrection() + >>> n4_4.inputs.input_image = 'structural.nii' + >>> n4_4.inputs.save_bias = True + >>> n4_4.inputs.dimension = 3 + >>> n4_4.cmdline + 'N4BiasFieldCorrection -d 3 --input-image structural.nii \ +--output [ structural_corrected.nii, structural_bias.nii ]' + """ + + _cmd = 'N4BiasFieldCorrection' + input_spec = N4BiasFieldCorrectionInputSpec + output_spec = N4BiasFieldCorrectionOutputSpec + + def _gen_filename(self, name): + if name == 'output_image': + output = self.inputs.output_image + if not isdefined(output): + _, name, ext = split_filename(self.inputs.input_image) + output = name + '_corrected' + ext + return output + + if name == 'bias_image': + output = self.inputs.bias_image + if not isdefined(output): + _, name, ext = split_filename(self.inputs.input_image) + output = name + '_bias' + ext + return output + return None + + def _format_arg(self, name, trait_spec, value): + if ((name == 'output_image') and + (self.inputs.save_bias or isdefined(self.inputs.bias_image))): + bias_image = self._gen_filename('bias_image') + output = self._gen_filename('output_image') + newval = '[ %s, %s ]' % (output, bias_image) + return trait_spec.argstr % newval + + if name == 'bspline_fitting_distance': + if isdefined(self.inputs.bspline_order): + newval = '[ %g, %d ]' % (value, self.inputs.bspline_order) + else: + newval = '[ %g ]' % value + return trait_spec.argstr % newval + + if name == 'n_iterations': + if isdefined(self.inputs.convergence_threshold): + newval = '[ %s, %g ]' % ( + self._format_xarray([str(elt) for elt in value]), + self.inputs.convergence_threshold) + else: + newval = '[ %s ]' % self._format_xarray( + [str(elt) for elt in value]) + return trait_spec.argstr % newval + + return super(N4BiasFieldCorrection, self)._format_arg( + name, trait_spec, value) + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + skip += ['save_bias', 'bias_image'] + return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_image'] = os.path.abspath( + self._gen_filename('output_image')) + + if self.inputs.save_bias or isdefined(self.inputs.bias_image): + outputs['bias_image'] = os.path.abspath( + self._gen_filename('bias_image')) + return outputs + + def _run_interface(self, runtime, correct_return_codes=(0, )): + runtime = super(N4BiasFieldCorrection, self)._run_interface( + 
runtime, correct_return_codes) + + if self.inputs.copy_header and runtime.returncode in correct_return_codes: + self._copy_header(self._gen_filename('output_image')) + if self.inputs.save_bias or isdefined(self.inputs.bias_image): + self._copy_header(self._gen_filename('bias_image')) + + return runtime + + def _copy_header(self, fname): + """Copy header from input image to an output image""" + import nibabel as nb + in_img = nb.load(self.inputs.input_image) + out_img = nb.load(fname, mmap=False) + new_img = out_img.__class__(out_img.get_data(), in_img.affine, + in_img.header) + new_img.set_data_dtype(out_img.get_data_dtype()) + new_img.to_filename(fname) + + +class CorticalThicknessInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)') + anatomical_image = File( + exists=True, + argstr='-a %s', + desc=('Structural *intensity* image, typically T1.' + ' If more than one anatomical image is specified,' + ' subsequently specified images are used during the' + ' segmentation process. However, only the first' + ' image is used in the registration of priors.' + ' Our suggestion would be to specify the T1' + ' as the first image.'), + mandatory=True) + brain_template = File( + exists=True, + argstr='-e %s', + desc=('Anatomical *intensity* template (possibly created using a' + ' population data set with buildtemplateparallel.sh in ANTs).' + ' This template is *not* skull-stripped.'), + mandatory=True) + brain_probability_mask = File( + exists=True, + argstr='-m %s', + desc='brain probability mask in template space', + copyfile=False, + mandatory=True) + segmentation_priors = InputMultiPath( + File(exists=True), argstr='-p %s', mandatory=True) + out_prefix = traits.Str( + 'antsCT_', + argstr='-o %s', + usedefault=True, + desc=('Prefix that is prepended to all output' + ' files (default = antsCT_)')) + image_suffix = traits.Str( + 'nii.gz', + desc=('any of standard ITK formats,' + ' nii.gz is default'), + argstr='-s %s', + usedefault=True) + t1_registration_template = File( + exists=True, + desc=('Anatomical *intensity* template' + ' (assumed to be skull-stripped). A common' + ' case would be where this would be the same' + ' template as specified in the -e option which' + ' is not skull stripped.'), + argstr='-t %s', + mandatory=True) + extraction_registration_mask = File( + exists=True, + argstr='-f %s', + desc=('Mask (defined in the template space) used during' + ' registration for brain extraction.')) + keep_temporary_files = traits.Int( + argstr='-k %d', + desc='Keep brain extraction/segmentation warps, etc (default = 0).') + max_iterations = traits.Int( + argstr='-i %d', + desc=('ANTS registration max iterations (default = 100x100x70x20)')) + prior_segmentation_weight = traits.Float( + argstr='-w %f', + desc=('Atropos spatial prior *probability* weight for' + ' the segmentation')) + segmentation_iterations = traits.Int( + argstr='-n %d', + desc=('N4 -> Atropos -> N4 iterations during segmentation' + ' (default = 3)')) + posterior_formulation = traits.Str( + argstr='-b %s', + desc=('Atropos posterior formulation and whether or not' + ' to use mixture model proportions.' 
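save_bias and bias_image have no command-line flags of their own; as _format_arg and _parse_inputs above show, they are skipped at parse time and only change how --output is rendered. A sketch mirroring the n4_4 doctest, assuming the placeholder input file exists locally as it does for the doctests:

    # Sketch: requesting the estimated bias field folds both file names
    # into --output (mirrors the n4_4 doctest; the path is a placeholder).
    from nipype.interfaces.ants import N4BiasFieldCorrection

    n4 = N4BiasFieldCorrection()
    n4.inputs.dimension = 3
    n4.inputs.input_image = 'structural.nii'
    n4.inputs.save_bias = True
    print(n4.cmdline)
    # N4BiasFieldCorrection -d 3 --input-image structural.nii \
    #     --output [ structural_corrected.nii, structural_bias.nii ]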
+        ''' e.g. 'Socrates[1]' (default) or 'Aristotle[1]'.'''
+        ' Choose the latter if you'
+        ' want to use the distance priors (see also the -l option'
+        ' for label propagation control).'))
+    use_floatingpoint_precision = traits.Enum(
+        0,
+        1,
+        argstr='-j %d',
+        desc=('Use floating point precision in registrations (default = 0)'))
+    use_random_seeding = traits.Enum(
+        0,
+        1,
+        argstr='-u %d',
+        desc=('Use random number generated from system clock in Atropos'
+              ' (default = 1)'))
+    b_spline_smoothing = traits.Bool(
+        argstr='-v',
+        desc=('Use B-spline SyN for registrations and B-spline'
+              ' exponential mapping in DiReCT.'))
+    cortical_label_image = File(
+        exists=True, desc='Cortical ROI labels to use as a prior for ATITH.')
+    label_propagation = traits.Str(
+        argstr='-l %s',
+        desc=
+        ('Incorporate a distance prior on the posterior formulation. Should be'
+         ''' of the form 'label[lambda,boundaryProbability]' where label'''
+         ' is a value of 1,2,3,... denoting label ID. The label'
+         ' probability for anything outside the current label'
+         ' = boundaryProbability * exp( -lambda * distanceFromBoundary )'
+         ' Intuitively, smaller lambda values will increase the spatial capture'
+         ' range of the distance prior. To apply to all label values, simply omit'
+         ' specifying the label, i.e. -l [lambda,boundaryProbability].'))
+    quick_registration = traits.Bool(
+        argstr='-q 1',
+        desc=
+        ('If = 1, use antsRegistrationSyNQuick.sh as the basis for registration'
+         ' during brain extraction, brain segmentation, and'
+         ' (optional) normalization to a template.'
+         ' Otherwise use antsRegistrationSyN.sh (default = 0).'))
+    debug = traits.Bool(
+        argstr='-z 1',
+        desc=(
+            'If > 0, runs a faster version of the script.'
+            ' Only for testing. Implies -u 0.'
+            ' Requires single thread computation for complete reproducibility.'
+        ))
+
+
+class CorticalThicknessOutputSpec(TraitedSpec):
+    BrainExtractionMask = File(exists=True, desc='brain extraction mask')
+    BrainSegmentation = File(exists=True, desc='brain segmentation image')
+    BrainSegmentationN4 = File(exists=True, desc='N4 corrected image')
+    BrainSegmentationPosteriors = OutputMultiPath(
+        File(exists=True), desc='Posterior probability images')
+    CorticalThickness = File(exists=True, desc='cortical thickness file')
+    TemplateToSubject1GenericAffine = File(
+        exists=True, desc='Template to subject affine')
+    TemplateToSubject0Warp = File(exists=True, desc='Template to subject warp')
+    SubjectToTemplate1Warp = File(
+        exists=True, desc='Template to subject inverse warp')
+    SubjectToTemplate0GenericAffine = File(
+        exists=True, desc='Template to subject inverse affine')
+    SubjectToTemplateLogJacobian = File(
+        exists=True, desc='Template to subject log jacobian')
+    CorticalThicknessNormedToTemplate = File(
+        exists=True, desc='Normalized cortical thickness')
+    BrainVolumes = File(exists=True, desc='Brain volumes as text')
+
+
+class CorticalThickness(ANTSCommand):
+    """
+    Examples
+    --------
+    >>> from nipype.interfaces.ants.segmentation import CorticalThickness
+    >>> corticalthickness = CorticalThickness()
+    >>> corticalthickness.inputs.dimension = 3
+    >>> corticalthickness.inputs.anatomical_image ='T1.nii.gz'
+    >>> corticalthickness.inputs.brain_template = 'study_template.nii.gz'
+    >>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
+    >>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz',
+    ...                                                 'BrainSegmentationPrior02.nii.gz',
+    ...                                                 'BrainSegmentationPrior03.nii.gz',
+    ... 
'BrainSegmentationPrior04.nii.gz'] + >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz' + >>> corticalthickness.cmdline + 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \ +-s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' + + """ + + input_spec = CorticalThicknessInputSpec + output_spec = CorticalThicknessOutputSpec + _cmd = 'antsCorticalThickness.sh' + + def _format_arg(self, opt, spec, val): + if opt == 'anatomical_image': + retval = '-a %s' % val + return retval + if opt == 'brain_template': + retval = '-e %s' % val + return retval + if opt == 'brain_probability_mask': + retval = '-m %s' % val + return retval + if opt == 'out_prefix': + retval = '-o %s' % val + return retval + if opt == 't1_registration_template': + retval = '-t %s' % val + return retval + if opt == 'segmentation_priors': + _, _, ext = split_filename(self.inputs.segmentation_priors[0]) + retval = "-p nipype_priors/BrainSegmentationPrior%02d" + ext + return retval + return super(CorticalThickness, self)._format_arg(opt, spec, val) + + def _run_interface(self, runtime, correct_return_codes=[0]): + priors_directory = os.path.join(os.getcwd(), "nipype_priors") + if not os.path.exists(priors_directory): + os.makedirs(priors_directory) + _, _, ext = split_filename(self.inputs.segmentation_priors[0]) + for i, f in enumerate(self.inputs.segmentation_priors): + target = os.path.join(priors_directory, + 'BrainSegmentationPrior%02d' % (i + 1) + ext) + if not (os.path.exists(target) + and os.path.realpath(target) == os.path.abspath(f)): + copyfile(os.path.abspath(f), target) + runtime = super(CorticalThickness, self)._run_interface(runtime) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['BrainExtractionMask'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' + + self.inputs.image_suffix) + outputs['BrainSegmentation'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation.' + + self.inputs.image_suffix) + outputs['BrainSegmentationN4'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'BrainSegmentation0N4.' + + self.inputs.image_suffix) + posteriors = [] + for i in range(len(self.inputs.segmentation_priors)): + posteriors.append( + os.path.join(os.getcwd(), self.inputs.out_prefix + + 'BrainSegmentationPosteriors%02d.' % + (i + 1) + self.inputs.image_suffix)) + outputs['BrainSegmentationPosteriors'] = posteriors + outputs['CorticalThickness'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'CorticalThickness.' + + self.inputs.image_suffix) + outputs['TemplateToSubject1GenericAffine'] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + 'TemplateToSubject1GenericAffine.mat') + outputs['TemplateToSubject0Warp'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'TemplateToSubject0Warp.' + + self.inputs.image_suffix) + outputs['SubjectToTemplate1Warp'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'SubjectToTemplate1Warp.' + + self.inputs.image_suffix) + outputs['SubjectToTemplate0GenericAffine'] = os.path.join( + os.getcwd(), + self.inputs.out_prefix + 'SubjectToTemplate0GenericAffine.mat') + outputs['SubjectToTemplateLogJacobian'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + + 'SubjectToTemplateLogJacobian.' 
+ self.inputs.image_suffix)
+        outputs['CorticalThicknessNormedToTemplate'] = os.path.join(
+            os.getcwd(), self.inputs.out_prefix +
+            'CorticalThicknessNormalizedToTemplate.' +
+            self.inputs.image_suffix)
+        outputs['BrainVolumes'] = os.path.join(
+            os.getcwd(), self.inputs.out_prefix + 'brainvols.csv')
+        return outputs
+
+
+class BrainExtractionInputSpec(ANTSCommandInputSpec):
+    dimension = traits.Enum(
+        3, 2, argstr='-d %d', usedefault=True, desc='image dimension (2 or 3)')
+    anatomical_image = File(
+        exists=True,
+        argstr='-a %s',
+        desc=('Structural image, typically T1. If more than one'
+              ' anatomical image is specified, subsequently specified'
+              ' images are used during the segmentation process. However,'
+              ' only the first image is used in the registration of priors.'
+              ' Our suggestion would be to specify the T1 as the first image.'
+              ' Anatomical template created using e.g. LPBA40 data set with'
+              ' buildtemplateparallel.sh in ANTs.'),
+        mandatory=True)
+    brain_template = File(
+        exists=True,
+        argstr='-e %s',
+        desc=('Anatomical template created using e.g. LPBA40 data set with'
+              ' buildtemplateparallel.sh in ANTs.'),
+        mandatory=True)
+    brain_probability_mask = File(
+        exists=True,
+        argstr='-m %s',
+        desc=('Brain probability mask created using e.g. LPBA40 data set which'
+              ' have brain masks defined, and warped to anatomical template and'
+              ' averaged resulting in a probability image.'),
+        copyfile=False,
+        mandatory=True)
+    out_prefix = traits.Str(
+        'highres001_',
+        argstr='-o %s',
+        usedefault=True,
+        desc=('Prefix that is prepended to all output'
+              ' files (default = highres001_)'))
+
+    extraction_registration_mask = File(
+        exists=True,
+        argstr='-f %s',
+        desc=('Mask (defined in the template space) used during'
+              ' registration for brain extraction.'
+              ' To limit the metric computation to a specific region.'))
+    image_suffix = traits.Str(
+        'nii.gz',
+        desc=('any of standard ITK formats,'
+              ' nii.gz is default'),
+        argstr='-s %s',
+        usedefault=True)
+    use_random_seeding = traits.Enum(
+        0,
+        1,
+        argstr='-u %d',
+        desc=('Use random number generated from system clock in Atropos'
+              ' (default = 1)'))
+    keep_temporary_files = traits.Int(
+        argstr='-k %d',
+        desc='Keep brain extraction/segmentation warps, etc (default = 0).')
+    use_floatingpoint_precision = traits.Enum(
+        0,
+        1,
+        argstr='-q %d',
+        desc=('Use floating point precision in registrations (default = 0)'))
+    debug = traits.Bool(
+        argstr='-z 1',
+        desc=(
+            'If > 0, runs a faster version of the script.'
+            ' Only for testing. Implies -u 0.'
+            ' Requires single thread computation for complete reproducibility.'
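Every path CorticalThickness._list_outputs returns is rebuilt from out_prefix and image_suffix, so changing either input renames the whole output set. A standalone sketch of the naming rule for a few of the outputs:

    # Sketch of the out_prefix + name + '.' + image_suffix naming rule
    # used by CorticalThickness._list_outputs above.
    import os

    out_prefix, image_suffix = 'antsCT_', 'nii.gz'
    for name in ('BrainExtractionMask', 'BrainSegmentation', 'CorticalThickness'):
        print(os.path.join(os.getcwd(), out_prefix + name + '.' + image_suffix))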
+        ))
+
+
+class BrainExtractionOutputSpec(TraitedSpec):
+    BrainExtractionMask = File(exists=True, desc='brain extraction mask')
+    BrainExtractionBrain = File(exists=True, desc='brain extraction image')
+    BrainExtractionCSF = File(
+        exists=True, desc='segmentation mask with only CSF')
+    BrainExtractionGM = File(
+        exists=True, desc='segmentation mask with only grey matter')
+    BrainExtractionInitialAffine = File(exists=True, desc='')
+    BrainExtractionInitialAffineFixed = File(exists=True, desc='')
+    BrainExtractionInitialAffineMoving = File(exists=True, desc='')
+    BrainExtractionLaplacian = File(exists=True, desc='')
+    BrainExtractionPrior0GenericAffine = File(exists=True, desc='')
+    BrainExtractionPrior1InverseWarp = File(exists=True, desc='')
+    BrainExtractionPrior1Warp = File(exists=True, desc='')
+    BrainExtractionPriorWarped = File(exists=True, desc='')
+    BrainExtractionSegmentation = File(
+        exists=True, desc='segmentation mask with CSF, GM, and WM')
+    BrainExtractionTemplateLaplacian = File(exists=True, desc='')
+    BrainExtractionTmp = File(exists=True, desc='')
+    BrainExtractionWM = File(
+        exists=True, desc='segmentation mask with only white matter')
+    N4Corrected0 = File(exists=True, desc='N4 bias field corrected image')
+    N4Truncated0 = File(exists=True, desc='')
+
+
+class BrainExtraction(ANTSCommand):
+    """
+    Examples
+    --------
+    >>> from nipype.interfaces.ants.segmentation import BrainExtraction
+    >>> brainextraction = BrainExtraction()
+    >>> brainextraction.inputs.dimension = 3
+    >>> brainextraction.inputs.anatomical_image ='T1.nii.gz'
+    >>> brainextraction.inputs.brain_template = 'study_template.nii.gz'
+    >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz'
+    >>> brainextraction.cmdline
+    'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 \
+-s nii.gz -o highres001_'
+    """
+    input_spec = BrainExtractionInputSpec
+    output_spec = BrainExtractionOutputSpec
+    _cmd = 'antsBrainExtraction.sh'
+
+    def _run_interface(self, runtime, correct_return_codes=(0, )):
+        # antsBrainExtraction.sh requires ANTSPATH to be defined
+        out_environ = self._get_environ()
+        ants_path = out_environ.get('ANTSPATH', None) or os.getenv(
+            'ANTSPATH', None)
+        if ants_path is None:
+            # Check for antsRegistration, which is under bin/ (the $ANTSPATH) instead of
+            # checking for antsBrainExtraction.sh which is under script/
+            cmd_path = which('antsRegistration', env=runtime.environ)
+            if not cmd_path:
+                raise RuntimeError(
+                    'The environment variable $ANTSPATH is not defined in host "%s", '
+                    'and Nipype could not determine it automatically.' 
%
+                    runtime.hostname)
+            ants_path = os.path.dirname(cmd_path)
+
+        self.inputs.environ.update({'ANTSPATH': ants_path})
+        runtime.environ.update({'ANTSPATH': ants_path})
+        runtime = super(BrainExtraction, self)._run_interface(runtime)
+
+        # Still, double-check that the script found its helper tools
+        if 'we cant find' in runtime.stdout:
+            for line in runtime.stdout.split('\n'):
+                if line.strip().startswith('we cant find'):
+                    tool = line.strip().replace('we cant find the',
+                                                '').strip().split(' ')[0]
+                    break
+
+            errmsg = (
+                'antsBrainExtraction.sh requires "%s" to be found in $ANTSPATH '
+                '($ANTSPATH="%s").') % (tool, ants_path)
+            if runtime.stderr is None:
+                runtime.stderr = errmsg
+            else:
+                runtime.stderr += '\n' + errmsg
+            runtime.returncode = 1
+            self.raise_exception(runtime)
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['BrainExtractionMask'] = os.path.join(
+            os.getcwd(), self.inputs.out_prefix + 'BrainExtractionMask.' +
+            self.inputs.image_suffix)
+        outputs['BrainExtractionBrain'] = os.path.join(
+            os.getcwd(), self.inputs.out_prefix + 'BrainExtractionBrain.' +
+            self.inputs.image_suffix)
+        if isdefined(self.inputs.keep_temporary_files
+                     ) and self.inputs.keep_temporary_files != 0:
+            outputs['BrainExtractionCSF'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix + 'BrainExtractionCSF.' +
+                self.inputs.image_suffix)
+            outputs['BrainExtractionGM'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix + 'BrainExtractionGM.' +
+                self.inputs.image_suffix)
+            outputs['BrainExtractionInitialAffine'] = os.path.join(
+                os.getcwd(),
+                self.inputs.out_prefix + 'BrainExtractionInitialAffine.mat')
+            outputs['BrainExtractionInitialAffineFixed'] = os.path.join(
+                os.getcwd(),
+                self.inputs.out_prefix + 'BrainExtractionInitialAffineFixed.' +
+                self.inputs.image_suffix)
+            outputs['BrainExtractionInitialAffineMoving'] = os.path.join(
+                os.getcwd(),
+                self.inputs.out_prefix + 'BrainExtractionInitialAffineMoving.' +
+                self.inputs.image_suffix)
+            outputs['BrainExtractionLaplacian'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionLaplacian.' + self.inputs.image_suffix)
+            outputs['BrainExtractionPrior0GenericAffine'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionPrior0GenericAffine.mat')
+            outputs['BrainExtractionPrior1InverseWarp'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionPrior1InverseWarp.' + self.inputs.image_suffix)
+            outputs['BrainExtractionPrior1Warp'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionPrior1Warp.' + self.inputs.image_suffix)
+            outputs['BrainExtractionPriorWarped'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionPriorWarped.' + self.inputs.image_suffix)
+            outputs['BrainExtractionSegmentation'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionSegmentation.' + self.inputs.image_suffix)
+            outputs['BrainExtractionTemplateLaplacian'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix +
+                'BrainExtractionTemplateLaplacian.' + self.inputs.image_suffix)
+            outputs['BrainExtractionTmp'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix + 'BrainExtractionTmp.' +
+                self.inputs.image_suffix)
+            outputs['BrainExtractionWM'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix + 'BrainExtractionWM.' +
+                self.inputs.image_suffix)
+            outputs['N4Corrected0'] = os.path.join(
+                os.getcwd(), self.inputs.out_prefix + 'N4Corrected0.' 
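The $ANTSPATH fallback above keys off antsRegistration because the ANTs binaries live under bin/ (the $ANTSPATH) while antsBrainExtraction.sh itself is installed under script/. A standalone sketch of the same lookup, using the which helper imported at the top of this file:

    # Sketch of the $ANTSPATH fallback in BrainExtraction._run_interface:
    # locate antsRegistration on the PATH and use its directory.
    import os
    from nipype.utils.filemanip import which

    ants_path = os.getenv('ANTSPATH')
    if ants_path is None:
        cmd_path = which('antsRegistration')
        if cmd_path:
            ants_path = os.path.dirname(cmd_path)
    print(ants_path)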
+ + self.inputs.image_suffix) + outputs['N4Truncated0'] = os.path.join( + os.getcwd(), self.inputs.out_prefix + 'N4Truncated0.' + + self.inputs.image_suffix) + + return outputs + + +class JointFusionInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + 4, + argstr='%d', + position=0, + usedefault=True, + mandatory=True, + desc='image dimension (2, 3, or 4)') + modalities = traits.Int( + argstr='%d', + position=1, + mandatory=True, + desc='Number of modalities or features') + warped_intensity_images = InputMultiPath( + File(exists=True), + argstr="-g %s...", + mandatory=True, + desc='Warped atlas images') + target_image = InputMultiPath( + File(exists=True), + argstr='-tg %s...', + mandatory=True, + desc='Target image(s)') + warped_label_images = InputMultiPath( + File(exists=True), + argstr="-l %s...", + mandatory=True, + desc='Warped atlas segmentations') + method = traits.Str( + default='Joint', + argstr='-m %s', + usedefault=True, + desc=('Select voting method. Options: Joint (Joint' + ' Label Fusion). May be followed by optional' + ' parameters in brackets, e.g., -m Joint[0.1,2]')) + alpha = traits.Float( + default=0.1, + usedefault=True, + requires=['method'], + desc=('Regularization term added to matrix Mx for inverse')) + beta = traits.Int( + default=2, + usedefault=True, + requires=['method'], + desc=('Exponent for mapping intensity difference to joint error')) + output_label_image = File( + argstr='%s', + mandatory=True, + position=-1, + name_template='%s', + output_name='output_label_image', + desc='Output fusion label map image') + patch_radius = traits.ListInt( + minlen=3, + maxlen=3, + argstr='-rp %s', + desc=('Patch radius for similarity measures, ' + 'scalar or vector. Default: 2x2x2')) + search_radius = traits.ListInt( + minlen=3, + maxlen=3, + argstr='-rs %s', + desc='Local search radius. Default: 3x3x3') + exclusion_region = File( + exists=True, + argstr='-x %s', + desc=('Specify an exclusion region for the given label.')) + atlas_group_id = traits.ListInt( + argstr='-gp %d...', desc='Assign a group ID for each atlas') + atlas_group_weights = traits.ListInt( + argstr='-gpw %d...', + desc=('Assign the voting weights to each atlas group')) + + +class JointFusionOutputSpec(TraitedSpec): + output_label_image = File(exists=True) + # TODO: optional outputs - output_posteriors, output_voting_weights + + +class JointFusion(ANTSCommand): + """ + Examples + -------- + + >>> from nipype.interfaces.ants import JointFusion + >>> at = JointFusion() + >>> at.inputs.dimension = 3 + >>> at.inputs.modalities = 1 + >>> at.inputs.method = 'Joint[0.1,2]' + >>> at.inputs.output_label_image ='fusion_labelimage_output.nii' + >>> at.inputs.warped_intensity_images = ['im1.nii', + ... 'im2.nii', + ... 'im3.nii'] + >>> at.inputs.warped_label_images = ['segmentation0.nii.gz', + ... 'segmentation1.nii.gz', + ... 
'segmentation1.nii.gz'] + >>> at.inputs.target_image = 'T1.nii' + >>> at.cmdline + 'jointfusion 3 1 -m Joint[0.1,2] -tg T1.nii -g im1.nii -g im2.nii -g im3.nii -l segmentation0.nii.gz \ +-l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' + + >>> at.inputs.method = 'Joint' + >>> at.inputs.alpha = 0.5 + >>> at.inputs.beta = 1 + >>> at.inputs.patch_radius = [3,2,1] + >>> at.inputs.search_radius = [1,2,3] + >>> at.cmdline + 'jointfusion 3 1 -m Joint[0.5,1] -rp 3x2x1 -rs 1x2x3 -tg T1.nii -g im1.nii -g im2.nii -g im3.nii \ +-l segmentation0.nii.gz -l segmentation1.nii.gz -l segmentation1.nii.gz fusion_labelimage_output.nii' + """ + input_spec = JointFusionInputSpec + output_spec = JointFusionOutputSpec + _cmd = 'jointfusion' + + def _format_arg(self, opt, spec, val): + if opt == 'method': + if '[' in val: + retval = '-m {0}'.format(val) + else: + retval = '-m {0}[{1},{2}]'.format( + self.inputs.method, self.inputs.alpha, self.inputs.beta) + elif opt == 'patch_radius': + retval = '-rp {0}'.format(self._format_xarray(val)) + elif opt == 'search_radius': + retval = '-rs {0}'.format(self._format_xarray(val)) + else: + if opt == 'warped_intensity_images': + assert len(val) == self.inputs.modalities * len(self.inputs.warped_label_images), \ + "Number of intensity images and label maps must be the same {0}!={1}".format( + len(val), len(self.inputs.warped_label_images)) + return super(JointFusion, self)._format_arg(opt, spec, val) + return retval + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_label_image'] = os.path.abspath( + self.inputs.output_label_image) + return outputs + + +class DenoiseImageInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 2, + 3, + 4, + argstr='-d %d', + desc='This option forces the image to be treated ' + 'as a specified-dimensional image. If not ' + 'specified, the program tries to infer the ' + 'dimensionality from the input image.') + input_image = File( + exists=True, + argstr="-i %s", + mandatory=True, + desc='A scalar image is expected as input for noise correction.') + noise_model = traits.Enum( + 'Gaussian', + 'Rician', + argstr='-n %s', + usedefault=True, + desc=('Employ a Rician or Gaussian noise model.')) + shrink_factor = traits.Int( + default_value=1, + usedefault=True, + argstr='-s %s', + desc=('Running noise correction on large images can' + ' be time consuming. To lessen computation time,' + ' the input image can be resampled. The shrink' + ' factor, specified as a single integer, describes' + ' this resampling. 
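As the method branch of _format_arg above shows, alpha and beta are only folded into -m when the method string does not already carry bracketed parameters, so an explicit 'Joint[0.1,2]' silently overrides both traits. A standalone sketch of that rule:

    # Sketch of JointFusion's -m assembly: explicit brackets win over alpha/beta.
    def format_method(method, alpha, beta):
        if '[' in method:
            return '-m {0}'.format(method)
        return '-m {0}[{1},{2}]'.format(method, alpha, beta)

    print(format_method('Joint[0.1,2]', 0.5, 1))  # -m Joint[0.1,2]
    print(format_method('Joint', 0.5, 1))         # -m Joint[0.5,1]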
Shrink factor = 1 is the default.')) + output_image = File( + argstr="-o %s", + name_source=['input_image'], + hash_files=False, + keep_extension=True, + name_template='%s_noise_corrected', + desc='The output consists of the noise corrected' + ' version of the input image.') + save_noise = traits.Bool( + False, + mandatory=True, + usedefault=True, + desc=('True if the estimated noise should be saved to file.'), + xor=['noise_image']) + noise_image = File( + name_source=['input_image'], + hash_files=False, + keep_extension=True, + name_template='%s_noise', + desc='Filename for the estimated noise.') + verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.')) + + +class DenoiseImageOutputSpec(TraitedSpec): + output_image = File(exists=True) + noise_image = File() + + +class DenoiseImage(ANTSCommand): + """ + Examples + -------- + >>> import copy + >>> from nipype.interfaces.ants import DenoiseImage + >>> denoise = DenoiseImage() + >>> denoise.inputs.dimension = 3 + >>> denoise.inputs.input_image = 'im1.nii' + >>> denoise.cmdline + 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1' + + >>> denoise_2 = copy.deepcopy(denoise) + >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz' + >>> denoise_2.inputs.noise_model = 'Rician' + >>> denoise_2.inputs.shrink_factor = 2 + >>> denoise_2.cmdline + 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2' + + >>> denoise_3 = DenoiseImage() + >>> denoise_3.inputs.input_image = 'im1.nii' + >>> denoise_3.inputs.save_noise = True + >>> denoise_3.cmdline + 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' + """ + input_spec = DenoiseImageInputSpec + output_spec = DenoiseImageOutputSpec + _cmd = 'DenoiseImage' + + def _format_arg(self, name, trait_spec, value): + if ((name == 'output_image') and + (self.inputs.save_noise or isdefined(self.inputs.noise_image))): + newval = '[ %s, %s ]' % ( + self._filename_from_source('output_image'), + self._filename_from_source('noise_image')) + return trait_spec.argstr % newval + + return super(DenoiseImage, self)._format_arg(name, trait_spec, value) + + +class AntsJointFusionInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + 4, + argstr='-d %d', + desc='This option forces the image to be treated ' + 'as a specified-dimensional image. If not ' + 'specified, the program tries to infer the ' + 'dimensionality from the input image.') + target_image = traits.List( + InputMultiPath(File(exists=True)), + argstr='-t %s', + mandatory=True, + desc='The target image (or ' + 'multimodal target images) assumed to be ' + 'aligned to a common image domain.') + atlas_image = traits.List( + InputMultiPath(File(exists=True)), + argstr="-g %s...", + mandatory=True, + desc='The atlas image (or ' + 'multimodal atlas images) assumed to be ' + 'aligned to a common image domain.') + atlas_segmentation_image = InputMultiPath( + File(exists=True), + argstr="-l %s...", + mandatory=True, + desc='The atlas segmentation ' + 'images. For performing label fusion the number ' + 'of specified segmentations should be identical ' + 'to the number of atlas image sets.') + alpha = traits.Float( + default_value=0.1, + usedefault=True, + argstr='-a %s', + desc=( + 'Regularization ' + 'term added to matrix Mx for calculating the inverse. Default = 0.1' + )) + beta = traits.Float( + default_value=2.0, + usedefault=True, + argstr='-b %s', + desc=('Exponent for mapping ' + 'intensity difference to the joint error. 
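DenoiseImage follows the same output-pair convention as N4BiasFieldCorrection above: setting save_noise (or an explicit noise_image) turns -o into a bracketed [ corrected, noise ] pair. A sketch mirroring the denoise_3 doctest, assuming the placeholder input file exists locally as it does for the doctests:

    # Sketch: save_noise switches -o to a [ corrected, noise ] pair
    # (mirrors the denoise_3 doctest; the path is a placeholder).
    from nipype.interfaces.ants import DenoiseImage

    denoise = DenoiseImage()
    denoise.inputs.input_image = 'im1.nii'
    denoise.inputs.save_noise = True
    print(denoise.cmdline)
    # DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1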
Default = 2.0'))
+    retain_label_posterior_images = traits.Bool(
+        False,
+        argstr='-r',
+        usedefault=True,
+        requires=['atlas_segmentation_image'],
+        desc=('Retain label posterior probability images. Requires '
+              'atlas segmentations to be specified. Default = false'))
+    retain_atlas_voting_images = traits.Bool(
+        False,
+        argstr='-f',
+        usedefault=True,
+        desc=('Retain atlas voting images. Default = false'))
+    constrain_nonnegative = traits.Bool(
+        False,
+        argstr='-c',
+        usedefault=True,
+        desc=('Constrain solution to non-negative weights.'))
+    patch_radius = traits.ListInt(
+        minlen=3,
+        maxlen=3,
+        argstr='-p %s',
+        desc=('Patch radius for similarity measures. '
+              'Default: 2x2x2'))
+    patch_metric = traits.Enum(
+        'PC',
+        'MSQ',
+        argstr='-m %s',
+        desc=('Metric to be used in determining the most similar '
+              'neighborhood patch. Options include Pearson\'s '
+              'correlation (PC) and mean squares (MSQ). Default = '
+              'PC (Pearson correlation).'))
+    search_radius = traits.List(
+        [3, 3, 3],
+        minlen=1,
+        maxlen=3,
+        argstr='-s %s',
+        usedefault=True,
+        desc=('Search radius for similarity measures. Default = 3x3x3. '
+              'One can also specify an image where the value at the '
+              'voxel specifies the isotropic search radius at that voxel.'))
+    exclusion_image_label = traits.List(
+        traits.Str(),
+        argstr='-e %s',
+        requires=['exclusion_image'],
+        desc=('Specify a label for the exclusion region.'))
+    exclusion_image = traits.List(
+        File(exists=True),
+        desc=('Specify an exclusion region for the given label.'))
+    mask_image = File(
+        argstr='-x %s',
+        exists=True,
+        desc='If a mask image '
+        'is specified, fusion is only performed in the mask region.')
+    out_label_fusion = File(
+        argstr="%s", hash_files=False, desc='The output label fusion image.')
+    out_intensity_fusion_name_format = traits.Str(
+        argstr="",
+        desc='Optional intensity fusion '
+        'image file name format. '
+        '(e.g. 
"antsJointFusionIntensity_%d.nii.gz")') + out_label_post_prob_name_format = traits.Str( + 'antsJointFusionPosterior_%d.nii.gz', + requires=['out_label_fusion', 'out_intensity_fusion_name_format'], + desc='Optional label posterior probability ' + 'image file name format.') + out_atlas_voting_weight_name_format = traits.Str( + 'antsJointFusionVotingWeight_%d.nii.gz', + requires=[ + 'out_label_fusion', 'out_intensity_fusion_name_format', + 'out_label_post_prob_name_format' + ], + desc='Optional atlas voting weight image ' + 'file name format.') + verbose = traits.Bool(False, argstr="-v", desc=('Verbose output.')) + + +class AntsJointFusionOutputSpec(TraitedSpec): + out_label_fusion = File(exists=True) + out_intensity_fusion_name_format = traits.Str() + out_label_post_prob_name_format = traits.Str() + out_atlas_voting_weight_name_format = traits.Str() + + +class AntsJointFusion(ANTSCommand): + """ + Examples + -------- + + >>> from nipype.interfaces.ants import AntsJointFusion + >>> antsjointfusion = AntsJointFusion() + >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii' + >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] + >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] + >>> antsjointfusion.inputs.target_image = ['im1.nii'] + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ +-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" + + >>> antsjointfusion.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz \ +-b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" + + >>> antsjointfusion.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], + ... ['rc2s1.nii','rc2s2.nii'] ] + >>> antsjointfusion.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', + ... 
'segmentation1.nii.gz'] + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ +-l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii \ +-s 3x3x3 -t ['im1.nii', 'im2.nii']" + + >>> antsjointfusion.inputs.dimension = 3 + >>> antsjointfusion.inputs.alpha = 0.5 + >>> antsjointfusion.inputs.beta = 1.0 + >>> antsjointfusion.inputs.patch_radius = [3,2,1] + >>> antsjointfusion.inputs.search_radius = [3] + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ +-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii \ +-p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" + + >>> antsjointfusion.inputs.search_radius = ['mask.nii'] + >>> antsjointfusion.inputs.verbose = True + >>> antsjointfusion.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] + >>> antsjointfusion.inputs.exclusion_image_label = ['1','2'] + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ +-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ +-o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" + + >>> antsjointfusion.inputs.out_label_fusion = 'ants_fusion_label_output.nii' + >>> antsjointfusion.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' + >>> antsjointfusion.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' + >>> antsjointfusion.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz' + >>> antsjointfusion.cmdline + "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] \ +-l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] \ +-o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, \ +ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] \ +-p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" + + """ + input_spec = AntsJointFusionInputSpec + output_spec = AntsJointFusionOutputSpec + _cmd = 'antsJointFusion' + + def _format_arg(self, opt, spec, val): + if opt == 'exclusion_image_label': + retval = [] + for ii in range(len(self.inputs.exclusion_image_label)): + retval.append( + '-e {0}[{1}]'.format(self.inputs.exclusion_image_label[ii], + self.inputs.exclusion_image[ii])) + retval = ' '.join(retval) + elif opt == 'patch_radius': + retval = '-p {0}'.format(self._format_xarray(val)) + elif opt == 'search_radius': + retval = '-s {0}'.format(self._format_xarray(val)) + elif opt == 'out_label_fusion': + if isdefined(self.inputs.out_intensity_fusion_name_format): + if isdefined(self.inputs.out_label_post_prob_name_format): + if isdefined( + self.inputs.out_atlas_voting_weight_name_format): + retval = '-o [{0}, {1}, {2}, {3}]'.format( + self.inputs.out_label_fusion, + self.inputs.out_intensity_fusion_name_format, + self.inputs.out_label_post_prob_name_format, + self.inputs.out_atlas_voting_weight_name_format) + else: + retval = '-o [{0}, {1}, {2}]'.format( + self.inputs.out_label_fusion, + self.inputs.out_intensity_fusion_name_format, + self.inputs.out_label_post_prob_name_format) + else: + retval = '-o [{0}, {1}]'.format( + self.inputs.out_label_fusion, + self.inputs.out_intensity_fusion_name_format) + else: + retval = '-o {0}'.format(self.inputs.out_label_fusion) + elif opt == 'out_intensity_fusion_name_format': 
+            retval = ''
+            if not isdefined(self.inputs.out_label_fusion):
+                retval = '-o {0}'.format(
+                    self.inputs.out_intensity_fusion_name_format)
+        elif opt == 'atlas_image':
+            atlas_image_cmd = " ".join([
+                '-g [{0}]'.format(", ".join("'%s'" % fn for fn in ai))
+                for ai in self.inputs.atlas_image
+            ])
+            retval = atlas_image_cmd
+        elif opt == 'target_image':
+            target_image_cmd = " ".join([
+                '-t [{0}]'.format(", ".join("'%s'" % fn for fn in ai))
+                for ai in self.inputs.target_image
+            ])
+            retval = target_image_cmd
+        elif opt == 'atlas_segmentation_image':
+            assert len(val) == len(self.inputs.atlas_image), "Number of specified " \
+                "segmentations should be identical to the number of atlas image " \
+                "sets ({0} != {1})".format(len(val), len(self.inputs.atlas_image))
+
+            atlas_segmentation_image_cmd = " ".join([
+                '-l {0}'.format(fn)
+                for fn in self.inputs.atlas_segmentation_image
+            ])
+            retval = atlas_segmentation_image_cmd
+        else:
+            return super(AntsJointFusion, self)._format_arg(opt, spec, val)
+        return retval
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        if isdefined(self.inputs.out_label_fusion):
+            outputs['out_label_fusion'] = os.path.abspath(
+                self.inputs.out_label_fusion)
+        if isdefined(self.inputs.out_intensity_fusion_name_format):
+            outputs['out_intensity_fusion_name_format'] = os.path.abspath(
+                self.inputs.out_intensity_fusion_name_format)
+        if isdefined(self.inputs.out_label_post_prob_name_format):
+            outputs['out_label_post_prob_name_format'] = os.path.abspath(
+                self.inputs.out_label_post_prob_name_format)
+        if isdefined(self.inputs.out_atlas_voting_weight_name_format):
+            outputs['out_atlas_voting_weight_name_format'] = os.path.abspath(
+                self.inputs.out_atlas_voting_weight_name_format)
+
+        return outputs
+
+
+class KellyKapowskiInputSpec(ANTSCommandInputSpec):
+    dimension = traits.Enum(
+        3,
+        2,
+        argstr='--image-dimensionality %d',
+        usedefault=True,
+        desc='image dimension (2 or 3)')
+
+    segmentation_image = File(
+        exists=True,
+        argstr='--segmentation-image "%s"',
+        mandatory=True,
+        desc=
+        "A segmentation image must be supplied labeling the gray and white matters."
+        " Default values = 2 and 3, respectively.",
+    )
+
+    gray_matter_label = traits.Int(
+        2,
+        usedefault=True,
+        desc=
+        "The label value for the gray matter label in the segmentation_image.")
+
+    white_matter_label = traits.Int(
+        3,
+        usedefault=True,
+        desc=
+        "The label value for the white matter label in the segmentation_image."
+    )
+
+    gray_matter_prob_image = File(
+        exists=True,
+        argstr='--gray-matter-probability-image "%s"',
+        desc=
+        "In addition to the segmentation image, a gray matter probability image can be"
+        " used. If no such image is supplied, one is created using the segmentation image"
+        " and a variance of 1.0 mm.")
+
+    white_matter_prob_image = File(
+        exists=True,
+        argstr='--white-matter-probability-image "%s"',
+        desc=
+        "In addition to the segmentation image, a white matter probability image can be"
+        " used. If no such image is supplied, one is created using the segmentation image"
+        " and a variance of 1.0 mm.")
+
+    convergence = traits.Str(
+        default="[50,0.001,10]",
+        argstr='--convergence "%s"',
+        usedefault=True,
+        desc=
+        "Convergence is determined by fitting a line to the normalized energy profile of"
+        " the last N iterations (where N is specified by the window size) and determining"
+        " the slope which is then compared with the convergence threshold.",
+    )
+
+    thickness_prior_estimate = traits.Float(
+        10,
+        usedefault=True,
+        argstr="--thickness-prior-estimate %f",
+        desc=
+        "Provides a prior constraint on the final thickness measurement in mm."
+    )
+
+    thickness_prior_image = File(
+        exists=True,
+        argstr='--thickness-prior-image "%s"',
+        desc="An image containing spatially varying prior thickness values.")
+
+    gradient_step = traits.Float(
+        0.025,
+        usedefault=True,
+        argstr="--gradient-step %f",
+        desc="Gradient step size for the optimization.")
+
+    smoothing_variance = traits.Float(
+        1.0, usedefault=True,
+        argstr="--smoothing-variance %f",
+        desc="Defines the Gaussian smoothing of the hit and total images.")
+
+    smoothing_velocity_field = traits.Float(
+        1.5, usedefault=True,
+        argstr="--smoothing-velocity-field-parameter %f",
+        desc=
+        "Defines the Gaussian smoothing of the velocity field (default = 1.5)."
+        " If the b-spline smoothing option is chosen, then this defines the"
+        " isotropic mesh spacing for the smoothing spline (default = 15).")
+
+    use_bspline_smoothing = traits.Bool(
+        argstr="--use-bspline-smoothing 1",
+        desc="Sets the option for B-spline smoothing of the velocity field.")
+
+    number_integration_points = traits.Int(
+        10, usedefault=True,
+        argstr="--number-of-integration-points %d",
+        desc="Number of compositions of the diffeomorphism per iteration.")
+
+    max_invert_displacement_field_iters = traits.Int(
+        20, usedefault=True,
+        argstr="--maximum-number-of-invert-displacement-field-iterations %d",
+        desc="Maximum number of iterations for estimating the inverse "
+        "displacement field.")
+
+    cortical_thickness = File(
+        argstr='--output "%s"',
+        keep_extension=True,
+        name_source=["segmentation_image"],
+        name_template='%s_cortical_thickness',
+        desc='Filename for the cortical thickness.',
+        hash_files=False)
+
+    warped_white_matter = File(
+        name_source=["segmentation_image"],
+        keep_extension=True,
+        name_template='%s_warped_white_matter',
+        desc='Filename for the warped white matter file.',
+        hash_files=False)
+
+
+class KellyKapowskiOutputSpec(TraitedSpec):
+    cortical_thickness = File(
+        desc="A thickness map defined in the segmented gray matter.")
+    warped_white_matter = File(desc="A warped white matter image.")
+
+
+class KellyKapowski(ANTSCommand):
+    """ Nipype Interface to ANTs' KellyKapowski, also known as DiReCT.
+
+    DiReCT is a registration-based estimate of cortical thickness. It was published
+    in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based
+    cortical thickness measurement, Neuroimage 2009, 45:867--879.
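+
+    The interface produces a cortical thickness map defined within the
+    segmented gray matter and, optionally, a warped white matter image
+    (see KellyKapowskiOutputSpec above).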
+ + Examples + -------- + >>> from nipype.interfaces.ants.segmentation import KellyKapowski + >>> kk = KellyKapowski() + >>> kk.inputs.dimension = 3 + >>> kk.inputs.segmentation_image = "segmentation0.nii.gz" + >>> kk.inputs.convergence = "[45,0.0,10]" + >>> kk.inputs.thickness_prior_estimate = 10 + >>> kk.cmdline + 'KellyKapowski --convergence "[45,0.0,10]" \ +--output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" \ +--image-dimensionality 3 --gradient-step 0.025000 \ +--maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 \ +--segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 \ +--smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' + + """ + _cmd = "KellyKapowski" + input_spec = KellyKapowskiInputSpec + output_spec = KellyKapowskiOutputSpec + + references_ = [{ + 'entry': + BibTeX( + "@book{Das2009867," + "author={Sandhitsu R. Das and Brian B. Avants and Murray Grossman and James C. Gee}," + "title={Registration based cortical thickness measurement.}," + "journal={NeuroImage}," + "volume={45}," + "number={37}," + "pages={867--879}," + "year={2009}," + "issn={1053-8119}," + "url={http://www.sciencedirect.com/science/article/pii/S1053811908012780}," + "doi={http://dx.doi.org/10.1016/j.neuroimage.2008.12.016}" + "}"), + 'description': + 'The details on the implementation of DiReCT.', + 'tags': ['implementation'], + }] + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + skip += [ + 'warped_white_matter', 'gray_matter_label', 'white_matter_label' + ] + return super(KellyKapowski, self)._parse_inputs(skip=skip) + + def _gen_filename(self, name): + if name == 'cortical_thickness': + output = self.inputs.cortical_thickness + if not isdefined(output): + _, name, ext = split_filename(self.inputs.segmentation_image) + output = name + '_cortical_thickness' + ext + return output + + if name == 'warped_white_matter': + output = self.inputs.warped_white_matter + if not isdefined(output): + _, name, ext = split_filename(self.inputs.segmentation_image) + output = name + '_warped_white_matter' + ext + return output + + return None + + def _format_arg(self, opt, spec, val): + if opt == "segmentation_image": + newval = '[{0},{1},{2}]'.format(self.inputs.segmentation_image, + self.inputs.gray_matter_label, + self.inputs.white_matter_label) + return spec.argstr % newval + + if opt == "cortical_thickness": + ct = self._gen_filename("cortical_thickness") + wm = self._gen_filename("warped_white_matter") + newval = '[{},{}]'.format(ct, wm) + return spec.argstr % newval + + return super(KellyKapowski, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/ants/tests/__init__.py b/nipype/interfaces/ants/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/interfaces/ants/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/interfaces/ants/tests/test_auto_ANTS.py b/nipype/interfaces/ants/tests/test_auto_ANTS.py new file mode 100644 index 0000000000..7c7ef6682c --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ANTS.py @@ -0,0 +1,97 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import ANTS + + +def test_ANTS_inputs(): + input_map = dict( + affine_gradient_descent_option=dict(argstr='%s', ), + 
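+        # Each entry below mirrors the metadata declared on the corresponding
+        # trait in ANTS.input_spec(); the nested loops at the end of this test
+        # assert that the generated spec and these expectations stay in sync.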
args=dict(argstr='%s', ), + delta_time=dict(requires=['number_of_time_steps'], ), + dimension=dict( + argstr='%d', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict(mandatory=True, ), + gradient_step_length=dict(requires=['transformation_model'], ), + metric=dict(mandatory=True, ), + metric_weight=dict( + mandatory=True, + requires=['metric'], + usedefault=True, + ), + mi_option=dict( + argstr='--MI-option %s', + sep='x', + ), + moving_image=dict( + argstr='%s', + mandatory=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_of_affine_iterations=dict( + argstr='--number-of-affine-iterations %s', + sep='x', + ), + number_of_iterations=dict( + argstr='--number-of-iterations %s', + sep='x', + ), + number_of_time_steps=dict(requires=['gradient_step_length'], ), + output_transform_prefix=dict( + argstr='--output-naming %s', + mandatory=True, + usedefault=True, + ), + radius=dict( + mandatory=True, + requires=['metric'], + ), + regularization=dict(argstr='%s', ), + regularization_deformation_field_sigma=dict( + requires=['regularization'], ), + regularization_gradient_field_sigma=dict( + requires=['regularization'], ), + smoothing_sigmas=dict( + argstr='--gaussian-smoothing-sigmas %s', + sep='x', + ), + subsampling_factors=dict( + argstr='--subsampling-factors %s', + sep='x', + ), + symmetry_type=dict(requires=['delta_time'], ), + transformation_model=dict( + argstr='%s', + mandatory=True, + ), + use_histogram_matching=dict( + argstr='%s', + usedefault=True, + ), + ) + inputs = ANTS.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ANTS_outputs(): + output_map = dict( + affine_transform=dict(), + inverse_warp_transform=dict(), + metaheader=dict(), + metaheader_raw=dict(), + warp_transform=dict(), + ) + outputs = ANTS.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py new file mode 100644 index 0000000000..7423579ef7 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ANTSCommand.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import ANTSCommand + + +def test_ANTSCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = ANTSCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py new file mode 100644 index 0000000000..fed21cdbef --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_AffineInitializer.py @@ -0,0 +1,69 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AffineInitializer + + +def test_AffineInitializer_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%s', + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + 
argstr='%s', + mandatory=True, + position=1, + ), + local_search=dict( + argstr='%d', + position=7, + usedefault=True, + ), + moving_image=dict( + argstr='%s', + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='%s', + position=3, + usedefault=True, + ), + principal_axes=dict( + argstr='%d', + position=6, + usedefault=True, + ), + radian_fraction=dict( + argstr='%f', + position=5, + usedefault=True, + ), + search_factor=dict( + argstr='%f', + position=4, + usedefault=True, + ), + ) + inputs = AffineInitializer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AffineInitializer_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AffineInitializer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py new file mode 100644 index 0000000000..292e6b398b --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -0,0 +1,98 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import AntsJointFusion + + +def test_AntsJointFusion_inputs(): + input_map = dict( + alpha=dict( + argstr='-a %s', + usedefault=True, + ), + args=dict(argstr='%s', ), + atlas_image=dict( + argstr='-g %s...', + mandatory=True, + ), + atlas_segmentation_image=dict( + argstr='-l %s...', + mandatory=True, + ), + beta=dict( + argstr='-b %s', + usedefault=True, + ), + constrain_nonnegative=dict( + argstr='-c', + usedefault=True, + ), + dimension=dict(argstr='-d %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclusion_image=dict(), + exclusion_image_label=dict( + argstr='-e %s', + requires=['exclusion_image'], + ), + mask_image=dict(argstr='-x %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_atlas_voting_weight_name_format=dict( + requires=[ + 'out_label_fusion', 'out_intensity_fusion_name_format', + 'out_label_post_prob_name_format' + ], ), + out_intensity_fusion_name_format=dict(argstr='', ), + out_label_fusion=dict( + argstr='%s', + hash_files=False, + ), + out_label_post_prob_name_format=dict( + requires=['out_label_fusion', 'out_intensity_fusion_name_format'], + ), + patch_metric=dict(argstr='-m %s', ), + patch_radius=dict( + argstr='-p %s', + maxlen=3, + minlen=3, + ), + retain_atlas_voting_images=dict( + argstr='-f', + usedefault=True, + ), + retain_label_posterior_images=dict( + argstr='-r', + requires=['atlas_segmentation_image'], + usedefault=True, + ), + search_radius=dict( + argstr='-s %s', + usedefault=True, + ), + target_image=dict( + argstr='-t %s', + mandatory=True, + ), + verbose=dict(argstr='-v', ), + ) + inputs = AntsJointFusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AntsJointFusion_outputs(): + output_map = dict( + out_atlas_voting_weight_name_format=dict(), + out_intensity_fusion_name_format=dict(), + out_label_fusion=dict(), + out_label_post_prob_name_format=dict(), + ) + outputs = AntsJointFusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py new file mode 100644 index 0000000000..c2bbffa14f --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransforms.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..resampling import ApplyTransforms + + +def test_ApplyTransforms_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + default_value=dict( + argstr='--default-value %g', + usedefault=True, + ), + dimension=dict(argstr='--dimensionality %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + float=dict( + argstr='--float %d', + usedefault=True, + ), + input_image=dict( + argstr='--input %s', + mandatory=True, + ), + input_image_type=dict(argstr='--input-image-type %d', ), + interpolation=dict( + argstr='%s', + usedefault=True, + ), + interpolation_parameters=dict(), + invert_transform_flags=dict(), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict(usedefault=True, ), + output_image=dict( + argstr='--output %s', + genfile=True, + hash_files=False, + ), + print_out_composite_warp_file=dict(requires=['output_image'], ), + reference_image=dict( + argstr='--reference-image %s', + mandatory=True, + ), + transforms=dict( + argstr='%s', + mandatory=True, + ), + ) + inputs = ApplyTransforms.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyTransforms_outputs(): + output_map = dict(output_image=dict(), ) + outputs = ApplyTransforms.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py new file mode 100644 index 0000000000..92c6c21ec1 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ApplyTransformsToPoints.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..resampling import ApplyTransformsToPoints + + +def test_ApplyTransformsToPoints_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict(argstr='--dimensionality %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='--input %s', + mandatory=True, + ), + invert_transform_flags=dict(), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr='--output %s', + hash_files=False, + name_source=['input_file'], + name_template='%s_transformed.csv', + ), + transforms=dict( + argstr='%s', + mandatory=True, + ), + ) + inputs = ApplyTransformsToPoints.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyTransformsToPoints_outputs(): + output_map = dict(output_file=dict(), ) + outputs = ApplyTransformsToPoints.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_Atropos.py 
b/nipype/interfaces/ants/tests/test_auto_Atropos.py new file mode 100644 index 0000000000..90bc13778a --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_Atropos.py @@ -0,0 +1,75 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import Atropos + + +def test_Atropos_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + convergence_threshold=dict(requires=['n_iterations'], ), + dimension=dict( + argstr='--image-dimensionality %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + icm_use_synchronous_update=dict(argstr='%s', ), + initialization=dict( + argstr='%s', + mandatory=True, + requires=['number_of_tissue_classes'], + ), + intensity_images=dict( + argstr='--intensity-image %s...', + mandatory=True, + ), + likelihood_model=dict(argstr='--likelihood-model %s', ), + mask_image=dict( + argstr='--mask-image %s', + mandatory=True, + ), + maximum_number_of_icm_terations=dict( + requires=['icm_use_synchronous_update'], ), + mrf_radius=dict(requires=['mrf_smoothing_factor'], ), + mrf_smoothing_factor=dict(argstr='%s', ), + n_iterations=dict(argstr='%s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_of_tissue_classes=dict(mandatory=True, ), + out_classified_image_name=dict( + argstr='%s', + genfile=True, + hash_files=False, + ), + output_posteriors_name_template=dict(usedefault=True, ), + posterior_formulation=dict(argstr='%s', ), + prior_probability_images=dict(), + prior_probability_threshold=dict(requires=['prior_weighting'], ), + prior_weighting=dict(), + save_posteriors=dict(), + use_mixture_model_proportions=dict( + requires=['posterior_formulation'], ), + use_random_seed=dict( + argstr='--use-random-seed %d', + usedefault=True, + ), + ) + inputs = Atropos.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Atropos_outputs(): + output_map = dict( + classified_image=dict(), + posteriors=dict(), + ) + outputs = Atropos.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py new file mode 100644 index 0000000000..3fe3abe5cf --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_AverageAffineTransform.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AverageAffineTransform + + +def test_AverageAffineTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_affine_transform=dict( + argstr='%s', + mandatory=True, + position=1, + ), + transforms=dict( + argstr='%s', + mandatory=True, + position=3, + ), + ) + inputs = AverageAffineTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AverageAffineTransform_outputs(): + output_map = dict(affine_transform=dict(), ) + outputs = AverageAffineTransform.output_spec() + + for key, metadata in list(output_map.items()): + 
for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_AverageImages.py b/nipype/interfaces/ants/tests/test_auto_AverageImages.py new file mode 100644 index 0000000000..41e0c99007 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_AverageImages.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AverageImages + + +def test_AverageImages_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr='%s', + mandatory=True, + position=3, + ), + normalize=dict( + argstr='%d', + mandatory=True, + position=2, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_average_image=dict( + argstr='%s', + hash_files=False, + position=1, + usedefault=True, + ), + ) + inputs = AverageImages.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AverageImages_outputs(): + output_map = dict(output_average_image=dict(), ) + outputs = AverageImages.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py new file mode 100644 index 0000000000..2d48192199 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_BrainExtraction.py @@ -0,0 +1,78 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import BrainExtraction + + +def test_BrainExtraction_inputs(): + input_map = dict( + anatomical_image=dict( + argstr='-a %s', + mandatory=True, + ), + args=dict(argstr='%s', ), + brain_probability_mask=dict( + argstr='-m %s', + copyfile=False, + mandatory=True, + ), + brain_template=dict( + argstr='-e %s', + mandatory=True, + ), + debug=dict(argstr='-z 1', ), + dimension=dict( + argstr='-d %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict(argstr='-f %s', ), + image_suffix=dict( + argstr='-s %s', + usedefault=True, + ), + keep_temporary_files=dict(argstr='-k %d', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + use_floatingpoint_precision=dict(argstr='-q %d', ), + use_random_seeding=dict(argstr='-u %d', ), + ) + inputs = BrainExtraction.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BrainExtraction_outputs(): + output_map = dict( + BrainExtractionBrain=dict(), + BrainExtractionCSF=dict(), + BrainExtractionGM=dict(), + BrainExtractionInitialAffine=dict(), + BrainExtractionInitialAffineFixed=dict(), + BrainExtractionInitialAffineMoving=dict(), + BrainExtractionLaplacian=dict(), + BrainExtractionMask=dict(), + BrainExtractionPrior0GenericAffine=dict(), + BrainExtractionPrior1InverseWarp=dict(), + BrainExtractionPrior1Warp=dict(), + BrainExtractionPriorWarped=dict(), + BrainExtractionSegmentation=dict(), + BrainExtractionTemplateLaplacian=dict(), + 
BrainExtractionTmp=dict(), + BrainExtractionWM=dict(), + N4Corrected0=dict(), + N4Truncated0=dict(), + ) + outputs = BrainExtraction.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py new file mode 100644 index 0000000000..83fb2ed2aa --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ComposeMultiTransform.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ComposeMultiTransform + + +def test_ComposeMultiTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_transform=dict( + argstr='%s', + keep_extension=True, + name_source=['transforms'], + name_template='%s_composed', + position=1, + ), + reference_image=dict( + argstr='%s', + position=2, + ), + transforms=dict( + argstr='%s', + mandatory=True, + position=3, + ), + ) + inputs = ComposeMultiTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComposeMultiTransform_outputs(): + output_map = dict(output_transform=dict(), ) + outputs = ComposeMultiTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py new file mode 100644 index 0000000000..ea9a16cbe2 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_ConvertScalarImageToRGB.py @@ -0,0 +1,81 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..visualization import ConvertScalarImageToRGB + + +def test_ConvertScalarImageToRGB_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + colormap=dict( + argstr='%s', + mandatory=True, + position=4, + usedefault=True, + ), + custom_color_map_file=dict( + argstr='%s', + position=5, + usedefault=True, + ), + dimension=dict( + argstr='%d', + mandatory=True, + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr='%s', + mandatory=True, + position=1, + ), + mask_image=dict( + argstr='%s', + position=3, + usedefault=True, + ), + maximum_RGB_output=dict( + argstr='%d', + position=9, + usedefault=True, + ), + maximum_input=dict( + argstr='%d', + mandatory=True, + position=7, + ), + minimum_RGB_output=dict( + argstr='%d', + position=8, + usedefault=True, + ), + minimum_input=dict( + argstr='%d', + mandatory=True, + position=6, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr='%s', + position=2, + usedefault=True, + ), + ) + inputs = ConvertScalarImageToRGB.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConvertScalarImageToRGB_outputs(): + output_map = dict(output_image=dict(), ) + outputs = 
ConvertScalarImageToRGB.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py new file mode 100644 index 0000000000..dd39568bc0 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_CorticalThickness.py @@ -0,0 +1,88 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import CorticalThickness + + +def test_CorticalThickness_inputs(): + input_map = dict( + anatomical_image=dict( + argstr='-a %s', + mandatory=True, + ), + args=dict(argstr='%s', ), + b_spline_smoothing=dict(argstr='-v', ), + brain_probability_mask=dict( + argstr='-m %s', + copyfile=False, + mandatory=True, + ), + brain_template=dict( + argstr='-e %s', + mandatory=True, + ), + cortical_label_image=dict(), + debug=dict(argstr='-z 1', ), + dimension=dict( + argstr='-d %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extraction_registration_mask=dict(argstr='-f %s', ), + image_suffix=dict( + argstr='-s %s', + usedefault=True, + ), + keep_temporary_files=dict(argstr='-k %d', ), + label_propagation=dict(argstr='-l %s', ), + max_iterations=dict(argstr='-i %d', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + posterior_formulation=dict(argstr='-b %s', ), + prior_segmentation_weight=dict(argstr='-w %f', ), + quick_registration=dict(argstr='-q 1', ), + segmentation_iterations=dict(argstr='-n %d', ), + segmentation_priors=dict( + argstr='-p %s', + mandatory=True, + ), + t1_registration_template=dict( + argstr='-t %s', + mandatory=True, + ), + use_floatingpoint_precision=dict(argstr='-j %d', ), + use_random_seeding=dict(argstr='-u %d', ), + ) + inputs = CorticalThickness.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CorticalThickness_outputs(): + output_map = dict( + BrainExtractionMask=dict(), + BrainSegmentation=dict(), + BrainSegmentationN4=dict(), + BrainSegmentationPosteriors=dict(), + BrainVolumes=dict(), + CorticalThickness=dict(), + CorticalThicknessNormedToTemplate=dict(), + SubjectToTemplate0GenericAffine=dict(), + SubjectToTemplate1Warp=dict(), + SubjectToTemplateLogJacobian=dict(), + TemplateToSubject0Warp=dict(), + TemplateToSubject1GenericAffine=dict(), + ) + outputs = CorticalThickness.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py new file mode 100644 index 0000000000..b32e7b98e0 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_CreateJacobianDeterminantImage.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CreateJacobianDeterminantImage + + +def test_CreateJacobianDeterminantImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformationField=dict( + argstr='%s', + mandatory=True, + position=1, + ), + doLogJacobian=dict( + argstr='%d', + position=3, + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + imageDimension=dict( + argstr='%d', + mandatory=True, + position=0, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + outputImage=dict( + argstr='%s', + mandatory=True, + position=2, + ), + useGeometric=dict( + argstr='%d', + position=4, + ), + ) + inputs = CreateJacobianDeterminantImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateJacobianDeterminantImage_outputs(): + output_map = dict(jacobian_image=dict(), ) + outputs = CreateJacobianDeterminantImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py new file mode 100644 index 0000000000..74c2b0a7c2 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_CreateTiledMosaic.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..visualization import CreateTiledMosaic + + +def test_CreateTiledMosaic_inputs(): + input_map = dict( + alpha_value=dict(argstr='-a %.2f', ), + args=dict(argstr='%s', ), + direction=dict(argstr='-d %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_slice=dict(argstr='-f %s', ), + input_image=dict( + argstr='-i %s', + mandatory=True, + ), + mask_image=dict(argstr='-x %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr='-o %s', + usedefault=True, + ), + pad_or_crop=dict(argstr='-p %s', ), + permute_axes=dict(argstr='-g', ), + rgb_image=dict( + argstr='-r %s', + mandatory=True, + ), + slices=dict(argstr='-s %s', ), + tile_geometry=dict(argstr='-t %s', ), + ) + inputs = CreateTiledMosaic.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateTiledMosaic_outputs(): + output_map = dict(output_image=dict(), ) + outputs = CreateTiledMosaic.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py new file mode 100644 index 0000000000..882cb21854 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_DenoiseImage.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import DenoiseImage + + +def test_DenoiseImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict(argstr='-d %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr='-i %s', + mandatory=True, + ), + noise_image=dict( + hash_files=False, + keep_extension=True, + name_source=['input_image'], + name_template='%s_noise', + ), + noise_model=dict( + argstr='-n %s', + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr='-o %s', + hash_files=False, + keep_extension=True, + name_source=['input_image'], + name_template='%s_noise_corrected', + ), + save_noise=dict( + mandatory=True, + usedefault=True, + xor=['noise_image'], + ), + 
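+        # save_noise is xor'd with noise_image, so setting one excludes the
+        # other; the name_source/name_template pairs above derive default
+        # output names from input_image (e.g. a hypothetical in.nii.gz would
+        # become in_noise_corrected.nii.gz).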
shrink_factor=dict( + argstr='-s %s', + usedefault=True, + ), + verbose=dict(argstr='-v', ), + ) + inputs = DenoiseImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DenoiseImage_outputs(): + output_map = dict( + noise_image=dict(), + output_image=dict(), + ) + outputs = DenoiseImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py new file mode 100644 index 0000000000..af91f9a8af --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_GenWarpFields.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..legacy import GenWarpFields + + +def test_GenWarpFields_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bias_field_correction=dict(argstr='-n 1', ), + dimension=dict( + argstr='-d %d', + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict(argstr='-f 1', ), + input_image=dict( + argstr='-i %s', + copyfile=False, + mandatory=True, + ), + inverse_warp_template_labels=dict(argstr='-l', ), + max_iterations=dict( + argstr='-m %s', + sep='x', + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + quality_check=dict(argstr='-q 1', ), + reference_image=dict( + argstr='-r %s', + copyfile=True, + mandatory=True, + ), + similarity_metric=dict(argstr='-s %s', ), + transformation_model=dict( + argstr='-t %s', + usedefault=True, + ), + ) + inputs = GenWarpFields.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenWarpFields_outputs(): + output_map = dict( + affine_transformation=dict(), + input_file=dict(), + inverse_warp_field=dict(), + output_file=dict(), + warp_field=dict(), + ) + outputs = GenWarpFields.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_JointFusion.py b/nipype/interfaces/ants/tests/test_auto_JointFusion.py new file mode 100644 index 0000000000..9e82584729 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_JointFusion.py @@ -0,0 +1,84 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import JointFusion + + +def test_JointFusion_inputs(): + input_map = dict( + alpha=dict( + requires=['method'], + usedefault=True, + ), + args=dict(argstr='%s', ), + atlas_group_id=dict(argstr='-gp %d...', ), + atlas_group_weights=dict(argstr='-gpw %d...', ), + beta=dict( + requires=['method'], + usedefault=True, + ), + dimension=dict( + argstr='%d', + mandatory=True, + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclusion_region=dict(argstr='-x %s', ), + method=dict( + argstr='-m %s', + usedefault=True, + ), + modalities=dict( + argstr='%d', + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_label_image=dict( + argstr='%s', + mandatory=True, + 
name_template='%s', + output_name='output_label_image', + position=-1, + ), + patch_radius=dict( + argstr='-rp %s', + maxlen=3, + minlen=3, + ), + search_radius=dict( + argstr='-rs %s', + maxlen=3, + minlen=3, + ), + target_image=dict( + argstr='-tg %s...', + mandatory=True, + ), + warped_intensity_images=dict( + argstr='-g %s...', + mandatory=True, + ), + warped_label_images=dict( + argstr='-l %s...', + mandatory=True, + ), + ) + inputs = JointFusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JointFusion_outputs(): + output_map = dict(output_label_image=dict(), ) + outputs = JointFusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py new file mode 100644 index 0000000000..6129670d39 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_KellyKapowski.py @@ -0,0 +1,90 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import KellyKapowski + + +def test_KellyKapowski_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + convergence=dict( + argstr='--convergence "%s"', + usedefault=True, + ), + cortical_thickness=dict( + argstr='--output "%s"', + hash_files=False, + keep_extension=True, + name_source=['segmentation_image'], + name_template='%s_cortical_thickness', + ), + dimension=dict( + argstr='--image-dimensionality %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step=dict( + argstr='--gradient-step %f', + usedefault=True, + ), + gray_matter_label=dict(usedefault=True, ), + gray_matter_prob_image=dict( + argstr='--gray-matter-probability-image "%s"', ), + max_invert_displacement_field_iters=dict( + argstr= + '--maximum-number-of-invert-displacement-field-iterations %d', + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_integration_points=dict( + argstr='--number-of-integration-points %d', + usedefault=True, + ), + segmentation_image=dict( + argstr='--segmentation-image "%s"', + mandatory=True, + ), + smoothing_variance=dict( + argstr='--smoothing-variance %f', + usedefault=True, + ), + smoothing_velocity_field=dict( + argstr='--smoothing-velocity-field-parameter %f', + usedefault=True, + ), + thickness_prior_estimate=dict( + argstr='--thickness-prior-estimate %f', + usedefault=True, + ), + thickness_prior_image=dict(argstr='--thickness-prior-image "%s"', ), + use_bspline_smoothing=dict(argstr='--use-bspline-smoothing 1', ), + warped_white_matter=dict( + hash_files=False, + keep_extension=True, + name_source=['segmentation_image'], + name_template='%s_warped_white_matter', + ), + white_matter_label=dict(usedefault=True, ), + white_matter_prob_image=dict( + argstr='--white-matter-probability-image "%s"', ), + ) + inputs = KellyKapowski.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_KellyKapowski_outputs(): + output_map = dict( + cortical_thickness=dict(), + warped_white_matter=dict(), + ) + outputs = KellyKapowski.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py new file mode 100644 index 0000000000..a5de244711 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_LabelGeometry.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import LabelGeometry + + +def test_LabelGeometry_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + intensity_image=dict( + argstr='%s', + mandatory=True, + position=2, + usedefault=True, + ), + label_image=dict( + argstr='%s', + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr='%s', + name_source=['label_image'], + name_template='%s.csv', + position=3, + ), + ) + inputs = LabelGeometry.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LabelGeometry_outputs(): + output_map = dict(output_file=dict(), ) + outputs = LabelGeometry.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py new file mode 100644 index 0000000000..8f9a13d832 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_LaplacianThickness.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import LaplacianThickness + + +def test_LaplacianThickness_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dT=dict( + argstr='dT=%d', + position=6, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_gm=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=2, + ), + input_wm=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + opt_tolerance=dict( + argstr='optional-laplacian-tolerance=%d', + position=8, + ), + output_image=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=3, + ), + prior_thickness=dict( + argstr='priorthickval=%d', + position=5, + ), + smooth_param=dict( + argstr='smoothparam=%d', + position=4, + ), + sulcus_prior=dict( + argstr='use-sulcus-prior', + position=7, + ), + ) + inputs = LaplacianThickness.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LaplacianThickness_outputs(): + output_map = dict(output_image=dict(), ) + outputs = LaplacianThickness.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py new file mode 100644 index 0000000000..1a5041ae74 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_MeasureImageSimilarity.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MeasureImageSimilarity + + +def test_MeasureImageSimilarity_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='--dimensionality %d', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict(mandatory=True, ), + fixed_image_mask=dict(argstr='%s', ), + metric=dict( + argstr='%s', + mandatory=True, + ), + metric_weight=dict( + requires=['metric'], + usedefault=True, + ), + moving_image=dict(mandatory=True, ), + moving_image_mask=dict(requires=['fixed_image_mask'], ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + radius_or_number_of_bins=dict( + mandatory=True, + requires=['metric'], + ), + sampling_percentage=dict( + mandatory=True, + requires=['metric'], + ), + sampling_strategy=dict( + requires=['metric'], + usedefault=True, + ), + ) + inputs = MeasureImageSimilarity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MeasureImageSimilarity_outputs(): + output_map = dict(similarity=dict(), ) + outputs = MeasureImageSimilarity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py new file mode 100644 index 0000000000..1bf787018d --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_MultiplyImages.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MultiplyImages + + +def test_MultiplyImages_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_input=dict( + argstr='%s', + mandatory=True, + position=1, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_product_image=dict( + argstr='%s', + mandatory=True, + position=3, + ), + second_input=dict( + argstr='%s', + mandatory=True, + position=2, + ), + ) + inputs = MultiplyImages.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultiplyImages_outputs(): + output_map = dict(output_product_image=dict(), ) + outputs = MultiplyImages.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py new file mode 100644 index 0000000000..66edf0c3d6 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_N4BiasFieldCorrection.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import N4BiasFieldCorrection + + +def test_N4BiasFieldCorrection_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bias_image=dict(hash_files=False, ), + bspline_fitting_distance=dict(argstr='--bspline-fitting %s', ), + bspline_order=dict(requires=['bspline_fitting_distance'], ), + 
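+        # requires= marks inputs that are only valid when their listed
+        # companions are also set (bspline_order needs
+        # bspline_fitting_distance); the assertions below check that these
+        # dependencies survive regeneration of the spec.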
convergence_threshold=dict(requires=['n_iterations'], ), + copy_header=dict( + mandatory=True, + usedefault=True, + ), + dimension=dict( + argstr='-d %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr='--input-image %s', + mandatory=True, + ), + mask_image=dict(argstr='--mask-image %s', ), + n_iterations=dict(argstr='--convergence %s', ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + output_image=dict( + argstr='--output %s', + genfile=True, + hash_files=False, + ), + save_bias=dict( + mandatory=True, + usedefault=True, + xor=['bias_image'], + ), + shrink_factor=dict(argstr='--shrink-factor %d', ), + weight_image=dict(argstr='--weight-image %s', ), + ) + inputs = N4BiasFieldCorrection.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_N4BiasFieldCorrection_outputs(): + output_map = dict( + bias_image=dict(), + output_image=dict(), + ) + outputs = N4BiasFieldCorrection.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_Registration.py b/nipype/interfaces/ants/tests/test_auto_Registration.py new file mode 100644 index 0000000000..4bd253c3d0 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_Registration.py @@ -0,0 +1,160 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Registration + + +def test_Registration_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + collapse_output_transforms=dict( + argstr='--collapse-output-transforms %d', + usedefault=True, + ), + convergence_threshold=dict( + requires=['number_of_iterations'], + usedefault=True, + ), + convergence_window_size=dict( + requires=['convergence_threshold'], + usedefault=True, + ), + dimension=dict( + argstr='--dimensionality %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict(mandatory=True, ), + fixed_image_mask=dict( + argstr='%s', + max_ver='2.1.0', + xor=['fixed_image_masks'], + ), + fixed_image_masks=dict( + min_ver='2.2.0', + xor=['fixed_image_mask'], + ), + float=dict(argstr='--float %d', ), + initial_moving_transform=dict( + argstr='%s', + xor=['initial_moving_transform_com'], + ), + initial_moving_transform_com=dict( + argstr='%s', + xor=['initial_moving_transform'], + ), + initialize_transforms_per_stage=dict( + argstr='--initialize-transforms-per-stage %d', + usedefault=True, + ), + interpolation=dict( + argstr='%s', + usedefault=True, + ), + interpolation_parameters=dict(), + invert_initial_moving_transform=dict( + requires=['initial_moving_transform'], + xor=['initial_moving_transform_com'], + ), + metric=dict(mandatory=True, ), + metric_item_trait=dict(), + metric_stage_trait=dict(), + metric_weight=dict( + mandatory=True, + requires=['metric'], + usedefault=True, + ), + metric_weight_item_trait=dict(usedefault=True, ), + metric_weight_stage_trait=dict(), + moving_image=dict(mandatory=True, ), + moving_image_mask=dict( + max_ver='2.1.0', + requires=['fixed_image_mask'], + xor=['moving_image_masks'], + ), + moving_image_masks=dict( + min_ver='2.2.0', + xor=['moving_image_mask'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + number_of_iterations=dict(), + output_inverse_warped_image=dict( + 
hash_files=False, + requires=['output_warped_image'], + ), + output_transform_prefix=dict( + argstr='%s', + usedefault=True, + ), + output_warped_image=dict(hash_files=False, ), + radius_bins_item_trait=dict(usedefault=True, ), + radius_bins_stage_trait=dict(), + radius_or_number_of_bins=dict( + requires=['metric_weight'], + usedefault=True, + ), + restore_state=dict(argstr='--restore-state %s', ), + restrict_deformation=dict(), + sampling_percentage=dict(requires=['sampling_strategy'], ), + sampling_percentage_item_trait=dict(), + sampling_percentage_stage_trait=dict(), + sampling_strategy=dict(requires=['metric_weight'], ), + sampling_strategy_item_trait=dict(), + sampling_strategy_stage_trait=dict(), + save_state=dict(argstr='--save-state %s', ), + shrink_factors=dict(mandatory=True, ), + sigma_units=dict(requires=['smoothing_sigmas'], ), + smoothing_sigmas=dict(mandatory=True, ), + transform_parameters=dict(), + transforms=dict( + argstr='%s', + mandatory=True, + ), + use_estimate_learning_rate_once=dict(), + use_histogram_matching=dict(usedefault=True, ), + verbose=dict( + argstr='-v', + usedefault=True, + ), + winsorize_lower_quantile=dict( + argstr='%s', + usedefault=True, + ), + winsorize_upper_quantile=dict( + argstr='%s', + usedefault=True, + ), + write_composite_transform=dict( + argstr='--write-composite-transform %d', + usedefault=True, + ), + ) + inputs = Registration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Registration_outputs(): + output_map = dict( + composite_transform=dict(), + elapsed_time=dict(), + forward_invert_flags=dict(), + forward_transforms=dict(), + inverse_composite_transform=dict(), + inverse_warped_image=dict(), + metric_value=dict(), + reverse_invert_flags=dict(), + reverse_transforms=dict(), + save_state=dict(), + warped_image=dict(), + ) + outputs = Registration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py new file mode 100644 index 0000000000..8bc79392e1 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_RegistrationSynQuick.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import RegistrationSynQuick + + +def test_RegistrationSynQuick_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-d %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr='-f %s...', + mandatory=True, + ), + histogram_bins=dict( + argstr='-r %d', + usedefault=True, + ), + moving_image=dict( + argstr='-m %s...', + mandatory=True, + ), + num_threads=dict( + argstr='-n %d', + usedefault=True, + ), + output_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + precision_type=dict( + argstr='-p %s', + usedefault=True, + ), + spline_distance=dict( + argstr='-s %d', + usedefault=True, + ), + transform_type=dict( + argstr='-t %s', + usedefault=True, + ), + use_histogram_matching=dict(argstr='-j %d', ), + ) + inputs = RegistrationSynQuick.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_RegistrationSynQuick_outputs(): + output_map = dict( + forward_warp_field=dict(), + inverse_warp_field=dict(), + inverse_warped_image=dict(), + out_matrix=dict(), + warped_image=dict(), + ) + outputs = RegistrationSynQuick.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py new file mode 100644 index 0000000000..42020f6db9 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_WarpImageMultiTransform.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..resampling import WarpImageMultiTransform + + +def test_WarpImageMultiTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr='%s', + mandatory=True, + position=2, + ), + invert_affine=dict(), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + hash_files=False, + usedefault=True, + xor=['output_image'], + ), + output_image=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=3, + xor=['out_postfix'], + ), + reference_image=dict( + argstr='-R %s', + xor=['tightest_box'], + ), + reslice_by_header=dict(argstr='--reslice-by-header', ), + tightest_box=dict( + argstr='--tightest-bounding-box', + xor=['reference_image'], + ), + transformation_series=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + use_bspline=dict(argstr='--use-BSpline', ), + use_nearest=dict(argstr='--use-NN', ), + ) + inputs = WarpImageMultiTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpImageMultiTransform_outputs(): + output_map = dict(output_image=dict(), ) + outputs = WarpImageMultiTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py new file mode 100644 index 0000000000..de3131f056 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_WarpTimeSeriesImageMultiTransform.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..resampling import WarpTimeSeriesImageMultiTransform + + +def test_WarpTimeSeriesImageMultiTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='%d', + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_image=dict( + argstr='%s', + copyfile=True, + mandatory=True, + ), + invert_affine=dict(), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_postfix=dict( + argstr='%s', + usedefault=True, + ), + reference_image=dict( + argstr='-R %s', + xor=['tightest_box'], + ), + reslice_by_header=dict(argstr='--reslice-by-header', ), + tightest_box=dict( + argstr='--tightest-bounding-box', + xor=['reference_image'], + ), + transformation_series=dict( + argstr='%s', + copyfile=False, + mandatory=True, + ), + 
use_bspline=dict(argstr='--use-Bspline', ), + use_nearest=dict(argstr='--use-NN', ), + ) + inputs = WarpTimeSeriesImageMultiTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpTimeSeriesImageMultiTransform_outputs(): + output_map = dict(output_image=dict(), ) + outputs = WarpTimeSeriesImageMultiTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py new file mode 100644 index 0000000000..fe21858500 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_antsIntroduction.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..legacy import antsIntroduction + + +def test_antsIntroduction_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bias_field_correction=dict(argstr='-n 1', ), + dimension=dict( + argstr='-d %d', + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_proceed=dict(argstr='-f 1', ), + input_image=dict( + argstr='-i %s', + copyfile=False, + mandatory=True, + ), + inverse_warp_template_labels=dict(argstr='-l', ), + max_iterations=dict( + argstr='-m %s', + sep='x', + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + out_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + quality_check=dict(argstr='-q 1', ), + reference_image=dict( + argstr='-r %s', + copyfile=True, + mandatory=True, + ), + similarity_metric=dict(argstr='-s %s', ), + transformation_model=dict( + argstr='-t %s', + usedefault=True, + ), + ) + inputs = antsIntroduction.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_antsIntroduction_outputs(): + output_map = dict( + affine_transformation=dict(), + input_file=dict(), + inverse_warp_field=dict(), + output_file=dict(), + warp_field=dict(), + ) + outputs = antsIntroduction.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py new file mode 100644 index 0000000000..8513003c29 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_auto_buildtemplateparallel.py @@ -0,0 +1,72 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..legacy import buildtemplateparallel + + +def test_buildtemplateparallel_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bias_field_correction=dict(argstr='-n 1', ), + dimension=dict( + argstr='-d %d', + position=1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient_step_size=dict(argstr='-g %f', ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + iteration_limit=dict( + argstr='-i %d', + usedefault=True, + ), + max_iterations=dict( + argstr='-m %s', + sep='x', + ), + num_cores=dict( + argstr='-j %d', + requires=['parallelization'], + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + 
out_prefix=dict( + argstr='-o %s', + usedefault=True, + ), + parallelization=dict( + argstr='-c %d', + usedefault=True, + ), + rigid_body_registration=dict(argstr='-r 1', ), + similarity_metric=dict(argstr='-s %s', ), + transformation_model=dict( + argstr='-t %s', + usedefault=True, + ), + use_first_as_target=dict(), + ) + inputs = buildtemplateparallel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_buildtemplateparallel_outputs(): + output_map = dict( + final_template_file=dict(), + subject_outfiles=dict(), + template_files=dict(), + ) + outputs = buildtemplateparallel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/ants/tests/test_extra_Registration.py b/nipype/interfaces/ants/tests/test_extra_Registration.py new file mode 100644 index 0000000000..745b825c65 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_extra_Registration.py @@ -0,0 +1,22 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import unicode_literals +from nipype.interfaces.ants import registration +import os +import pytest + + +def test_ants_mand(tmpdir): + tmpdir.chdir() + filepath = os.path.dirname(os.path.realpath(__file__)) + datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + + ants = registration.ANTS() + ants.inputs.transformation_model = "SyN" + ants.inputs.moving_image = [os.path.join(datadir, 'resting.nii')] + ants.inputs.fixed_image = [os.path.join(datadir, 'T1.nii')] + ants.inputs.metric = ['MI'] + + with pytest.raises(ValueError) as er: + ants.run() + assert "ANTS requires a value for input 'radius'" in str(er.value) diff --git a/nipype/interfaces/ants/tests/test_resampling.py b/nipype/interfaces/ants/tests/test_resampling.py new file mode 100644 index 0000000000..14903f0137 --- /dev/null +++ b/nipype/interfaces/ants/tests/test_resampling.py @@ -0,0 +1,90 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from nipype.interfaces.ants import WarpImageMultiTransform, WarpTimeSeriesImageMultiTransform +import os +import pytest + + +@pytest.fixture() +def change_dir(request): + orig_dir = os.getcwd() + filepath = os.path.dirname(os.path.realpath(__file__)) + datadir = os.path.realpath(os.path.join(filepath, '../../../testing/data')) + os.chdir(datadir) + + def move2orig(): + os.chdir(orig_dir) + + request.addfinalizer(move2orig) + + +@pytest.fixture() +def create_wimt(): + wimt = WarpImageMultiTransform() + wimt.inputs.input_image = 'diffusion_weighted.nii' + wimt.inputs.reference_image = 'functional.nii' + wimt.inputs.transformation_series = [ + 'func2anat_coreg_Affine.txt', 'func2anat_InverseWarp.nii.gz', + 'dwi2anat_Warp.nii.gz', 'dwi2anat_coreg_Affine.txt' + ] + return wimt + + +def test_WarpImageMultiTransform(change_dir, create_wimt): + wimt = create_wimt + assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_1(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [1] + assert wimt.cmdline == 
'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii \ +-i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_2(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [2] + assert wimt.cmdline == 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz -i dwi2anat_coreg_Affine.txt' + + +def test_WarpImageMultiTransform_invaffine_wrong(change_dir, create_wimt): + wimt = create_wimt + wimt.inputs.invert_affine = [3] + with pytest.raises(Exception): + assert wimt.cmdline + + +@pytest.fixture() +def create_wtsimt(): + wtsimt = WarpTimeSeriesImageMultiTransform() + wtsimt.inputs.input_image = 'resting.nii' + wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' + wtsimt.inputs.transformation_series = [ + 'ants_Warp.nii.gz', 'ants_Affine.txt' + ] + return wtsimt + + +def test_WarpTimeSeriesImageMultiTransform(change_dir, create_wtsimt): + wtsimt = create_wtsimt + assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' + + +def test_WarpTimeSeriesImageMultiTransform_invaffine(change_dir, + create_wtsimt): + wtsimt = create_wtsimt + wtsimt.inputs.invert_affine = [1] + assert wtsimt.cmdline == 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii \ +-R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' + + +def test_WarpTimeSeriesImageMultiTransform_invaffine_wrong( + change_dir, create_wtsimt): + wtsimt = create_wtsimt + wtsimt.inputs.invert_affine = [0] + with pytest.raises(Exception): + wtsimt.cmdline diff --git a/nipype/interfaces/ants/tests/test_spec_JointFusion.py b/nipype/interfaces/ants/tests/test_spec_JointFusion.py new file mode 100644 index 0000000000..b2ca69926a --- /dev/null +++ b/nipype/interfaces/ants/tests/test_spec_JointFusion.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +from __future__ import division +from builtins import range +from nipype.testing import example_data +from nipype.interfaces.base import InputMultiPath +from traits.trait_errors import TraitError +from nipype.interfaces.ants import JointFusion +import pytest + + +def test_JointFusion_dimension(): + at = JointFusion() + set_dimension = lambda d: setattr(at.inputs, 'dimension', int(d)) + for d in range(2, 5): + set_dimension(d) + assert at.inputs.dimension == int(d) + for d in [0, 1, 6, 7]: + with pytest.raises(TraitError): + set_dimension(d) + + +@pytest.mark.parametrize("m", range(1, 5)) +def test_JointFusion_modalities(m): + at = JointFusion() + setattr(at.inputs, 'modalities', int(m)) + assert at.inputs.modalities == int(m) + + +@pytest.mark.parametrize("a, b", + [(a, b) for a in range(10) for b in range(10)]) +def test_JointFusion_method(a, b): + at = JointFusion() + set_method = lambda a, b: setattr(at.inputs, 'method', 'Joint[%.1f,%d]' % (a, b)) + _a = a / 10.0 + set_method(_a, b) + # set directly + assert at.inputs.method == 'Joint[%.1f,%d]' % (_a, b) + aprime = _a + 0.1 + bprime = b + 1 + at.inputs.alpha = aprime + at.inputs.beta = bprime + # set with alpha/beta + assert at.inputs.method == 'Joint[%.1f,%d]' % (aprime, bprime) + + +@pytest.mark.parametrize("attr, x", + [(attr, x) + for attr in ['patch_radius', 'search_radius'] + for x in range(5)]) +def test_JointFusion_radius(attr, x): + at = 
JointFusion() + setattr(at.inputs, attr, [x, x + 1, x**x]) + assert at._format_arg(attr, None, getattr( + at.inputs, attr))[4:] == '{0}x{1}x{2}'.format(x, x + 1, x**x) + + +def test_JointFusion_cmd(): + at = JointFusion() + at.inputs.dimension = 3 + at.inputs.modalities = 1 + at.inputs.method = 'Joint[0.1,2]' + at.inputs.output_label_image = 'fusion_labelimage_output.nii' + warped_intensity_images = [ + example_data('im1.nii'), + example_data('im2.nii') + ] + at.inputs.warped_intensity_images = warped_intensity_images + segmentation_images = [ + example_data('segmentation0.nii.gz'), + example_data('segmentation1.nii.gz') + ] + at.inputs.warped_label_images = segmentation_images + T1_image = example_data('T1.nii') + at.inputs.target_image = T1_image + at.inputs.patch_radius = [3, 2, 1] + at.inputs.search_radius = [1, 2, 3] + expected_command = ('jointfusion 3 1 -m Joint[0.1,2] -rp 3x2x1 -rs 1x2x3' + ' -tg %s -g %s -g %s -l %s -l %s' + ' fusion_labelimage_output.nii') % ( + T1_image, warped_intensity_images[0], + warped_intensity_images[1], segmentation_images[0], + segmentation_images[1]) + assert at.cmdline == expected_command + # setting intensity or labels with unequal lengths raises error + with pytest.raises(AssertionError): + at._format_arg('warped_intensity_images', InputMultiPath, + warped_intensity_images + [example_data('im3.nii')]) diff --git a/nipype/interfaces/ants/utils.py b/nipype/interfaces/ants/utils.py new file mode 100644 index 0000000000..5d284b89c0 --- /dev/null +++ b/nipype/interfaces/ants/utils.py @@ -0,0 +1,416 @@ +# -*- coding: utf-8 -*- +"""ANTS Apply Transforms interface +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ..base import TraitedSpec, File, traits, InputMultiPath +from .base import ANTSCommand, ANTSCommandInputSpec + + +class AverageAffineTransformInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + mandatory=True, + position=0, + desc='image dimension (2 or 3)') + output_affine_transform = File( + argstr='%s', + mandatory=True, + position=1, + desc='Outputfname.txt: the name of the resulting transform.') + transforms = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=3, + desc='transforms to average') + + +class AverageAffineTransformOutputSpec(TraitedSpec): + affine_transform = File(exists=True, desc='average transform file') + + +class AverageAffineTransform(ANTSCommand): + """ + Examples + -------- + >>> from nipype.interfaces.ants import AverageAffineTransform + >>> avg = AverageAffineTransform() + >>> avg.inputs.dimension = 3 + >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat'] + >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' + >>> avg.cmdline + 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' + """ + _cmd = 'AverageAffineTransform' + input_spec = AverageAffineTransformInputSpec + output_spec = AverageAffineTransformOutputSpec + + def _format_arg(self, opt, spec, val): + return super(AverageAffineTransform, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['affine_transform'] = os.path.abspath( + self.inputs.output_affine_transform) + return outputs + + +class AverageImagesInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + mandatory=True, + position=0, + desc='image dimension (2 or 3)') + output_average_image = File( + "average.nii", + argstr='%s', + position=1, + 
usedefault=True, + hash_files=False, + desc='the name of the resulting image.') + normalize = traits.Bool( + argstr="%d", + mandatory=True, + position=2, + desc='Normalize: if true, the 2nd image is divided by its mean. ' + 'This will select the largest image to average into.') + images = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=3, + desc= + 'images to average' + ) + + +class AverageImagesOutputSpec(TraitedSpec): + output_average_image = File(exists=True, desc='average image file') + + +class AverageImages(ANTSCommand): + """ + Examples + -------- + >>> from nipype.interfaces.ants import AverageImages + >>> avg = AverageImages() + >>> avg.inputs.dimension = 3 + >>> avg.inputs.output_average_image = "average.nii.gz" + >>> avg.inputs.normalize = True + >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] + >>> avg.cmdline + 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' + """ + _cmd = 'AverageImages' + input_spec = AverageImagesInputSpec + output_spec = AverageImagesOutputSpec + + def _format_arg(self, opt, spec, val): + return super(AverageImages, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_average_image'] = os.path.realpath( + self.inputs.output_average_image) + return outputs + + +class MultiplyImagesInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + mandatory=True, + position=0, + desc='image dimension (2 or 3)') + first_input = File( + argstr='%s', exists=True, mandatory=True, position=1, desc='image 1') + second_input = traits.Either( + File(exists=True), + traits.Float, + argstr='%s', + mandatory=True, + position=2, + desc='image 2 or multiplication weight') + output_product_image = File( + argstr='%s', + mandatory=True, + position=3, + desc='Outputfname.nii.gz: the name of the resulting image.') + + +class MultiplyImagesOutputSpec(TraitedSpec): + output_product_image = File(exists=True, desc='product image file') + + +class MultiplyImages(ANTSCommand): + """ + Examples + -------- + >>> from nipype.interfaces.ants import MultiplyImages + >>> test = MultiplyImages() + >>> test.inputs.dimension = 3 + >>> test.inputs.first_input = 'moving2.nii' + >>> test.inputs.second_input = 0.25 + >>> test.inputs.output_product_image = "out.nii" + >>> test.cmdline + 'MultiplyImages 3 moving2.nii 0.25 out.nii' + """ + _cmd = 'MultiplyImages' + input_spec = MultiplyImagesInputSpec + output_spec = MultiplyImagesOutputSpec + + def _format_arg(self, opt, spec, val): + return super(MultiplyImages, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_product_image'] = os.path.abspath( + self.inputs.output_product_image) + return outputs + + +class CreateJacobianDeterminantImageInputSpec(ANTSCommandInputSpec): + imageDimension = traits.Enum( + 3, + 2, + argstr='%d', + mandatory=True, + position=0, + desc='image dimension (2 or 3)') + deformationField = File( + argstr='%s', + exists=True, + mandatory=True, + position=1, + desc='deformation transformation file') + outputImage = File( + argstr='%s', mandatory=True, position=2, desc='output filename') + doLogJacobian = traits.Enum( + 0, 1, argstr='%d', position=3, desc='return the log jacobian') + useGeometric = traits.Enum( + 0, 1, argstr='%d', position=4, desc='return the geometric jacobian') + + +class CreateJacobianDeterminantImageOutputSpec(TraitedSpec): + jacobian_image = File(exists=True, 
desc='jacobian image') + + +class CreateJacobianDeterminantImage(ANTSCommand): + """ + Examples + -------- + >>> from nipype.interfaces.ants import CreateJacobianDeterminantImage + >>> jacobian = CreateJacobianDeterminantImage() + >>> jacobian.inputs.imageDimension = 3 + >>> jacobian.inputs.deformationField = 'ants_Warp.nii.gz' + >>> jacobian.inputs.outputImage = 'out_name.nii.gz' + >>> jacobian.cmdline + 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' + """ + + _cmd = 'CreateJacobianDeterminantImage' + input_spec = CreateJacobianDeterminantImageInputSpec + output_spec = CreateJacobianDeterminantImageOutputSpec + + def _format_arg(self, opt, spec, val): + return super(CreateJacobianDeterminantImage, self)._format_arg( + opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['jacobian_image'] = os.path.abspath(self.inputs.outputImage) + return outputs + + +class AffineInitializerInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, 2, usedefault=True, position=0, argstr='%s', desc='dimension') + fixed_image = File( + exists=True, + mandatory=True, + position=1, + argstr='%s', + desc='reference image') + moving_image = File( + exists=True, + mandatory=True, + position=2, + argstr='%s', + desc='moving image') + out_file = File( + 'transform.mat', + usedefault=True, + position=3, + argstr='%s', + desc='output transform file') + # Defaults in antsBrainExtraction.sh -> 15 0.1 0 10 + search_factor = traits.Float( + 15.0, + usedefault=True, + position=4, + argstr='%f', + desc='increments (degrees) for affine search') + radian_fraction = traits.Range( + 0.0, + 1.0, + value=0.1, + usedefault=True, + position=5, + argstr='%f', + desc='search this arc +/- principal axes') + principal_axes = traits.Bool( + False, + usedefault=True, + position=6, + argstr='%d', + desc= + 'whether the rotation is searched around an initial principal axis alignment.' 
+ ) + local_search = traits.Int( + 10, + usedefault=True, + position=7, + argstr='%d', + desc= + ' determines if a local optimization is run at each search point for the set ' + 'number of iterations') + + +class AffineInitializerOutputSpec(TraitedSpec): + out_file = File(desc='output transform file') + + +class AffineInitializer(ANTSCommand): + """ + Initialize an affine transform (as in antsBrainExtraction.sh) + + >>> from nipype.interfaces.ants import AffineInitializer + >>> init = AffineInitializer() + >>> init.inputs.fixed_image = 'fixed1.nii' + >>> init.inputs.moving_image = 'moving1.nii' + >>> init.cmdline + 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' + + """ + _cmd = 'antsAffineInitializer' + input_spec = AffineInitializerInputSpec + output_spec = AffineInitializerOutputSpec + + def _list_outputs(self): + return {'out_file': os.path.abspath(self.inputs.out_file)} + + +class ComposeMultiTransformInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + usedefault=True, + position=0, + desc='image dimension (2 or 3)') + output_transform = File( + argstr='%s', + position=1, + name_source=['transforms'], + name_template='%s_composed', + keep_extension=True, + desc='the name of the resulting transform.') + reference_image = File( + argstr='%s', + position=2, + desc='Reference image (only necessary when output is warpfield)') + transforms = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=3, + desc='transforms to average') + + +class ComposeMultiTransformOutputSpec(TraitedSpec): + output_transform = File(exists=True, desc='Composed transform file') + + +class ComposeMultiTransform(ANTSCommand): + """ + Take a set of transformations and convert them to a single transformation matrix/warpfield. + + Examples + -------- + >>> from nipype.interfaces.ants import ComposeMultiTransform + >>> compose_transform = ComposeMultiTransform() + >>> compose_transform.inputs.dimension = 3 + >>> compose_transform.inputs.transforms = ['struct_to_template.mat', 'func_to_struct.mat'] + >>> compose_transform.cmdline + 'ComposeMultiTransform 3 struct_to_template_composed.mat struct_to_template.mat func_to_struct.mat' + + """ + _cmd = 'ComposeMultiTransform' + input_spec = ComposeMultiTransformInputSpec + output_spec = ComposeMultiTransformOutputSpec + + +class LabelGeometryInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + usedefault=True, + position=0, + desc='image dimension (2 or 3)') + label_image = File( + argstr='%s', + position=1, + mandatory=True, + desc='label image to use for extracting geometry measures') + intensity_image = File( + value='[]', + exists=True, + argstr='%s', + mandatory=True, + usedefault=True, + position=2, + desc='Intensity image to extract values from. 
' + 'This is an optional input') + output_file = traits.Str( + name_source=['label_image'], + name_template='%s.csv', + argstr='%s', + position=3, + desc='name of output file') + + +class LabelGeometryOutputSpec(TraitedSpec): + output_file = File(exists=True, desc='CSV file of geometry measures') + + +class LabelGeometry(ANTSCommand): + """ + Extracts geometry measures using a label file and an optional image file + + Examples + -------- + >>> from nipype.interfaces.ants import LabelGeometry + >>> label_extract = LabelGeometry() + >>> label_extract.inputs.dimension = 3 + >>> label_extract.inputs.label_image = 'atlas.nii.gz' + >>> label_extract.cmdline + 'LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv' + + >>> label_extract.inputs.intensity_image = 'ants_Warp.nii.gz' + >>> label_extract.cmdline + 'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv' + + """ + _cmd = 'LabelGeometryMeasures' + input_spec = LabelGeometryInputSpec + output_spec = LabelGeometryOutputSpec diff --git a/nipype/interfaces/ants/visualization.py b/nipype/interfaces/ants/visualization.py new file mode 100644 index 0000000000..21186931ce --- /dev/null +++ b/nipype/interfaces/ants/visualization.py @@ -0,0 +1,193 @@ +# -*- coding: utf-8 -*- +"""The ants visualisation module provides basic functions based on ITK. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ..base import TraitedSpec, File, traits +from .base import ANTSCommand, ANTSCommandInputSpec + + +class ConvertScalarImageToRGBInputSpec(ANTSCommandInputSpec): + dimension = traits.Enum( + 3, + 2, + argstr='%d', + usedefault=True, + desc='image dimension (2 or 3)', + mandatory=True, + position=0) + input_image = File( + argstr='%s', + exists=True, + desc='Main input is a 3-D grayscale image.', + mandatory=True, + position=1) + output_image = traits.Str( + 'rgb.nii.gz', + argstr='%s', + usedefault=True, + desc='rgb output image', + position=2) + mask_image = File( + 'none', + argstr='%s', + exists=True, + desc='mask image', + position=3, + usedefault=True) + colormap = traits.Str( + argstr='%s', + usedefault=True, + desc=('Possible colormaps: grey, red, green, ' + 'blue, copper, jet, hsv, spring, summer, ' + 'autumn, winter, hot, cool, overunder, custom '), + mandatory=True, + position=4) + custom_color_map_file = traits.Str( + 'none', + argstr='%s', + usedefault=True, + desc='custom color map file', + position=5) + minimum_input = traits.Int( + argstr='%d', desc='minimum input', mandatory=True, position=6) + maximum_input = traits.Int( + argstr='%d', desc='maximum input', mandatory=True, position=7) + minimum_RGB_output = traits.Int( + 0, usedefault=True, argstr='%d', desc='', position=8) + maximum_RGB_output = traits.Int( + 255, usedefault=True, argstr='%d', desc='', position=9) + + +class ConvertScalarImageToRGBOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='converted RGB image') + + +class ConvertScalarImageToRGB(ANTSCommand): + """ + Examples + -------- + >>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB + >>> converter = ConvertScalarImageToRGB() + >>> converter.inputs.dimension = 3 + >>> converter.inputs.input_image = 'T1.nii.gz' + >>> converter.inputs.colormap = 'jet' + >>> converter.inputs.minimum_input = 0 + >>> converter.inputs.maximum_input = 6 + >>> converter.cmdline + 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' + """ + _cmd = 'ConvertScalarImageToRGB' + input_spec = ConvertScalarImageToRGBInputSpec + 
output_spec = ConvertScalarImageToRGBOutputSpec + + def _format_arg(self, opt, spec, val): + return super(ConvertScalarImageToRGB, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_image'] = os.path.join(os.getcwd(), + self.inputs.output_image) + return outputs + + +class CreateTiledMosaicInputSpec(ANTSCommandInputSpec): + input_image = File( + argstr='-i %s', + exists=True, + desc='Main input is a 3-D grayscale image.', + mandatory=True) + rgb_image = File( + argstr='-r %s', + exists=True, + desc=('An optional Rgb image can be added as an overlay. ' + 'It must have the same image ' + 'geometry as the input grayscale image.'), + mandatory=True) + mask_image = File( + argstr='-x %s', + exists=True, + desc='Specifies the ROI of the RGB voxels used.') + alpha_value = traits.Float( + argstr='-a %.2f', + desc=('If an Rgb image is provided, render the overlay ' + 'using the specified alpha parameter.')) + output_image = traits.Str( + 'output.png', + argstr='-o %s', + desc='The output consists of the tiled mosaic image.', + usedefault=True) + tile_geometry = traits.Str( + argstr='-t %s', + desc=( + 'The tile geometry specifies the number of rows and columns ' + 'in the output image. For example, if the user specifies "5x10", ' + 'then 5 rows by 10 columns of slices are rendered. If R < 0 and C > ' + '0 (or vice versa), the negative value is selected ' + 'based on direction.')) + direction = traits.Int( + argstr='-d %d', + desc=('Specifies the direction of ' + 'the slices. If no direction is specified, the ' + 'direction with the coarsest spacing is chosen.')) + pad_or_crop = traits.Str( + argstr='-p %s', + desc='argument passed to -p flag:' + '[padVoxelWidth,]' + '[lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1],' + 'constantValue] ' + 'The user can specify whether to pad or crop a specified ' + 'voxel-width boundary of each individual slice. For this ' + 'program, cropping is simply padding with negative voxel-widths. ' + 'If one pads (+), the user can also specify a constant pad ' + 'value (default = 0). If a mask is specified, the user can use ' + 'the mask to define the region, by using the keyword "mask"' + ' plus an offset, e.g. "-p mask+3".') + slices = traits.Str( + argstr='-s %s', + desc=('Number of slices to increment Slice1xSlice2xSlice3' + '[numberOfSlicesToIncrement,,]')) + flip_slice = traits.Str(argstr='-f %s', desc='flipXxflipY') + permute_axes = traits.Bool(argstr='-g', desc='doPermute') + + +class CreateTiledMosaicOutputSpec(TraitedSpec): + output_image = File(exists=True, desc='image file') + + +class CreateTiledMosaic(ANTSCommand): + """The program CreateTiledMosaic in conjunction with ConvertScalarImageToRGB + provides useful functionality for common image analysis tasks. The basic + usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into + a 2-D image. 
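+ The mosaic layout can be tuned with the ``tile_geometry``, ``direction``, + ``slices`` and ``pad_or_crop`` inputs described above.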
+ + Examples + -------- + + >>> from nipype.interfaces.ants.visualization import CreateTiledMosaic + >>> mosaic_slicer = CreateTiledMosaic() + >>> mosaic_slicer.inputs.input_image = 'T1.nii.gz' + >>> mosaic_slicer.inputs.rgb_image = 'rgb.nii.gz' + >>> mosaic_slicer.inputs.mask_image = 'mask.nii.gz' + >>> mosaic_slicer.inputs.output_image = 'output.png' + >>> mosaic_slicer.inputs.alpha_value = 0.5 + >>> mosaic_slicer.inputs.direction = 2 + >>> mosaic_slicer.inputs.pad_or_crop = '[ -15x -50 , -15x -30 ,0]' + >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]' + >>> mosaic_slicer.cmdline + 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] \ +-r rgb.nii.gz -s [2 ,100 ,160]' + """ + + _cmd = 'CreateTiledMosaic' + input_spec = CreateTiledMosaicInputSpec + output_spec = CreateTiledMosaicOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output_image'] = os.path.join(os.getcwd(), + self.inputs.output_image) + return outputs diff --git a/nipype/interfaces/base/__init__.py b/nipype/interfaces/base/__init__.py new file mode 100644 index 0000000000..f617064b2f --- /dev/null +++ b/nipype/interfaces/base/__init__.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Nipype base interfaces +---------------------- + +This module defines the API of all nipype interfaces. + +""" +from .core import (Interface, BaseInterface, SimpleInterface, CommandLine, + StdOutCommandLine, MpiCommandLine, SEMLikeCommandLine, + LibraryBaseInterface, PackageInfo) + +from .specs import (BaseTraitedSpec, TraitedSpec, DynamicTraitedSpec, + BaseInterfaceInputSpec, CommandLineInputSpec, + StdOutCommandLineInputSpec) + +from .traits_extension import ( + traits, Undefined, TraitDictObject, TraitListObject, TraitError, isdefined, + File, Directory, Str, DictStrStr, has_metadata, ImageFile, + OutputMultiObject, InputMultiObject, + OutputMultiPath, InputMultiPath) + +from .support import (Bunch, InterfaceResult, load_template, + NipypeInterfaceError) diff --git a/nipype/interfaces/base/core.py b/nipype/interfaces/base/core.py new file mode 100644 index 0000000000..6069c12041 --- /dev/null +++ b/nipype/interfaces/base/core.py @@ -0,0 +1,1357 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Nipype interfaces core +...................... + + +Defines the ``Interface`` API and the body of the +most basic interfaces. +The I/O specifications corresponding to these base +interfaces are found in the ``specs`` module. + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import object, open, str, bytes + +import gc +from copy import deepcopy +from datetime import datetime as dt +import errno +import os +import re +import platform +import select +import subprocess as sp +import shlex +import sys +from textwrap import wrap +import simplejson as json +from dateutil.parser import parse as parseutc + +from ... 
import config, logging, LooseVersion +from ...utils.provenance import write_provenance +from ...utils.misc import trim, str2bool, rgetcwd +from ...utils.filemanip import (FileNotFoundError, split_filename, read_stream, + which, get_dependencies, canonicalize_env as + _canonicalize_env) + +from ...external.due import due + +from .traits_extension import traits, isdefined, TraitError +from .specs import (BaseInterfaceInputSpec, CommandLineInputSpec, + StdOutCommandLineInputSpec, MpiCommandLineInputSpec) +from .support import (Bunch, Stream, InterfaceResult, NipypeInterfaceError) + +from future import standard_library +standard_library.install_aliases() + +iflogger = logging.getLogger('nipype.interface') + +PY35 = sys.version_info >= (3, 5) +PY3 = sys.version_info[0] > 2 +VALID_TERMINAL_OUTPUT = [ + 'stream', 'allatonce', 'file', 'file_split', 'file_stdout', 'file_stderr', + 'none' +] +__docformat__ = 'restructuredtext' + + +class Interface(object): + """This is an abstract definition for Interface objects. + + It provides no functionality. It defines the necessary attributes + and methods all Interface objects should have. + + """ + + input_spec = None # A traited input specification + output_spec = None # A traited output specification + + # defines if the interface can reuse partial results after interruption + _can_resume = False + + @property + def can_resume(self): + return self._can_resume + + # should the interface be always run even if the inputs were not changed? + _always_run = False + + @property + def always_run(self): + return self._always_run + + def __init__(self, **inputs): + """Initialize command with given args and inputs.""" + raise NotImplementedError + + @classmethod + def help(cls): + """ Prints class help""" + raise NotImplementedError + + @classmethod + def _inputs_help(cls): + """ Prints inputs help""" + raise NotImplementedError + + @classmethod + def _outputs_help(cls): + """ Prints outputs help""" + raise NotImplementedError + + @classmethod + def _outputs(cls): + """ Initializes outputs""" + raise NotImplementedError + + @property + def version(self): + raise NotImplementedError + + def run(self): + """Execute the command.""" + raise NotImplementedError + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """Called to populate outputs""" + raise NotImplementedError + + def _list_outputs(self): + """ List expected outputs""" + raise NotImplementedError + + def _get_filecopy_info(self): + """ Provides information about file inputs to copy or link to cwd. + Necessary for pipeline operation + """ + raise NotImplementedError + + +class BaseInterface(Interface): + """Implements common interface functionality. + + Implements + ---------- + + * Initializes inputs/outputs from input_spec/output_spec + * Provides help based on input_spec and output_spec + * Checks for mandatory inputs before running an interface + * Runs an interface and returns results + * Determines which inputs should be copied or linked to cwd + + This class does not implement aggregate_outputs, input_spec or + output_spec. These should be defined by derived classes. + + This class cannot be instantiated. + + + Relevant Interface attributes + ----------------------------- + + ``input_spec`` points to the traited class for the inputs + ``output_spec`` points to the traited class for the outputs + ``_redirect_x`` should be set to ``True`` when the interface requires + connecting to a ``$DISPLAY`` (default is ``False``). 
+ ``resource_monitor`` if ``False`` prevents resource-monitoring this + interface, if ``True`` monitoring will be enabled IFF the general + Nipype config is set on (``resource_monitor = true``). + + + """ + input_spec = BaseInterfaceInputSpec + _version = None + _additional_metadata = [] + _redirect_x = False + references_ = [] + resource_monitor = True # Enabled for this interface IFF enabled in the config + + def __init__(self, from_file=None, resource_monitor=None, + ignore_exception=False, **inputs): + if not self.input_spec: + raise Exception( + 'No input_spec in class: %s' % self.__class__.__name__) + + self.inputs = self.input_spec(**inputs) + self.ignore_exception = ignore_exception + + if resource_monitor is not None: + self.resource_monitor = resource_monitor + + if from_file is not None: + self.load_inputs_from_json(from_file, overwrite=True) + + for name, value in list(inputs.items()): + setattr(self.inputs, name, value) + + @classmethod + def help(cls, returnhelp=False): + """ Prints class help + """ + + if cls.__doc__: + # docstring = cls.__doc__.split('\n') + # docstring = [trim(line, '') for line in docstring] + docstring = trim(cls.__doc__).split('\n') + [''] + else: + docstring = [''] + + allhelp = '\n'.join(docstring + cls._inputs_help( + ) + [''] + cls._outputs_help() + [''] + cls._refs_help() + ['']) + if returnhelp: + return allhelp + else: + print(allhelp) + + @classmethod + def _refs_help(cls): + """ Prints interface references. + """ + if not cls.references_: + return [] + + helpstr = ['References::'] + + for r in cls.references_: + helpstr += ['{}'.format(r['entry'])] + + return helpstr + + @classmethod + def _get_trait_desc(self, inputs, name, spec): + desc = spec.desc + xor = spec.xor + requires = spec.requires + argstr = spec.argstr + + manhelpstr = ['\t%s' % name] + + type_info = spec.full_info(inputs, name, None) + + default = '' + if spec.usedefault: + default = ', nipype default value: %s' % str( + spec.default_value()[1]) + line = "(%s%s)" % (type_info, default) + + manhelpstr = wrap( + line, + 70, + initial_indent=manhelpstr[0] + ': ', + subsequent_indent='\t\t ') + + if desc: + for line in desc.split('\n'): + line = re.sub("\s+", " ", line) + manhelpstr += wrap( + line, 70, initial_indent='\t\t', subsequent_indent='\t\t') + + if argstr: + pos = spec.position + if pos is not None: + manhelpstr += wrap( + 'flag: %s, position: %s' % (argstr, pos), + 70, + initial_indent='\t\t', + subsequent_indent='\t\t') + else: + manhelpstr += wrap( + 'flag: %s' % argstr, + 70, + initial_indent='\t\t', + subsequent_indent='\t\t') + + if xor: + line = '%s' % ', '.join(xor) + manhelpstr += wrap( + line, + 70, + initial_indent='\t\tmutually_exclusive: ', + subsequent_indent='\t\t ') + + if requires: + others = [field for field in requires if field != name] + line = '%s' % ', '.join(others) + manhelpstr += wrap( + line, + 70, + initial_indent='\t\trequires: ', + subsequent_indent='\t\t ') + return manhelpstr + + @classmethod + def _inputs_help(cls): + """ Prints description for input parameters + """ + helpstr = ['Inputs::'] + + inputs = cls.input_spec() + if len(list(inputs.traits(transient=None).items())) == 0: + helpstr += ['', '\tNone'] + return helpstr + + manhelpstr = ['', '\t[Mandatory]'] + mandatory_items = inputs.traits(mandatory=True) + for name, spec in sorted(mandatory_items.items()): + manhelpstr += cls._get_trait_desc(inputs, name, spec) + + opthelpstr = ['', '\t[Optional]'] + for name, spec in sorted(inputs.traits(transient=None).items()): + if name in 
mandatory_items: + continue + opthelpstr += cls._get_trait_desc(inputs, name, spec) + + if manhelpstr: + helpstr += manhelpstr + if opthelpstr: + helpstr += opthelpstr + return helpstr + + @classmethod + def _outputs_help(cls): + """ Prints description for output parameters + """ + helpstr = ['Outputs::', ''] + if cls.output_spec: + outputs = cls.output_spec() + for name, spec in sorted(outputs.traits(transient=None).items()): + helpstr += cls._get_trait_desc(outputs, name, spec) + if len(helpstr) == 2: + helpstr += ['\tNone'] + return helpstr + + def _outputs(self): + """ Returns a bunch containing output fields for the class + """ + outputs = None + if self.output_spec: + outputs = self.output_spec() + + return outputs + + @classmethod + def _get_filecopy_info(cls): + """ Provides information about file inputs to copy or link to cwd. + Necessary for pipeline operation + """ + info = [] + if cls.input_spec is None: + return info + metadata = dict(copyfile=lambda t: t is not None) + for name, spec in sorted(cls.input_spec().traits(**metadata).items()): + info.append(dict(key=name, copy=spec.copyfile)) + return info + + def _check_requires(self, spec, name, value): + """ check if required inputs are satisfied + """ + if spec.requires: + values = [ + not isdefined(getattr(self.inputs, field)) + for field in spec.requires + ] + if any(values) and isdefined(value): + msg = ("%s requires a value for input '%s' because one of %s " + "is set. For a list of required inputs, see %s.help()" % + (self.__class__.__name__, name, + ', '.join(spec.requires), self.__class__.__name__)) + raise ValueError(msg) + + def _check_xor(self, spec, name, value): + """ check if mutually exclusive inputs are satisfied + """ + if spec.xor: + values = [ + isdefined(getattr(self.inputs, field)) for field in spec.xor + ] + if not any(values) and not isdefined(value): + msg = ("%s requires a value for one of the inputs '%s'. " + "For a list of required inputs, see %s.help()" % + (self.__class__.__name__, ', '.join(spec.xor), + self.__class__.__name__)) + raise ValueError(msg) + + def _check_mandatory_inputs(self): + """ Raises an exception if a mandatory input is Undefined + """ + for name, spec in list(self.inputs.traits(mandatory=True).items()): + value = getattr(self.inputs, name) + self._check_xor(spec, name, value) + if not isdefined(value) and spec.xor is None: + msg = ("%s requires a value for input '%s'. 
" + "For a list of required inputs, see %s.help()" % + (self.__class__.__name__, name, + self.__class__.__name__)) + raise ValueError(msg) + if isdefined(value): + self._check_requires(spec, name, value) + for name, spec in list( + self.inputs.traits(mandatory=None, transient=None).items()): + self._check_requires(spec, name, getattr(self.inputs, name)) + + def _check_version_requirements(self, trait_object, raise_exception=True): + """ Raises an exception on version mismatch + """ + unavailable_traits = [] + # check minimum version + check = dict(min_ver=lambda t: t is not None) + names = trait_object.trait_names(**check) + + if names and self.version: + version = LooseVersion(str(self.version)) + for name in names: + min_ver = LooseVersion( + str(trait_object.traits()[name].min_ver)) + if min_ver > version: + unavailable_traits.append(name) + if not isdefined(getattr(trait_object, name)): + continue + if raise_exception: + raise Exception( + 'Trait %s (%s) (version %s < required %s)' % + (name, self.__class__.__name__, version, min_ver)) + + # check maximum version + check = dict(max_ver=lambda t: t is not None) + names = trait_object.trait_names(**check) + if names and self.version: + version = LooseVersion(str(self.version)) + for name in names: + max_ver = LooseVersion( + str(trait_object.traits()[name].max_ver)) + if max_ver < version: + unavailable_traits.append(name) + if not isdefined(getattr(trait_object, name)): + continue + if raise_exception: + raise Exception( + 'Trait %s (%s) (version %s > required %s)' % + (name, self.__class__.__name__, version, max_ver)) + return unavailable_traits + + def _run_interface(self, runtime): + """ Core function that executes interface + """ + raise NotImplementedError + + def _duecredit_cite(self): + """ Add the interface references to the duecredit citations + """ + for r in self.references_: + r['path'] = self.__module__ + due.cite(**r) + + def run(self, cwd=None, ignore_exception=None, **inputs): + """Execute this interface. + + This interface will not raise an exception if runtime.returncode is + non-zero. 
+ + Parameters + ---------- + + cwd : specify a folder where the interface should be run + inputs : allows the interface settings to be updated + + Returns + ------- + results : an InterfaceResult object containing a copy of the instance + that was executed, provenance information and, if successful, results + """ + from ...utils.profiler import ResourceMonitor + + # if ignore_exception is not provided, taking self.ignore_exception + if ignore_exception is None: + ignore_exception = self.ignore_exception + + # Tear-up: get current and prev directories + syscwd = rgetcwd(error=False) # Recover when wd does not exist + if cwd is None: + cwd = syscwd + + os.chdir(cwd) # Change to the interface wd + + enable_rm = config.resource_monitor and self.resource_monitor + self.inputs.trait_set(**inputs) + self._check_mandatory_inputs() + self._check_version_requirements(self.inputs) + interface = self.__class__ + self._duecredit_cite() + + # initialize provenance tracking + store_provenance = str2bool( + config.get('execution', 'write_provenance', 'false')) + env = deepcopy(dict(os.environ)) + if self._redirect_x: + env['DISPLAY'] = config.get_display() + + runtime = Bunch( + cwd=cwd, + prevcwd=syscwd, + returncode=None, + duration=None, + environ=env, + startTime=dt.isoformat(dt.utcnow()), + endTime=None, + platform=platform.platform(), + hostname=platform.node(), + version=self.version) + + mon_sp = None + if enable_rm: + mon_freq = float( + config.get('execution', 'resource_monitor_frequency', 1)) + proc_pid = os.getpid() + iflogger.debug( + 'Creating a ResourceMonitor on a %s interface, PID=%d.', + self.__class__.__name__, proc_pid) + mon_sp = ResourceMonitor(proc_pid, freq=mon_freq) + mon_sp.start() + + # Grab inputs now, as they should not change during execution + inputs = self.inputs.get_traitsfree() + outputs = None + + try: + runtime = self._pre_run_hook(runtime) + runtime = self._run_interface(runtime) + runtime = self._post_run_hook(runtime) + outputs = self.aggregate_outputs(runtime) + except Exception as e: + import traceback + # Retrieve the maximum info fast + runtime.traceback = traceback.format_exc() + # Gather up the exception arguments and append nipype info. + exc_args = e.args if getattr(e, 'args') else tuple() + exc_args += ( + 'An exception of type %s occurred while running interface %s.' 
+ % (type(e).__name__, self.__class__.__name__), ) + if config.get('logging', 'interface_level', + 'info').lower() == 'debug': + exc_args += ('Inputs: %s' % str(self.inputs), ) + + runtime.traceback_args = ('\n'.join( + ['%s' % arg for arg in exc_args]), ) + + if not ignore_exception: + raise + finally: + # This needs to be done always + runtime.endTime = dt.isoformat(dt.utcnow()) + timediff = parseutc(runtime.endTime) - parseutc(runtime.startTime) + runtime.duration = (timediff.days * 86400 + timediff.seconds + + timediff.microseconds / 1e6) + results = InterfaceResult( + interface, + runtime, + inputs=inputs, + outputs=outputs, + provenance=None) + + # Add provenance (if required) + if store_provenance: + # Provenance will only throw a warning if something went wrong + results.provenance = write_provenance(results) + + # Make sure runtime profiler is shut down + if enable_rm: + import numpy as np + mon_sp.stop() + + runtime.mem_peak_gb = None + runtime.cpu_percent = None + + # Read .prof file in and set runtime values + vals = np.loadtxt(mon_sp.fname, delimiter=',') + if vals.size: + vals = np.atleast_2d(vals) + runtime.mem_peak_gb = vals[:, 1].max() / 1024 + runtime.cpu_percent = vals[:, 2].max() + + runtime.prof_dict = { + 'time': vals[:, 0].tolist(), + 'cpus': vals[:, 1].tolist(), + 'rss_GiB': (vals[:, 2] / 1024).tolist(), + 'vms_GiB': (vals[:, 3] / 1024).tolist(), + } + os.chdir(syscwd) + + return results + + def _list_outputs(self): + """ List the expected outputs + """ + if self.output_spec: + raise NotImplementedError + else: + return None + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + """ Collate expected outputs and check for existence + """ + + predicted_outputs = self._list_outputs() + outputs = self._outputs() + if predicted_outputs: + _unavailable_outputs = [] + if outputs: + _unavailable_outputs = \ + self._check_version_requirements(self._outputs()) + for key, val in list(predicted_outputs.items()): + if needed_outputs and key not in needed_outputs: + continue + if key in _unavailable_outputs: + raise KeyError(('Output trait %s not available in version ' + '%s of interface %s. Please inform ' + 'developers.') % (key, self.version, + self.__class__.__name__)) + try: + setattr(outputs, key, val) + except TraitError as error: + if getattr(error, 'info', + 'default').startswith('an existing'): + msg = ("File/Directory '%s' not found for %s output " + "'%s'." % (val, self.__class__.__name__, key)) + raise FileNotFoundError(msg) + raise error + + return outputs + + @property + def version(self): + if self._version is None: + if str2bool(config.get('execution', 'stop_on_unknown_version')): + raise ValueError('Interface %s has no version information' % + self.__class__.__name__) + return self._version + + def load_inputs_from_json(self, json_file, overwrite=True): + """ + A convenient way to load pre-set inputs from a JSON file. + """ + + with open(json_file) as fhandle: + inputs_dict = json.load(fhandle) + + def_inputs = [] + if not overwrite: + def_inputs = list(self.inputs.get_traitsfree().keys()) + + new_inputs = list(set(list(inputs_dict.keys())) - set(def_inputs)) + for key in new_inputs: + if hasattr(self.inputs, key): + setattr(self.inputs, key, inputs_dict[key]) + + def save_inputs_to_json(self, json_file): + """ + A convenient way to save current inputs to a JSON file. 
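+ Only inputs that are defined are written (undefined traits are skipped).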
+ """ + inputs = self.inputs.get_traitsfree() + iflogger.debug('saving inputs {}', inputs) + with open(json_file, 'w' if PY3 else 'wb') as fhandle: + json.dump(inputs, fhandle, indent=4, ensure_ascii=False) + + def _pre_run_hook(self, runtime): + """ + Perform any pre-_run_interface() processing + + Subclasses may override this function to modify ``runtime`` object or + interface state + + MUST return runtime object + """ + return runtime + + def _post_run_hook(self, runtime): + """ + Perform any post-_run_interface() processing + + Subclasses may override this function to modify ``runtime`` object or + interface state + + MUST return runtime object + """ + return runtime + + +class SimpleInterface(BaseInterface): + """ An interface pattern that allows outputs to be set in a dictionary + called ``_results`` that is automatically interpreted by + ``_list_outputs()`` to find the outputs. + + When implementing ``_run_interface``, set outputs with:: + + self._results[out_name] = out_value + + This can be a way to upgrade a ``Function`` interface to do type checking. + + Examples + -------- + + >>> from nipype.interfaces.base import ( + ... SimpleInterface, BaseInterfaceInputSpec, TraitedSpec) + + >>> def double(x): + ... return 2 * x + ... + >>> class DoubleInputSpec(BaseInterfaceInputSpec): + ... x = traits.Float(mandatory=True) + ... + >>> class DoubleOutputSpec(TraitedSpec): + ... doubled = traits.Float() + ... + >>> class Double(SimpleInterface): + ... input_spec = DoubleInputSpec + ... output_spec = DoubleOutputSpec + ... + ... def _run_interface(self, runtime): + ... self._results['doubled'] = double(self.inputs.x) + ... return runtime + + >>> dbl = Double() + >>> dbl.inputs.x = 2 + >>> dbl.run().outputs.doubled + 4.0 + """ + + def __init__(self, from_file=None, resource_monitor=None, **inputs): + super(SimpleInterface, self).__init__( + from_file=from_file, resource_monitor=resource_monitor, **inputs) + self._results = {} + + def _list_outputs(self): + return self._results + + +def run_command(runtime, output=None, timeout=0.01): + """Run a command, read stdout and stderr, prefix with timestamp. 
+ + The returned runtime contains a merged stdout+stderr log with timestamps + """ + + # Init variables + cmdline = runtime.cmdline + env = _canonicalize_env(runtime.environ) + + errfile = None + outfile = None + stdout = sp.PIPE + stderr = sp.PIPE + + if output == 'file': + outfile = os.path.join(runtime.cwd, 'output.nipype') + stdout = open(outfile, 'wb') # t=='text'===default + stderr = sp.STDOUT + elif output == 'file_split': + outfile = os.path.join(runtime.cwd, 'stdout.nipype') + stdout = open(outfile, 'wb') + errfile = os.path.join(runtime.cwd, 'stderr.nipype') + stderr = open(errfile, 'wb') + elif output == 'file_stdout': + outfile = os.path.join(runtime.cwd, 'stdout.nipype') + stdout = open(outfile, 'wb') + elif output == 'file_stderr': + errfile = os.path.join(runtime.cwd, 'stderr.nipype') + stderr = open(errfile, 'wb') + + proc = sp.Popen( + cmdline, + stdout=stdout, + stderr=stderr, + shell=True, + cwd=runtime.cwd, + env=env, + close_fds=(not sys.platform.startswith('win')), + ) + + result = { + 'stdout': [], + 'stderr': [], + 'merged': [], + } + + if output == 'stream': + streams = [ + Stream('stdout', proc.stdout), + Stream('stderr', proc.stderr) + ] + + def _process(drain=0): + try: + res = select.select(streams, [], [], timeout) + except select.error as e: + iflogger.info(e) + if e[0] == errno.EINTR: + return + else: + raise + else: + for stream in res[0]: + stream.read(drain) + + while proc.returncode is None: + proc.poll() + _process() + + _process(drain=1) + + # collect results, merge and return + result = {} + temp = [] + for stream in streams: + rows = stream._rows + temp += rows + result[stream._name] = [r[2] for r in rows] + temp.sort() + result['merged'] = [r[1] for r in temp] + + if output.startswith('file'): + proc.wait() + if outfile is not None: + stdout.flush() + stdout.close() + with open(outfile, 'rb') as ofh: + stdoutstr = ofh.read() + result['stdout'] = read_stream(stdoutstr, logger=iflogger) + del stdoutstr + + if errfile is not None: + stderr.flush() + stderr.close() + with open(errfile, 'rb') as efh: + stderrstr = efh.read() + result['stderr'] = read_stream(stderrstr, logger=iflogger) + del stderrstr + + if output == 'file': + result['merged'] = result['stdout'] + result['stdout'] = [] + else: + stdout, stderr = proc.communicate() + if output == 'allatonce': # Discard stdout and stderr otherwise + result['stdout'] = read_stream(stdout, logger=iflogger) + result['stderr'] = read_stream(stderr, logger=iflogger) + + runtime.returncode = proc.returncode + try: + proc.terminate() # Ensure we are done + except OSError as error: + # Python 2 raises when the process is already gone + if error.errno != errno.ESRCH: + raise + + # Dereference & force GC for a cleanup + del proc + del stdout + del stderr + gc.collect() + + runtime.stderr = '\n'.join(result['stderr']) + runtime.stdout = '\n'.join(result['stdout']) + runtime.merged = '\n'.join(result['merged']) + return runtime + + +class CommandLine(BaseInterface): + """Implements functionality to interact with command line programs + class must be instantiated with a command argument + + Parameters + ---------- + + command : string + define base immutable `command` you wish to run + + args : string, optional + optional arguments passed to base `command` + + + Examples + -------- + >>> import pprint + >>> from nipype.interfaces.base import CommandLine + >>> cli = CommandLine(command='ls', environ={'DISPLAY': ':1'}) + >>> cli.inputs.args = '-al' + >>> cli.cmdline + 'ls -al' + + # Use get_traitsfree() to check all 
inputs set + >>> pprint.pprint(cli.inputs.get_traitsfree()) # doctest: + {'args': '-al', + 'environ': {'DISPLAY': ':1'}} + + >>> cli.inputs.get_hashval()[0][0] + ('args', '-al') + >>> cli.inputs.get_hashval()[1] + '11c37f97649cd61627f4afe5136af8c0' + + """ + input_spec = CommandLineInputSpec + _cmd_prefix = '' + _cmd = None + _version = None + _terminal_output = 'stream' + + @classmethod + def set_default_terminal_output(cls, output_type): + """Set the default terminal output for CommandLine Interfaces. + + This method is used to set default terminal output for + CommandLine Interfaces. However, setting this will not + update the output type for any existing instances. For these, + assign the .terminal_output. + """ + + if output_type in VALID_TERMINAL_OUTPUT: + cls._terminal_output = output_type + else: + raise AttributeError( + 'Invalid terminal output_type: %s' % output_type) + + @classmethod + def help(cls, returnhelp=False): + allhelp = 'Wraps command **{cmd}**\n\n{help}'.format( + cmd=cls._cmd, help=super(CommandLine, cls).help(returnhelp=True)) + if returnhelp: + return allhelp + print(allhelp) + + def __init__(self, command=None, terminal_output=None, **inputs): + super(CommandLine, self).__init__(**inputs) + self._environ = None + # Set command. Input argument takes precedence + self._cmd = command or getattr(self, '_cmd', None) + + # Store dependencies in runtime object + self._ldd = str2bool( + config.get('execution', 'get_linked_libs', 'true')) + + if self._cmd is None: + raise Exception("Missing command") + + if terminal_output is not None: + self.terminal_output = terminal_output + + @property + def cmd(self): + """sets base command, immutable""" + return self._cmd + + @property + def cmdline(self): + """ `command` plus any arguments (args) + validates arguments and generates command line""" + self._check_mandatory_inputs() + allargs = [self._cmd_prefix + self.cmd] + self._parse_inputs() + return ' '.join(allargs) + + @property + def terminal_output(self): + return self._terminal_output + + @terminal_output.setter + def terminal_output(self, value): + if value not in VALID_TERMINAL_OUTPUT: + raise RuntimeError( + 'Setting invalid value "%s" for terminal_output. Valid values are ' + '%s.' 
% (value, + ', '.join(['"%s"' % v + for v in VALID_TERMINAL_OUTPUT]))) + self._terminal_output = value + + def raise_exception(self, runtime): + raise RuntimeError( + ('Command:\n{cmdline}\nStandard output:\n{stdout}\n' + 'Standard error:\n{stderr}\nReturn code: {returncode}' + ).format(**runtime.dictcopy())) + + def _get_environ(self): + return getattr(self.inputs, 'environ', {}) + + def version_from_command(self, flag='-v', cmd=None): + iflogger.warning('version_from_command member of CommandLine was ' + 'Deprecated in nipype-1.0.0 and deleted in 1.1.0') + if cmd is None: + cmd = self.cmd.split()[0] + + env = dict(os.environ) + if which(cmd, env=env): + out_environ = self._get_environ() + env.update(out_environ) + proc = sp.Popen( + ' '.join((cmd, flag)), + shell=True, + env=env, + stdout=sp.PIPE, + stderr=sp.PIPE, + ) + o, e = proc.communicate() + return o + + def _run_interface(self, runtime, correct_return_codes=(0, )): + """Execute command via subprocess + + Parameters + ---------- + runtime : passed by the run function + + Returns + ------- + runtime : updated runtime information + adds stdout, stderr, merged, cmdline, dependencies, command_path + + """ + + out_environ = self._get_environ() + # Initialize runtime Bunch + runtime.stdout = None + runtime.stderr = None + runtime.cmdline = self.cmdline + runtime.environ.update(out_environ) + + # which $cmd + executable_name = shlex.split(self._cmd_prefix + self.cmd)[0] + cmd_path = which(executable_name, env=runtime.environ) + + if cmd_path is None: + raise IOError( + 'No command "%s" found on host %s. Please check that the ' + 'corresponding package is installed.' % (executable_name, + runtime.hostname)) + + runtime.command_path = cmd_path + runtime.dependencies = (get_dependencies(executable_name, + runtime.environ) + if self._ldd else '') + runtime = run_command(runtime, output=self.terminal_output) + if runtime.returncode is None or \ + runtime.returncode not in correct_return_codes: + self.raise_exception(runtime) + + return runtime + + def _format_arg(self, name, trait_spec, value): + """A helper function for _parse_inputs + + Formats a trait containing argstr metadata + """ + argstr = trait_spec.argstr + iflogger.debug('%s_%s', name, value) + if trait_spec.is_trait_type(traits.Bool) and "%" not in argstr: + # Boolean options have no format string. Just append options if True. + return argstr if value else None + # traits.Either turns into traits.TraitCompound and does not have any + # inner_traits + elif trait_spec.is_trait_type(traits.List) \ + or (trait_spec.is_trait_type(traits.TraitCompound) and + isinstance(value, list)): + # This is a bit simple-minded at present, and should be + # construed as the default. If more sophisticated behavior + # is needed, it can be accomplished with metadata (e.g. + # format string for list member str'ification, specifying + # the separator, etc.) + + # Depending on whether we stick with traitlets, and whether or + # not we beef up traitlets.List, we may want to put some + # type-checking code here as well + sep = trait_spec.sep if trait_spec.sep is not None else ' ' + + if argstr.endswith('...'): + # repeatable option + # --id %d... will expand to + # --id 1 --id 2 --id 3 etc.,. + argstr = argstr.replace('...', '') + return sep.join([argstr % elt for elt in value]) + else: + return argstr % sep.join(str(elt) for elt in value) + else: + # Append options using format string. 
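+            # For instance, a trait declared with ``argstr='-x %d'`` and
+            # value 3 renders as '-x 3' here (cf. the specs exercised in
+            # the tests added later in this patch).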
+ return argstr % value + + def _filename_from_source(self, name, chain=None): + if chain is None: + chain = [] + + trait_spec = self.inputs.trait(name) + retval = getattr(self.inputs, name) + source_ext = None + if not isdefined(retval) or "%s" in retval: + if not trait_spec.name_source: + return retval + + # Do not generate filename when excluded by other inputs + if any(isdefined(getattr(self.inputs, field)) + for field in trait_spec.xor or ()): + return retval + + # Do not generate filename when required fields are missing + if not all(isdefined(getattr(self.inputs, field)) + for field in trait_spec.requires or ()): + return retval + + if isdefined(retval) and "%s" in retval: + name_template = retval + else: + name_template = trait_spec.name_template + if not name_template: + name_template = "%s_generated" + + ns = trait_spec.name_source + while isinstance(ns, (list, tuple)): + if len(ns) > 1: + iflogger.warning( + 'Only one name_source per trait is allowed') + ns = ns[0] + + if not isinstance(ns, (str, bytes)): + raise ValueError( + 'name_source of \'{}\' trait should be an input trait ' + 'name, but a type {} object was found'.format( + name, type(ns))) + + if isdefined(getattr(self.inputs, ns)): + name_source = ns + source = getattr(self.inputs, name_source) + while isinstance(source, list): + source = source[0] + + # special treatment for files + try: + _, base, source_ext = split_filename(source) + except (AttributeError, TypeError): + base = source + else: + if name in chain: + raise NipypeInterfaceError( + 'Mutually pointing name_sources') + + chain.append(name) + base = self._filename_from_source(ns, chain) + if isdefined(base): + _, _, source_ext = split_filename(base) + else: + # Do not generate filename when required fields are missing + return retval + + chain = None + retval = name_template % base + _, _, ext = split_filename(retval) + if trait_spec.keep_extension and (ext or source_ext): + if (ext is None or not ext) and source_ext: + retval = retval + source_ext + else: + retval = self._overload_extension(retval, name) + return retval + + def _gen_filename(self, name): + raise NotImplementedError + + def _overload_extension(self, value, name=None): + return value + + def _list_outputs(self): + metadata = dict(name_source=lambda t: t is not None) + traits = self.inputs.traits(**metadata) + if traits: + outputs = self.output_spec().trait_get() + for name, trait_spec in list(traits.items()): + out_name = name + if trait_spec.output_name is not None: + out_name = trait_spec.output_name + fname = self._filename_from_source(name) + if isdefined(fname): + outputs[out_name] = os.path.abspath(fname) + return outputs + + def _parse_inputs(self, skip=None): + """Parse all inputs using the ``argstr`` format string in the Trait. + + Any inputs that are assigned (not the default_value) are formatted + to be added to the command line. + + Returns + ------- + all_args : list + A list of all inputs formatted for the command line. 
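+
+        Examples
+        --------
+        A sketch of the expected behaviour: with the standard ``args``
+        trait (``traits.Str(argstr='%s')``) set to ``'-al'``, this method
+        returns ``['-al']``, which ``cmdline`` joins after the base command.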
+ + """ + all_args = [] + initial_args = {} + final_args = {} + metadata = dict(argstr=lambda t: t is not None) + for name, spec in sorted(self.inputs.traits(**metadata).items()): + if skip and name in skip: + continue + value = getattr(self.inputs, name) + if spec.name_source: + value = self._filename_from_source(name) + elif spec.genfile: + if not isdefined(value) or value is None: + value = self._gen_filename(name) + + if not isdefined(value): + continue + arg = self._format_arg(name, spec, value) + if arg is None: + continue + pos = spec.position + if pos is not None: + if int(pos) >= 0: + initial_args[pos] = arg + else: + final_args[pos] = arg + else: + all_args.append(arg) + first_args = [el for _, el in sorted(initial_args.items())] + last_args = [el for _, el in sorted(final_args.items())] + return first_args + all_args + last_args + + +class StdOutCommandLine(CommandLine): + input_spec = StdOutCommandLineInputSpec + + def _gen_filename(self, name): + return self._gen_outfilename() if name == 'out_file' else None + + def _gen_outfilename(self): + raise NotImplementedError + + +class MpiCommandLine(CommandLine): + """Implements functionality to interact with command line programs + that can be run with MPI (i.e. using 'mpiexec'). + + Examples + -------- + >>> from nipype.interfaces.base import MpiCommandLine + >>> mpi_cli = MpiCommandLine(command='my_mpi_prog') + >>> mpi_cli.inputs.args = '-v' + >>> mpi_cli.cmdline + 'my_mpi_prog -v' + + >>> mpi_cli.inputs.use_mpi = True + >>> mpi_cli.inputs.n_procs = 8 + >>> mpi_cli.cmdline + 'mpiexec -n 8 my_mpi_prog -v' + """ + input_spec = MpiCommandLineInputSpec + + @property + def cmdline(self): + """Adds 'mpiexec' to begining of command""" + result = [] + if self.inputs.use_mpi: + result.append('mpiexec') + if self.inputs.n_procs: + result.append('-n %d' % self.inputs.n_procs) + result.append(super(MpiCommandLine, self).cmdline) + return ' '.join(result) + + +class SEMLikeCommandLine(CommandLine): + """In SEM derived interface all outputs have corresponding inputs. + However, some SEM commands create outputs that are not defined in the XML. + In those cases one has to create a subclass of the autogenerated one and + overload the _list_outputs method. _outputs_from_inputs should still be + used but only for the reduced (by excluding those that do not have + corresponding inputs list of outputs. 
+ """ + + def _list_outputs(self): + outputs = self.output_spec().trait_get() + return self._outputs_from_inputs(outputs) + + def _outputs_from_inputs(self, outputs): + for name in list(outputs.keys()): + corresponding_input = getattr(self.inputs, name) + if isdefined(corresponding_input): + if (isinstance(corresponding_input, bool) + and corresponding_input): + outputs[name] = \ + os.path.abspath(self._outputs_filenames[name]) + else: + if isinstance(corresponding_input, list): + outputs[name] = [ + os.path.abspath(inp) for inp in corresponding_input + ] + else: + outputs[name] = os.path.abspath(corresponding_input) + return outputs + + def _format_arg(self, name, spec, value): + if name in list(self._outputs_filenames.keys()): + if isinstance(value, bool): + if value: + value = os.path.abspath(self._outputs_filenames[name]) + else: + return "" + return super(SEMLikeCommandLine, self)._format_arg(name, spec, value) + + +class LibraryBaseInterface(BaseInterface): + _pkg = None + imports = () + + def __init__(self, check_import=True, *args, **kwargs): + super(LibraryBaseInterface, self).__init__(*args, **kwargs) + if check_import: + import importlib + failed_imports = [] + for pkg in (self._pkg,) + tuple(self.imports): + try: + importlib.import_module(pkg) + except ImportError: + failed_imports.append(pkg) + if failed_imports: + iflogger.warn('Unable to import %s; %s interface may fail to ' + 'run', failed_imports, self.__class__.__name__) + + @property + def version(self): + if self._version is None: + import importlib + try: + self._version = importlib.import_module(self._pkg).__version__ + except (ImportError, AttributeError): + pass + return super(LibraryBaseInterface, self).version + + +class PackageInfo(object): + _version = None + version_cmd = None + version_file = None + + @classmethod + def version(klass): + if klass._version is None: + if klass.version_cmd is not None: + try: + clout = CommandLine( + command=klass.version_cmd, + resource_monitor=False, + terminal_output='allatonce').run() + except IOError: + return None + + raw_info = clout.runtime.stdout + elif klass.version_file is not None: + try: + with open(klass.version_file, 'rt') as fobj: + raw_info = fobj.read() + except OSError: + return None + else: + return None + + klass._version = klass.parse_version(raw_info) + + return klass._version + + @staticmethod + def parse_version(raw_info): + raise NotImplementedError diff --git a/nipype/interfaces/base/specs.py b/nipype/interfaces/base/specs.py new file mode 100644 index 0000000000..c07bdc4a71 --- /dev/null +++ b/nipype/interfaces/base/specs.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" + +Base I/O specifications for Nipype interfaces +............................................. + +Define the API for the I/O of interfaces + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +from copy import deepcopy +from warnings import warn +from builtins import str, bytes +from packaging.version import Version + +from ...utils.filemanip import md5, hash_infile, hash_timestamp, to_str +from .traits_extension import ( + traits, + Undefined, + isdefined, + TraitError, + TraitDictObject, + TraitListObject, + has_metadata, +) + +from ... 
import config, __version__ + +FLOAT_FORMAT = '{:.10f}'.format +nipype_version = Version(__version__) + + +class BaseTraitedSpec(traits.HasTraits): + """ + Provide a few methods necessary to support nipype interface api + + The inputs attribute of interfaces call certain methods that are not + available in traits.HasTraits. These are provided here. + + new metadata: + + * usedefault : set this to True if the default value of the trait should be + used. Unless this is set, the attributes are set to traits.Undefined + + new attribute: + + * get_hashval : returns a tuple containing the state of the trait as a dict + and hashvalue corresponding to dict. + + XXX Reconsider this in the long run, but it seems like the best + solution to move forward on the refactoring. + """ + package_version = nipype_version + + def __init__(self, **kwargs): + """ Initialize handlers and inputs""" + # NOTE: In python 2.6, object.__init__ no longer accepts input + # arguments. HasTraits does not define an __init__ and + # therefore these args were being ignored. + # super(TraitedSpec, self).__init__(*args, **kwargs) + super(BaseTraitedSpec, self).__init__(**kwargs) + traits.push_exception_handler(reraise_exceptions=True) + undefined_traits = {} + for trait in self.copyable_trait_names(): + if not self.traits()[trait].usedefault: + undefined_traits[trait] = Undefined + self.trait_set(trait_change_notify=False, **undefined_traits) + self._generate_handlers() + self.trait_set(**kwargs) + + def items(self): + """ Name, trait generator for user modifiable traits + """ + for name in sorted(self.copyable_trait_names()): + yield name, self.traits()[name] + + def __repr__(self): + """ Return a well-formatted representation of the traits """ + outstr = [] + for name, value in sorted(self.trait_get().items()): + outstr.append('%s = %s' % (name, value)) + return '\n{}\n'.format('\n'.join(outstr)) + + def _generate_handlers(self): + """Find all traits with the 'xor' metadata and attach an event + handler to them. + """ + has_xor = dict(xor=lambda t: t is not None) + xors = self.trait_names(**has_xor) + for elem in xors: + self.on_trait_change(self._xor_warn, elem) + has_deprecation = dict(deprecated=lambda t: t is not None) + deprecated = self.trait_names(**has_deprecation) + for elem in deprecated: + self.on_trait_change(self._deprecated_warn, elem) + + def _xor_warn(self, obj, name, old, new): + """ Generates warnings for xor traits + """ + if isdefined(new): + trait_spec = self.traits()[name] + # for each xor, set to default_value + for trait_name in trait_spec.xor: + if trait_name == name: + # skip ourself + continue + if isdefined(getattr(self, trait_name)): + self.trait_set( + trait_change_notify=False, **{ + '%s' % name: Undefined + }) + msg = ('Input "%s" is mutually exclusive with input "%s", ' + 'which is already set') % (name, trait_name) + raise IOError(msg) + + def _deprecated_warn(self, obj, name, old, new): + """Checks if a user assigns a value to a deprecated trait + """ + if isdefined(new): + trait_spec = self.traits()[name] + msg1 = ('Input %s in interface %s is deprecated.' % + (name, self.__class__.__name__.split('InputSpec')[0])) + msg2 = ('Will be removed or raise an error as of release %s' % + trait_spec.deprecated) + if trait_spec.new_name: + if trait_spec.new_name not in self.copyable_trait_names(): + raise TraitError(msg1 + ' Replacement trait %s not found' % + trait_spec.new_name) + msg3 = 'It has been replaced by %s.' 
% trait_spec.new_name
+            else:
+                msg3 = ''
+            msg = ' '.join((msg1, msg2, msg3))
+            if Version(str(trait_spec.deprecated)) < self.package_version:
+                raise TraitError(msg)
+            else:
+                if trait_spec.new_name:
+                    # leading space keeps the appended sentence separated
+                    msg += ' Unsetting old value %s; setting new value %s.' % (
+                        name, trait_spec.new_name)
+                warn(msg)
+                if trait_spec.new_name:
+                    self.trait_set(
+                        trait_change_notify=False,
+                        **{
+                            '%s' % name: Undefined,
+                            '%s' % trait_spec.new_name: new
+                        })
+
+    def trait_get(self, **kwargs):
+        """ Returns traited class as a dict
+
+        Augments the trait get function to return a dictionary without
+        notification handles
+        """
+        out = super(BaseTraitedSpec, self).trait_get(**kwargs)
+        out = self._clean_container(out, Undefined)
+        return out
+
+    get = trait_get
+
+    def get_traitsfree(self, **kwargs):
+        """ Returns traited class as a dict
+
+        Augments the trait get function to return a dictionary without
+        any traits. The dictionary does not contain any attributes that
+        were Undefined
+        """
+        out = super(BaseTraitedSpec, self).trait_get(**kwargs)
+        out = self._clean_container(out, skipundefined=True)
+        return out
+
+    def _clean_container(self, objekt, undefinedval=None, skipundefined=False):
+        """Convert a traited object into a pure python representation.
+        """
+        if isinstance(objekt, TraitDictObject) or isinstance(objekt, dict):
+            out = {}
+            for key, val in list(objekt.items()):
+                if isdefined(val):
+                    out[key] = self._clean_container(val, undefinedval)
+                else:
+                    if not skipundefined:
+                        out[key] = undefinedval
+        elif (isinstance(objekt, TraitListObject) or isinstance(objekt, list) or
+              isinstance(objekt, tuple)):
+            out = []
+            for val in objekt:
+                if isdefined(val):
+                    out.append(self._clean_container(val, undefinedval))
+                else:
+                    if not skipundefined:
+                        out.append(undefinedval)
+                    else:
+                        out.append(None)
+            if isinstance(objekt, tuple):
+                out = tuple(out)
+        else:
+            out = None
+            if isdefined(objekt):
+                out = objekt
+            else:
+                if not skipundefined:
+                    out = undefinedval
+        return out
+
+    def has_metadata(self, name, metadata, value=None, recursive=True):
+        """
+        Return has_metadata for the requested trait name in this
+        interface
+        """
+        return has_metadata(
+            self.trait(name).trait_type, metadata, value, recursive)
+
+    def get_hashval(self, hash_method=None):
+        """Return a dictionary of our items with hashes for each file.
+
+        Searches through dictionary items and if an item is a file, it
+        calculates the md5 hash of the file contents and stores the
+        file name and hash value as the new key value.
+
+        However, the overall bunch hash is calculated only on the hash
+        value of a file. The path and name of the file are not used in
+        the overall hash calculation.
+
+        Returns
+        -------
+        list_withhash : dict
+            Copy of our dictionary with the new file hashes included
+            with each file.
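+            Each file entry is recorded as a ``(filename, hash)`` tuple.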
+ hashvalue : str + The md5 hash value of the traited spec + + """ + + list_withhash = [] + list_nofilename = [] + for name, val in sorted(self.trait_get().items()): + if not isdefined(val) or self.has_metadata(name, "nohash", True): + # skip undefined traits and traits with nohash=True + continue + + hash_files = (not self.has_metadata(name, "hash_files", False) and + not self.has_metadata(name, "name_source")) + list_nofilename.append((name, + self._get_sorteddict( + val, + hash_method=hash_method, + hash_files=hash_files))) + list_withhash.append((name, + self._get_sorteddict( + val, + True, + hash_method=hash_method, + hash_files=hash_files))) + return list_withhash, md5(to_str(list_nofilename).encode()).hexdigest() + + def _get_sorteddict(self, + objekt, + dictwithhash=False, + hash_method=None, + hash_files=True): + if isinstance(objekt, dict): + out = [] + for key, val in sorted(objekt.items()): + if isdefined(val): + out.append((key, + self._get_sorteddict( + val, + dictwithhash, + hash_method=hash_method, + hash_files=hash_files))) + elif isinstance(objekt, (list, tuple)): + out = [] + for val in objekt: + if isdefined(val): + out.append( + self._get_sorteddict( + val, + dictwithhash, + hash_method=hash_method, + hash_files=hash_files)) + if isinstance(objekt, tuple): + out = tuple(out) + else: + out = None + if isdefined(objekt): + if (hash_files and isinstance(objekt, (str, bytes)) and + os.path.isfile(objekt)): + if hash_method is None: + hash_method = config.get('execution', 'hash_method') + + if hash_method.lower() == 'timestamp': + hash = hash_timestamp(objekt) + elif hash_method.lower() == 'content': + hash = hash_infile(objekt) + else: + raise Exception( + "Unknown hash method: %s" % hash_method) + if dictwithhash: + out = (objekt, hash) + else: + out = hash + elif isinstance(objekt, float): + out = FLOAT_FORMAT(objekt) + else: + out = objekt + return out + + +class TraitedSpec(BaseTraitedSpec): + """ Create a subclass with strict traits. + + This is used in 90% of the cases. + """ + _ = traits.Disallow + + +class BaseInterfaceInputSpec(TraitedSpec): + pass + + +class DynamicTraitedSpec(BaseTraitedSpec): + """ A subclass to handle dynamic traits + + This class is a workaround for add_traits and clone_traits not + functioning well together. + """ + + def __deepcopy__(self, memo): + """ bug in deepcopy for HasTraits results in weird cloning behavior for + added traits + """ + id_self = id(self) + if id_self in memo: + return memo[id_self] + dup_dict = deepcopy(self.trait_get(), memo) + # access all keys + for key in self.copyable_trait_names(): + if key in self.__dict__.keys(): + _ = getattr(self, key) + # clone once + dup = self.clone_traits(memo=memo) + for key in self.copyable_trait_names(): + try: + _ = getattr(dup, key) + except: + pass + # clone twice + dup = self.clone_traits(memo=memo) + dup.trait_set(**dup_dict) + return dup + + +class CommandLineInputSpec(BaseInterfaceInputSpec): + args = traits.Str(argstr='%s', desc='Additional parameters to the command') + environ = traits.DictStrStr( + desc='Environment variables', usedefault=True, nohash=True) + + +class StdOutCommandLineInputSpec(CommandLineInputSpec): + out_file = traits.File(argstr="> %s", position=-1, genfile=True) + + +class MpiCommandLineInputSpec(CommandLineInputSpec): + use_mpi = traits.Bool( + False, + desc="Whether or not to run the command with mpiexec", + usedefault=True) + n_procs = traits.Int(desc="Num processors to specify to mpiexec. Do not " + "specify if this is managed externally (e.g. 
through " + "SGE)") diff --git a/nipype/interfaces/base/support.py b/nipype/interfaces/base/support.py new file mode 100644 index 0000000000..87252fd6d3 --- /dev/null +++ b/nipype/interfaces/base/support.py @@ -0,0 +1,303 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" + +Miscellaneous tools to support Interface functionality +...................................................... + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, object, str + +import os +from copy import deepcopy + +import datetime +import locale + +from ... import logging +from ...utils.misc import is_container +from ...utils.filemanip import md5, to_str, hash_infile +iflogger = logging.getLogger('nipype.interface') + + +class NipypeInterfaceError(Exception): + """Custom error for interfaces""" + + def __init__(self, value): + self.value = value + + def __str__(self): + return '{}'.format(self.value) + + +class Bunch(object): + """Dictionary-like class that provides attribute-style access to it's items. + + A `Bunch` is a simple container that stores it's items as class + attributes. Internally all items are stored in a dictionary and + the class exposes several of the dictionary methods. + + Examples + -------- + >>> from nipype.interfaces.base import Bunch + >>> inputs = Bunch(infile='subj.nii', fwhm=6.0, register_to_mean=True) + >>> inputs + Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=True) + >>> inputs.register_to_mean = False + >>> inputs + Bunch(fwhm=6.0, infile='subj.nii', register_to_mean=False) + + Notes + ----- + The Bunch pattern came from the Python Cookbook: + + .. [1] A. Martelli, D. Hudgeon, "Collecting a Bunch of Named + Items", Python Cookbook, 2nd Ed, Chapter 4.18, 2005. + + """ + + def __init__(self, *args, **kwargs): + self.__dict__.update(*args, **kwargs) + + def update(self, *args, **kwargs): + """update existing attribute, or create new attribute + + Note: update is very much like HasTraits.set""" + self.__dict__.update(*args, **kwargs) + + def items(self): + """iterates over bunch attributes as key, value pairs""" + return list(self.__dict__.items()) + + def iteritems(self): + """iterates over bunch attributes as key, value pairs""" + iflogger.warning('iteritems is deprecated, use items instead') + return list(self.items()) + + def get(self, *args): + """Support dictionary get() functionality + """ + return self.__dict__.get(*args) + + def set(self, **kwargs): + """Support dictionary get() functionality + """ + return self.__dict__.update(**kwargs) + + def dictcopy(self): + """returns a deep copy of existing Bunch as a dictionary""" + return deepcopy(self.__dict__) + + def __repr__(self): + """representation of the sorted Bunch as a string + + Currently, this string representation of the `inputs` Bunch of + interfaces is hashed to determine if the process' dirty-bit + needs setting or not. Till that mechanism changes, only alter + this after careful consideration. 
+ """ + outstr = ['Bunch('] + first = True + for k, v in sorted(self.items()): + if not first: + outstr.append(', ') + if isinstance(v, dict): + pairs = [] + for key, value in sorted(v.items()): + pairs.append("'%s': %s" % (key, value)) + v = '{' + ', '.join(pairs) + '}' + outstr.append('%s=%s' % (k, v)) + else: + outstr.append('%s=%r' % (k, v)) + first = False + outstr.append(')') + return ''.join(outstr) + + def _get_bunch_hash(self): + """Return a dictionary of our items with hashes for each file. + + Searches through dictionary items and if an item is a file, it + calculates the md5 hash of the file contents and stores the + file name and hash value as the new key value. + + However, the overall bunch hash is calculated only on the hash + value of a file. The path and name of the file are not used in + the overall hash calculation. + + Returns + ------- + dict_withhash : dict + Copy of our dictionary with the new file hashes included + with each file. + hashvalue : str + The md5 hash value of the `dict_withhash` + + """ + + infile_list = [] + for key, val in list(self.items()): + if is_container(val): + # XXX - SG this probably doesn't catch numpy arrays + # containing embedded file names either. + if isinstance(val, dict): + # XXX - SG should traverse dicts, but ignoring for now + item = None + else: + if len(val) == 0: + raise AttributeError('%s attribute is empty' % key) + item = val[0] + else: + item = val + try: + if isinstance(item, str) and os.path.isfile(item): + infile_list.append(key) + except TypeError: + # `item` is not a file or string. + continue + dict_withhash = self.dictcopy() + dict_nofilename = self.dictcopy() + for item in infile_list: + dict_withhash[item] = _hash_bunch_dict(dict_withhash, item) + dict_nofilename[item] = [val[1] for val in dict_withhash[item]] + # Sort the items of the dictionary, before hashing the string + # representation so we get a predictable order of the + # dictionary. + sorted_dict = to_str(sorted(dict_nofilename.items())) + return dict_withhash, md5(sorted_dict.encode()).hexdigest() + + def _repr_pretty_(self, p, cycle): + """Support for the pretty module from ipython.externals""" + if cycle: + p.text('Bunch(...)') + else: + p.begin_group(6, 'Bunch(') + first = True + for k, v in sorted(self.items()): + if not first: + p.text(',') + p.breakable() + p.text(k + '=') + p.pretty(v) + first = False + p.end_group(6, ')') + + +def _hash_bunch_dict(adict, key): + """Inject file hashes into adict[key]""" + stuff = adict[key] + if not is_container(stuff): + stuff = [stuff] + return [(afile, hash_infile(afile)) for afile in stuff] + + +class InterfaceResult(object): + """Object that contains the results of running a particular Interface. + + Attributes + ---------- + version : version of this Interface result object (a readonly property) + interface : class type + A copy of the `Interface` class that was run to generate this result. + inputs : a traits free representation of the inputs + outputs : Bunch + An `Interface` specific Bunch that contains all possible files + that are generated by the interface. The `outputs` are used + as the `inputs` to another node when interfaces are used in + the pipeline. + runtime : Bunch + + Contains attributes that describe the runtime environment when + the `Interface` was run. Contains the attributes: + + * cmdline : The command line string that was executed + * cwd : The directory the ``cmdline`` was executed in. + * stdout : The output of running the ``cmdline``. 
+ * stderr : Any error messages output from running ``cmdline``. + * returncode : The code returned from running the ``cmdline``. + + """ + + def __init__(self, + interface, + runtime, + inputs=None, + outputs=None, + provenance=None): + self._version = 2.0 + self.interface = interface + self.runtime = runtime + self.inputs = inputs + self.outputs = outputs + self.provenance = provenance + + @property + def version(self): + return self._version + + +class Stream(object): + """Function to capture stdout and stderr streams with timestamps + + stackoverflow.com/questions/4984549/merge-and-sync-stdout-and-stderr/5188359 + """ + + def __init__(self, name, impl): + self._name = name + self._impl = impl + self._buf = '' + self._rows = [] + self._lastidx = 0 + self.default_encoding = locale.getdefaultlocale()[1] or 'UTF-8' + + def fileno(self): + "Pass-through for file descriptor." + return self._impl.fileno() + + def read(self, drain=0): + "Read from the file descriptor. If 'drain' set, read until EOF." + while self._read(drain) is not None: + if not drain: + break + + def _read(self, drain): + "Read from the file descriptor" + fd = self.fileno() + buf = os.read(fd, 4096).decode(self.default_encoding) + if not buf and not self._buf: + return None + if '\n' not in buf: + if not drain: + self._buf += buf + return [] + + # prepend any data previously read, then split into lines and format + buf = self._buf + buf + if '\n' in buf: + tmp, rest = buf.rsplit('\n', 1) + else: + tmp = buf + rest = None + self._buf = rest + now = datetime.datetime.now().isoformat() + rows = tmp.split('\n') + self._rows += [(now, '%s %s:%s' % (self._name, now, r), r) + for r in rows] + for idx in range(self._lastidx, len(self._rows)): + iflogger.info(self._rows[idx][1]) + self._lastidx = len(self._rows) + + +def load_template(name): + """ + Deprecated stub for backwards compatibility, + please use nipype.interfaces.fsl.model.load_template + + """ + from ..fsl.model import load_template + iflogger.warning( + 'Deprecated in 1.0.0, and will be removed in 1.1.0, ' + 'please use nipype.interfaces.fsl.model.load_template instead.') + return load_template(name) diff --git a/nipype/interfaces/base/tests/__init__.py b/nipype/interfaces/base/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/base/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/base/tests/test_auto_BaseInterface.py b/nipype/interfaces/base/tests/test_auto_BaseInterface.py new file mode 100644 index 0000000000..eb0272d495 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_BaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import BaseInterface + + +def test_BaseInterface_inputs(): + input_map = dict() + inputs = BaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_CommandLine.py b/nipype/interfaces/base/tests/test_auto_CommandLine.py new file mode 100644 index 0000000000..fb16422864 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_CommandLine.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import CommandLine + + +def test_CommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + ) + inputs = CommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py b/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py new file mode 100644 index 0000000000..19beef071f --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_LibraryBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import LibraryBaseInterface + + +def test_LibraryBaseInterface_inputs(): + input_map = dict() + inputs = LibraryBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py new file mode 100644 index 0000000000..2d66f2f623 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_MpiCommandLine.py @@ -0,0 +1,20 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import MpiCommandLine + + +def test_MpiCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + n_procs=dict(), + use_mpi=dict(usedefault=True, ), + ) + inputs = MpiCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py new file mode 100644 index 0000000000..aceabfe2a2 --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_SEMLikeCommandLine.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import SEMLikeCommandLine + + +def test_SEMLikeCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = SEMLikeCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_SimpleInterface.py b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py new file mode 100644 index 0000000000..2a4454dd9f --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_SimpleInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import SimpleInterface + + +def test_SimpleInterface_inputs(): + input_map = dict() + inputs = SimpleInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py new file mode 100644 index 0000000000..e39dc3acaa --- /dev/null +++ b/nipype/interfaces/base/tests/test_auto_StdOutCommandLine.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..core import 
StdOutCommandLine + + +def test_StdOutCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + ) + inputs = StdOutCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/base/tests/test_core.py b/nipype/interfaces/base/tests/test_core.py new file mode 100644 index 0000000000..fe1b3a227e --- /dev/null +++ b/nipype/interfaces/base/tests/test_core.py @@ -0,0 +1,519 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from future import standard_library +from builtins import open +import os +import simplejson as json + +import pytest + +from .... import config +from ....testing import example_data +from ... import base as nib + +standard_library.install_aliases() + + +def check_dict(ref_dict, tst_dict): + """Compare dictionaries of inputs and and those loaded from json files""" + + def to_list(x): + if isinstance(x, tuple): + x = list(x) + + if isinstance(x, list): + for i, xel in enumerate(x): + x[i] = to_list(xel) + + return x + + failed_dict = {} + for key, value in list(ref_dict.items()): + newval = to_list(tst_dict[key]) + if newval != value: + failed_dict[key] = (value, newval) + return failed_dict + + +def test_Interface(): + assert nib.Interface.input_spec is None + assert nib.Interface.output_spec is None + with pytest.raises(NotImplementedError): + nib.Interface() + with pytest.raises(NotImplementedError): + nib.Interface.help() + with pytest.raises(NotImplementedError): + nib.Interface._inputs_help() + with pytest.raises(NotImplementedError): + nib.Interface._outputs_help() + with pytest.raises(NotImplementedError): + nib.Interface._outputs() + + class DerivedInterface(nib.Interface): + def __init__(self): + pass + + nif = DerivedInterface() + with pytest.raises(NotImplementedError): + nif.run() + with pytest.raises(NotImplementedError): + nif.aggregate_outputs() + with pytest.raises(NotImplementedError): + nif._list_outputs() + with pytest.raises(NotImplementedError): + nif._get_filecopy_info() + + +def test_BaseInterface(): + config.set('monitoring', 'enable', '0') + + assert nib.BaseInterface.help() is None + assert nib.BaseInterface._get_filecopy_info() == [] + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + goo = nib.traits.Int(desc='a random int', mandatory=True) + moo = nib.traits.Int(desc='a random int', mandatory=False) + hoo = nib.traits.Int(desc='a random int', usedefault=True) + zoo = nib.File(desc='a file', copyfile=False) + woo = nib.File(desc='a file', copyfile=True) + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class DerivedInterface(nib.BaseInterface): + input_spec = InputSpec + resource_monitor = False + + assert DerivedInterface.help() is None + assert 'moo' in ''.join(DerivedInterface._inputs_help()) + assert DerivedInterface()._outputs() is None + assert DerivedInterface._get_filecopy_info()[0]['key'] == 'woo' + assert DerivedInterface._get_filecopy_info()[0]['copy'] + assert DerivedInterface._get_filecopy_info()[1]['key'] == 'zoo' + assert not DerivedInterface._get_filecopy_info()[1]['copy'] + assert DerivedInterface().inputs.foo == nib.Undefined + 
with pytest.raises(ValueError): + DerivedInterface()._check_mandatory_inputs() + assert DerivedInterface(goo=1)._check_mandatory_inputs() is None + with pytest.raises(ValueError): + DerivedInterface().run() + with pytest.raises(NotImplementedError): + DerivedInterface(goo=1).run() + + class DerivedInterface2(DerivedInterface): + output_spec = OutputSpec + + def _run_interface(self, runtime): + return runtime + + assert DerivedInterface2.help() is None + assert DerivedInterface2()._outputs().foo == nib.Undefined + with pytest.raises(NotImplementedError): + DerivedInterface2(goo=1).run() + + default_inpu_spec = nib.BaseInterface.input_spec + nib.BaseInterface.input_spec = None + with pytest.raises(Exception): + nib.BaseInterface() + nib.BaseInterface.input_spec = default_inpu_spec + + +def test_BaseInterface_load_save_inputs(tmpdir): + tmp_json = tmpdir.join('settings.json').strpath + + class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int() + input2 = nib.traits.Float() + input3 = nib.traits.Bool() + input4 = nib.traits.Str() + + class DerivedInterface(nib.BaseInterface): + input_spec = InputSpec + + def __init__(self, **inputs): + super(DerivedInterface, self).__init__(**inputs) + + inputs_dict = {'input1': 12, 'input3': True, 'input4': 'some string'} + bif = DerivedInterface(**inputs_dict) + bif.save_inputs_to_json(tmp_json) + bif2 = DerivedInterface() + bif2.load_inputs_from_json(tmp_json) + assert bif2.inputs.get_traitsfree() == inputs_dict + + bif3 = DerivedInterface(from_file=tmp_json) + assert bif3.inputs.get_traitsfree() == inputs_dict + + inputs_dict2 = inputs_dict.copy() + inputs_dict2.update({'input4': 'some other string'}) + bif4 = DerivedInterface(from_file=tmp_json, input4=inputs_dict2['input4']) + assert bif4.inputs.get_traitsfree() == inputs_dict2 + + bif5 = DerivedInterface(input4=inputs_dict2['input4']) + bif5.load_inputs_from_json(tmp_json, overwrite=False) + assert bif5.inputs.get_traitsfree() == inputs_dict2 + + bif6 = DerivedInterface(input4=inputs_dict2['input4']) + bif6.load_inputs_from_json(tmp_json) + assert bif6.inputs.get_traitsfree() == inputs_dict + + # test get hashval in a complex interface + from nipype.interfaces.ants import Registration + settings = example_data( + example_data('smri_ants_registration_settings.json')) + with open(settings) as setf: + data_dict = json.load(setf) + + tsthash = Registration() + tsthash.load_inputs_from_json(settings) + assert {} == check_dict(data_dict, tsthash.inputs.get_traitsfree()) + + tsthash2 = Registration(from_file=settings) + assert {} == check_dict(data_dict, tsthash2.inputs.get_traitsfree()) + + _, hashvalue = tsthash.inputs.get_hashval(hash_method='timestamp') + assert '8562a5623562a871115eb14822ee8d02' == hashvalue + + +class MinVerInputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + +class MaxVerInputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', max_ver='0.7') + + +def test_input_version_1(): + class DerivedInterface1(nib.BaseInterface): + input_spec = MinVerInputSpec + + obj = DerivedInterface1() + obj._check_version_requirements(obj.inputs) + + config.set('execution', 'stop_on_unknown_version', True) + + with pytest.raises(ValueError) as excinfo: + obj._check_version_requirements(obj.inputs) + assert "no version information" in str(excinfo.value) + + config.set_default_config() + + +def test_input_version_2(): + class DerivedInterface1(nib.BaseInterface): + input_spec = MinVerInputSpec + _version = '0.8' + + obj = DerivedInterface1() + 
obj.inputs.foo = 1 + with pytest.raises(Exception) as excinfo: + obj._check_version_requirements(obj.inputs) + assert "version 0.8 < required 0.9" in str(excinfo.value) + + +def test_input_version_3(): + class DerivedInterface1(nib.BaseInterface): + input_spec = MinVerInputSpec + _version = '0.10' + + obj = DerivedInterface1() + obj._check_version_requirements(obj.inputs) + + +def test_input_version_4(): + class DerivedInterface1(nib.BaseInterface): + input_spec = MinVerInputSpec + _version = '0.9' + + obj = DerivedInterface1() + obj.inputs.foo = 1 + obj._check_version_requirements(obj.inputs) + + +def test_input_version_5(): + class DerivedInterface2(nib.BaseInterface): + input_spec = MaxVerInputSpec + _version = '0.8' + + obj = DerivedInterface2() + obj.inputs.foo = 1 + with pytest.raises(Exception) as excinfo: + obj._check_version_requirements(obj.inputs) + assert "version 0.8 > required 0.7" in str(excinfo.value) + + +def test_input_version_6(): + class DerivedInterface1(nib.BaseInterface): + input_spec = MaxVerInputSpec + _version = '0.7' + + obj = DerivedInterface1() + obj.inputs.foo = 1 + obj._check_version_requirements(obj.inputs) + + +def test_output_version(): + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.9') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + obj = DerivedInterface1() + assert obj._check_version_requirements(obj._outputs()) == [] + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.11') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + obj = DerivedInterface1() + assert obj._check_version_requirements(obj._outputs()) == ['foo'] + + class InputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int') + + class OutputSpec(nib.TraitedSpec): + foo = nib.traits.Int(desc='a random int', min_ver='0.11') + + class DerivedInterface1(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + _version = '0.10' + resource_monitor = False + + def _run_interface(self, runtime): + return runtime + + def _list_outputs(self): + return {'foo': 1} + + obj = DerivedInterface1() + with pytest.raises(KeyError): + obj.run() + + +def test_Commandline(): + with pytest.raises(Exception): + nib.CommandLine() + ci = nib.CommandLine(command='which') + assert ci.cmd == 'which' + assert ci.inputs.args == nib.Undefined + ci2 = nib.CommandLine(command='which', args='ls') + assert ci2.cmdline == 'which ls' + ci3 = nib.CommandLine(command='echo') + ci3.resource_monitor = False + ci3.inputs.environ = {'MYENV': 'foo'} + res = ci3.run() + assert res.runtime.environ['MYENV'] == 'foo' + assert res.outputs is None + + class CommandLineInputSpec1(nib.CommandLineInputSpec): + foo = nib.Str(argstr='%s', desc='a str') + goo = nib.traits.Bool(argstr='-g', desc='a bool', position=0) + hoo = nib.traits.List(argstr='-l %s', desc='a list') + moo = nib.traits.List( + argstr='-i %d...', desc='a repeated list', position=-1) + noo = nib.traits.Int(argstr='-x %d', desc='an int') + roo = nib.traits.Str(desc='not on command line') + soo = nib.traits.Bool(argstr="-soo") + + nib.CommandLine.input_spec = CommandLineInputSpec1 + ci4 = 
nib.CommandLine(command='cmd') + ci4.inputs.foo = 'foo' + ci4.inputs.goo = True + ci4.inputs.hoo = ['a', 'b'] + ci4.inputs.moo = [1, 2, 3] + ci4.inputs.noo = 0 + ci4.inputs.roo = 'hello' + ci4.inputs.soo = False + cmd = ci4._parse_inputs() + assert cmd[0] == '-g' + assert cmd[-1] == '-i 1 -i 2 -i 3' + assert 'hello' not in ' '.join(cmd) + assert '-soo' not in ' '.join(cmd) + ci4.inputs.soo = True + cmd = ci4._parse_inputs() + assert '-soo' in ' '.join(cmd) + + class CommandLineInputSpec2(nib.CommandLineInputSpec): + foo = nib.File(argstr='%s', desc='a str', genfile=True) + + nib.CommandLine.input_spec = CommandLineInputSpec2 + ci5 = nib.CommandLine(command='cmd') + with pytest.raises(NotImplementedError): + ci5._parse_inputs() + + class DerivedClass(nib.CommandLine): + input_spec = CommandLineInputSpec2 + + def _gen_filename(self, name): + return 'filename' + + ci6 = DerivedClass(command='cmd') + assert ci6._parse_inputs()[0] == 'filename' + nib.CommandLine.input_spec = nib.CommandLineInputSpec + + +def test_Commandline_environ(monkeypatch, tmpdir): + from nipype import config + config.set_default_config() + + tmpdir.chdir() + monkeypatch.setitem(os.environ, 'DISPLAY', ':1') + # Test environment + ci3 = nib.CommandLine(command='echo') + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':1' + + # Test display_variable option + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + config.set('execution', 'display_variable', ':3') + res = ci3.run() + assert 'DISPLAY' not in ci3.inputs.environ + assert 'DISPLAY' not in res.runtime.environ + + # If the interface has _redirect_x then yes, it should be set + ci3._redirect_x = True + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':3' + + # Test overwrite + monkeypatch.setitem(os.environ, 'DISPLAY', ':1') + ci3.inputs.environ = {'DISPLAY': ':2'} + res = ci3.run() + assert res.runtime.environ['DISPLAY'] == ':2' + + +def test_CommandLine_output(tmpdir): + # Create one file + tmpdir.chdir() + file = tmpdir.join('foo.txt') + file.write('123456\n') + name = os.path.basename(file.strpath) + + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'allatonce' + res = ci.run() + assert res.runtime.merged == '' + assert name in res.runtime.stdout + + # Check stdout is written + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_stdout' + res = ci.run() + assert os.path.isfile('stdout.nipype') + assert name in res.runtime.stdout + tmpdir.join('stdout.nipype').remove(ignore_errors=True) + + # Check stderr is written + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_stderr' + res = ci.run() + assert os.path.isfile('stderr.nipype') + tmpdir.join('stderr.nipype').remove(ignore_errors=True) + + # Check outputs are thrown away + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'none' + res = ci.run() + assert res.runtime.stdout == '' and \ + res.runtime.stderr == '' and \ + res.runtime.merged == '' + + # Check that new interfaces are set to default 'stream' + ci = nib.CommandLine(command='ls -l') + res = ci.run() + assert ci.terminal_output == 'stream' + assert name in res.runtime.stdout and \ + res.runtime.stderr == '' + + # Check only one file is generated + ci = nib.CommandLine(command='ls -l') + ci.terminal_output = 'file' + res = ci.run() + assert os.path.isfile('output.nipype') + assert name in res.runtime.merged and \ + res.runtime.stdout == '' and \ + res.runtime.stderr == '' + tmpdir.join('output.nipype').remove(ignore_errors=True) + + # Check split files are generated + ci 
= nib.CommandLine(command='ls -l') + ci.terminal_output = 'file_split' + res = ci.run() + assert os.path.isfile('stdout.nipype') + assert os.path.isfile('stderr.nipype') + assert name in res.runtime.stdout + + +def test_global_CommandLine_output(tmpdir): + """Ensures CommandLine.set_default_terminal_output works""" + from nipype.interfaces.fsl import BET + + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'stream' # default case + + ci = BET() + assert ci.terminal_output == 'stream' # default case + + nib.CommandLine.set_default_terminal_output('allatonce') + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'allatonce' + + nib.CommandLine.set_default_terminal_output('file') + ci = nib.CommandLine(command='ls -l') + assert ci.terminal_output == 'file' + + # Check default affects derived interfaces + ci = BET() + assert ci.terminal_output == 'file' + + +def test_CommandLine_prefix(tmpdir): + tmpdir.chdir() + oop = 'out/of/path' + os.makedirs(oop) + + script_name = 'test_script.sh' + script_path = os.path.join(oop, script_name) + with open(script_path, 'w') as script_f: + script_f.write('#!/usr/bin/env bash\necho Success!') + os.chmod(script_path, 0o755) + + ci = nib.CommandLine(command=script_name) + with pytest.raises(IOError): + ci.run() + + class OOPCLI(nib.CommandLine): + _cmd_prefix = oop + '/' + + ci = OOPCLI(command=script_name) + ci.run() + + class OOPShell(nib.CommandLine): + _cmd_prefix = 'bash {}/'.format(oop) + + ci = OOPShell(command=script_name) + ci.run() + + class OOPBadShell(nib.CommandLine): + _cmd_prefix = 'shell_dne {}/'.format(oop) + + ci = OOPBadShell(command=script_name) + with pytest.raises(IOError): + ci.run() diff --git a/nipype/interfaces/base/tests/test_resource_monitor.py b/nipype/interfaces/base/tests/test_resource_monitor.py new file mode 100644 index 0000000000..f82a82661d --- /dev/null +++ b/nipype/interfaces/base/tests/test_resource_monitor.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Module to unit test the resource_monitor in nipype +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +import pytest + +# Import packages +from .... import config +from ....utils.profiler import _use_resources +from ...base import traits, CommandLine, CommandLineInputSpec +from ... 
import utility as niu + +# Try to enable the resource monitor +config.enable_resource_monitor() +run_profile = config.resource_monitor + + +class UseResourcesInputSpec(CommandLineInputSpec): + mem_gb = traits.Float( + desc='Number of GB of RAM to use', argstr='-g %f', mandatory=True) + n_procs = traits.Int( + desc='Number of threads to use', argstr='-p %d', mandatory=True) + + +class UseResources(CommandLine): + """ + use_resources cmd interface + """ + from nipype import __path__ + # Init attributes + input_spec = UseResourcesInputSpec + + # Get path of executable + exec_dir = os.path.realpath(__path__[0]) + exec_path = os.path.join(exec_dir, 'utils', 'tests', 'use_resources') + + # Init cmd + _cmd = exec_path + _always_run = True + + +@pytest.mark.skip(reason="inconsistent readings") +@pytest.mark.skipif( + os.getenv('CI_SKIP_TEST', False), reason='disabled in CI tests') +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), + (1.5, 1)]) +def test_cmdline_profiling(tmpdir, mem_gb, n_procs): + """ + Test runtime profiler correctly records workflow RAM/CPUs consumption + of a CommandLine-derived interface + """ + from nipype import config + config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + tmpdir.chdir() + iface = UseResources(mem_gb=mem_gb, n_procs=n_procs) + result = iface.run() + + assert abs(mem_gb - result.runtime.mem_peak_gb + ) < 0.3, 'estimated memory error above .3GB' + assert int(result.runtime.cpu_percent / 100 + 0.2 + ) == n_procs, 'wrong number of threads estimated' + + +@pytest.mark.skipif( + True, reason='test disabled temporarily, until funcion profiling works') +@pytest.mark.parametrize("mem_gb,n_procs", [(0.5, 3), (2.2, 8), (0.8, 4), + (1.5, 1)]) +def test_function_profiling(tmpdir, mem_gb, n_procs): + """ + Test runtime profiler correctly records workflow RAM/CPUs consumption + of a Function interface + """ + from nipype import config + config.set('monitoring', 'sample_frequency', '0.2') # Force sampling fast + + tmpdir.chdir() + iface = niu.Function(function=_use_resources) + iface.inputs.mem_gb = mem_gb + iface.inputs.n_procs = n_procs + result = iface.run() + + assert abs(mem_gb - result.runtime.mem_peak_gb + ) < 0.3, 'estimated memory error above .3GB' + assert int(result.runtime.cpu_percent / 100 + 0.2) >= n_procs diff --git a/nipype/interfaces/base/tests/test_specs.py b/nipype/interfaces/base/tests/test_specs.py new file mode 100644 index 0000000000..2586fc7b6a --- /dev/null +++ b/nipype/interfaces/base/tests/test_specs.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from future import standard_library +import os +import warnings + +import pytest + +from ....utils.filemanip import split_filename +from ... 
import base as nib +from ...base import traits, Undefined + +standard_library.install_aliases() + + +@pytest.fixture(scope="module") +def setup_file(request, tmpdir_factory): + tmp_dir = tmpdir_factory.mktemp('files') + tmp_infile = tmp_dir.join('foo.txt') + with tmp_infile.open('w') as fp: + fp.writelines(['123456789']) + + tmp_dir.chdir() + + return tmp_infile.strpath + + +def test_TraitedSpec(): + assert nib.TraitedSpec().get_hashval() + assert nib.TraitedSpec().__repr__() == '\n\n' + + class spec(nib.TraitedSpec): + foo = nib.traits.Int + goo = nib.traits.Float(usedefault=True) + + assert spec().foo == Undefined + assert spec().goo == 0.0 + specfunc = lambda x: spec(hoo=x) + with pytest.raises(nib.traits.TraitError): + specfunc(1) + infields = spec(foo=1) + hashval = ([('foo', 1), ('goo', '0.0000000000')], + 'e89433b8c9141aa0fda2f8f4d662c047') + assert infields.get_hashval() == hashval + assert infields.__repr__() == '\nfoo = 1\ngoo = 0.0\n' + + +@pytest.mark.skip +def test_TraitedSpec_dynamic(): + from pickle import dumps, loads + a = nib.BaseTraitedSpec() + a.add_trait('foo', nib.traits.Int) + a.foo = 1 + assign_a = lambda: setattr(a, 'foo', 'a') + with pytest.raises(Exception): + assign_a + pkld_a = dumps(a) + unpkld_a = loads(pkld_a) + assign_a_again = lambda: setattr(unpkld_a, 'foo', 'a') + with pytest.raises(Exception): + assign_a_again + + +def test_TraitedSpec_logic(): + class spec3(nib.TraitedSpec): + _xor_inputs = ('foo', 'bar') + + foo = nib.traits.Int(xor=_xor_inputs, desc='foo or bar, not both') + bar = nib.traits.Int(xor=_xor_inputs, desc='bar or foo, not both') + kung = nib.traits.Float( + requires=('foo', ), position=0, desc='kung foo') + + class out3(nib.TraitedSpec): + output = nib.traits.Int + + class MyInterface(nib.BaseInterface): + input_spec = spec3 + output_spec = out3 + + myif = MyInterface() + # NOTE_dj, FAIL: I don't get a TypeError, only a UserWarning + # with pytest.raises(TypeError): + # setattr(myif.inputs, 'kung', 10.0) + myif.inputs.foo = 1 + assert myif.inputs.foo == 1 + set_bar = lambda: setattr(myif.inputs, 'bar', 1) + with pytest.raises(IOError): + set_bar() + assert myif.inputs.foo == 1 + myif.inputs.kung = 2 + assert myif.inputs.kung == 2.0 + + +def test_deprecation(): + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec1(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='0.1') + + spec_instance = DeprecationSpec1() + set_foo = lambda: setattr(spec_instance, 'foo', 1) + with pytest.raises(nib.TraitError): + set_foo() + assert len(w) == 0, 'no warnings, just errors' + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec2(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='100', new_name='bar') + + spec_instance = DeprecationSpec2() + set_foo = lambda: setattr(spec_instance, 'foo', 1) + with pytest.raises(nib.TraitError): + set_foo() + assert len(w) == 0, 'no warnings, just errors' + + with warnings.catch_warnings(record=True) as w: + warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec3(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='1000', new_name='bar') + bar = nib.traits.Int() + + spec_instance = DeprecationSpec3() + not_raised = True + try: + spec_instance.foo = 1 + except nib.TraitError: + not_raised = False + assert not_raised + assert len( + w) == 1, 'deprecated warning 1 %s' % [w1.message for w1 in w] + + with warnings.catch_warnings(record=True) as w: + 
warnings.filterwarnings('always', '', UserWarning) + + class DeprecationSpec3(nib.TraitedSpec): + foo = nib.traits.Int(deprecated='1000', new_name='bar') + bar = nib.traits.Int() + + spec_instance = DeprecationSpec3() + not_raised = True + try: + spec_instance.foo = 1 + except nib.TraitError: + not_raised = False + assert not_raised + assert spec_instance.foo == Undefined + assert spec_instance.bar == 1 + assert len( + w) == 1, 'deprecated warning 2 %s' % [w1.message for w1 in w] + + +def test_namesource(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec2(nib.CommandLineInputSpec): + moo = nib.File( + name_source=['doo'], hash_files=False, argstr="%s", position=2) + doo = nib.File(exists=True, argstr="%s", position=1) + goo = traits.Int(argstr="%d", position=4) + poo = nib.File( + name_source=['goo'], hash_files=False, argstr="%s", position=3) + + class TestName(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec2 + + testobj = TestName() + testobj.inputs.doo = tmp_infile + testobj.inputs.goo = 99 + assert '%s_generated' % nme in testobj.cmdline + assert '%d_generated' % testobj.inputs.goo in testobj.cmdline + testobj.inputs.moo = "my_%s_template" + assert 'my_%s_template' % nme in testobj.cmdline + + +def test_chained_namesource(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec2(nib.CommandLineInputSpec): + doo = nib.File(exists=True, argstr="%s", position=1) + moo = nib.File( + name_source=['doo'], + hash_files=False, + argstr="%s", + position=2, + name_template='%s_mootpl') + poo = nib.File( + name_source=['moo'], hash_files=False, argstr="%s", position=3) + + class TestName(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec2 + + testobj = TestName() + testobj.inputs.doo = tmp_infile + res = testobj.cmdline + assert '%s' % tmp_infile in res + assert '%s_mootpl ' % nme in res + assert '%s_mootpl_generated' % nme in res + + +def test_cycle_namesource1(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec3(nib.CommandLineInputSpec): + moo = nib.File( + name_source=['doo'], + hash_files=False, + argstr="%s", + position=1, + name_template='%s_mootpl') + poo = nib.File( + name_source=['moo'], hash_files=False, argstr="%s", position=2) + doo = nib.File( + name_source=['poo'], hash_files=False, argstr="%s", position=3) + + class TestCycle(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec3 + + # Check that an exception is raised + to0 = TestCycle() + not_raised = True + try: + to0.cmdline + except nib.NipypeInterfaceError: + not_raised = False + assert not not_raised + + +def test_cycle_namesource2(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class spec3(nib.CommandLineInputSpec): + moo = nib.File( + name_source=['doo'], + hash_files=False, + argstr="%s", + position=1, + name_template='%s_mootpl') + poo = nib.File( + name_source=['moo'], hash_files=False, argstr="%s", position=2) + doo = nib.File( + name_source=['poo'], hash_files=False, argstr="%s", position=3) + + class TestCycle(nib.CommandLine): + _cmd = "mycommand" + input_spec = spec3 + + # Check that loop can be broken by setting one of the inputs + to1 = TestCycle() + to1.inputs.poo = tmp_infile + + not_raised = True + try: + res = to1.cmdline + except nib.NipypeInterfaceError: + not_raised = False + print(res) + + assert not_raised + assert '%s' % tmp_infile in res + assert '%s_generated' % nme in res + assert 
'%s_generated_mootpl' % nme in res + + +def test_namesource_constraints(setup_file): + tmp_infile = setup_file + tmpd, nme, ext = split_filename(tmp_infile) + + class constrained_spec(nib.CommandLineInputSpec): + in_file = nib.File(argstr="%s", position=1) + threshold = traits.Float( + argstr="%g", + xor=['mask_file'], + position=2) + mask_file = nib.File( + argstr="%s", + name_source=['in_file'], + name_template='%s_mask', + keep_extension=True, + xor=['threshold'], + position=2) + out_file1 = nib.File( + argstr="%s", + name_source=['in_file'], + name_template='%s_out1', + keep_extension=True, + position=3) + out_file2 = nib.File( + argstr="%s", + name_source=['in_file'], + name_template='%s_out2', + keep_extension=True, + requires=['threshold'], + position=4) + + class TestConstrained(nib.CommandLine): + _cmd = "mycommand" + input_spec = constrained_spec + + tc = TestConstrained() + + # name_source undefined, so template traits remain undefined + assert tc.cmdline == 'mycommand' + + # mask_file and out_file1 enabled by name_source definition + tc.inputs.in_file = os.path.basename(tmp_infile) + assert tc.cmdline == 'mycommand foo.txt foo_mask.txt foo_out1.txt' + + # mask_file disabled by threshold, out_file2 enabled by threshold + tc.inputs.threshold = 10. + assert tc.cmdline == 'mycommand foo.txt 10 foo_out1.txt foo_out2.txt' + + +def test_TraitedSpec_withFile(setup_file): + tmp_infile = setup_file + tmpd, nme = os.path.split(tmp_infile) + assert os.path.exists(tmp_infile) + + class spec2(nib.TraitedSpec): + moo = nib.File(exists=True) + doo = nib.traits.List(nib.File(exists=True)) + + infields = spec2(moo=tmp_infile, doo=[tmp_infile]) + hashval = infields.get_hashval(hash_method='content') + assert hashval[1] == 'a00e9ee24f5bfa9545a515b7a759886b' + + +def test_TraitedSpec_withNoFileHashing(setup_file): + tmp_infile = setup_file + tmpd, nme = os.path.split(tmp_infile) + assert os.path.exists(tmp_infile) + + class spec2(nib.TraitedSpec): + moo = nib.File(exists=True, hash_files=False) + doo = nib.traits.List(nib.File(exists=True)) + + infields = spec2(moo=nme, doo=[tmp_infile]) + hashval = infields.get_hashval(hash_method='content') + assert hashval[1] == '8da4669ff5d72f670a46ea3e7a203215' + + class spec3(nib.TraitedSpec): + moo = nib.File(exists=True, name_source="doo") + doo = nib.traits.List(nib.File(exists=True)) + + infields = spec3(moo=nme, doo=[tmp_infile]) + hashval1 = infields.get_hashval(hash_method='content') + + class spec4(nib.TraitedSpec): + moo = nib.File(exists=True) + doo = nib.traits.List(nib.File(exists=True)) + + infields = spec4(moo=nme, doo=[tmp_infile]) + hashval2 = infields.get_hashval(hash_method='content') + assert hashval1[1] != hashval2[1] + + +def test_ImageFile(): + x = nib.BaseInterface().inputs + + # setup traits + x.add_trait('nifti', nib.ImageFile(types=['nifti1', 'dicom'])) + x.add_trait('anytype', nib.ImageFile()) + x.add_trait('newtype', nib.ImageFile(types=['nifti10'])) + x.add_trait('nocompress', + nib.ImageFile(types=['mgh'], allow_compressed=False)) + + with pytest.raises(nib.TraitError): + x.nifti = 'test.mgz' + x.nifti = 'test.nii' + x.anytype = 'test.xml' + with pytest.raises(AttributeError): + x.newtype = 'test.nii' + with pytest.raises(nib.TraitError): + x.nocompress = 'test.nii.gz' + x.nocompress = 'test.mgh' diff --git a/nipype/interfaces/base/tests/test_support.py b/nipype/interfaces/base/tests/test_support.py new file mode 100644 index 0000000000..e6db69a458 --- /dev/null +++ b/nipype/interfaces/base/tests/test_support.py @@ -0,0 +1,61 @@ 
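The specs tests above pin down nipype's output-name templating: a File trait with name_source borrows another input's base name, formats it through name_template (falling back to '%s_generated'), and templates may chain through intermediate traits. A minimal, self-contained sketch of that behaviour follows; the command name and file names are illustrative only and are not part of this patch:

import os
import tempfile
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, File

class ChainedSpec(CommandLineInputSpec):
    doo = File(exists=True, argstr='%s', position=1)
    moo = File(name_source=['doo'], name_template='%s_mootpl',
               hash_files=False, argstr='%s', position=2)
    poo = File(name_source=['moo'], hash_files=False, argstr='%s', position=3)

class Chained(CommandLine):
    _cmd = 'mycommand'  # never executed; we only render the command line
    input_spec = ChainedSpec

os.chdir(tempfile.mkdtemp())
open('foo.txt', 'w').close()  # exists=True requires a real file
iface = Chained(doo='foo.txt')
# Per the assertions in test_chained_namesource above, the rendered
# command contains 'foo.txt', 'foo_mootpl' and 'foo_mootpl_generated'.
print(iface.cmdline)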
+# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +import os +import pytest +from builtins import open +from future import standard_library +from pkg_resources import resource_filename as pkgrf + +from ....utils.filemanip import md5 +from ... import base as nib + +standard_library.install_aliases() + + +@pytest.mark.parametrize("args", [{}, {'a': 1, 'b': [2, 3]}]) +def test_bunch(args): + b = nib.Bunch(**args) + assert b.__dict__ == args + + +def test_bunch_attribute(): + b = nib.Bunch(a=1, b=[2, 3], c=None) + assert b.a == 1 + assert b.b == [2, 3] + assert b.c is None + + +def test_bunch_repr(): + b = nib.Bunch(b=2, c=3, a=dict(n=1, m=2)) + assert repr(b) == "Bunch(a={'m': 2, 'n': 1}, b=2, c=3)" + + +def test_bunch_methods(): + b = nib.Bunch(a=2) + b.update(a=3) + newb = b.dictcopy() + assert b.a == 3 + assert b.get('a') == 3 + assert b.get('badkey', 'otherthing') == 'otherthing' + assert b != newb + assert type(dict()) == type(newb) + assert newb['a'] == 3 + + +def test_bunch_hash(): + # NOTE: Since the path to the json file is included in the Bunch, + # the hash will be unique to each machine. + json_pth = pkgrf('nipype', + os.path.join('testing', 'data', 'realign_json.json')) + + b = nib.Bunch(infile=json_pth, otherthing='blue', yat=True) + newbdict, bhash = b._get_bunch_hash() + assert bhash == 'd1f46750044c3de102efc847720fc35f' + # Make sure the hash stored in the json file for `infile` is correct. + jshash = md5() + with open(json_pth, 'r') as fp: + jshash.update(fp.read().encode('utf-8')) + assert newbdict['infile'][0][1] == jshash.hexdigest() + assert newbdict['yat'] is True diff --git a/nipype/interfaces/base/traits_extension.py b/nipype/interfaces/base/traits_extension.py new file mode 100644 index 0000000000..a98ec020c8 --- /dev/null +++ b/nipype/interfaces/base/traits_extension.py @@ -0,0 +1,429 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Traits extension +................ + +This module contains Trait classes that we've pulled from the +traits source and fixed due to various bugs. File and Directory are +redefined as the release version had dependencies on TraitsUI, which +we do not want Nipype to depend on. At least not yet. + +Undefined class was missing the __len__ operator, causing edit_traits +and configure_traits to fail on List objects. Even though we don't +require TraitsUI, this bug was the only thing preventing us from +popping up GUIs which users like. + +These bugs have been in Traits v3.3.0 and v3.2.1. We have reported +all of these bugs and they've been fixed in enthought svn repository +(usually by Robert Kern). 
+ +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import str, bytes +import os +import collections + +# perform all external trait imports here +from traits import __version__ as traits_version +import traits.api as traits +from traits.trait_handlers import TraitDictObject, TraitListObject +from traits.trait_errors import TraitError +from traits.trait_base import _Undefined, class_of + +from traits.api import BaseUnicode +from traits.api import Unicode +from future import standard_library + +if traits_version < '3.7.0': + raise ImportError('Traits version 3.7.0 or higher must be installed') + +standard_library.install_aliases() + + +class Str(Unicode): + """Replacement for the default traits.Str based in bytes""" + + +# Monkeypatch Str and DictStrStr for Python 2 compatibility +traits.Str = Str +DictStrStr = traits.Dict((bytes, str), (bytes, str)) +traits.DictStrStr = DictStrStr + + +class File(BaseUnicode): + """ Defines a trait whose value must be the name of a file. + """ + + # A description of the type of value this trait accepts: + info_text = 'a file name' + + def __init__(self, + value='', + filter=None, + auto_set=False, + entries=0, + exists=False, + **metadata): + """ Creates a File trait. + + Parameters + ---------- + value : string + The default value for the trait + filter : string + A wildcard string to filter filenames in the file dialog box used by + the attribute trait editor. + auto_set : boolean + Indicates whether the file editor updates the trait value after + every key stroke. + exists : boolean + Indicates whether the trait value must be an existing file or + not. + + Default Value + ------------- + *value* or '' + """ + self.filter = filter + self.auto_set = auto_set + self.entries = entries + self.exists = exists + + if exists: + self.info_text = 'an existing file name' + + super(File, self).__init__(value, **metadata) + + def validate(self, object, name, value): + """ Validates that a specified value is valid for this trait.""" + validated_value = super(File, self).validate(object, name, value) + if not self.exists: + return validated_value + elif os.path.isfile(value): + return validated_value + else: + raise TraitError( + args='The trait \'{}\' of {} instance is {}, but the path ' + ' \'{}\' does not exist.'.format(name, class_of(object), + self.info_text, value)) + + self.error(object, name, value) + + +# ------------------------------------------------------------------------------- +# 'Directory' trait +# ------------------------------------------------------------------------------- + + +class Directory(BaseUnicode): + """ + Defines a trait whose value must be the name of a directory. + """ + + # A description of the type of value this trait accepts: + info_text = 'a directory name' + + def __init__(self, + value='', + auto_set=False, + entries=0, + exists=False, + **metadata): + """ Creates a Directory trait. + + Parameters + ---------- + value : string + The default value for the trait + auto_set : boolean + Indicates whether the directory editor updates the trait value + after every key stroke. + exists : boolean + Indicates whether the trait value must be an existing directory or + not. 
+ + Default Value + ------------- + *value* or '' + """ + self.entries = entries + self.auto_set = auto_set + self.exists = exists + + if exists: + self.info_text = 'an existing directory name' + + super(Directory, self).__init__(value, **metadata) + + def validate(self, object, name, value): + """ Validates that a specified value is valid for this trait.""" + if isinstance(value, (str, bytes)): + if not self.exists: + return value + if os.path.isdir(value): + return value + else: + raise TraitError( + args='The trait \'{}\' of {} instance is {}, but the path ' + ' \'{}\' does not exist.'.format(name, class_of(object), + self.info_text, value)) + + self.error(object, name, value) + + +# lists of tuples +# each element consists of : +# - uncompressed (tuple[0]) extension +# - compressed (tuple[1]) extension +img_fmt_types = { + 'nifti1': [('.nii', '.nii.gz'), (('.hdr', '.img'), ('.hdr', '.img.gz'))], + 'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')], + 'nifti2': [('.nii', '.nii.gz')], + 'cifti2': [('.nii', '.nii.gz')], + 'gifti': [('.gii', '.gii.gz')], + 'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')], + 'nrrd': [('.nrrd', 'nrrd'), ('nhdr', 'nhdr')], + 'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')] +} + + +class ImageFile(File): + """ Defines a trait of specific neuroimaging files """ + + def __init__(self, + value='', + filter=None, + auto_set=False, + entries=0, + exists=False, + types=[], + allow_compressed=True, + **metadata): + """ Trait handles neuroimaging files. + + Parameters + ---------- + types : list + Strings of file format types accepted + compressed : boolean + Indicates whether the file format can compressed + """ + self.types = types + self.allow_compressed = allow_compressed + super(ImageFile, self).__init__(value, filter, auto_set, entries, + exists, **metadata) + + def info(self): + existing = 'n existing' if self.exists else '' + comma = ',' if self.exists and not self.allow_compressed else '' + uncompressed = ' uncompressed' if not self.allow_compressed else '' + with_ext = ' (valid extensions: [{}])'.format( + ', '.join(self.grab_exts())) if self.types else '' + return 'a{existing}{comma}{uncompressed} file{with_ext}'.format( + existing=existing, comma=comma, uncompressed=uncompressed, + with_ext=with_ext) + + def grab_exts(self): + # TODO: file type validation + exts = [] + for fmt in self.types: + if fmt in img_fmt_types: + exts.extend( + sum([[u for u in y[0]] + if isinstance(y[0], tuple) else [y[0]] + for y in img_fmt_types[fmt]], [])) + if self.allow_compressed: + exts.extend( + sum([[u for u in y[-1]] + if isinstance(y[-1], tuple) else [y[-1]] + for y in img_fmt_types[fmt]], [])) + else: + raise AttributeError( + 'Information has not been added for format' + ' type {} yet. Supported formats include: ' + '{}'.format(fmt, ', '.join(img_fmt_types.keys()))) + return list(set(exts)) + + def validate(self, object, name, value): + """ Validates that a specified value is valid for this trait. 
+ """ + validated_value = super(ImageFile, self).validate(object, name, value) + if validated_value and self.types: + _exts = self.grab_exts() + if not any(validated_value.endswith(x) for x in _exts): + raise TraitError( + args="{} is not included in allowed types: {}".format( + validated_value, ', '.join(_exts))) + return validated_value + + +""" +The functions that pop-up the Traits GUIs, edit_traits and +configure_traits, were failing because all of our inputs default to +Undefined deep and down in traits/ui/wx/list_editor.py it checks for +the len() of the elements of the list. The _Undefined class in traits +does not define the __len__ method and would error. I tried defining +our own Undefined and even sublassing Undefined, but both of those +failed with a TraitError in our initializer when we assign the +Undefined to the inputs because of an incompatible type: + +TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be a float, but a value of was specified. + +So... in order to keep the same type but add the missing method, I +monkey patched. +""" + + +def length(self): + return 0 + + +########################################################################## +# Apply monkeypatch here +_Undefined.__len__ = length +########################################################################## + +Undefined = _Undefined() + + +def isdefined(object): + return not isinstance(object, _Undefined) + + +def has_metadata(trait, metadata, value=None, recursive=True): + ''' + Checks if a given trait has a metadata (and optionally if it is set to particular value) + ''' + count = 0 + if hasattr(trait, "_metadata") and metadata in list( + trait._metadata.keys()) and (trait._metadata[metadata] == value + or value is None): + count += 1 + if recursive: + if hasattr(trait, 'inner_traits'): + for inner_trait in trait.inner_traits(): + count += has_metadata(inner_trait.trait_type, metadata, + recursive) + if hasattr(trait, 'handlers') and trait.handlers is not None: + for handler in trait.handlers: + count += has_metadata(handler, metadata, recursive) + + return count > 0 + + +class MultiObject(traits.List): + """ Abstract class - shared functionality of input and output MultiObject + """ + + def validate(self, object, name, value): + + # want to treat range and other sequences (except str) as list + if not isinstance(value, (str, bytes)) and isinstance( + value, collections.Sequence): + value = list(value) + + if not isdefined(value) or \ + (isinstance(value, list) and len(value) == 0): + return Undefined + + newvalue = value + + inner_trait = self.inner_traits()[0] + if not isinstance(value, list) \ + or (isinstance(inner_trait.trait_type, traits.List) and + not isinstance(inner_trait.trait_type, InputMultiObject) and + not isinstance(value[0], list)): + newvalue = [value] + value = super(MultiObject, self).validate(object, name, newvalue) + + if value: + return value + + self.error(object, name, value) + + +class OutputMultiObject(MultiObject): + """ Implements a user friendly traits that accepts one or more + paths to files or directories. This is the output version which + return a single string whenever possible (when it was set to a + single value or a list of length 1). Default value of this trait + is _Undefined. It does not accept empty lists. + + XXX This should only be used as a final resort. We should stick to + established Traits to the extent possible. 
+ + XXX This needs to be vetted by somebody who understands traits + + >>> from nipype.interfaces.base import OutputMultiObject, TraitedSpec + >>> class A(TraitedSpec): + ... foo = OutputMultiObject(File(exists=False)) + >>> a = A() + >>> a.foo + + + >>> a.foo = '/software/temp/foo.txt' + >>> a.foo + '/software/temp/foo.txt' + + >>> a.foo = ['/software/temp/foo.txt'] + >>> a.foo + '/software/temp/foo.txt' + + >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] + >>> a.foo + ['/software/temp/foo.txt', '/software/temp/goo.txt'] + + """ + + def get(self, object, name): + value = self.get_value(object, name) + if len(value) == 0: + return Undefined + elif len(value) == 1: + return value[0] + else: + return value + + def set(self, object, name, value): + self.set_value(object, name, value) + + +class InputMultiObject(MultiObject): + """ Implements a user friendly traits that accepts one or more + paths to files or directories. This is the input version which + always returns a list. Default value of this trait + is _Undefined. It does not accept empty lists. + + XXX This should only be used as a final resort. We should stick to + established Traits to the extent possible. + + XXX This needs to be vetted by somebody who understands traits + + >>> from nipype.interfaces.base import InputMultiObject, TraitedSpec + >>> class A(TraitedSpec): + ... foo = InputMultiObject(File(exists=False)) + >>> a = A() + >>> a.foo + + + >>> a.foo = '/software/temp/foo.txt' + >>> a.foo + ['/software/temp/foo.txt'] + + >>> a.foo = ['/software/temp/foo.txt'] + >>> a.foo + ['/software/temp/foo.txt'] + + >>> a.foo = ['/software/temp/foo.txt', '/software/temp/goo.txt'] + >>> a.foo + ['/software/temp/foo.txt', '/software/temp/goo.txt'] + + """ + pass + +InputMultiPath = InputMultiObject +OutputMultiPath = OutputMultiObject diff --git a/nipype/interfaces/brainsuite/__init__.py b/nipype/interfaces/brainsuite/__init__.py new file mode 100644 index 0000000000..6611aedff4 --- /dev/null +++ b/nipype/interfaces/brainsuite/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +from .brainsuite import (Bse, Bfc, Pvc, Cerebro, Cortex, Scrubmask, Tca, + Dewisp, Dfs, Pialmesh, Skullfinder, Hemisplit, SVReg, + BDP, ThicknessPVC) diff --git a/nipype/interfaces/brainsuite/brainsuite.py b/nipype/interfaces/brainsuite/brainsuite.py new file mode 100644 index 0000000000..aa75d2bf81 --- /dev/null +++ b/nipype/interfaces/brainsuite/brainsuite.py @@ -0,0 +1,1763 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import re as regex + +from ..base import TraitedSpec, CommandLineInputSpec, CommandLine, File, traits, isdefined +"""This script provides interfaces for BrainSuite command line tools. +Please see brainsuite.org for more information. + +Author: Jason Wong +""" + + +class BseInputSpec(CommandLineInputSpec): + + inputMRIFile = File( + mandatory=True, argstr='-i %s', desc='input MRI volume') + outputMRIVolume = File( + desc= + 'output brain-masked MRI volume. If unspecified, output file name will be auto generated.', + argstr='-o %s', + hash_files=False, + genfile=True) + outputMaskFile = File( + desc= + 'save smooth brain mask. 
If unspecified, output file name will be auto generated.', + argstr='--mask %s', + hash_files=False, + genfile=True) + diffusionConstant = traits.Float( + 25, usedefault=True, desc='diffusion constant', argstr='-d %f') + diffusionIterations = traits.Int( + 3, usedefault=True, desc='diffusion iterations', argstr='-n %d') + edgeDetectionConstant = traits.Float( + 0.64, usedefault=True, desc='edge detection constant', argstr='-s %f') + radius = traits.Float( + 1, + usedefault=True, + desc='radius of erosion/dilation filter', + argstr='-r %f') + dilateFinalMask = traits.Bool( + True, usedefault=True, desc='dilate final mask', argstr='-p') + trim = traits.Bool( + True, usedefault=True, desc='trim brainstem', argstr='--trim') + outputDiffusionFilter = File( + desc='diffusion filter output', argstr='--adf %s', hash_files=False) + outputEdgeMap = File( + desc='edge map output', argstr='--edge %s', hash_files=False) + outputDetailedBrainMask = File( + desc='save detailed brain mask', argstr='--hires %s', hash_files=False) + outputCortexFile = File( + desc='cortex file', argstr='--cortex %s', hash_files=False) + verbosityLevel = traits.Float( + 1, usedefault=True, desc=' verbosity level (0=silent)', argstr='-v %f') + noRotate = traits.Bool( + desc= + 'retain original orientation(default behavior will auto-rotate input NII files to LPI orientation)', + argstr='--norotate') + timer = traits.Bool(desc='show timing', argstr='--timer') + + +class BseOutputSpec(TraitedSpec): + outputMRIVolume = File(desc='path/name of brain-masked MRI volume') + outputMaskFile = File(desc='path/name of smooth brain mask') + outputDiffusionFilter = File(desc='path/name of diffusion filter output') + outputEdgeMap = File(desc='path/name of edge map output') + outputDetailedBrainMask = File(desc='path/name of detailed brain mask') + outputCortexFile = File(desc='path/name of cortex file') + + +class Bse(CommandLine): + """ + brain surface extractor (BSE) + This program performs automated skull and scalp removal on T1-weighted MRI volumes. 
+ + http://brainsuite.org/processing/surfaceextraction/bse/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> bse = brainsuite.Bse() + >>> bse.inputs.inputMRIFile = example_data('structural.nii') + >>> results = bse.run() #doctest: +SKIP + + """ + + input_spec = BseInputSpec + output_spec = BseOutputSpec + _cmd = 'bse' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + fileToSuffixMap = { + 'outputMRIVolume': '.bse.nii.gz', + 'outputMaskFile': '.mask.nii.gz' + } + + if name in fileToSuffixMap: + return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class BfcInputSpec(CommandLineInputSpec): + inputMRIFile = File( + mandatory=True, desc='input skull-stripped MRI volume', argstr='-i %s') + inputMaskFile = File(desc='mask file', argstr='-m %s', hash_files=False) + outputMRIVolume = File( + desc= + 'output bias-corrected MRI volume.If unspecified, output file name will be auto generated.', + argstr='-o %s', + hash_files=False, + genfile=True) + outputBiasField = File( + desc='save bias field estimate', argstr='--bias %s', hash_files=False) + outputMaskedBiasField = File( + desc='save bias field estimate (masked)', + argstr='--maskedbias %s', + hash_files=False) + histogramRadius = traits.Int( + desc='histogram radius (voxels)', argstr='-r %d') + biasEstimateSpacing = traits.Int( + desc='bias sample spacing (voxels)', argstr='-s %d') + controlPointSpacing = traits.Int( + desc='control point spacing (voxels)', argstr='-c %d') + splineLambda = traits.Float( + desc='spline stiffness weighting parameter', argstr='-w %f') + histogramType = traits.Enum( + 'ellipse', + 'block', + desc= + 'Options for type of histogram\nellipse: use ellipsoid for ROI histogram\nblock :use block for ROI histogram', + argstr='%s') + iterativeMode = traits.Bool( + desc='iterative mode (overrides -r, -s, -c, -w settings)', + argstr='--iterate') + correctionScheduleFile = File( + desc='list of parameters ', argstr='--schedule %s') + biasFieldEstimatesOutputPrefix = traits.Str( + desc='save iterative bias field estimates as .n.field.nii.gz', + argstr='--biasprefix %s') + correctedImagesOutputPrefix = traits.Str( + desc='save iterative corrected images as .n.bfc.nii.gz', + argstr='--prefix %s') + correctWholeVolume = traits.Bool( + desc='apply correction field to entire volume', argstr='--extrapolate') + minBias = traits.Float( + 0.5, + usedefault=True, + desc='minimum allowed bias value', + argstr='-L %f') + maxBias = traits.Float( + 1.5, + usedefault=True, + desc='maximum allowed bias value', + argstr='-U %f') + biasRange = traits.Enum( + "low", + "medium", + "high", + desc= + 'Preset options for bias_model\n low: small bias model [0.95,1.05]\n' + 'medium: medium bias model [0.90,1.10]\n high: high bias model [0.80,1.20]', + argstr='%s') + intermediate_file_type = traits.Enum( + "analyze", + "nifti", + "gzippedAnalyze", + "gzippedNifti", + desc='Options for the format in which intermediate files are generated', + argstr='%s') + convergenceThreshold = traits.Float( + desc='convergence threshold', argstr='--eps %f') + biasEstimateConvergenceThreshold = traits.Float( + desc='bias estimate convergence threshold (values > 0.1 disable)', + argstr='--beps %f') + verbosityLevel = traits.Int( + desc='verbosity level (0=silent)', argstr='-v %d') + timer = traits.Bool(desc='display timing 
information', argstr='--timer') + + +class BfcOutputSpec(TraitedSpec): + outputMRIVolume = File(desc='path/name of output file') + outputBiasField = File(desc='path/name of bias field output file') + outputMaskedBiasField = File(desc='path/name of masked bias field output') + correctionScheduleFile = File(desc='path/name of schedule file') + + +class Bfc(CommandLine): + """ + bias field corrector (BFC) + This program corrects gain variation in T1-weighted MRI. + + http://brainsuite.org/processing/surfaceextraction/bfc/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> bfc = brainsuite.Bfc() + >>> bfc.inputs.inputMRIFile = example_data('structural.nii') + >>> bfc.inputs.inputMaskFile = example_data('mask.nii') + >>> results = bfc.run() #doctest: +SKIP + + """ + + input_spec = BfcInputSpec + output_spec = BfcOutputSpec + _cmd = 'bfc' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + fileToSuffixMap = {'outputMRIVolume': '.bfc.nii.gz'} + if name in fileToSuffixMap: + return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) + + return None + + def _format_arg(self, name, spec, value): + if name == 'histogramType': + return spec.argstr % { + "ellipse": "--ellipse", + "block": "--block" + }[value] + if name == 'biasRange': + return spec.argstr % { + "low": "--low", + "medium": "--medium", + "high": "--high" + }[value] + if name == 'intermediate_file_type': + return spec.argstr % { + "analyze": "--analyze", + "nifti": "--nifti", + "gzippedAnalyze": "--analyzegz", + "gzippedNifti": "--niftigz" + }[value] + + return super(Bfc, self)._format_arg(name, spec, value) + + def _list_outputs(self): + return l_outputs(self) + + +class PvcInputSpec(CommandLineInputSpec): + inputMRIFile = File(mandatory=True, desc='MRI file', argstr='-i %s') + inputMaskFile = File(desc='brain mask file', argstr='-m %s') + outputLabelFile = File( + desc= + 'output label file. If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + outputTissueFractionFile = File( + desc='output tissue fraction file', argstr='-f %s', genfile=True) + spatialPrior = traits.Float(desc='spatial prior strength', argstr='-l %f') + verbosity = traits.Int(desc='verbosity level (0 = silent)', argstr='-v %d') + threeClassFlag = traits.Bool( + desc='use a three-class (CSF=0,GM=1,WM=2) labeling', argstr='-3') + timer = traits.Bool(desc='time processing', argstr='--timer') + + +class PvcOutputSpec(TraitedSpec): + outputLabelFile = File(desc='path/name of label file') + outputTissueFractionFile = File(desc='path/name of tissue fraction file') + + +class Pvc(CommandLine): + """ + partial volume classifier (PVC) tool. + This program performs voxel-wise tissue classification T1-weighted MRI. + Image should be skull-stripped and bias-corrected before tissue classification. 
+ + http://brainsuite.org/processing/surfaceextraction/pvc/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> pvc = brainsuite.Pvc() + >>> pvc.inputs.inputMRIFile = example_data('structural.nii') + >>> pvc.inputs.inputMaskFile = example_data('mask.nii') + >>> results = pvc.run() #doctest: +SKIP + + """ + + input_spec = PvcInputSpec + output_spec = PvcOutputSpec + _cmd = 'pvc' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + fileToSuffixMap = { + 'outputLabelFile': '.pvc.label.nii.gz', + 'outputTissueFractionFile': '.pvc.frac.nii.gz' + } + if name in fileToSuffixMap: + return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class CerebroInputSpec(CommandLineInputSpec): + inputMRIFile = File( + mandatory=True, desc='input 3D MRI volume', argstr='-i %s') + inputAtlasMRIFile = File( + mandatory=True, desc='atlas MRI volume', argstr='--atlas %s') + inputAtlasLabelFile = File( + mandatory=True, desc='atlas labeling', argstr='--atlaslabels %s') + inputBrainMaskFile = File(desc='brain mask file', argstr='-m %s') + outputCerebrumMaskFile = File( + desc= + 'output cerebrum mask volume. If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + outputLabelVolumeFile = File( + desc= + 'output labeled hemisphere/cerebrum volume. If unspecified, output file name will be auto generated.', + argstr='-l %s', + genfile=True) + costFunction = traits.Int(2, usedefault=True, desc='0,1,2', argstr='-c %d') + useCentroids = traits.Bool( + desc='use centroids of data to initialize position', + argstr='--centroids') + outputAffineTransformFile = File( + desc='save affine transform to file.', argstr='--air %s', genfile=True) + outputWarpTransformFile = File( + desc='save warp transform to file.', argstr='--warp %s', genfile=True) + verbosity = traits.Int(desc='verbosity level (0=silent)', argstr='-v %d') + linearConvergence = traits.Float( + desc='linear convergence', argstr='--linconv %f') + warpLabel = traits.Int( + desc='warp order (2,3,4,5,6,7,8)', argstr='--warplevel %d') + warpConvergence = traits.Float( + desc='warp convergence', argstr='--warpconv %f') + keepTempFiles = traits.Bool( + desc="don't remove temporary files", argstr='--keep') + tempDirectory = traits.Str( + desc='specify directory to use for temporary files', + argstr='--tempdir %s') + tempDirectoryBase = traits.Str( + desc='create a temporary directory within this directory', + argstr='--tempdirbase %s') + + +class CerebroOutputSpec(TraitedSpec): + outputCerebrumMaskFile = File(desc='path/name of cerebrum mask file') + outputLabelVolumeFile = File(desc='path/name of label mask file') + outputAffineTransformFile = File(desc='path/name of affine transform file') + outputWarpTransformFile = File(desc='path/name of warp transform file') + + +class Cerebro(CommandLine): + """ + Cerebrum/cerebellum labeling tool + This program performs automated labeling of cerebellum and cerebrum in T1 MRI. + Input MRI should be skull-stripped or a brain-only mask should be provided. 
+ + + http://brainsuite.org/processing/surfaceextraction/cerebrum/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> cerebro = brainsuite.Cerebro() + >>> cerebro.inputs.inputMRIFile = example_data('structural.nii') + >>> cerebro.inputs.inputAtlasMRIFile = 'atlasMRIVolume.img' + >>> cerebro.inputs.inputAtlasLabelFile = 'atlasLabels.img' + >>> cerebro.inputs.inputBrainMaskFile = example_data('mask.nii') + >>> results = cerebro.run() #doctest: +SKIP + + """ + + input_spec = CerebroInputSpec + output_spec = CerebroOutputSpec + _cmd = 'cerebro' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + fileToSuffixMap = { + 'outputCerebrumMaskFile': '.cerebrum.mask.nii.gz', + 'outputLabelVolumeFile': '.hemi.label.nii.gz', + 'outputWarpTransformFile': '.warp', + 'outputAffineTransformFile': '.air' + } + if name in fileToSuffixMap: + return getFileName(self.inputs.inputMRIFile, fileToSuffixMap[name]) + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class CortexInputSpec(CommandLineInputSpec): + inputHemisphereLabelFile = File( + mandatory=True, desc='hemisphere / lobe label volume', argstr='-h %s') + outputCerebrumMask = File( + desc= + 'output structure mask. If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + inputTissueFractionFile = File( + mandatory=True, + desc='tissue fraction file (32-bit float)', + argstr='-f %s') + tissueFractionThreshold = traits.Float( + 50.0, + usedefault=True, + desc='tissue fraction threshold (percentage)', + argstr='-p %f') + computeWGBoundary = traits.Bool( + True, usedefault=True, desc='compute WM/GM boundary', argstr='-w') + computeGCBoundary = traits.Bool( + desc='compute GM/CSF boundary', argstr='-g') + includeAllSubcorticalAreas = traits.Bool( + True, + usedefault=True, + desc='include all subcortical areas in WM mask', + argstr='-a') + verbosity = traits.Int(desc='verbosity level', argstr='-v %d') + timer = traits.Bool(desc='timing function', argstr='--timer') + + +class CortexOutputSpec(TraitedSpec): + outputCerebrumMask = File(desc='path/name of cerebrum mask') + + +class Cortex(CommandLine): + """ + cortex extractor + This program produces a cortical mask using tissue fraction estimates + and a co-registered cerebellum/hemisphere mask. + + http://brainsuite.org/processing/surfaceextraction/cortex/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> cortex = brainsuite.Cortex() + >>> cortex.inputs.inputHemisphereLabelFile = example_data('mask.nii') + >>> cortex.inputs.inputTissueFractionFile = example_data('tissues.nii.gz') + >>> results = cortex.run() #doctest: +SKIP + + """ + + input_spec = CortexInputSpec + output_spec = CortexOutputSpec + _cmd = 'cortex' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputCerebrumMask': + return getFileName(self.inputs.inputHemisphereLabelFile, + '.init.cortex.mask.nii.gz') + return None + + def _list_outputs(self): + return l_outputs(self) + + +class ScrubmaskInputSpec(CommandLineInputSpec): + inputMaskFile = File( + mandatory=True, desc='input structure mask file', argstr='-i %s') + outputMaskFile = File( + desc= + 'output structure mask file. 
If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + backgroundFillThreshold = traits.Int( + 2, usedefault=True, desc='background fill threshold', argstr='-b %d') + foregroundTrimThreshold = traits.Int( + 0, usedefault=True, desc='foreground trim threshold', argstr='-f %d') + numberIterations = traits.Int(desc='number of iterations', argstr='-n %d') + verbosity = traits.Int(desc='verbosity (0=silent)', argstr='-v %d') + timer = traits.Bool(desc='timing function', argstr='--timer') + + +class ScrubmaskOutputSpec(TraitedSpec): + outputMaskFile = File(desc='path/name of mask file') + + +class Scrubmask(CommandLine): + """ + ScrubMask tool + scrubmask filters binary masks to trim loosely connected voxels that may + result from segmentation errors and produce bumps on tessellated surfaces. + + http://brainsuite.org/processing/surfaceextraction/scrubmask/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> scrubmask = brainsuite.Scrubmask() + >>> scrubmask.inputs.inputMaskFile = example_data('mask.nii') + >>> results = scrubmask.run() #doctest: +SKIP + + """ + input_spec = ScrubmaskInputSpec + output_spec = ScrubmaskOutputSpec + _cmd = 'scrubmask' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputMaskFile': + return getFileName(self.inputs.inputMaskFile, + '.cortex.scrubbed.mask.nii.gz') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class TcaInputSpec(CommandLineInputSpec): + inputMaskFile = File( + mandatory=True, desc='input mask volume', argstr='-i %s') + outputMaskFile = File( + desc= + 'output mask volume. If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + minCorrectionSize = traits.Int( + 2500, usedefault=True, desc='minimum correction size', argstr='-m %d') + maxCorrectionSize = traits.Int( + desc='maximum correction size', argstr='-n %d') + foregroundDelta = traits.Int( + 20, usedefault=True, desc='foreground delta', argstr='--delta %d') + verbosity = traits.Int(desc='verbosity (0 = quiet)', argstr='-v %d') + timer = traits.Bool(desc='timing function', argstr='--timer') + + +class TcaOutputSpec(TraitedSpec): + outputMaskFile = File(desc='path/name of mask file') + + +class Tca(CommandLine): + """ + topological correction algorithm (TCA) + This program removes topological handles from a binary object. + + http://brainsuite.org/processing/surfaceextraction/tca/ + + Examples + -------- + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> tca = brainsuite.Tca() + >>> tca.inputs.inputMaskFile = example_data('mask.nii') + >>> results = tca.run() #doctest: +SKIP + + """ + input_spec = TcaInputSpec + output_spec = TcaOutputSpec + _cmd = 'tca' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputMaskFile': + return getFileName(self.inputs.inputMaskFile, + '.cortex.tca.mask.nii.gz') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class DewispInputSpec(CommandLineInputSpec): + inputMaskFile = File(mandatory=True, desc='input file', argstr='-i %s') + outputMaskFile = File( + desc= + 'output file.
If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + verbosity = traits.Int(desc='verbosity', argstr='-v %d') + sizeThreshold = traits.Int(desc='size threshold', argstr='-t %d') + maximumIterations = traits.Int( + desc='maximum number of iterations', argstr='-n %d') + timer = traits.Bool(desc='time processing', argstr='--timer') + + +class DewispOutputSpec(TraitedSpec): + outputMaskFile = File(desc='path/name of mask file') + + +class Dewisp(CommandLine): + """ + dewisp + removes wispy tendril structures from cortex model binary masks. + It does so based on graph theoretic analysis of connected components, + similar to TCA. Each branch of the structure graph is analyzed to determine + pinch points that indicate a likely error in segmentation that attaches noise + to the image. The pinch threshold determines how many voxels the cross-section + can be before it is considered part of the image. + + http://brainsuite.org/processing/surfaceextraction/dewisp/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> dewisp = brainsuite.Dewisp() + >>> dewisp.inputs.inputMaskFile = example_data('mask.nii') + >>> results = dewisp.run() #doctest: +SKIP + + """ + + input_spec = DewispInputSpec + output_spec = DewispOutputSpec + _cmd = 'dewisp' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputMaskFile': + return getFileName(self.inputs.inputMaskFile, + '.cortex.dewisp.mask.nii.gz') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class DfsInputSpec(CommandLineInputSpec): + inputVolumeFile = File( + mandatory=True, desc='input 3D volume', argstr='-i %s') + outputSurfaceFile = File( + desc= + 'output surface mesh file. If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + inputShadingVolume = File( + desc='shade surface model with data from image volume', argstr='-c %s') + smoothingIterations = traits.Int( + 10, + usedefault=True, + desc='number of smoothing iterations', + argstr='-n %d') + smoothingConstant = traits.Float( + 0.5, usedefault=True, desc='smoothing constant', argstr='-a %f') + curvatureWeighting = traits.Float( + 5.0, usedefault=True, desc='curvature weighting', argstr='-w %f') + scalingPercentile = traits.Float(desc='scaling percentile', argstr='-f %f') + nonZeroTessellation = traits.Bool( + desc='tessellate non-zero voxels', + argstr='-nz', + xor=('nonZeroTessellation', 'specialTessellation')) + tessellationThreshold = traits.Float( + desc= + 'To be used with specialTessellation. Set this value first, then set specialTessellation value.\nUsage: tessellate voxels greater_than, less_than, or equal_to ', + argstr='%f') + specialTessellation = traits.Enum( + 'greater_than', + 'less_than', + 'equal_to', + desc= + 'To avoid throwing a UserWarning, set tessellationThreshold first. 
Then set this attribute.\nUsage: tessellate voxels greater_than, less_than, or equal_to ', + argstr='%s', + xor=('nonZeroTessellation', 'specialTessellation'), + requires=['tessellationThreshold'], + position=-1) + zeroPadFlag = traits.Bool( + desc='zero-pad volume (avoids clipping at edges)', argstr='-z') + noNormalsFlag = traits.Bool( + desc='do not compute vertex normals', argstr='--nonormals') + postSmoothFlag = traits.Bool( + desc='smooth vertices after coloring', argstr='--postsmooth') + verbosity = traits.Int(desc='verbosity (0 = quiet)', argstr='-v %d') + timer = traits.Bool(desc='timing function', argstr='--timer') + + +class DfsOutputSpec(TraitedSpec): + outputSurfaceFile = File(desc='path/name of surface file') + + +class Dfs(CommandLine): + """ + Surface Generator + Generates mesh surfaces using an isosurface algorithm. + + http://brainsuite.org/processing/surfaceextraction/inner-cortical-surface/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> dfs = brainsuite.Dfs() + >>> dfs.inputs.inputVolumeFile = example_data('structural.nii') + >>> results = dfs.run() #doctest: +SKIP + + """ + + input_spec = DfsInputSpec + output_spec = DfsOutputSpec + _cmd = 'dfs' + + def _format_arg(self, name, spec, value): + if name == 'tessellationThreshold': + return '' # blank argstr + if name == 'specialTessellation': + threshold = self.inputs.tessellationThreshold + return spec.argstr % { + "greater_than": ''.join(("-gt %f" % threshold)), + "less_than": ''.join(("-lt %f" % threshold)), + "equal_to": ''.join(("-eq %f" % threshold)) + }[value] + return super(Dfs, self)._format_arg(name, spec, value) + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputSurfaceFile': + return getFileName(self.inputs.inputVolumeFile, + '.inner.cortex.dfs') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class PialmeshInputSpec(CommandLineInputSpec): + inputSurfaceFile = File(mandatory=True, desc='input file', argstr='-i %s') + outputSurfaceFile = File( + desc= + 'output file. 
If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + verbosity = traits.Int(desc='verbosity', argstr='-v %d') + inputTissueFractionFile = File( + mandatory=True, + desc='floating point (32) tissue fraction image', + argstr='-f %s') + numIterations = traits.Int( + 100, usedefault=True, desc='number of iterations', argstr='-n %d') + searchRadius = traits.Float( + 1, usedefault=True, desc='search radius', argstr='-r %f') + stepSize = traits.Float( + 0.4, usedefault=True, desc='step size', argstr='-s %f') + inputMaskFile = File( + mandatory=True, + desc='restrict growth to mask file region', + argstr='-m %s') + maxThickness = traits.Float( + 20, + usedefault=True, + desc='maximum allowed tissue thickness', + argstr='--max %f') + tissueThreshold = traits.Float( + 1.05, usedefault=True, desc='tissue threshold', argstr='-t %f') + # output interval is not an output -- it specifies how frequently the + # output surfaces are generated + outputInterval = traits.Int( + 10, usedefault=True, desc='output interval', argstr='--interval %d') + exportPrefix = traits.Str( + desc='prefix for exporting surfaces if interval is set', + argstr='--prefix %s') + laplacianSmoothing = traits.Float( + 0.025, + usedefault=True, + desc='apply Laplacian smoothing', + argstr='--smooth %f') + timer = traits.Bool(desc='show timing', argstr='--timer') + recomputeNormals = traits.Bool( + desc='recompute normals at each iteration', argstr='--norm') + normalSmoother = traits.Float( + 0.2, + usedefault=True, + desc='strength of normal smoother.', + argstr='--nc %f') + tangentSmoother = traits.Float( + desc='strength of tangential smoother.', argstr='--tc %f') + + +class PialmeshOutputSpec(TraitedSpec): + outputSurfaceFile = File(desc='path/name of surface file') + + +class Pialmesh(CommandLine): + """ + pialmesh + computes a pial surface model using an inner WM/GM mesh and a tissue fraction map. + + http://brainsuite.org/processing/surfaceextraction/pial/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> pialmesh = brainsuite.Pialmesh() + >>> pialmesh.inputs.inputSurfaceFile = 'input_mesh.dfs' + >>> pialmesh.inputs.inputTissueFractionFile = 'frac_file.nii.gz' + >>> pialmesh.inputs.inputMaskFile = example_data('mask.nii') + >>> results = pialmesh.run() #doctest: +SKIP + + """ + + input_spec = PialmeshInputSpec + output_spec = PialmeshOutputSpec + _cmd = 'pialmesh' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputSurfaceFile': + return getFileName(self.inputs.inputSurfaceFile, + '.pial.cortex.dfs') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class HemisplitInputSpec(CommandLineInputSpec): + inputSurfaceFile = File( + mandatory=True, desc='input surface', argstr='-i %s') + inputHemisphereLabelFile = File( + mandatory=True, desc='input hemisphere label volume', argstr='-l %s') + outputLeftHemisphere = File( + desc= + 'output surface file, left hemisphere. If unspecified, output file name will be auto generated.', + argstr='--left %s', + genfile=True) + outputRightHemisphere = File( + desc= + 'output surface file, right hemisphere. 
If unspecified, output file name will be auto generated.', + argstr='--right %s', + genfile=True) + pialSurfaceFile = File( + desc='pial surface file -- must have same geometry as input surface', + argstr='-p %s') + outputLeftPialHemisphere = File( + desc= + 'output pial surface file, left hemisphere. If unspecified, output file name will be auto generated.', + argstr='-pl %s', + genfile=True) + outputRightPialHemisphere = File( + desc= + 'output pial surface file, right hemisphere. If unspecified, output file name will be auto generated.', + argstr='-pr %s', + genfile=True) + verbosity = traits.Int(desc='verbosity (0 = silent)', argstr='-v %d') + timer = traits.Bool(desc='timing function', argstr='--timer') + + +class HemisplitOutputSpec(TraitedSpec): + outputLeftHemisphere = File(desc='path/name of left hemisphere') + outputRightHemisphere = File(desc='path/name of right hemisphere') + outputLeftPialHemisphere = File(desc='path/name of left pial hemisphere') + outputRightPialHemisphere = File(desc='path/name of right pial hemisphere') + + +class Hemisplit(CommandLine): + """ + Hemisphere splitter + Splits a surface object into two separate surfaces given an input label volume. + Each vertex is labeled left or right based on the labels being odd (left) or even (right). + The largest contour on the split surface is then found and used as the separation between left and right. + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> hemisplit = brainsuite.Hemisplit() + >>> hemisplit.inputs.inputSurfaceFile = 'input_surf.dfs' + >>> hemisplit.inputs.inputHemisphereLabelFile = 'label.nii' + >>> hemisplit.inputs.pialSurfaceFile = 'pial.dfs' + >>> results = hemisplit.run() #doctest: +SKIP + + """ + + input_spec = HemisplitInputSpec + output_spec = HemisplitOutputSpec + _cmd = 'hemisplit' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + fileToSuffixMap = { + 'outputLeftHemisphere': '.left.inner.cortex.dfs', + 'outputLeftPialHemisphere': '.left.pial.cortex.dfs', + 'outputRightHemisphere': '.right.inner.cortex.dfs', + 'outputRightPialHemisphere': '.right.pial.cortex.dfs' + } + if name in fileToSuffixMap: + return getFileName(self.inputs.inputSurfaceFile, + fileToSuffixMap[name]) + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class SkullfinderInputSpec(CommandLineInputSpec): + inputMRIFile = File(mandatory=True, desc='input file', argstr='-i %s') + inputMaskFile = File( + mandatory=True, + desc='A brain mask file, 8-bit image (0=non-brain, 255=brain)', + argstr='-m %s') + outputLabelFile = File( + desc= + 'output multi-colored label volume segmenting brain, scalp, inner skull & outer skull ' + 'If unspecified, output file name will be auto generated.', + argstr='-o %s', + genfile=True) + verbosity = traits.Int(desc='verbosity', argstr='-v %d') + lowerThreshold = traits.Int( + desc='Lower threshold for segmentation', argstr='-l %d') + upperThreshold = traits.Int( + desc='Upper threshold for segmentation', argstr='-u %d') + surfaceFilePrefix = traits.Str( + desc='if specified, generate surface files for brain, skull, and scalp', + argstr='-s %s') + bgLabelValue = traits.Int( + desc='background label value (0-255)', argstr='--bglabel %d') + scalpLabelValue = traits.Int( + desc='scalp label value (0-255)', argstr='--scalplabel %d') + skullLabelValue = traits.Int( + desc='skull label value (0-255)', argstr='--skulllabel 
%d') + spaceLabelValue = traits.Int( + desc='space label value (0-255)', argstr='--spacelabel %d') + brainLabelValue = traits.Int( + desc='brain label value (0-255)', argstr='--brainlabel %d') + performFinalOpening = traits.Bool( + desc='perform a final opening operation on the scalp mask', + argstr='--finalOpening') + + +class SkullfinderOutputSpec(TraitedSpec): + outputLabelFile = File(desc='path/name of label file') + + +class Skullfinder(CommandLine): + """ + Skull and scalp segmentation algorithm. + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> from nipype.testing import example_data + >>> skullfinder = brainsuite.Skullfinder() + >>> skullfinder.inputs.inputMRIFile = example_data('structural.nii') + >>> skullfinder.inputs.inputMaskFile = example_data('mask.nii') + >>> results = skullfinder.run() #doctest: +SKIP + + """ + input_spec = SkullfinderInputSpec + output_spec = SkullfinderOutputSpec + _cmd = 'skullfinder' + + def _gen_filename(self, name): + inputs = self.inputs.get() + if isdefined(inputs[name]): + return os.path.abspath(inputs[name]) + + if name == 'outputLabelFile': + return getFileName(self.inputs.inputMRIFile, + '.skullfinder.label.nii.gz') + + return None + + def _list_outputs(self): + return l_outputs(self) + + +class SVRegInputSpec(CommandLineInputSpec): + subjectFilePrefix = traits.Str( + argstr='\'%s\'', + mandatory=True, + position=0, + desc= + 'Absolute path and filename prefix of the subjects output from BrainSuite ' + 'Cortical Surface Extraction Sequence') + dataSinkDelay = traits.List( + traits.Str, + argstr='%s', + desc= + 'Connect datasink out_file to dataSinkDelay to delay execution of SVReg ' + 'until dataSink has finished sinking CSE outputs.' + 'For use with parallel processing workflows including Brainsuites Cortical ' + 'Surface Extraction sequence (SVReg requires certain files from Brainsuite ' + 'CSE, which must all be in the pathway specified by subjectFilePrefix. see ' + 'http://brainsuite.org/processing/svreg/usage/ for list of required inputs ' + ) + atlasFilePrefix = traits.Str( + position=1, + argstr='\'%s\'', + desc= + 'Optional: Absolute Path and filename prefix of atlas files and labels to which ' + 'the subject will be registered. If unspecified, SVReg' + 'will use its own included atlas files') + iterations = traits.Int( + argstr='\'-H %d\'', + desc='Assigns a number of iterations in the intensity registration step.' + 'if unspecified, performs 100 iterations') + refineOutputs = traits.Bool( + argstr='\'-r\'', + desc='Refine outputs at the expense of more processing time.') + skipToVolumeReg = traits.Bool( + argstr='\'-s\'', + desc= + 'If surface registration was already performed at an earlier time and the ' + 'user would not like to redo this step, then this flag may be used to skip ' + 'ahead to the volumetric registration. Necessary input files will need to ' + 'be present in the input directory called by the command.') + skipToIntensityReg = traits.Bool( + argstr='\'-p\'', + desc= + 'If the p-harmonic volumetric registration was already performed at an ' + 'earlier time and the user would not like to redo this step, then this ' + 'flag may be used to skip ahead to the intensity registration and ' + 'label transfer step.') + useManualMaskFile = traits.Bool( + argstr='\'-cbm\'', + desc= + 'Can call a manually edited cerebrum mask to limit boundaries. 
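A hedged usage sketch for the Skullfinder interface above; the file names and threshold values are hypothetical, and only the rendered command line is shown, so the skullfinder binary is not required:

from nipype.interfaces import brainsuite

skullfinder = brainsuite.Skullfinder()
skullfinder.inputs.inputMRIFile = 'structural.nii'  # hypothetical input
skullfinder.inputs.inputMaskFile = 'mask.nii'       # hypothetical brain mask
skullfinder.inputs.lowerThreshold = 20              # hypothetical segmentation thresholds
skullfinder.inputs.upperThreshold = 240
# outputLabelFile is auto-generated as <input base>.skullfinder.label.nii.gz
print(skullfinder.cmdline)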
Will ' + 'use file: subbasename.cerebrum.mask.nii.gz. Make sure to correctly ' + 'replace your manually edited mask file in your input folder with the ' + 'correct subbasename.') + curveMatchingInstructions = traits.Str( + argstr='\'-cur %s\'', + desc= + 'Used to take control of the curve matching process between the atlas ' + 'and subject. One can specify the name of the .dfc file and ' + 'the sulcal numbers <#sul> to be used as constraints. ' + 'Example: curveMatchingInstructions = "subbasename.right.dfc 1 2 20"') + useCerebrumMask = traits.Bool( + argstr='\'-C\'', + desc= + 'The cerebrum mask will be used for ' + 'masking the final labels instead of the default pial surface mask. ' + 'Every voxel will be labeled within the cerebrum mask regardless of ' + 'the boundaries of the pial surface.') + pialSurfaceMaskDilation = traits.Int( + argstr='\'-D %d\'', + desc= + 'Cortical volume labels found in file output subbasename.svreg.label.nii.gz ' + 'find their boundaries by using the pial surface and then dilating by 1 voxel. ' + 'Use this flag in order to control the number of pial surface mask dilations ' + '(i.e. -D 0 will apply no voxel dilation)') + keepIntermediates = traits.Bool( + argstr='\'-k\'', + desc='Keep the intermediate files after the svreg sequence is complete.' + ) + _XOR_verbosity = ('verbosity0', 'verbosity1', 'verbosity2') + verbosity0 = traits.Bool( + argstr='\'-v0\'', + xor=_XOR_verbosity, + desc='no messages will be reported') + verbosity1 = traits.Bool( + argstr='\'-v1\'', + xor=_XOR_verbosity, + desc= + 'messages will be reported but not the iteration-wise detailed messages' + ) + verbosity2 = traits.Bool( + argstr='\'v2\'', + xor=_XOR_verbosity, + desc='all the messages, including per-iteration ones, will be displayed') + shortMessages = traits.Bool( + argstr='\'-gui\'', desc='Short messages instead of detailed messages') + displayModuleName = traits.Bool( + argstr='\'-m\'', desc='Module name will be displayed in the messages') + displayTimestamps = traits.Bool( + argstr='\'-t\'', desc='Timestamps will be displayed in the messages') + skipVolumetricProcessing = traits.Bool( + argstr='\'-S\'', + desc= + 'Only surface registration and labeling will be performed. Volumetric ' + 'processing will be skipped.') + useMultiThreading = traits.Bool( + argstr='\'-P\'', + desc= + 'If multiple CPUs are present on the system, the code will try to use ' + 'multithreading to make the execution fast.') + useSingleThreading = traits.Bool( + argstr='\'-U\'', desc='Use single-threaded mode.') + + +class SVReg(CommandLine): + """ + surface and volume registration (svreg) + This program registers a subject's BrainSuite-processed volume and surfaces + to an atlas, allowing for automatic labelling of volume and surface ROIs. + + For more information, please see: + http://brainsuite.org/processing/svreg/usage/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> svreg = brainsuite.SVReg() + >>> svreg.inputs.subjectFilePrefix = '/home/user/btestsubject/testsubject' + >>> svreg.inputs.refineOutputs = True + >>> svreg.inputs.skipToVolumeReg = False + >>> svreg.inputs.
keepIntermediates = True + >>> svreg.inputs.verbosity2 = True + >>> svreg.inputs.displayTimestamps = True + >>> svreg.inputs.useSingleThreading = True + >>> results = svreg.run() #doctest: +SKIP + + + """ + + input_spec = SVRegInputSpec + _cmd = 'svreg.sh' + + def _format_arg(self, name, spec, value): + if name == 'subjectFilePrefix' or name == 'atlasFilePrefix' or name == 'curveMatchingInstructions': + return spec.argstr % os.path.expanduser(value) + if name == 'dataSinkDelay': + return spec.argstr % '' + return super(SVReg, self)._format_arg(name, spec, value) + + +class BDPInputSpec(CommandLineInputSpec): + bfcFile = File( + argstr='%s', + mandatory=True, + position=0, + xor=['noStructuralRegistration'], + desc= + 'Specify absolute path to file produced by bfc. By default, bfc produces the file in ' + 'the format: prefix.bfc.nii.gz') + noStructuralRegistration = traits.Bool( + argstr='--no-structural-registration', + mandatory=True, + position=0, + xor=['bfcFile'], + desc= + 'Allows BDP to work without any structural input. This can useful when ' + 'one is only interested in diffusion modelling part of BDP. With this ' + 'flag only fieldmap-based distortion correction is supported. ' + 'outPrefix can be used to specify fileprefix of the output ' + 'filenames. Change dwiMask to define region of interest ' + 'for diffusion modelling.') + inputDiffusionData = File( + argstr='--nii %s', + mandatory=True, + position=-2, + desc= + 'Specifies the absolute path and filename of the input diffusion data in 4D NIfTI-1 ' + 'format. The flag must be followed by the filename. Only NIfTI-1 files ' + 'with extension .nii or .nii.gz are supported. Furthermore, either ' + 'bMatrixFile, or a combination of both bValueFile and diffusionGradientFile ' + 'must be used to provide the necessary b-matrices/b-values and gradient vectors. ' + ) + bMatrixFile = File( + argstr='--bmat %s', + mandatory=True, + xor=['BVecBValPair'], + position=-1, + desc= + 'Specifies the absolute path and filename of the file containing b-matrices for ' + 'diffusion-weighted scans. The flag must be followed by the filename. ' + 'This file must be a plain text file containing 3x3 matrices for each ' + 'diffusion encoding direction. It should contain zero matrices ' + 'corresponding to b=0 images. This file usually has ".bmat" as its ' + 'extension, and can be used to provide BDP with the more-accurate ' + 'b-matrices as saved by some proprietary scanners. The b-matrices ' + 'specified by the file must be in the voxel coordinates of the input ' + 'diffusion weighted image (NIfTI file). In case b-matrices are not known/calculated, ' + 'bvec and .bval files can be used instead (see diffusionGradientFile and bValueFile). ' + ) + BVecBValPair = traits.List( + traits.Str, + minlen=2, + maxlen=2, + mandatory=True, + position=-1, + xor=['bMatrixFile'], + argstr='--bvec %s --bval %s', + desc= + 'Must input a list containing first the BVector file, then the BValue file (both must be absolute paths)\n' + 'Example: bdp.inputs.BVecBValPair = [\'/directory/subdir/prefix.dwi.bvec\', \'/directory/subdir/prefix.dwi.bval\'] ' + 'The first item in the list specifies the filename of the file containing b-values for the ' + 'diffusion scan. 
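The _format_arg override in SVReg above does only two things: it expands a leading ~ in the prefix-like arguments before applying the quoted argstr, and it collapses dataSinkDelay to an empty string. A standalone restatement with a hypothetical value:

import os

argstr = "'%s'"                       # the same pattern used by subjectFilePrefix
value = '~/btestsubject/testsubject'  # hypothetical prefix
# expanduser turns '~' into the user's home directory before quoting
print(argstr % os.path.expanduser(value))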
The b-value file must be a plain-text file and usually has an ' + 'extension of .bval\n' + 'The second item in the list specifies the filename of the file containing the diffusion gradient ' + 'directions (specified in the voxel coordinates of the input ' + 'diffusion-weighted image)The b-vectors file must be a plain text file and ' + 'usually has an extension of .bvec ') + dataSinkDelay = traits.List( + traits.Str, + argstr='%s', + desc= + 'For use in parallel processing workflows including Brainsuite Cortical ' + 'Surface Extraction sequence. Connect datasink out_file to dataSinkDelay ' + 'to delay execution of BDP until dataSink has finished sinking outputs. ' + 'In particular, BDP may be run after BFC has finished. For more information ' + 'see http://brainsuite.org/processing/diffusion/pipeline/') + phaseEncodingDirection = traits.Enum( + 'x', + 'x-', + 'y', + 'y-', + 'z', + 'z-', + argstr='--dir=%s', + desc= + 'Specifies the phase-encoding direction of the EPI (diffusion) images. ' + 'It is same as the dominant direction of distortion in the images. This ' + 'information is used to constrain the distortion correction along the ' + 'specified direction. Directions are represented by any one of x, x-, y, ' + 'y-, z or z-. "x" direction increases towards the right side of the ' + 'subject, while "x-" increases towards the left side of the subject. ' + 'Similarly, "y" and "y-" are along the anterior-posterior direction of ' + 'the subject, and "z" & "z-" are along the inferior-superior direction. ' + 'When this flag is not used, BDP uses "y" as the default phase-encoding ' + 'direction. ') + echoSpacing = traits.Float( + argstr='--echo-spacing=%f', + desc= + 'Sets the echo spacing to t seconds, which is used for fieldmap-based ' + 'distortion correction. This flag is required when using fieldmapCorrection' + ) + bValRatioThreshold = traits.Float( + argstr='--bval-ratio-threshold %f', + desc= + 'Sets a threshold which is used to determine b=0 images. When there are ' + 'no diffusion weighted image with b-value of zero, then BDP tries to use ' + 'diffusion weighted images with a low b-value in place of b=0 image. The ' + 'diffusion images with minimum b-value is used as b=0 image only if the ' + 'ratio of the maximum and minimum b-value is more than the specified ' + 'threshold. A lower value of threshold will allow diffusion images with ' + 'higher b-value to be used as b=0 image. The default value of this ' + 'threshold is set to 45, if this trait is not set. ') + estimateTensors = traits.Bool( + argstr='--tensors', + desc= + 'Estimates diffusion tensors using a weighted log-linear estimation and ' + 'saves derived diffusion tensor parameters (FA, MD, axial, radial, L2, ' + 'L3). This is the default behavior if no diffusion modeling flags are ' + 'specified. The estimated diffusion tensors can be visualized by loading ' + 'the saved *.eig.nii.gz file in BrainSuite. BDP reports diffusivity (MD, ' + 'axial, radial, L2 and L3) in a unit which is reciprocal inverse of the ' + 'unit of input b-value. ') + estimateODF_FRACT = traits.Bool( + argstr='--FRACT', + desc= + 'Estimates ODFs using the Funk-Radon and Cosine Transformation (FRACT). ' + 'The outputs are saved in a separate directory with name "FRACT" and the ' + 'ODFs can be visualized by loading the saved ".odf" file in BrainSuite. ' + ) + estimateODF_FRT = traits.Bool( + argstr='--FRT', + desc= + 'Estimates ODFs using Funk-Radon Transformation (FRT). 
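Pulling the BDP traits described so far together, here is a minimal sketch of setting the mandatory inputs plus a phase-encoding direction. All paths are hypothetical, and only the command line is rendered, so bdp.sh itself is not needed:

from nipype.interfaces import brainsuite

bdp = brainsuite.BDP()
bdp.inputs.bfcFile = 'prefix.bfc.nii.gz'             # hypothetical bfc output
bdp.inputs.inputDiffusionData = 'prefix.dwi.nii.gz'  # hypothetical 4D NIfTI
bdp.inputs.BVecBValPair = ['prefix.dwi.bvec', 'prefix.dwi.bval']
bdp.inputs.phaseEncodingDirection = 'y-'             # dominant distortion direction
# _format_arg expands the pair into '--bvec ... --bval ...'
print(bdp.cmdline)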
The coefficient ' + 'maps for ODFs are saved in a separate directory with name "FRT" and the ' + 'ODFs can be visualized by loading the saved ".odf" file in BrainSuite. ' + 'The derived generalized-FA (GFA) maps are also saved in the output ' + 'directory. ') + estimateODF_3DShore = traits.Float( + argstr='--3dshore --diffusion_time_ms %f', + desc='Estimates ODFs using 3Dshore. Pass in diffusion time, in ms') + odfLambta = traits.Bool( + argstr='--odf-lambda ', + desc= + 'Sets the regularization parameter, lambda, of the Laplace-Beltrami ' + 'operator while estimating ODFs. The default value is set to 0.006 . This ' + 'can be used to set the appropriate regularization for the input ' + 'diffusion data. ') + t1Mask = File( + argstr='--t1-mask %s', + desc= + 'Specifies the filename of the brain-mask file for input T1-weighted ' + 'image. This mask can be same as the brain mask generated during ' + 'BrainSuite extraction sequence. For best results, the mask should not ' + 'include any extra-meningial tissues from T1-weighted image. The mask ' + 'must be in the same coordinates as input T1-weighted image (i.e. should ' + 'overlay correctly with input .bfc.nii.gz file in ' + 'BrainSuite). This mask is used for co-registration and defining brain ' + 'boundary for statistics computation. The mask can be generated and/or ' + 'edited in BrainSuite. In case outputDiffusionCoordinates is also ' + 'used, this mask is first transformed to diffusion coordinate and the ' + 'transformed mask is used for defining brain boundary in diffusion ' + 'coordinates. When t1Mask is not set, BDP will try to use ' + 'fileprefix>.mask.nii.gz as brain-mask. If .mask.nii.gz is ' + 'not found, then BDP will use the input .bfc.nii.gz itself as ' + 'mask (i.e. all non-zero voxels in .bfc.nii.gz is assumed to ' + 'constitute brain mask). ') + dwiMask = File( + argstr='--dwi-mask %s', + desc= + 'Specifies the filename of the brain-mask file for diffusion data. This ' + 'mask is used only for co-registration purposes and can affect overall ' + 'quality of co-registration (see t1Mask for definition of brain mask ' + 'for statistics computation). The mask must be a 3D volume and should be ' + 'in the same coordinates as input Diffusion file/data (i.e. should ' + 'overlay correctly with input diffusion data in BrainSuite). For best ' + 'results, the mask should include only brain voxels (CSF voxels around ' + 'brain is also acceptable). When this flag is not used, BDP will generate ' + 'a pseudo mask using first b=0 image volume and would save it as ' + 'fileprefix>.dwi.RSA.mask.nii.gz. In case co-registration is not ' + 'accurate with automatically generated pseudo mask, BDP should be re-run ' + 'with a refined diffusion mask. The mask can be generated and/or edited ' + 'in BrainSuite. ') + rigidRegMeasure = traits.Enum( + 'MI', + 'INVERSION', + 'BDP', + argstr='--rigid-reg-measure %s', + desc='Defines the similarity measure to be used for rigid registration. ' + 'Possible measures are "MI", "INVERSION" and "BDP". MI measure uses ' + 'normalized mutual information based cost function. INVERSION measure ' + 'uses simpler cost function based on sum of squared difference by ' + 'exploiting the approximate inverse-contrast relationship in T1- and ' + 'T2-weighted images. BDP measure combines MI and INVERSION. It starts ' + 'with INVERSION measure and refines the result with MI measure. BDP is ' + 'the default measure when this trait is not set. 
') + dcorrRegMeasure = traits.Enum( + 'MI', + 'INVERSION-EPI', + 'INVERSION-T1', + 'INVERSION-BOTH', + 'BDP', + argstr='--dcorr-reg-method %s', + desc='Defines the method for registration-based distortion correction. ' + 'Possible methods are "MI", "INVERSION-EPI", "INVERSION-T1", ' + 'INVERSION-BOTH", and "BDP". MI method uses normalized mutual ' + 'information based cost-function while estimating the distortion field. ' + 'INVERSION-based method uses simpler cost function based on sum of ' + 'squared difference by exploiting the known approximate contrast ' + 'relationship in T1- and T2-weighted images. T2-weighted EPI is inverted ' + 'when INVERSION-EPI is used; T1-image is inverted when INVERSION-T1 is ' + 'used; and both are inverted when INVERSION-BOTH is used. BDP method add ' + 'the MI-based refinement after the correction using INVERSION-BOTH ' + 'method. BDP is the default method when this trait is not set. ') + dcorrWeight = traits.Float( + argstr='--dcorr-regularization-wt %f', + desc= + 'Sets the (scalar) weighting parameter for regularization penalty in ' + 'registration-based distortion correction. Set this trait to a single, non-negative ' + 'number which specifies the weight. A large regularization weight encourages ' + 'smoother distortion field at the cost of low measure of image similarity ' + 'after distortion correction. On the other hand, a smaller regularization ' + 'weight can result into higher measure of image similarity but with ' + 'unrealistic and unsmooth distortion field. A weight of 0.5 would reduce ' + 'the penalty to half of the default regularization penalty (By default, this weight ' + 'is set to 1.0). Similarly, a weight of 2.0 ' + 'would increase the penalty to twice of the default penalty. ') + skipDistortionCorr = traits.Bool( + argstr='--no-distortion-correction', + desc='Skips distortion correction completely and performs only a rigid ' + 'registration of diffusion and T1-weighted image. This can be useful when ' + 'the input diffusion images do not have any distortion or they have been ' + 'corrected for distortion. ') + skipNonuniformityCorr = traits.Bool( + argstr='--no-nonuniformity-correction', + desc='Skips intensity non-uniformity correction in b=0 image for ' + 'registration-based distortion correction. The intensity non-uniformity ' + 'correction does not affect any diffusion modeling. ') + skipIntensityCorr = traits.Bool( + argstr='--no-intensity-correction', + xor=['fieldmapCorrectionMethod'], + desc= + 'Disables intensity correction when performing distortion correction. ' + 'Intensity correction can change the noise distribution in the corrected ' + 'image, but it does not affect estimated diffusion parameters like FA, ' + 'etc. ') + fieldmapCorrection = File( + argstr='--fieldmap-correction %s', + requires=['echoSpacing'], + desc= + 'Use an acquired fieldmap for distortion correction. The fieldmap must ' + 'have units of radians/second. Specify the filename of the fieldmap file. ' + 'The field of view (FOV) of the fieldmap scan must cover the FOV of the diffusion ' + 'scan. BDP will try to check the overlap of the FOV of the two scans and ' + 'will issue a warning/error if the diffusion scan"s FOV is not fully ' + 'covered by the fieldmap"s FOV. BDP uses all of the information saved in ' + 'the NIfTI header to compute the FOV. If you get this error and think ' + 'that it is incorrect, then it can be suppressed using the flag ' + 'ignore-fieldmap-FOV. 
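Because fieldmapCorrection declares requires=['echoSpacing'], nipype should refuse to render a command line when the fieldmap is given without an echo spacing. A sketch with hypothetical values (the fieldmap is assumed to be in radians/second, as required above):

from nipype.interfaces import brainsuite

bdp = brainsuite.BDP()
bdp.inputs.bfcFile = 'prefix.bfc.nii.gz'                          # hypothetical
bdp.inputs.inputDiffusionData = 'prefix.dwi.nii.gz'               # hypothetical
bdp.inputs.BVecBValPair = ['prefix.dwi.bvec', 'prefix.dwi.bval']  # hypothetical
bdp.inputs.fieldmapCorrection = 'fieldmap.rads.nii.gz'            # hypothetical, radians/second
bdp.inputs.echoSpacing = 0.00036                                  # hypothetical echo spacing, seconds
print(bdp.cmdline)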
Neither the image matrix size nor the imaging ' + 'grid resolution of the fieldmap needs to be the same as that of the ' + 'diffusion scan, but the fieldmap must be pre-registred to the diffusion ' + 'scan. BDP does NOT align the fieldmap to the diffusion scan, nor does it ' + 'check the alignment of the fieldmap and diffusion scans. Only NIfTI ' + 'files with extension of .nii or .nii.gz are supported. Fieldmap-based ' + 'distortion correction also requires the echoSpacing. Also ' + 'fieldmapCorrectionMethod allows you to define method for ' + 'distortion correction. least squares is the default method. ') + fieldmapCorrectionMethod = traits.Enum( + 'pixelshift', + 'leastsq', + xor=['skipIntensityCorr'], + argstr='--fieldmap-correction-method %s', + desc='Defines the distortion correction method while using fieldmap. ' + 'Possible methods are "pixelshift" and "leastsq". leastsq is the default ' + 'method when this flag is not used. Pixel-shift (pixelshift) method uses ' + 'image interpolation to un-distort the distorted diffusion images. Least ' + 'squares (leastsq) method uses a physical model of distortion which is ' + 'more accurate (and more computationally expensive) than pixel-shift ' + 'method.') + ignoreFieldmapFOV = traits.Bool( + argstr='--ignore-fieldmap-fov', + desc= + 'Supresses the error generated by an insufficient field of view of the ' + 'input fieldmap and continues with the processing. It is useful only when ' + 'used with fieldmap-based distortion correction. See ' + 'fieldmap-correction for a detailed explanation. ') + fieldmapSmooth = traits.Float( + argstr='--fieldmap-smooth3=%f', + desc='Applies 3D Gaussian smoothing with a standard deviation of S ' + 'millimeters (mm) to the input fieldmap before applying distortion ' + 'correction. This trait is only useful with ' + 'fieldmapCorrection. Skip this trait for no smoothing. ') + transformDiffusionVolume = File( + argstr='--transform-diffusion-volume %s', + desc='This flag allows to define custom volumes in diffusion coordinate ' + 'which would be transformed into T1 coordinate in a rigid fashion. The ' + 'flag must be followed by the name of either a NIfTI file or of a folder ' + 'that contains one or more NIfTI files. All of the files must be in ' + 'diffusion coordinate, i.e. the files should overlay correctly with the ' + 'diffusion scan in BrainSuite. Only NIfTI files with an extension of .nii ' + 'or .nii.gz are supported. The transformed files are written to the ' + 'output directory with suffix ".T1_coord" in the filename and will not be ' + 'corrected for distortion, if any. The trait transformInterpolation can ' + 'be used to define the type of interpolation that would be used (default ' + 'is set to linear). If you are attempting to transform a label file or ' + 'mask file, use "nearest" interpolation method with transformInterpolation. ' + 'See also transformT1Volume and transformInterpolation') + transformT1Volume = File( + argstr='--transform-t1-volume %s', + desc='Same as transformDiffusionVolume except that files specified must ' + 'be in T1 coordinate, i.e. the files should overlay correctly with the ' + 'input .bfc.nii.gz files in BrainSuite. BDP transforms these ' + 'data/images from T1 coordinate to diffusion coordinate. The transformed ' + 'files are written to the output directory with suffix ".D_coord" in the ' + 'filename. See also transformDiffusionVolume and transformInterpolation. 
' + ) + transformInterpolation = traits.Enum( + 'linear', + 'nearest', + 'cubic', + 'spline', + argstr='--transform-interpolation %s', + desc= + 'Defines the type of interpolation method which would be used while ' + 'transforming volumes defined by transformT1Volume and ' + 'transformDiffusionVolume. Possible methods are "linear", "nearest", ' + '"cubic" and "spline". By default, "linear" interpolation is used. ') + transformT1Surface = File( + argstr='--transform-t1-surface %s', + desc='Similar to transformT1Volume, except that this flag allows ' + 'transforming surfaces (instead of volumes) in T1 coordinate into ' + 'diffusion coordinate in a rigid fashion. The flag must be followed by ' + 'the name of either a .dfs file or of a folder that contains one or more ' + '.dfs files. All of the files must be in T1 coordinate, i.e. the files ' + 'should overlay correctly with the T1-weighted scan in BrainSuite. The ' + 'transformed files are written to the output directory with suffix ' + '".D_coord" in the filename. ') + transformDiffusionSurface = File( + argstr='--transform-diffusion-surface %s', + desc='Same as transformT1Volume, except that the .dfs files specified ' + 'must be in diffusion coordinate, i.e. the surface files should overlay ' + 'correctly with the diffusion scan in BrainSuite. The transformed files ' + 'are written to the output directory with suffix ".T1_coord" in the ' + 'filename. See also transformT1Volume. ') + transformDataOnly = traits.Bool( + argstr='--transform-data-only', + desc= + 'Skip all of the processing (co-registration, distortion correction and ' + 'tensor/ODF estimation) and directly start transformation of defined ' + 'custom volumes, mask and labels (using transformT1Volume, ' + 'transformDiffusionVolume, transformT1Surface, ' + 'transformDiffusionSurface, customDiffusionLabel, ' + 'customT1Label). This flag is useful when BDP was previously run on a ' + 'subject (or <fileprefix>) and some more data (volumes, mask or labels) ' + 'need to be transformed across the T1-diffusion coordinate spaces. This ' + 'assumes that all the necessary files were generated earlier and all of ' + 'the other flags MUST be used in the same way as they were in the initial ' + 'BDP run that processed the data. ') + generateStats = traits.Bool( + argstr='--generate-stats', + desc= + 'Generate ROI-wise statistics of estimated diffusion tensor parameters. ' + 'Units of the reported statistics are the same as those of the estimated ' + 'tensor parameters (see estimateTensors). Mean, variance, and voxel counts of ' + 'white matter (WM), grey matter (GM), and both WM and GM combined are ' + 'written for each estimated parameter in a separate comma-separated value ' + '(csv) file. BDP uses the ROI labels generated by Surface-Volume ' + 'Registration (SVReg) in the BrainSuite extraction sequence. ' + 'Specifically, it looks for labels saved in either ' + '<fileprefix>.svreg.corr.label.nii.gz or <fileprefix>.svreg.label.nii.gz. ' + 'In case both files are present, only the first file is used. Also see ' + 'customDiffusionLabel and customT1Label for specifying your own ' + 'ROIs. It is also possible to forgo computing the SVReg ROI-wise ' + 'statistics and only compute stats with custom labels if the SVReg label is ' + 'missing. BDP also transfers (and saves) the label/mask files to ' + 'appropriate coordinates before computing statistics. Also see ' + 'outputDiffusionCoordinates for outputs in diffusion coordinate and ' + 'forcePartialROIStats for an important note about the field of view of ' + 'diffusion and T1-weighted scans. ') + onlyStats = traits.Bool( + argstr='--generate-only-stats', + desc= + 'Skip all of the processing (co-registration, distortion correction and ' + 'tensor/ODF estimation) and directly start computation of statistics. ' + 'This flag is useful when BDP was previously run on a subject (or ' + '<fileprefix>) and statistics need to be (re-)computed later. This ' + 'assumes that all the necessary files were generated earlier. All of the ' + 'other flags MUST be used in the same way as they were in the initial BDP ' + 'run that processed the data. ') + forcePartialROIStats = traits.Bool( + argstr='--force-partial-roi-stats', + desc= + 'The field of view (FOV) of the diffusion and T1-weighted scans may ' + 'differ significantly in some situations. This may result in partial ' + 'acquisitions of some ROIs in the diffusion scan. By default, BDP does ' + 'not compute statistics for partially acquired ROIs and shows warnings. ' + 'This flag forces computation of statistics for all ROIs, including those ' + 'which are partially acquired. When this flag is used, the number of missing ' + 'voxels is also reported for each ROI in the statistics files. The number of ' + 'missing voxels is reported in the same coordinate system as the ' + 'statistics file. ') + customDiffusionLabel = File( + argstr='--custom-diffusion-label %s', + desc= + 'BDP supports custom ROIs in addition to those generated by BrainSuite ' + '(SVReg) for ROI-wise statistics calculation. The flag must be followed ' + 'by the name of either a file (custom ROI file) or of a folder that ' + 'contains one or more ROI files. All of the files must be in diffusion ' + 'coordinate, i.e. the label files should overlay correctly with the ' + 'diffusion scan in BrainSuite. These input label files are also ' + 'transferred (and saved) to T1 coordinate for statistics in T1 ' + 'coordinate. BDP uses nearest-neighborhood interpolation for this ' + 'transformation. Only NIfTI files, with an extension of .nii or .nii.gz, ' + 'are supported. In order to avoid confusion with other ROI IDs in the ' + 'statistics files, a 5-digit ROI ID is generated for each custom label ' + 'found and the mapping of ID to label file is saved in the file ' + '<fileprefix>.BDP_ROI_MAP.xml. Custom label files can also be generated ' + 'by using the label painter tool in BrainSuite. See also ' + 'customLabelXML. ') + customT1Label = File( + argstr='--custom-t1-label %s', + desc='Same as customDiffusionLabel, except that the label files specified ' + 'must be in T1 coordinate, i.e. the label files should overlay correctly ' + 'with the T1-weighted scan in BrainSuite. If the trait ' + 'outputDiffusionCoordinates is also used then these input label files ' + 'are also transferred (and saved) to diffusion coordinate for statistics ' + 'in diffusion coordinate. BDP uses nearest-neighborhood interpolation for ' + 'this transformation. See also customLabelXML. ') + customLabelXML = File( + argstr='--custom-label-xml %s', + desc= + 'BrainSuite saves a description of the SVReg labels (ROI name, ID, ' + 'color, and description) in an .xml file ' + '(brainsuite_labeldescription.xml). BDP uses the ROI IDs from this xml ' + 'file to report statistics. This flag allows for the use of a custom ' + 'label description xml file. The flag must be followed by an xml ' + 'filename.
This can be useful when you want to limit the ROIs for which ' + 'you compute statistics. You can also use custom xml files to name your ' + 'own ROIs (assign ID"s) for custom labels. BrainSuite can save a label ' + 'description in .xml format after using the label painter tool to create ' + 'a ROI label. The xml file MUST be in the same format as BrainSuite"s ' + 'label description file (see brainsuite_labeldescription.xml for an ' + 'example). When this flag is used, NO 5-digit ROI ID is generated for ' + 'custom label files and NO Statistics will be calculated for ROIs not ' + 'identified in the custom xml file. See also customDiffusionLabel and ' + 'customT1Label.') + outputSubdir = traits.Str( + argstr='--output-subdir %s', + desc= + 'By default, BDP writes out all the output (and intermediate) files in ' + 'the same directory (or folder) as the BFC file. This flag allows to ' + 'specify a sub-directory name in which output (and intermediate) files ' + 'would be written. BDP will create the sub-directory in the same ' + 'directory as BFC file. should be the name of the ' + 'sub-directory without any path. This can be useful to organize all ' + 'outputs generated by BDP in a separate sub-directory. ') + outputDiffusionCoordinates = traits.Bool( + argstr='--output-diffusion-coordinate', + desc= + 'Enables estimation of diffusion tensors and/or ODFs (and statistics if ' + 'applicable) in the native diffusion coordinate in addition to the ' + 'default T1-coordinate. All native diffusion coordinate files are saved ' + 'in a seperate folder named "diffusion_coord_outputs". In case statistics ' + 'computation is required, it will also transform/save all label/mask ' + 'files required to diffusion coordinate (see generateStats for ' + 'details). ') + flagConfigFile = File( + argstr='--flag-conf-file %s', + desc= + 'Uses the defined file to specify BDP flags which can be useful for ' + 'batch processing. A flag configuration file is a plain text file which ' + 'can contain any number of BDP"s optional flags (and their parameters) ' + 'separated by whitespace. Everything coming after # until end-of-line is ' + 'treated as comment and is ignored. If a flag is defined in configuration ' + 'file and is also specified in the command used to run BDP, then the ' + 'later get preference and overrides the definition in configuration ' + 'file. ') + outPrefix = traits.Str( + argstr='--output-fileprefix %s', + desc='Specifies output fileprefix when noStructuralRegistration is ' + 'used. The fileprefix can not start with a dash (-) and should be a ' + 'simple string reflecting the absolute path to desired location, along with outPrefix. When this flag is ' + 'not specified (and noStructuralRegistration is used) then the output ' + 'files have same file-base as the input diffusion file. This trait is ' + 'ignored when noStructuralRegistration is not used. ') + threads = traits.Int( + argstr='--threads=%d', + desc='Sets the number of parallel process threads which can be used for ' + 'computations to N, where N must be an integer. Default value of N is ' + ' ') + lowMemory = traits.Bool( + argstr='--low-memory', + desc='Activates low-memory mode. This will run the registration-based ' + 'distortion correction at a lower resolution, which could result in a ' + 'less-accurate correction. This should only be used when no other ' + 'alternative is available. 
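As the flagConfigFile description above says, the configuration file is plain text with whitespace-separated flags and #-comments. A hypothetical way to write one and hand it to BDP:

from nipype.interfaces import brainsuite

# hypothetical flag file: estimate tensors and FRT ODFs; '#' starts a comment
with open('bdp_flags.conf', 'w') as conf:
    conf.write('--tensors --FRT  # diffusion modelling flags\n')

bdp = brainsuite.BDP()
bdp.inputs.bfcFile = 'prefix.bfc.nii.gz'                          # hypothetical
bdp.inputs.inputDiffusionData = 'prefix.dwi.nii.gz'               # hypothetical
bdp.inputs.BVecBValPair = ['prefix.dwi.bvec', 'prefix.dwi.bval']  # hypothetical
bdp.inputs.flagConfigFile = 'bdp_flags.conf'
print(bdp.cmdline)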
') + ignoreMemory = traits.Bool( + argstr='--ignore-memory', + desc='Deactivates the inbuilt memory checks and forces BDP to run ' + 'registration-based distortion correction at its default resolution even ' + 'on machines with a low amount of memory. This may result in an ' + 'out-of-memory error when BDP cannot allocate sufficient memory. ') + + +class BDP(CommandLine): + """ + BrainSuite Diffusion Pipeline (BDP) enables fusion of diffusion and + structural MRI information for advanced image and connectivity analysis. + It provides various methods for distortion correction, co-registration, + diffusion modeling (DTI and ODF) and basic ROI-wise statistics. BDP is a + flexible and diverse tool which supports a wide variety of diffusion + datasets. + For more information, please see: + + http://brainsuite.org/processing/diffusion/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> bdp = brainsuite.BDP() + >>> bdp.inputs.bfcFile = '/directory/subdir/prefix.bfc.nii.gz' + >>> bdp.inputs.inputDiffusionData = '/directory/subdir/prefix.dwi.nii.gz' + >>> bdp.inputs.BVecBValPair = ['/directory/subdir/prefix.dwi.bvec', '/directory/subdir/prefix.dwi.bval'] + >>> results = bdp.run() #doctest: +SKIP + + + """ + + input_spec = BDPInputSpec + _cmd = 'bdp.sh' + + def _format_arg(self, name, spec, value): + if name == 'BVecBValPair': + return spec.argstr % (value[0], value[1]) + if name == 'dataSinkDelay': + return spec.argstr % '' + return super(BDP, self)._format_arg(name, spec, value) + + +class ThicknessPVCInputSpec(CommandLineInputSpec): + subjectFilePrefix = traits.Str( + argstr='%s', + mandatory=True, + desc='Absolute path and filename prefix of the subject data') + + +class ThicknessPVC(CommandLine): + """ + ThicknessPVC computes cortical thickness using partial tissue fractions. + This thickness measure is then transferred to the atlas surface to + facilitate population studies. It also stores the computed thickness in + separate hemisphere files, with the subject thickness mapped to the atlas + hemisphere surfaces. ThicknessPVC is not run through the main SVReg + sequence, and should be used after executing the BrainSuite and SVReg + sequence.
+ For more information, please see: + + http://brainsuite.org/processing/svreg/svreg_modules/ + + Examples + -------- + + >>> from nipype.interfaces import brainsuite + >>> thicknessPVC = brainsuite.ThicknessPVC() + >>> thicknessPVC.inputs.subjectFilePrefix = '/home/user/btestsubject/testsubject' + >>> results = thicknessPVC.run() #doctest: +SKIP + + """ + + input_spec = ThicknessPVCInputSpec + _cmd = 'thicknessPVC.sh' + + +# used to generate file names for outputs +# strips the path and extension from inputName, then returns the +# concatenation of the base name and suffix +def getFileName(inputName, suffix): + fullInput = os.path.basename(inputName) + dotRegex = regex.compile("[^.]+") + # extract the portion before the first period + inputNoExtension = dotRegex.findall(fullInput)[0] + return os.path.abspath(''.join((inputNoExtension, suffix))) + + +def l_outputs(self): + outputs = self.output_spec().get() + for key in outputs: + name = self._gen_filename(key) + if name is not None: + outputs[key] = name + + return outputs diff --git a/nipype/interfaces/brainsuite/tests/__init__.py b/nipype/interfaces/brainsuite/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/brainsuite/tests/test_auto_BDP.py b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py new file mode 100644 index 0000000000..ff705edfeb --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_BDP.py @@ -0,0 +1,99 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import BDP + + +def test_BDP_inputs(): + input_map = dict( + BVecBValPair=dict( + argstr='--bvec %s --bval %s', + mandatory=True, + position=-1, + xor=['bMatrixFile'], + ), + args=dict(argstr='%s', ), + bMatrixFile=dict( + argstr='--bmat %s', + mandatory=True, + position=-1, + xor=['BVecBValPair'], + ), + bValRatioThreshold=dict(argstr='--bval-ratio-threshold %f', ), + bfcFile=dict( + argstr='%s', + mandatory=True, + position=0, + xor=['noStructuralRegistration'], + ), + customDiffusionLabel=dict(argstr='--custom-diffusion-label %s', ), + customLabelXML=dict(argstr='--custom-label-xml %s', ), + customT1Label=dict(argstr='--custom-t1-label %s', ), + dataSinkDelay=dict(argstr='%s', ), + dcorrRegMeasure=dict(argstr='--dcorr-reg-method %s', ), + dcorrWeight=dict(argstr='--dcorr-regularization-wt %f', ), + dwiMask=dict(argstr='--dwi-mask %s', ), + echoSpacing=dict(argstr='--echo-spacing=%f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + estimateODF_3DShore=dict(argstr='--3dshore --diffusion_time_ms %f', ), + estimateODF_FRACT=dict(argstr='--FRACT', ), + estimateODF_FRT=dict(argstr='--FRT', ), + estimateTensors=dict(argstr='--tensors', ), + fieldmapCorrection=dict( + argstr='--fieldmap-correction %s', + requires=['echoSpacing'], + ), + fieldmapCorrectionMethod=dict( + argstr='--fieldmap-correction-method %s', + xor=['skipIntensityCorr'], + ), + fieldmapSmooth=dict(argstr='--fieldmap-smooth3=%f', ), + flagConfigFile=dict(argstr='--flag-conf-file %s', ), + forcePartialROIStats=dict(argstr='--force-partial-roi-stats', ), + generateStats=dict(argstr='--generate-stats', ), + ignoreFieldmapFOV=dict(argstr='--ignore-fieldmap-fov', ), + ignoreMemory=dict(argstr='--ignore-memory', ), + inputDiffusionData=dict( + argstr='--nii %s', + mandatory=True, + position=-2, + ), + lowMemory=dict(argstr='--low-memory', ), + noStructuralRegistration=dict(
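To make the getFileName helper above concrete, here is a standalone restatement using the stdlib re module instead of the regex package (the '[^.]+' pattern behaves identically for this input); the path is hypothetical:

import os
import re

inputName, suffix = '/data/sub-01/inner.cortex.dfs', '.pial.cortex.dfs'
fullInput = os.path.basename(inputName)               # 'inner.cortex.dfs'
inputNoExtension = re.findall('[^.]+', fullInput)[0]  # text before the first period: 'inner'
print(os.path.abspath(inputNoExtension + suffix))     # <cwd>/inner.pial.cortex.dfs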
argstr='--no-structural-registration', + mandatory=True, + position=0, + xor=['bfcFile'], + ), + odfLambta=dict(argstr='--odf-lambda ', ), + onlyStats=dict(argstr='--generate-only-stats', ), + outPrefix=dict(argstr='--output-fileprefix %s', ), + outputDiffusionCoordinates=dict( + argstr='--output-diffusion-coordinate', ), + outputSubdir=dict(argstr='--output-subdir %s', ), + phaseEncodingDirection=dict(argstr='--dir=%s', ), + rigidRegMeasure=dict(argstr='--rigid-reg-measure %s', ), + skipDistortionCorr=dict(argstr='--no-distortion-correction', ), + skipIntensityCorr=dict( + argstr='--no-intensity-correction', + xor=['fieldmapCorrectionMethod'], + ), + skipNonuniformityCorr=dict(argstr='--no-nonuniformity-correction', ), + t1Mask=dict(argstr='--t1-mask %s', ), + threads=dict(argstr='--threads=%d', ), + transformDataOnly=dict(argstr='--transform-data-only', ), + transformDiffusionSurface=dict( + argstr='--transform-diffusion-surface %s', ), + transformDiffusionVolume=dict( + argstr='--transform-diffusion-volume %s', ), + transformInterpolation=dict(argstr='--transform-interpolation %s', ), + transformT1Surface=dict(argstr='--transform-t1-surface %s', ), + transformT1Volume=dict(argstr='--transform-t1-volume %s', ), + ) + inputs = BDP.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py new file mode 100644 index 0000000000..01200e50fc --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bfc.py @@ -0,0 +1,75 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Bfc + + +def test_Bfc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + biasEstimateConvergenceThreshold=dict(argstr='--beps %f', ), + biasEstimateSpacing=dict(argstr='-s %d', ), + biasFieldEstimatesOutputPrefix=dict(argstr='--biasprefix %s', ), + biasRange=dict(argstr='%s', ), + controlPointSpacing=dict(argstr='-c %d', ), + convergenceThreshold=dict(argstr='--eps %f', ), + correctWholeVolume=dict(argstr='--extrapolate', ), + correctedImagesOutputPrefix=dict(argstr='--prefix %s', ), + correctionScheduleFile=dict(argstr='--schedule %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramRadius=dict(argstr='-r %d', ), + histogramType=dict(argstr='%s', ), + inputMRIFile=dict( + argstr='-i %s', + mandatory=True, + ), + inputMaskFile=dict( + argstr='-m %s', + hash_files=False, + ), + intermediate_file_type=dict(argstr='%s', ), + iterativeMode=dict(argstr='--iterate', ), + maxBias=dict( + argstr='-U %f', + usedefault=True, + ), + minBias=dict( + argstr='-L %f', + usedefault=True, + ), + outputBiasField=dict( + argstr='--bias %s', + hash_files=False, + ), + outputMRIVolume=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + outputMaskedBiasField=dict( + argstr='--maskedbias %s', + hash_files=False, + ), + splineLambda=dict(argstr='-w %f', ), + timer=dict(argstr='--timer', ), + verbosityLevel=dict(argstr='-v %d', ), + ) + inputs = Bfc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Bfc_outputs(): + output_map = dict( + correctionScheduleFile=dict(), + outputBiasField=dict(), + outputMRIVolume=dict(), + outputMaskedBiasField=dict(), + ) + outputs = 
Bfc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Bse.py b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py new file mode 100644 index 0000000000..bbd154bc24 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Bse.py @@ -0,0 +1,92 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Bse + + +def test_Bse_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + diffusionConstant=dict( + argstr='-d %f', + usedefault=True, + ), + diffusionIterations=dict( + argstr='-n %d', + usedefault=True, + ), + dilateFinalMask=dict( + argstr='-p', + usedefault=True, + ), + edgeDetectionConstant=dict( + argstr='-s %f', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr='-i %s', + mandatory=True, + ), + noRotate=dict(argstr='--norotate', ), + outputCortexFile=dict( + argstr='--cortex %s', + hash_files=False, + ), + outputDetailedBrainMask=dict( + argstr='--hires %s', + hash_files=False, + ), + outputDiffusionFilter=dict( + argstr='--adf %s', + hash_files=False, + ), + outputEdgeMap=dict( + argstr='--edge %s', + hash_files=False, + ), + outputMRIVolume=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + outputMaskFile=dict( + argstr='--mask %s', + genfile=True, + hash_files=False, + ), + radius=dict( + argstr='-r %f', + usedefault=True, + ), + timer=dict(argstr='--timer', ), + trim=dict( + argstr='--trim', + usedefault=True, + ), + verbosityLevel=dict( + argstr='-v %f', + usedefault=True, + ), + ) + inputs = Bse.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Bse_outputs(): + output_map = dict( + outputCortexFile=dict(), + outputDetailedBrainMask=dict(), + outputDiffusionFilter=dict(), + outputEdgeMap=dict(), + outputMRIVolume=dict(), + outputMaskFile=dict(), + ) + outputs = Bse.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py new file mode 100644 index 0000000000..808e4347c3 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cerebro.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Cerebro + + +def test_Cerebro_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + costFunction=dict( + argstr='-c %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputAtlasLabelFile=dict( + argstr='--atlaslabels %s', + mandatory=True, + ), + inputAtlasMRIFile=dict( + argstr='--atlas %s', + mandatory=True, + ), + inputBrainMaskFile=dict(argstr='-m %s', ), + inputMRIFile=dict( + argstr='-i %s', + mandatory=True, + ), + keepTempFiles=dict(argstr='--keep', ), + linearConvergence=dict(argstr='--linconv %f', ), + outputAffineTransformFile=dict( + argstr='--air %s', + genfile=True, + ), + outputCerebrumMaskFile=dict( + argstr='-o %s', + genfile=True, + ), + outputLabelVolumeFile=dict( + argstr='-l %s', + genfile=True, + ), + outputWarpTransformFile=dict( + argstr='--warp %s', 
+ genfile=True, + ), + tempDirectory=dict(argstr='--tempdir %s', ), + tempDirectoryBase=dict(argstr='--tempdirbase %s', ), + useCentroids=dict(argstr='--centroids', ), + verbosity=dict(argstr='-v %d', ), + warpConvergence=dict(argstr='--warpconv %f', ), + warpLabel=dict(argstr='--warplevel %d', ), + ) + inputs = Cerebro.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Cerebro_outputs(): + output_map = dict( + outputAffineTransformFile=dict(), + outputCerebrumMaskFile=dict(), + outputLabelVolumeFile=dict(), + outputWarpTransformFile=dict(), + ) + outputs = Cerebro.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py new file mode 100644 index 0000000000..536cb158f2 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Cortex.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Cortex + + +def test_Cortex_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + computeGCBoundary=dict(argstr='-g', ), + computeWGBoundary=dict( + argstr='-w', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + includeAllSubcorticalAreas=dict( + argstr='-a', + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr='-h %s', + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr='-f %s', + mandatory=True, + ), + outputCerebrumMask=dict( + argstr='-o %s', + genfile=True, + ), + timer=dict(argstr='--timer', ), + tissueFractionThreshold=dict( + argstr='-p %f', + usedefault=True, + ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Cortex.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Cortex_outputs(): + output_map = dict(outputCerebrumMask=dict(), ) + outputs = Cortex.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py new file mode 100644 index 0000000000..ba430fb1fd --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dewisp.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Dewisp + + +def test_Dewisp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskFile=dict( + argstr='-i %s', + mandatory=True, + ), + maximumIterations=dict(argstr='-n %d', ), + outputMaskFile=dict( + argstr='-o %s', + genfile=True, + ), + sizeThreshold=dict(argstr='-t %d', ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Dewisp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dewisp_outputs(): + output_map = dict(outputMaskFile=dict(), ) + outputs = Dewisp.output_spec() + + for key, metadata in list(output_map.items()): + 
for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py new file mode 100644 index 0000000000..c69232fd01 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Dfs.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Dfs + + +def test_Dfs_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + curvatureWeighting=dict( + argstr='-w %f', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputShadingVolume=dict(argstr='-c %s', ), + inputVolumeFile=dict( + argstr='-i %s', + mandatory=True, + ), + noNormalsFlag=dict(argstr='--nonormals', ), + nonZeroTessellation=dict( + argstr='-nz', + xor=('nonZeroTessellation', 'specialTessellation'), + ), + outputSurfaceFile=dict( + argstr='-o %s', + genfile=True, + ), + postSmoothFlag=dict(argstr='--postsmooth', ), + scalingPercentile=dict(argstr='-f %f', ), + smoothingConstant=dict( + argstr='-a %f', + usedefault=True, + ), + smoothingIterations=dict( + argstr='-n %d', + usedefault=True, + ), + specialTessellation=dict( + argstr='%s', + position=-1, + requires=['tessellationThreshold'], + xor=('nonZeroTessellation', 'specialTessellation'), + ), + tessellationThreshold=dict(argstr='%f', ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + zeroPadFlag=dict(argstr='-z', ), + ) + inputs = Dfs.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dfs_outputs(): + output_map = dict(outputSurfaceFile=dict(), ) + outputs = Dfs.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py new file mode 100644 index 0000000000..9e3db80dd9 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Hemisplit.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Hemisplit + + +def test_Hemisplit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHemisphereLabelFile=dict( + argstr='-l %s', + mandatory=True, + ), + inputSurfaceFile=dict( + argstr='-i %s', + mandatory=True, + ), + outputLeftHemisphere=dict( + argstr='--left %s', + genfile=True, + ), + outputLeftPialHemisphere=dict( + argstr='-pl %s', + genfile=True, + ), + outputRightHemisphere=dict( + argstr='--right %s', + genfile=True, + ), + outputRightPialHemisphere=dict( + argstr='-pr %s', + genfile=True, + ), + pialSurfaceFile=dict(argstr='-p %s', ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Hemisplit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Hemisplit_outputs(): + output_map = dict( + outputLeftHemisphere=dict(), + outputLeftPialHemisphere=dict(), + outputRightHemisphere=dict(), + outputRightPialHemisphere=dict(), + ) + outputs = Hemisplit.output_spec() + + for key, metadata in list(output_map.items()): 
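All of the auto-generated tests in this patch share one pattern: a hand-written metadata map is checked, entry by entry, against what the interface spec actually declares. A condensed, self-contained version of that loop, checking a single Dewisp trait as an example:

from nipype.interfaces.brainsuite import Dewisp

input_map = dict(inputMaskFile=dict(argstr='-i %s', mandatory=True))
inputs = Dewisp.input_spec()
for key, metadata in input_map.items():
    for metakey, value in metadata.items():
        # trait metadata (argstr, mandatory, ...) is exposed via .traits()
        assert getattr(inputs.traits()[key], metakey) == value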
+ for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py new file mode 100644 index 0000000000..afc621a56e --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pialmesh.py @@ -0,0 +1,78 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Pialmesh + + +def test_Pialmesh_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exportPrefix=dict(argstr='--prefix %s', ), + inputMaskFile=dict( + argstr='-m %s', + mandatory=True, + ), + inputSurfaceFile=dict( + argstr='-i %s', + mandatory=True, + ), + inputTissueFractionFile=dict( + argstr='-f %s', + mandatory=True, + ), + laplacianSmoothing=dict( + argstr='--smooth %f', + usedefault=True, + ), + maxThickness=dict( + argstr='--max %f', + usedefault=True, + ), + normalSmoother=dict( + argstr='--nc %f', + usedefault=True, + ), + numIterations=dict( + argstr='-n %d', + usedefault=True, + ), + outputInterval=dict( + argstr='--interval %d', + usedefault=True, + ), + outputSurfaceFile=dict( + argstr='-o %s', + genfile=True, + ), + recomputeNormals=dict(argstr='--norm', ), + searchRadius=dict( + argstr='-r %f', + usedefault=True, + ), + stepSize=dict( + argstr='-s %f', + usedefault=True, + ), + tangentSmoother=dict(argstr='--tc %f', ), + timer=dict(argstr='--timer', ), + tissueThreshold=dict( + argstr='-t %f', + usedefault=True, + ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Pialmesh.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Pialmesh_outputs(): + output_map = dict(outputSurfaceFile=dict(), ) + outputs = Pialmesh.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py new file mode 100644 index 0000000000..d425d4ddac --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Pvc.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Pvc + + +def test_Pvc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr='-i %s', + mandatory=True, + ), + inputMaskFile=dict(argstr='-m %s', ), + outputLabelFile=dict( + argstr='-o %s', + genfile=True, + ), + outputTissueFractionFile=dict( + argstr='-f %s', + genfile=True, + ), + spatialPrior=dict(argstr='-l %f', ), + threeClassFlag=dict(argstr='-3', ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Pvc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Pvc_outputs(): + output_map = dict( + outputLabelFile=dict(), + outputTissueFractionFile=dict(), + ) + outputs = Pvc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py new file mode 100644 index 0000000000..7449e1488a --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_SVReg.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import SVReg + + +def test_SVReg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atlasFilePrefix=dict( + argstr="'%s'", + position=1, + ), + curveMatchingInstructions=dict(argstr="'-cur %s'", ), + dataSinkDelay=dict(argstr='%s', ), + displayModuleName=dict(argstr="'-m'", ), + displayTimestamps=dict(argstr="'-t'", ), + environ=dict( + nohash=True, + usedefault=True, + ), + iterations=dict(argstr="'-H %d'", ), + keepIntermediates=dict(argstr="'-k'", ), + pialSurfaceMaskDilation=dict(argstr="'-D %d'", ), + refineOutputs=dict(argstr="'-r'", ), + shortMessages=dict(argstr="'-gui'", ), + skipToIntensityReg=dict(argstr="'-p'", ), + skipToVolumeReg=dict(argstr="'-s'", ), + skipVolumetricProcessing=dict(argstr="'-S'", ), + subjectFilePrefix=dict( + argstr="'%s'", + mandatory=True, + position=0, + ), + useCerebrumMask=dict(argstr="'-C'", ), + useManualMaskFile=dict(argstr="'-cbm'", ), + useMultiThreading=dict(argstr="'-P'", ), + useSingleThreading=dict(argstr="'-U'", ), + verbosity0=dict( + argstr="'-v0'", + xor=('verbosity0', 'verbosity1', 'verbosity2'), + ), + verbosity1=dict( + argstr="'-v1'", + xor=('verbosity0', 'verbosity1', 'verbosity2'), + ), + verbosity2=dict( + argstr="'v2'", + xor=('verbosity0', 'verbosity1', 'verbosity2'), + ), + ) + inputs = SVReg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py new file mode 100644 index 0000000000..eb672d12b7 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Scrubmask.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Scrubmask + + +def test_Scrubmask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + backgroundFillThreshold=dict( + argstr='-b %d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundTrimThreshold=dict( + argstr='-f %d', + usedefault=True, + ), + inputMaskFile=dict( + argstr='-i %s', + mandatory=True, + ), + numberIterations=dict(argstr='-n %d', ), + outputMaskFile=dict( + argstr='-o %s', + genfile=True, + ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Scrubmask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Scrubmask_outputs(): + output_map = dict(outputMaskFile=dict(), ) + outputs = Scrubmask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py new file mode 100644 index 0000000000..2191f7b133 --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Skullfinder.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from 
__future__ import unicode_literals +from ..brainsuite import Skullfinder + + +def test_Skullfinder_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgLabelValue=dict(argstr='--bglabel %d', ), + brainLabelValue=dict(argstr='--brainlabel %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRIFile=dict( + argstr='-i %s', + mandatory=True, + ), + inputMaskFile=dict( + argstr='-m %s', + mandatory=True, + ), + lowerThreshold=dict(argstr='-l %d', ), + outputLabelFile=dict( + argstr='-o %s', + genfile=True, + ), + performFinalOpening=dict(argstr='--finalOpening', ), + scalpLabelValue=dict(argstr='--scalplabel %d', ), + skullLabelValue=dict(argstr='--skulllabel %d', ), + spaceLabelValue=dict(argstr='--spacelabel %d', ), + surfaceFilePrefix=dict(argstr='-s %s', ), + upperThreshold=dict(argstr='-u %d', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Skullfinder.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Skullfinder_outputs(): + output_map = dict(outputLabelFile=dict(), ) + outputs = Skullfinder.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_Tca.py b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py new file mode 100644 index 0000000000..ec2886b42f --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_Tca.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import Tca + + +def test_Tca_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundDelta=dict( + argstr='--delta %d', + usedefault=True, + ), + inputMaskFile=dict( + argstr='-i %s', + mandatory=True, + ), + maxCorrectionSize=dict(argstr='-n %d', ), + minCorrectionSize=dict( + argstr='-m %d', + usedefault=True, + ), + outputMaskFile=dict( + argstr='-o %s', + genfile=True, + ), + timer=dict(argstr='--timer', ), + verbosity=dict(argstr='-v %d', ), + ) + inputs = Tca.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tca_outputs(): + output_map = dict(outputMaskFile=dict(), ) + outputs = Tca.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py new file mode 100644 index 0000000000..d614ce335a --- /dev/null +++ b/nipype/interfaces/brainsuite/tests/test_auto_ThicknessPVC.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsuite import ThicknessPVC + + +def test_ThicknessPVC_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + subjectFilePrefix=dict( + argstr='%s', + mandatory=True, + ), + ) + inputs = ThicknessPVC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/bru2nii.py 
b/nipype/interfaces/bru2nii.py
new file mode 100644
index 0000000000..c1c1484d38
--- /dev/null
+++ b/nipype/interfaces/bru2nii.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+"""The bru2nii module provides basic functions for Bruker-to-NIfTI conversion
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+from .base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec,
+                   isdefined, File, Directory)
+
+
+class Bru2InputSpec(CommandLineInputSpec):
+    input_dir = Directory(
+        desc="Input Directory",
+        exists=True,
+        mandatory=True,
+        position=-1,
+        argstr="%s")
+    actual_size = traits.Bool(
+        argstr='-a',
+        desc="Keep actual size - otherwise x10 scale so animals match human.")
+    force_conversion = traits.Bool(
+        argstr='-f',
+        desc="Force conversion of localizers images (multiple slice "
+        "orientations).")
+    append_protocol_name = traits.Bool(
+        argstr='-p', desc="Append protocol name to output filename.")
+    output_filename = traits.Str(
+        argstr="-o %s",
+        desc="Output filename ('.nii' will be appended)",
+        genfile=True)
+
+
+class Bru2OutputSpec(TraitedSpec):
+    nii_file = File(exists=True)
+
+
+class Bru2(CommandLine):
+    """Uses bru2nii's Bru2 to convert Bruker files
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces.bru2nii import Bru2
+    >>> converter = Bru2()
+    >>> converter.inputs.input_dir = "brukerdir"
+    >>> converter.cmdline  # doctest: +ELLIPSIS
+    'Bru2 -o .../nipype/testing/data/brukerdir brukerdir'
+    """
+    input_spec = Bru2InputSpec
+    output_spec = Bru2OutputSpec
+    _cmd = "Bru2"
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        if isdefined(self.inputs.output_filename):
+            output_filename1 = os.path.abspath(self.inputs.output_filename)
+        else:
+            output_filename1 = self._gen_filename('output_filename')
+        outputs["nii_file"] = output_filename1 + ".nii"
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'output_filename':
+            outfile = os.path.join(
+                os.getcwd(),
+                os.path.basename(os.path.normpath(self.inputs.input_dir)))
+            return outfile
diff --git a/nipype/interfaces/c3.py b/nipype/interfaces/c3.py
new file mode 100644
index 0000000000..115804cc3f
--- /dev/null
+++ b/nipype/interfaces/c3.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+"""The c3 module provides basic functions for interfacing with the
+Convert3D (c3d) command line tools.
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+import os
+from glob import glob
+
+from .base import (CommandLineInputSpec, traits, TraitedSpec, File,
+                   SEMLikeCommandLine, InputMultiPath, OutputMultiPath,
+                   CommandLine, isdefined)
+from ..utils.filemanip import split_filename
+from ..
import logging
+
+iflogger = logging.getLogger("interface")
+
+
+class C3dAffineToolInputSpec(CommandLineInputSpec):
+    reference_file = File(exists=True, argstr="-ref %s", position=1)
+    source_file = File(exists=True, argstr='-src %s', position=2)
+    transform_file = File(exists=True, argstr='%s', position=3)
+    itk_transform = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Export ITK transform.",
+        argstr="-oitk %s",
+        position=5)
+    fsl2ras = traits.Bool(argstr='-fsl2ras', position=4)
+
+
+class C3dAffineToolOutputSpec(TraitedSpec):
+    itk_transform = File(exists=True)
+
+
+class C3dAffineTool(SEMLikeCommandLine):
+    """Converts fsl-style Affine registration into ANTS compatible itk format
+
+    Example
+    =======
+
+    >>> from nipype.interfaces.c3 import C3dAffineTool
+    >>> c3 = C3dAffineTool()
+    >>> c3.inputs.source_file = 'cmatrix.mat'
+    >>> c3.inputs.itk_transform = 'affine.txt'
+    >>> c3.inputs.fsl2ras = True
+    >>> c3.cmdline
+    'c3d_affine_tool -src cmatrix.mat -fsl2ras -oitk affine.txt'
+    """
+    input_spec = C3dAffineToolInputSpec
+    output_spec = C3dAffineToolOutputSpec
+
+    _cmd = 'c3d_affine_tool'
+    _outputs_filenames = {'itk_transform': 'affine.txt'}
+
+
+class C3dInputSpec(CommandLineInputSpec):
+    in_file = InputMultiPath(
+        File(),
+        position=1,
+        argstr="%s",
+        mandatory=True,
+        desc="Input file (wildcard and multiple are supported).")
+    out_file = File(
+        exists=False,
+        argstr="-o %s",
+        position=-1,
+        xor=["out_files"],
+        desc="Output file of last image on the stack.")
+    out_files = InputMultiPath(
+        File(),
+        argstr="-oo %s",
+        xor=["out_file"],
+        position=-1,
+        desc=("Write all images on the convert3d stack as multiple files."
+              " Supports both list of output files or a pattern for the output"
+              " filenames (using %d substitution)."))
+    pix_type = traits.Enum(
+        "float", "char", "uchar", "short", "ushort", "int", "uint", "double",
+        argstr="-type %s",
+        desc=("Specifies the pixel type for the output image. By default,"
+              " images are written in floating point (float) format"))
+    scale = traits.Either(
+        traits.Int(), traits.Float(),
+        argstr="-scale %s",
+        desc=("Multiplies the intensity of each voxel in the last image on the"
+              " stack by the given factor."))
+    shift = traits.Either(
+        traits.Int(), traits.Float(),
+        argstr="-shift %s",
+        desc='Adds the given constant to every voxel.')
+    interp = traits.Enum(
+        "Linear", "NearestNeighbor", "Cubic", "Sinc", "Gaussian",
+        argstr="-interpolation %s",
+        desc=("Specifies the interpolation used with -resample and other"
+              " commands. Default is Linear."))
+    resample = traits.Str(
+        argstr="-resample %s",
+        desc=("Resamples the image, keeping the bounding box the same, but"
+              " changing the number of voxels in the image. The dimensions can be"
+              " specified as a percentage, for example to double the number of voxels"
+              " in each direction. The -interpolation flag affects how sampling is"
+              " performed."))
+    smooth = traits.Str(
+        argstr="-smooth %s",
+        desc=("Applies Gaussian smoothing to the image.
The parameter vector" + " specifies the standard deviation of the Gaussian kernel.")) + multicomp_split = traits.Bool( + False, + usedefault=True, + argstr="-mcr", + position=0, + desc="Enable reading of multi-component images.") + is_4d = traits.Bool( + False, + usedefault=True, + desc=("Changes command to support 4D file operations (default is" + " false).")) + + +class C3dOutputSpec(TraitedSpec): + out_files = OutputMultiPath(File(exists=False)) + + +class C3d(CommandLine): + """ + Convert3d is a command-line tool for converting 3D (or 4D) images between + common file formats. The tool also includes a growing list of commands for + image manipulation, such as thresholding and resampling. The tool can also + be used to obtain information about image files. More information on + Convert3d can be found at: + https://sourceforge.net/p/c3d/git/ci/master/tree/doc/c3d.md + + + Example + ======= + + >>> from nipype.interfaces.c3 import C3d + >>> c3 = C3d() + >>> c3.inputs.in_file = "T1.nii" + >>> c3.inputs.pix_type = "short" + >>> c3.inputs.out_file = "T1.img" + >>> c3.cmdline + 'c3d T1.nii -type short -o T1.img' + >>> c3.inputs.is_4d = True + >>> c3.inputs.in_file = "epi.nii" + >>> c3.inputs.out_file = "epi.img" + >>> c3.cmdline + 'c4d epi.nii -type short -o epi.img' + """ + input_spec = C3dInputSpec + output_spec = C3dOutputSpec + + _cmd = "c3d" + + def __init__(self, **inputs): + super(C3d, self).__init__(**inputs) + self.inputs.on_trait_change(self._is_4d, "is_4d") + if self.inputs.is_4d: + self._is_4d() + + def _is_4d(self): + self._cmd = "c4d" if self.inputs.is_4d else "c3d" + + def _run_interface(self, runtime): + cmd = self._cmd + if (not isdefined(self.inputs.out_file) + and not isdefined(self.inputs.out_files)): + # Convert3d does not want to override file, by default + # so we define a new output file + self._gen_outfile() + runtime = super(C3d, self)._run_interface(runtime) + self._cmd = cmd + return runtime + + def _gen_outfile(self): + # if many infiles, raise exception + if (len(self.inputs.in_file) > 1) or ("*" in self.inputs.in_file[0]): + raise AttributeError("Multiple in_files found - specify either" + " `out_file` or `out_files`.") + _, fn, ext = split_filename(self.inputs.in_file[0]) + self.inputs.out_file = fn + "_generated" + ext + # if generated file will overwrite, raise error + if os.path.exists(os.path.abspath(self.inputs.out_file)): + raise IOError("File already found - to overwrite, use `out_file`.") + iflogger.info("Generating `out_file`.") + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_file): + outputs["out_files"] = os.path.abspath(self.inputs.out_file) + if isdefined(self.inputs.out_files): + if len(self.inputs.out_files) == 1: + _out_files = glob(os.path.abspath(self.inputs.out_files[0])) + else: + _out_files = [os.path.abspath(f) for f in self.inputs.out_files + if os.path.exists(os.path.abspath(f))] + outputs["out_files"] = _out_files + + return outputs + diff --git a/nipype/interfaces/camino/__init__.py b/nipype/interfaces/camino/__init__.py new file mode 100644 index 0000000000..0120732ef6 --- /dev/null +++ b/nipype/interfaces/camino/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Camino top level namespace +""" + +from .connectivity import Conmat +from .convert import (Image2Voxel, FSL2Scheme, VtkStreamlines, ProcStreamlines, + TractShredder, DT2NIfTI, NIfTIDT2Camino, AnalyzeHeader, 
+ Shredder) +from .dti import (DTIFit, ModelFit, DTLUTGen, PicoPDFs, Track, TrackPICo, + TrackBayesDirac, TrackDT, TrackBallStick, TrackBootstrap, + TrackBedpostxDeter, TrackBedpostxProba, + ComputeFractionalAnisotropy, ComputeMeanDiffusivity, + ComputeTensorTrace, ComputeEigensystem, DTMetric) +from .calib import (SFPICOCalibData, SFLUTGen) +from .odf import (QBallMX, LinRecon, SFPeaks, MESD) +from .utils import ImageStats diff --git a/nipype/interfaces/camino/calib.py b/nipype/interfaces/camino/calib.py new file mode 100644 index 0000000000..1921f62651 --- /dev/null +++ b/nipype/interfaces/camino/calib.py @@ -0,0 +1,296 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ...utils.filemanip import split_filename +from ..base import (traits, TraitedSpec, File, StdOutCommandLine, + StdOutCommandLineInputSpec) + + +class SFPICOCalibDataInputSpec(StdOutCommandLineInputSpec): + snr = traits.Float( + argstr='-snr %f', + units='NA', + desc=('Specifies the signal-to-noise ratio of the ' + 'non-diffusion-weighted measurements to use in simulations.')) + scheme_file = File( + exists=True, + argstr='-schemefile %s', + mandatory=True, + desc='Specifies the scheme file for the diffusion MRI data') + info_file = File( + desc='The name to be given to the information output filename.', + argstr='-infooutputfile %s', + mandatory=True, + genfile=True, + hash_files=False) # Genfile and hash_files? + trace = traits.Float( + argstr='-trace %f', + units='NA', + desc='Trace of the diffusion tensor(s) used in the test function.') + onedtfarange = traits.List( + traits.Float, + argstr='-onedtfarange %s', + minlen=2, + maxlen=2, + units='NA', + desc=('Minimum and maximum FA for the single tensor ' + 'synthetic data.')) + onedtfastep = traits.Float( + argstr='-onedtfastep %f', + units='NA', + desc=('FA step size controlling how many steps there are ' + 'between the minimum and maximum FA settings.')) + twodtfarange = traits.List( + traits.Float, + argstr='-twodtfarange %s', + minlen=2, + maxlen=2, + units='NA', + desc=('Minimum and maximum FA for the two tensor ' + 'synthetic data. FA is varied for both tensors ' + 'to give all the different permutations.')) + twodtfastep = traits.Float( + argstr='-twodtfastep %f', + units='NA', + desc=('FA step size controlling how many steps there are ' + 'between the minimum and maximum FA settings ' + 'for the two tensor cases.')) + twodtanglerange = traits.List( + traits.Float, + argstr='-twodtanglerange %s', + minlen=2, + maxlen=2, + units='NA', + desc=('Minimum and maximum crossing angles ' + 'between the two fibres.')) + twodtanglestep = traits.Float( + argstr='-twodtanglestep %f', + units='NA', + desc=('Angle step size controlling how many steps there are ' + 'between the minimum and maximum crossing angles for ' + 'the two tensor cases.')) + twodtmixmax = traits.Float( + argstr='-twodtmixmax %f', + units='NA', + desc= + ('Mixing parameter controlling the proportion of one fibre population ' + 'to the other. The minimum mixing parameter is (1 - twodtmixmax).')) + twodtmixstep = traits.Float( + argstr='-twodtmixstep %f', + units='NA', + desc=('Mixing parameter step size for the two tensor cases. ' + 'Specify how many mixing parameter increments to use.')) + seed = traits.Float( + argstr='-seed %f', + units='NA', + desc= + 'Specifies the random seed to use for noise generation in simulation trials.' 
+    )
+
+
+class SFPICOCalibDataOutputSpec(TraitedSpec):
+    PICOCalib = File(exists=True, desc='Calibration dataset')
+    calib_info = File(exists=True, desc='Calibration dataset information')
+
+
+class SFPICOCalibData(StdOutCommandLine):
+    """
+    Generates Spherical Function PICo Calibration Data.
+
+    SFPICOCalibData creates synthetic data for use with SFLUTGen. The
+    synthetic data is generated using a mixture of Gaussians, in the
+    same way datasynth generates data. Each voxel of data models a
+    slightly different fibre configuration (varying FA and fibre-
+    crossings) and undergoes a random rotation to help account for any
+    directional bias in the chosen acquisition scheme. A second file,
+    which stores information about the datafile, is generated along with
+    the datafile.
+
+    Example 1
+    ---------
+    To create a calibration dataset using the default settings
+
+    >>> import nipype.interfaces.camino as cam
+    >>> calib = cam.SFPICOCalibData()
+    >>> calib.inputs.scheme_file = 'A.scheme'
+    >>> calib.inputs.snr = 20
+    >>> calib.inputs.info_file = 'PICO_calib.info'
+    >>> calib.run()  # doctest: +SKIP
+
+    The default settings create a large dataset (249,231 voxels), of
+    which 3401 voxels contain a single fibre population per voxel and
+    the rest of the voxels contain two fibre-populations. The amount of
+    data produced can be varied by specifying the ranges and steps of
+    the parameters for both the one and two fibre datasets used.
+
+    Example 2
+    ---------
+    To create a custom calibration dataset
+
+    >>> import nipype.interfaces.camino as cam
+    >>> calib = cam.SFPICOCalibData()
+    >>> calib.inputs.scheme_file = 'A.scheme'
+    >>> calib.inputs.snr = 20
+    >>> calib.inputs.info_file = 'PICO_calib.info'
+    >>> calib.inputs.twodtfarange = [0.3, 0.9]
+    >>> calib.inputs.twodtfastep = 0.02
+    >>> calib.inputs.twodtanglerange = [0, 0.785]
+    >>> calib.inputs.twodtanglestep = 0.03925
+    >>> calib.inputs.twodtmixmax = 0.8
+    >>> calib.inputs.twodtmixstep = 0.1
+    >>> calib.run()  # doctest: +SKIP
+
+    This would provide 76,313 voxels of synthetic data, where 3401 voxels
+    simulate the one fibre cases and 72,912 voxels simulate the various
+    two fibre cases. However, care should be taken to ensure that enough
+    data is generated for calculating the LUT.
+    """
+    _cmd = 'sfpicocalibdata'
+    input_spec = SFPICOCalibDataInputSpec
+    output_spec = SFPICOCalibDataOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['PICOCalib'] = os.path.abspath(self._gen_outfilename())
+        outputs['calib_info'] = os.path.abspath(self.inputs.info_file)
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.scheme_file)
+        return name + '_PICOCalib.Bfloat'
+
+
+class SFLUTGenInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        desc='Voxel-order data of the spherical functions peaks.')
+    info_file = File(
+        argstr='-infofile %s',
+        mandatory=True,
+        desc=('The Info file that corresponds to the calibration '
+              'datafile used in the reconstruction.'))
+    outputstem = traits.Str(
+        'LUT',
+        argstr='-outputstem %s',
+        desc=('Define the name of the generated LUTs. The form of the filenames will be '
+              '[outputstem]_oneFibreSurfaceCoeffs.Bdouble and '
+              '[outputstem]_twoFibreSurfaceCoeffs.Bdouble'),
+        usedefault=True)
+    pdf = traits.Enum(
+        'bingham',
+        'watson',
+        argstr='-pdf %s',
+        desc=('Sets the distribution to use for the calibration.
The default is the Bingham '
+              'distribution, which allows elliptical probability density contours. '
+              'Currently supported options are: '
+              ' bingham - The Bingham distribution, which allows elliptical probability '
+              ' density contours. '
+              ' watson - The Watson distribution. This distribution is rotationally symmetric.'
+              ),
+        usedefault=True)
+    binincsize = traits.Int(
+        argstr='-binincsize %d',
+        units='NA',
+        desc=('Sets the size of the bins. In the case of 2D histograms such as the '
+              'Bingham, the bins are always square. Default is 1.'))
+    minvectsperbin = traits.Int(
+        argstr='-minvectsperbin %d',
+        units='NA',
+        desc=('Specifies the minimum number of fibre-orientation estimates a bin '
+              'must contain before it is used in the LUT line/surface generation. '
+              'Default is 50. If you get the error "no fibre-orientation estimates '
+              'in histogram!", the calibration data set is too small to get enough '
+              'samples in any of the histogram bins. You can decrease the minimum '
+              'number per bin to get things running in quick tests, but the '
+              'statistics will not be reliable and for serious applications, you '
+              'need to increase the size of the calibration data set until the '
+              'error goes away.'))
+    directmap = traits.Bool(
+        argstr='-directmap',
+        desc=('Use direct mapping between the eigenvalues and the distribution '
+              'parameters instead of the log of the eigenvalues.'))
+    order = traits.Int(
+        argstr='-order %d',
+        units='NA',
+        desc=('The order of the polynomial fitting the surface. Order 1 is linear. '
+              'Order 2 (default) is quadratic.'))
+
+
+class SFLUTGenOutputSpec(TraitedSpec):
+    lut_one_fibre = File(exists=True, desc='PICo LUT for one-fibre model')
+    lut_two_fibres = File(exists=True, desc='PICo LUT for two-fibre model')
+
+
+class SFLUTGen(StdOutCommandLine):
+    """
+    Generates PICo lookup tables (LUT) for multi-fibre methods such as
+    PASMRI and Q-Ball.
+
+    SFLUTGen creates the lookup tables for the generalized multi-fibre
+    implementation of the PICo tractography algorithm. The outputs of
+    this utility are either surface or line coefficients up to a given
+    order. The calibration can be performed for different distributions,
+    such as the Bingham and Watson distributions.
+
+    This utility uses calibration data generated from SFPICOCalibData
+    and peak information created by SFPeaks.
+
+    The utility outputs two LUTs, *_oneFibreSurfaceCoeffs.Bdouble and
+    *_twoFibreSurfaceCoeffs.Bdouble. Each of these files contains
+    big-endian doubles as standard. The format of the output is: ::
+
+        dimensions    (1 for Watson, 2 for Bingham)
+        order         (the order of the polynomial)
+        coefficient_1
+        coefficient_2
+        ...
+        coefficient_N
+
+    In the case of the Watson, there is a single set of coefficients,
+    which are ordered: ::
+
+        constant, x, x^2, ..., x^order.
+
+    In the case of the Bingham, there are two sets of coefficients (one
+    for each surface), ordered so that: ::
+
+        for j = 1 to order
+            for k = 1 to order
+                coeff_i = x^j * y^k
+                where j+k < order
+
+    Example
+    ---------
+    To generate the LUTs from calibration data using the default settings
+
+    >>> import nipype.interfaces.camino as cam
+    >>> lutgen = cam.SFLUTGen()
+    >>> lutgen.inputs.in_file = 'QSH_peaks.Bdouble'
+    >>> lutgen.inputs.info_file = 'PICO_calib.info'
+    >>> lutgen.run()  # doctest: +SKIP
+    """
+    _cmd = 'sflutgen'
+    input_spec = SFLUTGenInputSpec
+    output_spec = SFLUTGenOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['lut_one_fibre'] = \
+            self.inputs.outputstem + '_oneFibreSurfaceCoeffs.Bdouble'
+        outputs['lut_two_fibres'] = \
+            self.inputs.outputstem + '_twoFibreSurfaceCoeffs.Bdouble'
+        return outputs
+
+    def _gen_outfilename(self):
+        return '/dev/null'
diff --git a/nipype/interfaces/camino/connectivity.py b/nipype/interfaces/camino/connectivity.py
new file mode 100644
index 0000000000..97e400e0f5
--- /dev/null
+++ b/nipype/interfaces/camino/connectivity.py
@@ -0,0 +1,178 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+import os
+
+from ...utils.filemanip import split_filename
+from ..base import (traits, TraitedSpec, File, CommandLine,
+                    CommandLineInputSpec, isdefined)
+
+
+class ConmatInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        desc='Streamlines as generated by the Track interface')
+
+    target_file = File(
+        exists=True,
+        argstr='-targetfile %s',
+        mandatory=True,
+        desc='An image containing targets, as used in ProcStreamlines interface.')
+
+    scalar_file = File(
+        exists=True,
+        argstr='-scalarfile %s',
+        desc=('Optional scalar file for computing tract-based statistics. '
+              'Must be in the same space as the target file.'),
+        requires=['tract_stat'])
+
+    targetname_file = File(
+        exists=True,
+        argstr='-targetnamefile %s',
+        desc=('Optional names of targets. This file should contain one entry per line, '
+              'with the target intensity followed by the name, separated by white space. '
+              'For example: '
+              ' 1 some_brain_region '
+              ' 2 some_other_region '
+              'These names will be used in the output. The names themselves should not '
+              'contain spaces or commas. The labels may be in any order but the output '
+              'matrices will be ordered by label intensity.'))
+
+    tract_stat = traits.Enum(
+        "mean",
+        "min",
+        "max",
+        "sum",
+        "median",
+        "var",
+        argstr='-tractstat %s',
+        units='NA',
+        desc=("Tract statistic to use. See TractStats for other options."),
+        requires=['scalar_file'],
+        xor=['tract_prop'])
+
+    tract_prop = traits.Enum(
+        "length",
+        "endpointsep",
+        argstr='-tractstat %s',
+        units='NA',
+        xor=['tract_stat'],
+        desc=('Tract property average to compute in the connectivity matrix. '
+              'See TractStats for details.'))
+
+    output_root = File(
+        argstr='-outputroot %s',
+        genfile=True,
+        desc=('filename root prepended onto the names of the output files. '
+              'The extension will be determined from the input.'))
+
+
+class ConmatOutputSpec(TraitedSpec):
+    conmat_sc = File(exists=True, desc='Connectivity matrix in CSV file.')
+    conmat_ts = File(desc='Tract statistics in CSV file.')
+
+
+class Conmat(CommandLine):
+    """
+    Creates a connectivity matrix using a 3D label image (the target image)
+    and a set of streamlines.
The connectivity matrix records how many
+    streamlines connect each pair of targets, and optionally the mean
+    tractwise statistic (e.g. tract-averaged FA, or length).
+
+    The output is a comma-separated values (CSV) file or files. The first
+    row of the output matrix is label names. Label names may be defined by
+    the user, otherwise they are assigned based on label intensity.
+
+    Starting from the seed point, we move along the streamline until we find
+    a point in a labeled region. This is done in both directions from the seed
+    point. Streamlines are counted if they connect two target regions, one on
+    either side of the seed point. Only the labeled region closest to the seed
+    is counted, for example if the input contains two streamlines: ::
+
+        1: A-----B------SEED---C
+        2: A--------SEED-----------
+
+    then the output would be ::
+
+        A,B,C
+        0,0,0
+        0,0,1
+        0,1,0
+
+    There are zero connections to A because in streamline 1, the connection
+    to B is closer to the seed than the connection to A, and in streamline 2
+    there is no region reached in the other direction.
+
+    The connected target regions can have the same label, as long as the seed
+    point is outside of the labeled region and both ends connect to the same
+    label (which may be in different locations). Therefore this is allowed: ::
+
+        A------SEED-------A
+
+    Such fibers will add to the diagonal elements of the matrix. To remove
+    these entries, run procstreamlines with -endpointfile before running conmat.
+
+    If the seed point is inside a labeled region, it counts as one end of the
+    connection. So ::
+
+        ----[SEED inside A]---------B
+
+    counts as a connection between A and B, while ::
+
+        C----[SEED inside A]---------B
+
+    counts as a connection between A and C, because C is closer to the seed point.
+
+    In all cases, distance to the seed point is defined along the streamline path.
+
+    Example 1
+    ---------
+    To create a standard connectivity matrix based on streamline counts.
+
+    >>> import nipype.interfaces.camino as cam
+    >>> conmat = cam.Conmat()
+    >>> conmat.inputs.in_file = 'tracts.Bdouble'
+    >>> conmat.inputs.target_file = 'atlas.nii.gz'
+    >>> conmat.run()  # doctest: +SKIP
+
+    Example 2
+    ---------
+    To create a standard connectivity matrix and mean tractwise FA statistics.
+
+    >>> import nipype.interfaces.camino as cam
+    >>> conmat = cam.Conmat()
+    >>> conmat.inputs.in_file = 'tracts.Bdouble'
+    >>> conmat.inputs.target_file = 'atlas.nii.gz'
+    >>> conmat.inputs.scalar_file = 'fa.nii.gz'
+    >>> conmat.inputs.tract_stat = 'mean'
+    >>> conmat.run()  # doctest: +SKIP
+    """
+    _cmd = 'conmat'
+    input_spec = ConmatInputSpec
+    output_spec = ConmatOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        output_root = self._gen_outputroot()
+        outputs['conmat_sc'] = os.path.abspath(output_root + "sc.csv")
+        outputs['conmat_ts'] = os.path.abspath(output_root + "ts.csv")
+        return outputs
+
+    def _gen_outfilename(self):
+        return self._gen_outputroot()
+
+    def _gen_outputroot(self):
+        output_root = self.inputs.output_root
+        if not isdefined(output_root):
+            output_root = self._gen_filename('output_root')
+        return output_root
+
+    def _gen_filename(self, name):
+        if name == 'output_root':
+            _, filename, _ = split_filename(self.inputs.in_file)
+            filename = filename + "_"
+        return filename
diff --git a/nipype/interfaces/camino/convert.py b/nipype/interfaces/camino/convert.py
new file mode 100644
index 0000000000..ee2ae2eb82
--- /dev/null
+++ b/nipype/interfaces/camino/convert.py
@@ -0,0 +1,974 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+import glob
+
+from ...utils.filemanip import split_filename
+from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
+                    File, StdOutCommandLine, OutputMultiPath,
+                    StdOutCommandLineInputSpec, isdefined)
+
+
+class Image2VoxelInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-4dimage %s',
+        mandatory=True,
+        position=1,
+        desc='4d image file')
+    # TODO convert list of files on the fly
+    # imagelist = File(exists=True, argstr='-imagelist %s',
+    #                  mandatory=True, position=1,
+    #                  desc='Name of a file containing a list of 3D images')
+    #
+    # imageprefix = traits.Str(argstr='-imageprefix %s', position=3,
+    #                          desc='Path to prepend onto filenames in the imagelist.')
+
+    out_type = traits.Enum(
+        "float",
+        "char",
+        "short",
+        "int",
+        "long",
+        "double",
+        argstr='-outputdatatype %s',
+        position=2,
+        desc=('Output data type, e.g. Bfloat. Can be "char", "short", "int", '
+              '"long", "float" or "double"'),
+        usedefault=True)
+
+
+class Image2VoxelOutputSpec(TraitedSpec):
+    voxel_order = File(
+        exists=True, desc='path/name of 4D volume in voxel order')
+
+
+class Image2Voxel(StdOutCommandLine):
+    """
+    Converts Analyze / NIFTI / MHA files to voxel order.
+
+    Converts scanner-order data in a supported image format to voxel-order data.
+    Either takes a 4D file (all measurements in single image)
+    or a list of 3D images.
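Review note (editorial, not part of the patch): the seed-to-target counting rule in the Conmat docstring above is subtle, so here is a minimal pure-Python sketch of its semantics for the common case where the seed lies outside any labelled region (the in-seed-region case is handled differently, as the docstring explains). The function name and data layout are ours for illustration, not Camino's: ::

    # Hypothetical illustration only: a streamline is represented as a list
    # of integer labels sampled along its path (0 = background), plus the
    # index of the seed point within that list.
    def connected_pair(labels, seed_idx):
        # Walk outwards from the seed in both directions and keep only the
        # first labelled point found on each side (0 = no target reached).
        def first_label(indices):
            for i in indices:
                if labels[i] > 0:
                    return labels[i]
            return 0

        left = first_label(range(seed_idx, -1, -1))        # towards the start
        right = first_label(range(seed_idx, len(labels)))  # towards the end
        return left, right

    # Streamline 1 from the docstring, A-----B------SEED---C, with
    # A=1, B=2, C=3: only the B-C pair is counted, never A, because B is
    # the labelled region closest to the seed on that side.
    assert connected_pair([1, 0, 2, 0, 0, 3], seed_idx=4) == (2, 3)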
+ + Examples + -------- + + >>> import nipype.interfaces.camino as cmon + >>> img2vox = cmon.Image2Voxel() + >>> img2vox.inputs.in_file = '4d_dwi.nii' + >>> img2vox.run() # doctest: +SKIP + """ + _cmd = 'image2voxel' + input_spec = Image2VoxelInputSpec + output_spec = Image2VoxelOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['voxel_order'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '.B' + self.inputs.out_type + + +class FSL2SchemeInputSpec(StdOutCommandLineInputSpec): + bvec_file = File( + exists=True, + argstr='-bvecfile %s', + mandatory=True, + position=1, + desc='b vector file') + + bval_file = File( + exists=True, + argstr='-bvalfile %s', + mandatory=True, + position=2, + desc='b value file') + + numscans = traits.Int( + argstr='-numscans %d', + units='NA', + desc= + "Output all measurements numerous (n) times, used when combining multiple scans from the same imaging session." + ) + + interleave = traits.Bool( + argstr='-interleave', + desc="Interleave repeated scans. Only used with -numscans.") + + bscale = traits.Float( + argstr='-bscale %d', + units='NA', + desc= + "Scaling factor to convert the b-values into different units. Default is 10^6." + ) + + diffusiontime = traits.Float( + argstr='-diffusiontime %f', units='NA', desc="Diffusion time") + + flipx = traits.Bool( + argstr='-flipx', desc="Negate the x component of all the vectors.") + flipy = traits.Bool( + argstr='-flipy', desc="Negate the y component of all the vectors.") + flipz = traits.Bool( + argstr='-flipz', desc="Negate the z component of all the vectors.") + usegradmod = traits.Bool( + argstr='-usegradmod', + desc= + "Use the gradient magnitude to scale b. This option has no effect if your gradient directions have unit magnitude." + ) + + +class FSL2SchemeOutputSpec(TraitedSpec): + scheme = File(exists=True, desc='Scheme file') + + +class FSL2Scheme(StdOutCommandLine): + """ + Converts b-vectors and b-values from FSL format to a Camino scheme file. 
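Review note: for readers unfamiliar with the target format, fsl2scheme essentially rewrites FSL's bvecs/bvals pair as a Camino scheme file: a "VERSION: BVECTOR" header followed by one "gx gy gz b" row per measurement, with b-values rescaled by the bscale factor (10^6 by default, converting s/mm^2 to SI s/m^2, per the trait descriptions above). A rough standalone sketch of that conversion, ours and not a substitute for the actual tool: ::

    # Illustrative only; function name is ours. Assumes FSL-style inputs:
    # bvecs as 3 rows x N columns, bvals as a single row of N values.
    import numpy as np

    def fsl_to_scheme(bvecs_path, bvals_path, out_path, bscale=1e6):
        bvecs = np.loadtxt(bvecs_path)
        bvals = np.loadtxt(bvals_path)
        if bvecs.shape[0] == 3:
            bvecs = bvecs.T  # one (x, y, z) row per measurement
        with open(out_path, 'w') as f:
            f.write('VERSION: BVECTOR\n')
            for (x, y, z), b in zip(bvecs, bvals):
                # bscale converts the b-value units, as fsl2scheme does
                f.write('%f %f %f %g\n' % (x, y, z, b * bscale))

    # e.g. fsl_to_scheme('bvecs', 'bvals', 'A.scheme')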
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> makescheme = cmon.FSL2Scheme()
+    >>> makescheme.inputs.bvec_file = 'bvecs'
+    >>> makescheme.inputs.bval_file = 'bvals'
+    >>> makescheme.run()  # doctest: +SKIP
+
+    """
+    _cmd = 'fsl2scheme'
+    input_spec = FSL2SchemeInputSpec
+    output_spec = FSL2SchemeOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['scheme'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.bvec_file)
+        return name + '.scheme'
+
+
+class VtkStreamlinesInputSpec(StdOutCommandLineInputSpec):
+    inputmodel = traits.Enum(
+        'raw',
+        'voxels',
+        argstr='-inputmodel %s',
+        desc='input model type (raw or voxels)',
+        usedefault=True)
+
+    in_file = File(
+        exists=True,
+        argstr=' < %s',
+        mandatory=True,
+        position=-2,
+        desc='data file')
+
+    voxeldims = traits.List(
+        traits.Int,
+        desc='voxel dimensions in mm',
+        argstr='-voxeldims %s',
+        minlen=3,
+        maxlen=3,
+        position=4,
+        units='mm')
+
+    seed_file = File(
+        exists=False,
+        argstr='-seedfile %s',
+        position=1,
+        desc='image containing seed points')
+
+    target_file = File(
+        exists=False,
+        argstr='-targetfile %s',
+        position=2,
+        desc='image containing integer-valued target regions')
+
+    scalar_file = File(
+        exists=False,
+        argstr='-scalarfile %s',
+        position=3,
+        desc='image that is in the same physical space as the tracts')
+
+    colourorient = traits.Bool(
+        argstr='-colourorient',
+        desc="Each point on the streamline is coloured by the local orientation.")
+    interpolatescalars = traits.Bool(
+        argstr='-interpolatescalars',
+        desc=("the scalar value at each point on the streamline is calculated "
+              "by trilinear interpolation"))
+    interpolate = traits.Bool(
+        argstr='-interpolate',
+        desc=("the scalar value at each point on the streamline is calculated "
+              "by trilinear interpolation"))
+
+
+class VtkStreamlinesOutputSpec(TraitedSpec):
+    vtk = File(exists=True, desc='Streamlines in VTK format')
+
+
+class VtkStreamlines(StdOutCommandLine):
+    """
+    Use vtkstreamlines to convert raw or voxel format streamlines to VTK polydata
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> vtk = cmon.VtkStreamlines()
+    >>> vtk.inputs.in_file = 'tract_data.Bfloat'
+    >>> vtk.inputs.voxeldims = [1,1,1]
+    >>> vtk.run()  # doctest: +SKIP
+    """
+    _cmd = 'vtkstreamlines'
+    input_spec = VtkStreamlinesInputSpec
+    output_spec = VtkStreamlinesOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['vtk'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '.vtk'
+
+
+class ProcStreamlinesInputSpec(StdOutCommandLineInputSpec):
+    inputmodel = traits.Enum(
+        'raw',
+        'voxels',
+        argstr='-inputmodel %s',
+        desc='input model type (raw or voxels)',
+        usedefault=True)
+
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        position=1,
+        desc='data file')
+
+    maxtractpoints = traits.Int(
+        argstr='-maxtractpoints %d',
+        units='NA',
+        desc="maximum number of tract points")
+    mintractpoints = traits.Int(
+        argstr='-mintractpoints %d',
+        units='NA',
+        desc="minimum number of tract points")
+    maxtractlength = traits.Int(
+        argstr='-maxtractlength %d',
+        units='mm',
+        desc="maximum length of tracts")
+    mintractlength = traits.Int(
+        argstr='-mintractlength %d',
+        units='mm',
+        desc="minimum length of tracts")
+    datadims =
traits.List( + traits.Int, + desc='data dimensions in voxels', + argstr='-datadims %s', + minlen=3, + maxlen=3, + units='voxels') + voxeldims = traits.List( + traits.Int, + desc='voxel dimensions in mm', + argstr='-voxeldims %s', + minlen=3, + maxlen=3, + units='mm') + seedpointmm = traits.List( + traits.Int, + desc='The coordinates of a single seed point for tractography in mm', + argstr='-seedpointmm %s', + minlen=3, + maxlen=3, + units='mm') + seedpointvox = traits.List( + traits.Int, + desc= + 'The coordinates of a single seed point for tractography in voxels', + argstr='-seedpointvox %s', + minlen=3, + maxlen=3, + units='voxels') + seedfile = File( + exists=False, + argstr='-seedfile %s', + desc='Image Containing Seed Points') + regionindex = traits.Int( + argstr='-regionindex %d', + units='mm', + desc="index of specific region to process") + iterations = traits.Float( + argstr='-iterations %d', + units='NA', + desc= + "Number of streamlines generated for each seed. Not required when outputting streamlines, but needed to create PICo images. The default is 1 if the output is streamlines, and 5000 if the output is connection probability images." + ) + targetfile = File( + exists=False, + argstr='-targetfile %s', + desc='Image containing target volumes.') + allowmultitargets = traits.Bool( + argstr='-allowmultitargets', + desc="Allows streamlines to connect to multiple target volumes.") + directional = traits.List( + traits.Int, + desc= + 'Splits the streamlines at the seed point and computes separate connection probabilities for each segment. Streamline segments are grouped according to their dot product with the vector (X, Y, Z). The ideal vector will be tangential to the streamline trajectory at the seed, such that the streamline projects from the seed along (X, Y, Z) and -(X, Y, Z). However, it is only necessary for the streamline trajectory to not be orthogonal to (X, Y, Z).', + argstr='-directional %s', + minlen=3, + maxlen=3, + units='NA') + waypointfile = File( + exists=False, + argstr='-waypointfile %s', + desc= + 'Image containing waypoints. Waypoints are defined as regions of the image with the same intensity, where 0 is background and any value > 0 is a waypoint.' + ) + truncateloops = traits.Bool( + argstr='-truncateloops', + desc= + "This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, it is truncated upon a second entry to the waypoint." + ) + discardloops = traits.Bool( + argstr='-discardloops', + desc= + "This option allows streamlines to enter a waypoint exactly once. After the streamline leaves the waypoint, the entire streamline is discarded upon a second entry to the waypoint." + ) + exclusionfile = File( + exists=False, + argstr='-exclusionfile %s', + desc= + 'Image containing exclusion ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.' + ) + truncateinexclusion = traits.Bool( + argstr='-truncateinexclusion', + desc="Retain segments of a streamline before entry to an exclusion ROI." + ) + + endpointfile = File( + exists=False, + argstr='-endpointfile %s', + desc= + 'Image containing endpoint ROIs. This should be an Analyze 7.5 header / image file.hdr and file.img.' + ) + + resamplestepsize = traits.Float( + argstr='-resamplestepsize %d', + units='NA', + desc= + "Each point on a streamline is tested for entry into target, exclusion or waypoint volumes. 
If the length between points on a tract is not much smaller than the voxel length, then streamlines may pass through part of a voxel without being counted. To avoid this, the program resamples streamlines such that the step size is one tenth of the smallest voxel dimension in the image. This increases the size of raw or oogl streamline output and incurs some performance penalty. The resample resolution can be controlled with this option or disabled altogether by passing a negative step size or by passing the -noresample option." + ) + + noresample = traits.Bool( + argstr='-noresample', + desc= + "Disables resampling of input streamlines. Resampling is automatically disabled if the input model is voxels." + ) + + outputtracts = traits.Bool( + argstr='-outputtracts', + desc="Output streamlines in raw binary format.") + + outputroot = File( + exists=False, + argstr='-outputroot %s', + desc='Prepended onto all output file names.') + + gzip = traits.Bool( + argstr='-gzip', desc="save the output image in gzip format") + outputcp = traits.Bool( + argstr='-outputcp', + desc="output the connection probability map (Analyze image, float)", + requires=['outputroot', 'seedfile']) + outputsc = traits.Bool( + argstr='-outputsc', + desc="output the connection probability map (raw streamlines, int)", + requires=['outputroot', 'seedfile']) + outputacm = traits.Bool( + argstr='-outputacm', + desc= + "output all tracts in a single connection probability map (Analyze image)", + requires=['outputroot', 'seedfile']) + outputcbs = traits.Bool( + argstr='-outputcbs', + desc= + "outputs connectivity-based segmentation maps; requires target outputfile", + requires=['outputroot', 'targetfile', 'seedfile']) + + +class ProcStreamlinesOutputSpec(TraitedSpec): + proc = File(exists=True, desc='Processed Streamlines') + outputroot_files = OutputMultiPath(File(exists=True)) + + +class ProcStreamlines(StdOutCommandLine): + """ + Process streamline data + + This program does post-processing of streamline output from track. It can either output streamlines or connection probability maps. 
+ * http://web4.cs.ucl.ac.uk/research/medic/camino/pmwiki/pmwiki.php?n=Man.procstreamlines + + Examples + -------- + + >>> import nipype.interfaces.camino as cmon + >>> proc = cmon.ProcStreamlines() + >>> proc.inputs.in_file = 'tract_data.Bfloat' + >>> proc.run() # doctest: +SKIP + """ + _cmd = 'procstreamlines' + input_spec = ProcStreamlinesInputSpec + output_spec = ProcStreamlinesOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'outputroot': + return spec.argstr % self._get_actual_outputroot(value) + return super(ProcStreamlines, self)._format_arg(name, spec, value) + + def _run_interface(self, runtime): + outputroot = self.inputs.outputroot + if isdefined(outputroot): + actual_outputroot = self._get_actual_outputroot(outputroot) + base, filename, ext = split_filename(actual_outputroot) + if not os.path.exists(base): + os.makedirs(base) + new_runtime = super(ProcStreamlines, self)._run_interface(runtime) + self.outputroot_files = glob.glob( + os.path.join(os.getcwd(), actual_outputroot + '*')) + return new_runtime + else: + new_runtime = super(ProcStreamlines, self)._run_interface(runtime) + return new_runtime + + def _get_actual_outputroot(self, outputroot): + actual_outputroot = os.path.join('procstream_outfiles', outputroot) + return actual_outputroot + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['proc'] = os.path.abspath(self._gen_outfilename()) + outputs['outputroot_files'] = self.outputroot_files + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_proc' + + +class TractShredderInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=-2, + desc='tract file') + + offset = traits.Int( + argstr='%d', + units='NA', + desc='initial offset of offset tracts', + position=1) + + bunchsize = traits.Int( + argstr='%d', + units='NA', + desc='reads and outputs a group of bunchsize tracts', + position=2) + + space = traits.Int( + argstr='%d', units='NA', desc='skips space tracts', position=3) + + +class TractShredderOutputSpec(TraitedSpec): + shredded = File(exists=True, desc='Shredded tract file') + + +class TractShredder(StdOutCommandLine): + """ + Extracts bunches of streamlines. + + tractshredder works in a similar way to shredder, but processes streamlines instead of scalar data. + The input is raw streamlines, in the format produced by track or procstreamlines. + + The program first makes an initial offset of offset tracts. It then reads and outputs a group of + bunchsize tracts, skips space tracts, and repeats until there is no more input. 
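Review note: the offset/bunchsize/space behaviour described above is the same skip-keep-skip pattern over whole tracts that shredder applies to bytes. A toy Python sketch of the selection logic (names ours, grouping over any indexable sequence of tracts): ::

    # Illustration of tractshredder's selection pattern, not its I/O.
    def shred_tracts(tracts, offset, bunchsize, space):
        # Skip `offset` tracts, then alternately yield `bunchsize` tracts
        # and skip `space` tracts until the input is exhausted.
        i = offset
        while i < len(tracts):
            for tract in tracts[i:i + bunchsize]:
                yield tract
            i += bunchsize + space

    # offset=0, bunchsize=1, space=2 keeps every third tract: 0, 3, 6, ...
    assert list(shred_tracts(list(range(10)), 0, 1, 2)) == [0, 3, 6, 9]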
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> shred = cmon.TractShredder()
+    >>> shred.inputs.in_file = 'tract_data.Bfloat'
+    >>> shred.inputs.offset = 0
+    >>> shred.inputs.bunchsize = 1
+    >>> shred.inputs.space = 2
+    >>> shred.run()  # doctest: +SKIP
+    """
+    _cmd = 'tractshredder'
+    input_spec = TractShredderInputSpec
+    output_spec = TractShredderOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['shredded'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + "_shredded"
+
+
+class DT2NIfTIInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        position=1,
+        desc='tensor file')
+
+    output_root = File(
+        argstr='-outputroot %s',
+        position=2,
+        genfile=True,
+        desc='filename root prepended onto the names of three output files.')
+
+    header_file = File(
+        exists=True,
+        argstr='-header %s',
+        mandatory=True,
+        position=3,
+        desc='A Nifti .nii or .hdr file containing the header information')
+
+
+class DT2NIfTIOutputSpec(TraitedSpec):
+    dt = File(exists=True, desc='diffusion tensors in NIfTI format')
+
+    exitcode = File(
+        exists=True,
+        desc='exit codes from Camino reconstruction in NIfTI format')
+
+    lns0 = File(
+        exists=True,
+        desc='estimated lns0 from Camino reconstruction in NIfTI format')
+
+
+class DT2NIfTI(CommandLine):
+    """
+    Converts camino tensor data to NIfTI format
+
+    Reads Camino diffusion tensors, and converts them to NIFTI format as three .nii files.
+    """
+    _cmd = 'dt2nii'
+    input_spec = DT2NIfTIInputSpec
+    output_spec = DT2NIfTIOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        output_root = self._gen_outputroot()
+        outputs["dt"] = os.path.abspath(output_root + "dt.nii")
+        outputs["exitcode"] = os.path.abspath(output_root + "exitcode.nii")
+        outputs["lns0"] = os.path.abspath(output_root + "lns0.nii")
+        return outputs
+
+    def _gen_outfilename(self):
+        return self._gen_outputroot()
+
+    def _gen_outputroot(self):
+        output_root = self.inputs.output_root
+        if not isdefined(output_root):
+            output_root = self._gen_filename('output_root')
+        return output_root
+
+    def _gen_filename(self, name):
+        if name == 'output_root':
+            _, filename, _ = split_filename(self.inputs.in_file)
+            filename = filename + "_"
+        return filename
+
+
+class NIfTIDT2CaminoInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        position=1,
+        desc=('A NIFTI-1 dataset containing diffusion tensors. The tensors are assumed to be '
+              'in lower-triangular order as specified by the NIFTI standard for the storage of '
+              'symmetric matrices. This file should be either a .nii or a .hdr file.'))
+
+    s0_file = File(
+        argstr='-s0 %s',
+        exists=True,
+        desc=('File containing the unweighted signal for each voxel, may be a raw binary '
+              'file (specify type with -inputdatatype) or a supported image file.'))
+
+    lns0_file = File(
+        argstr='-lns0 %s',
+        exists=True,
+        desc=('File containing the log of the unweighted signal for each voxel, may be a '
+              'raw binary file (specify type with -inputdatatype) or a supported image file.'
+ ) + + bgmask = File( + argstr='-bgmask %s', + exists=True, + desc= + 'Binary valued brain / background segmentation, may be a raw binary file ' + '(specify type with -maskdatatype) or a supported image file.') + + scaleslope = traits.Float( + argstr='-scaleslope %s', + desc='A value v in the diffusion tensor is scaled to v * s + i. This is ' + 'applied after any scaling specified by the input image. Default is 1.0.' + ) + + scaleinter = traits.Float( + argstr='-scaleinter %s', + desc='A value v in the diffusion tensor is scaled to v * s + i. This is ' + 'applied after any scaling specified by the input image. Default is 0.0.' + ) + + uppertriangular = traits.Bool( + argstr='-uppertriangular %s', + desc='Specifies input in upper-triangular (VTK style) order.') + + +class NIfTIDT2CaminoOutputSpec(TraitedSpec): + out_file = File(desc='diffusion tensors data in Camino format') + + +class NIfTIDT2Camino(CommandLine): + """ + Converts NIFTI-1 diffusion tensors to Camino format. The program reads the + NIFTI header but does not apply any spatial transformations to the data. The + NIFTI intensity scaling parameters are applied. + + The output is the tensors in Camino voxel ordering: [exit, ln(S0), dxx, dxy, + dxz, dyy, dyz, dzz]. + + The exit code is set to 0 unless a background mask is supplied, in which case + the code is 0 in brain voxels and -1 in background voxels. + + The value of ln(S0) in the output is taken from a file if one is supplied, + otherwise it is set to 0. + + NOTE FOR FSL USERS - FSL's dtifit can output NIFTI tensors, but they are not + stored in the usual way (which is using NIFTI_INTENT_SYMMATRIX). FSL's + tensors follow the ITK / VTK "upper-triangular" convention, so you will need + to use the -uppertriangular option to convert these correctly. + + """ + _cmd = 'niftidt2camino' + input_spec = NIfTIDT2CaminoInputSpec + output_spec = NIfTIDT2CaminoOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["out_file"] = self._gen_filename('out_file') + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + _, filename, _ = split_filename(self.inputs.in_file) + return filename + + +class AnalyzeHeaderInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=1, + desc='Tensor-fitted data filename') + + scheme_file = File( + exists=True, + argstr='%s', + position=2, + desc=('Camino scheme file (b values / vectors, ' + 'see camino.fsl2scheme)')) + + readheader = File( + exists=True, + argstr='-readheader %s', + position=3, + desc=('Reads header information from file and prints to ' + 'stdout. If this option is not specified, then the ' + 'program writes a header based on the other ' + 'arguments.')) + + printimagedims = File( + exists=True, + argstr='-printimagedims %s', + position=3, + desc=('Prints image data and voxel dimensions as ' + 'Camino arguments and exits.')) + + # How do we implement both file and enum (for the program) in one argument? + # Is this option useful anyway? + # -printprogargs + # Prints data dimension (and type, if relevant) arguments for a specific + # Camino program, where prog is one of shredder, scanner2voxel, + # vcthreshselect, pdview, track. 
+    printprogargs = File(
+        exists=True,
+        argstr='-printprogargs %s',
+        position=3,
+        desc=('Prints data dimension (and type, if relevant) '
+              'arguments for a specific Camino program, where '
+              'prog is one of shredder, scanner2voxel, '
+              'vcthreshselect, pdview, track.'))
+
+    printintelbyteorder = File(
+        exists=True,
+        argstr='-printintelbyteorder %s',
+        position=3,
+        desc=('Prints 1 if the header is little-endian, '
+              '0 otherwise.'))
+
+    printbigendian = File(
+        exists=True,
+        argstr='-printbigendian %s',
+        position=3,
+        desc=('Prints 1 if the header is big-endian, 0 '
+              'otherwise.'))
+
+    initfromheader = File(
+        exists=True,
+        argstr='-initfromheader %s',
+        position=3,
+        desc=('Reads header information from file and '
+              'initializes a new header with the values read '
+              'from the file. You may replace any '
+              'combination of fields in the new header by '
+              'specifying subsequent options.'))
+
+    data_dims = traits.List(
+        traits.Int,
+        desc='data dimensions in voxels',
+        argstr='-datadims %s',
+        minlen=3,
+        maxlen=3,
+        units='voxels')
+
+    voxel_dims = traits.List(
+        traits.Float,
+        desc='voxel dimensions in mm',
+        argstr='-voxeldims %s',
+        minlen=3,
+        maxlen=3,
+        units='mm')
+
+    centre = traits.List(
+        traits.Int,
+        argstr='-centre %s',
+        minlen=3,
+        maxlen=3,
+        units='mm',
+        desc=('Voxel specifying origin of Talairach '
+              'coordinate system for SPM, default [0 0 0].'))
+
+    picoseed = traits.List(
+        traits.Int,
+        argstr='-picoseed %s',
+        minlen=3,
+        maxlen=3,
+        desc=('Voxel specifying the seed (for PICo maps), '
+              'default [0 0 0].'),
+        units='mm')
+
+    nimages = traits.Int(
+        argstr='-nimages %d',
+        units='NA',
+        desc="Number of images in the img file. Default 1.")
+
+    datatype = traits.Enum(
+        'byte',
+        'char',
+        '[u]short',
+        '[u]int',
+        'float',
+        'complex',
+        'double',
+        argstr='-datatype %s',
+        desc=('The char datatype is 8 bit (not the 16 bit '
+              'char of Java), as specified by the Analyze '
+              '7.5 standard. The byte, ushort and uint '
+              'types are not part of the Analyze '
+              'specification but are supported by SPM.'),
+        mandatory=True)
+
+    offset = traits.Int(
+        argstr='-offset %d',
+        units='NA',
+        desc=('According to the Analyze 7.5 standard, this is '
+              'the byte offset in the .img file at which '
+              'voxels start. This value can be negative to '
+              'specify that the absolute value is applied for '
+              'every image in the file.'))
+
+    greylevels = traits.List(
+        traits.Int,
+        argstr='-gl %s',
+        minlen=2,
+        maxlen=2,
+        desc=('Minimum and maximum greylevels. Stored as '
+              'shorts in the header.'),
+        units='NA')
+
+    scaleslope = traits.Float(
+        argstr='-scaleslope %d',
+        units='NA',
+        desc=('Intensities in the image are scaled by '
+              'this factor by SPM and MRICro. Default is '
+              '1.0.'))
+
+    scaleinter = traits.Float(
+        argstr='-scaleinter %d',
+        units='NA',
+        desc=('Constant to add to the image intensities. '
+              'Used by SPM and MRIcro.'))
+
+    description = traits.String(
+        argstr='-description %s',
+        desc=('Short description - No spaces, max '
+              'length 79 bytes. Will be null '
+              'terminated automatically.'))
+
+    intelbyteorder = traits.Bool(
+        argstr='-intelbyteorder',
+        desc=("Write header in intel byte order "
+              "(little-endian)."))
+
+    networkbyteorder = traits.Bool(
+        argstr='-networkbyteorder',
+        desc=("Write header in network byte order "
+              "(big-endian).
This is the default " + "for new headers.")) + + +class AnalyzeHeaderOutputSpec(TraitedSpec): + header = File(exists=True, desc='Analyze header') + + +class AnalyzeHeader(StdOutCommandLine): + """ + Create or read an Analyze 7.5 header file. + + Analyze image header, provides support for the most common header fields. + Some fields, such as patient_id, are not currently supported. The program allows + three nonstandard options: the field image_dimension.funused1 is the image scale. + The intensity of each pixel in the associated .img file is (image value from file) * scale. + Also, the origin of the Talairach coordinates (midline of the anterior commisure) are encoded + in the field data_history.originator. These changes are included for compatibility with SPM. + + All headers written with this program are big endian by default. + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> hdr = cmon.AnalyzeHeader() + >>> hdr.inputs.in_file = 'tensor_fitted_data.Bdouble' + >>> hdr.inputs.scheme_file = 'A.scheme' + >>> hdr.inputs.data_dims = [256,256,256] + >>> hdr.inputs.voxel_dims = [1,1,1] + >>> hdr.run() # doctest: +SKIP + """ + _cmd = 'analyzeheader' + input_spec = AnalyzeHeaderInputSpec + output_spec = AnalyzeHeaderOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['header'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + ".hdr" + + +class ShredderInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=-2, + desc='raw binary data file') + + offset = traits.Int( + argstr='%d', + units='NA', + desc='initial offset of offset bytes', + position=1) + + chunksize = traits.Int( + argstr='%d', + units='NA', + desc='reads and outputs a chunk of chunksize bytes', + position=2) + + space = traits.Int( + argstr='%d', units='NA', desc='skips space bytes', position=3) + + +class ShredderOutputSpec(TraitedSpec): + shredded = File(exists=True, desc='Shredded binary data file') + + +class Shredder(StdOutCommandLine): + """ + Extracts periodic chunks from a data stream. + + Shredder makes an initial offset of offset bytes. It then reads and outputs + chunksize bytes, skips space bytes, and repeats until there is no more input. + + If the chunksize is negative, chunks of size chunksize are read and the + byte ordering of each chunk is reversed. The whole chunk will be reversed, so + the chunk must be the same size as the data type, otherwise the order of the + values in the chunk, as well as their endianness, will be reversed. 
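+
+    For example, an offset of 0, a chunksize of -8 and a space of 0 would
+    reverse the byte ordering of a stream of big-endian eight-byte doubles
+    (an illustrative sketch of the behaviour described above, not taken from
+    the Camino documentation).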
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.camino as cam
+    >>> shred = cam.Shredder()
+    >>> shred.inputs.in_file = 'SubjectA.Bfloat'
+    >>> shred.inputs.offset = 0
+    >>> shred.inputs.chunksize = 1
+    >>> shred.inputs.space = 2
+    >>> shred.run()                  # doctest: +SKIP
+    """
+    _cmd = 'shredder'
+    input_spec = ShredderInputSpec
+    output_spec = ShredderOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        # Key must match the 'shredded' trait defined in ShredderOutputSpec
+        outputs['shredded'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + "_shredded"
diff --git a/nipype/interfaces/camino/dti.py b/nipype/interfaces/camino/dti.py
new file mode 100644
index 0000000000..b32b9dc528
--- /dev/null
+++ b/nipype/interfaces/camino/dti.py
@@ -0,0 +1,1516 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+
+from ...utils.filemanip import split_filename
+from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
+                    File, Directory, StdOutCommandLine,
+                    StdOutCommandLineInputSpec, isdefined, InputMultiPath)
+
+
+class DTIFitInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=1,
+        desc='voxel-order data filename')
+
+    bgmask = File(
+        argstr='-bgmask %s',
+        exists=True,
+        desc=('Provides the name of a file containing a background mask '
+              'computed using, for example, FSL\'s bet2 program. The mask '
+              'file contains zero in background voxels and non-zero in '
+              'foreground.'))
+
+    scheme_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=2,
+        desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)')
+
+    non_linear = traits.Bool(
+        argstr='-nonlinear',
+        position=3,
+        desc=('Use non-linear fitting instead of the default linear '
+              'regression to the log measurements.'))
+
+
+class DTIFitOutputSpec(TraitedSpec):
+    tensor_fitted = File(
+        exists=True, desc='path/name of 4D volume in voxel order')
+
+
+class DTIFit(StdOutCommandLine):
+    """
+    Reads diffusion MRI data, acquired using the acquisition scheme detailed
+    in the scheme file, from the data file.
+
+    The data file stores the diffusion MRI data in voxel order with the
+    measurements stored in big-endian format and ordered as in the scheme file.
+    The default input data type is four-byte float. The default output data type
+    is eight-byte double. See modelfit and camino for the format of the data
+    file and scheme file. Non-linear fitting, instead of the default linear
+    regression to the log measurements, can be selected with the non_linear
+    option.
+
+    The program fits the diffusion tensor to each voxel and outputs the results,
+    in voxel order and as big-endian eight-byte doubles, to the standard output.
+    The program outputs eight values in each voxel:
+    [exit code, ln(S(0)), D_xx, D_xy, D_xz, D_yy, D_yz, D_zz].
+    An exit code of zero indicates no problems. For a list of other exit codes,
+    see modelfit(1). The entry S(0) is an estimate of the signal at q=0.
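+
+    The fitted tensors can be inspected from Python; a minimal sketch (not
+    part of the interface, assuming numpy and a dataset small enough to hold
+    in memory; the output layout follows the description above):
+
+    >>> import numpy as np                                 # doctest: +SKIP
+    >>> dt = np.fromfile('tensor_fitted_data_DT.Bdouble',
+    ...                  dtype='>f8').reshape(-1, 8)       # doctest: +SKIP
+    >>> exit_code, log_s0 = dt[:, 0], dt[:, 1]             # doctest: +SKIP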
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> fit = cmon.DTIFit()
+    >>> fit.inputs.scheme_file = 'A.scheme'
+    >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble'
+    >>> fit.run()                  # doctest: +SKIP
+    """
+    _cmd = 'dtfit'
+    input_spec = DTIFitInputSpec
+    output_spec = DTIFitOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['tensor_fitted'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_DT.Bdouble'
+
+
+class DTMetricInputSpec(CommandLineInputSpec):
+    eigen_data = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        desc='voxel-order data filename')
+
+    metric = traits.Enum(
+        'fa',
+        'md',
+        'rd',
+        'l1',
+        'l2',
+        'l3',
+        'tr',
+        'ra',
+        '2dfa',
+        'cl',
+        'cp',
+        'cs',
+        argstr='-stat %s',
+        mandatory=True,
+        desc=('Specifies the metric to compute. Possible choices are: '
+              '"fa", "md", "rd", "l1", "l2", "l3", "tr", "ra", "2dfa", '
+              '"cl", "cp" or "cs".'))
+
+    inputdatatype = traits.Enum(
+        'double',
+        'float',
+        'long',
+        'int',
+        'short',
+        'char',
+        argstr='-inputdatatype %s',
+        usedefault=True,
+        desc=('Specifies the data type of the input data. '
+              'The data type can be any of the following strings: '
+              '"char", "short", "int", "long", "float" or "double". '
+              'Default is double data type.'))
+
+    outputdatatype = traits.Enum(
+        'double',
+        'float',
+        'long',
+        'int',
+        'short',
+        'char',
+        argstr='-outputdatatype %s',
+        usedefault=True,
+        desc=('Specifies the data type of the output data. '
+              'The data type can be any of the following strings: '
+              '"char", "short", "int", "long", "float" or "double". '
+              'Default is double data type.'))
+
+    data_header = File(
+        argstr='-header %s',
+        exists=True,
+        desc=(
+            'A Nifti .nii or .nii.gz file containing the header information. '
+            'Usually this will be the header of the raw data file from which '
+            'the diffusion tensors were reconstructed.'))
+
+    outputfile = File(
+        argstr='-outputfile %s',
+        genfile=True,
+        desc=('Output name. Output will be a .nii.gz file if data_header is '
+              'provided and in voxel order with outputdatatype datatype '
+              '(default: double) otherwise.'))
+
+
+class DTMetricOutputSpec(TraitedSpec):
+    metric_stats = File(
+        exists=True, desc='Diffusion Tensor statistics of the chosen metric')
+
+
+class DTMetric(CommandLine):
+    """
+    Computes tensor metric statistics based on the eigenvalues l1 >= l2 >= l3
+    typically obtained from ComputeEigensystem.
+
+    The full list of statistics is:
+
+    - <cl> = (l1 - l2) / l1 , a measure of linearity
+    - <cp> = (l2 - l3) / l1 , a measure of planarity
+    - <cs> = l3 / l1 , a measure of isotropy
+      with: cl + cp + cs = 1
+    - <l1> = first eigenvalue
+    - <l2> = second eigenvalue
+    - <l3> = third eigenvalue
+    - <tr> = l1 + l2 + l3
+    - <md> = tr / 3
+    - <rd> = (l2 + l3) / 2
+    - <fa> = fractional anisotropy (Basser et al, J Magn Reson B 1996)
+    - <ra> = relative anisotropy (Basser et al, J Magn Reson B 1996)
+    - <2dfa> = 2D FA of the two minor eigenvalues l2 and l3,
+      i.e. sqrt( 2 * [(l2 - <l>)^2 + (l3 - <l>)^2] / (l2^2 + l3^2) )
+      with: <l> = (l2 + l3) / 2
+
+
+    Example
+    -------
+    Compute the CP planar metric as float data type.
+
+    >>> import nipype.interfaces.camino as cam
+    >>> dtmetric = cam.DTMetric()
+    >>> dtmetric.inputs.eigen_data = 'dteig.Bdouble'
+    >>> dtmetric.inputs.metric = 'cp'
+    >>> dtmetric.inputs.outputdatatype = 'float'
+    >>> dtmetric.run()                 # doctest: +SKIP
+    """
+    _cmd = 'dtshape'
+    input_spec = DTMetricInputSpec
+    output_spec = DTMetricOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['metric_stats'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        return self._gen_outputfile()
+
+    def _gen_outputfile(self):
+        outputfile = self.inputs.outputfile
+        if not isdefined(outputfile):
+            outputfile = self._gen_filename('outputfile')
+        return outputfile
+
+    def _gen_filename(self, name):
+        if name == 'outputfile':
+            _, name, _ = split_filename(self.inputs.eigen_data)
+            metric = self.inputs.metric
+            datatype = self.inputs.outputdatatype
+            if isdefined(self.inputs.data_header):
+                filename = name + '_' + metric + '.nii.gz'
+            else:
+                filename = name + '_' + metric + '.B' + datatype
+            return filename
+
+
+class ModelFitInputSpec(StdOutCommandLineInputSpec):
+    def _gen_model_options():  # @NoSelf
+        """
+        Generate all possible permutations of <multi-tensor> <single-tensor>
+        options
+        """
+
+        single_tensor = [
+            'dt', 'restore', 'algdt', 'nldt_pos', 'nldt', 'ldt_wtd'
+        ]
+        multi_tensor = [
+            'cylcyl', 'cylcyl_eq', 'pospos', 'pospos_eq', 'poscyl',
+            'poscyl_eq', 'cylcylcyl', 'cylcylcyl_eq', 'pospospos',
+            'pospospos_eq', 'posposcyl', 'posposcyl_eq', 'poscylcyl',
+            'poscylcyl_eq'
+        ]
+        other = ['adc', 'ball_stick']
+
+        model_list = single_tensor
+        model_list.extend(other)
+        model_list.extend([
+            multi + ' ' + single for multi in multi_tensor
+            for single in single_tensor
+        ])
+        return model_list
+
+    model = traits.Enum(
+        _gen_model_options(),
+        argstr='-model %s',
+        mandatory=True,
+        desc='Specifies the model to be fit to the data.')
+
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        mandatory=True,
+        desc='voxel-order data filename')
+
+    inputdatatype = traits.Enum(
+        'float',
+        'char',
+        'short',
+        'int',
+        'long',
+        'double',
+        argstr='-inputdatatype %s',
+        desc=('Specifies the data type of the input file: "char", "short", '
+              '"int", "long", "float" or "double". The input file must have '
+              'BIG-ENDIAN ordering. By default, the input type is "float".'))
+
+    scheme_file = File(
+        exists=True,
+        argstr='-schemefile %s',
+        mandatory=True,
+        desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)')
+
+    outputfile = File(
+        argstr='-outputfile %s', desc='Filename of the output file.')
+
+    outlier = File(
+        argstr='-outliermap %s',
+        exists=True,
+        desc=('Specifies the name of the file to contain the outlier map '
+              'generated by the RESTORE algorithm.'))
+
+    noisemap = File(
+        argstr='-noisemap %s',
+        exists=True,
+        desc=('Specifies the name of the file to contain the estimated noise '
+              'variance on the diffusion-weighted signal, generated by a '
+              'weighted tensor fit. The data type of this file is big-endian '
+              'double.'))
+
+    residualmap = File(
+        argstr='-residualmap %s',
+        exists=True,
+        desc=('Specifies the name of the file to contain the weighted '
+              'residual errors after computing a weighted linear tensor fit. '
+              'One value is produced per measurement, in voxel order. The '
+              'data type of this file is big-endian double. Images of the '
+              'residuals for each measurement can be extracted with '
+              'shredder.'))
+
+    sigma = traits.Float(
+        argstr='-sigma %G',
+        desc=('Specifies the standard deviation of the noise in the data. '
+              'Required by the RESTORE algorithm.'))
+
+    bgthresh = traits.Float(
+        argstr='-bgthresh %G',
+        desc=('Sets a threshold on the average q=0 measurement to separate '
+              'foreground and background. The program does not process '
+              'background voxels, but outputs the same number of values in '
+              'background voxels and foreground voxels. Each value is zero '
+              'in background voxels apart from the exit code which is -1.'))
+
+    bgmask = File(
+        argstr='-bgmask %s',
+        exists=True,
+        desc=('Provides the name of a file containing a background mask '
+              'computed using, for example, FSL\'s bet2 program. The mask '
+              'file contains zero in background voxels and non-zero in '
+              'foreground.'))
+
+    cfthresh = traits.Float(
+        argstr='-csfthresh %G',
+        desc=('Sets a threshold on the average q=0 measurement to determine '
+              'which voxels are CSF. This program does not treat CSF voxels '
+              'any differently from other voxels.'))
+
+    fixedmodq = traits.List(
+        traits.Float,
+        argstr='-fixedmod %s',
+        minlen=4,
+        maxlen=4,
+        desc=('Specifies a spherical acquisition scheme with M measurements '
+              'with q=0 and N measurements with |q|=Q and diffusion time '
+              'tau. The N measurements with |q|=Q have unique directions. '
+              'The program reads in the directions from the files in '
+              'directory PointSets.'))
+
+    fixedbvalue = traits.List(
+        traits.Float,
+        argstr='-fixedbvalue %s',
+        minlen=3,
+        maxlen=3,
+        desc=('As above, but specifies the scheme as <M> <N> <b>. The '
+              'resulting scheme is the same whether you specify b directly '
+              'or indirectly using -fixedmodq.'))
+
+    tau = traits.Float(
+        argstr='-tau %G',
+        desc=('Sets the diffusion time separately. This overrides the '
+              'diffusion time specified in a scheme file or by a scheme '
+              'index for both the acquisition scheme and in the data '
+              'synthesis.'))
+
+
+class ModelFitOutputSpec(TraitedSpec):
+    fitted_data = File(
+        exists=True, desc='output file of 4D volume in voxel order')
+
+
+class ModelFit(StdOutCommandLine):
+    """
+    Fits models of the spin-displacement density to diffusion MRI measurements.
+
+    This is an interface to various model fitting routines for diffusion MRI data that
+    fit models of the spin-displacement density function. In particular, it will fit the
+    diffusion tensor to a set of measurements as well as various other models including
+    two- or three-tensor models. The program can read input data from a file or can
+    generate synthetic data using various test functions for testing and simulations.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> fit = cmon.ModelFit()
+    >>> fit.inputs.model = 'dt'
+    >>> fit.inputs.scheme_file = 'A.scheme'
+    >>> fit.inputs.in_file = 'tensor_fitted_data.Bdouble'
+    >>> fit.run()                  # doctest: +SKIP
+    """
+    _cmd = 'modelfit'
+    input_spec = ModelFitInputSpec
+    output_spec = ModelFitOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['fitted_data'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_fit.Bdouble'
+
+
+class DTLUTGenInputSpec(StdOutCommandLineInputSpec):
+    lrange = traits.List(
+        traits.Float,
+        desc='Index to one-tensor LUTs. This is the ratio L1/L3 and L2/L3. '
+        'The LUT is square, with half the values calculated (because L2/L3 '
+        'cannot be less than L1/L3 by definition). The minimum must be >= 1. '
+        'For comparison, a ratio L1/L3 = 10 with L2/L3 = 1 corresponds to an '
+        'FA of 0.891, and L1/L3 = 15 with L2/L3 = 1 corresponds to an FA of 0.929. 
The default range is 1 to 10.', + argstr='-lrange %s', + minlen=2, + maxlen=2, + position=1, + units='NA') + + frange = traits.List( + traits.Float, + desc='Index to two-tensor LUTs. This is the fractional anisotropy' + ' of the two tensors. The default is 0.3 to 0.94', + argstr='-frange %s', + minlen=2, + maxlen=2, + position=1, + units='NA') + + step = traits.Float( + argstr='-step %f', + units='NA', + desc='Distance between points in the LUT.' + 'For example, if lrange is 1 to 10 and the step is 0.1, LUT entries will be computed ' + 'at L1 / L3 = 1, 1.1, 1.2 ... 10.0 and at L2 / L3 = 1.0, 1.1 ... L1 / L3.' + 'For single tensor LUTs, the default step is 0.2, for two-tensor LUTs it is 0.02.' + ) + + samples = traits.Int( + argstr='-samples %d', + units='NA', + desc= + 'The number of synthetic measurements to generate at each point in the LUT. The default is 2000.' + ) + + snr = traits.Float( + argstr='-snr %f', + units='NA', + desc='The signal to noise ratio of the unweighted (q = 0) measurements.' + 'This should match the SNR (in white matter) of the images that the LUTs are used with.' + ) + + bingham = traits.Bool( + argstr='-bingham', + desc="Compute a LUT for the Bingham PDF. This is the default.") + + acg = traits.Bool(argstr='-acg', desc="Compute a LUT for the ACG PDF.") + + watson = traits.Bool( + argstr='-watson', desc="Compute a LUT for the Watson PDF.") + + inversion = traits.Int( + argstr='-inversion %d', + units='NA', + desc= + 'Index of the inversion to use. The default is 1 (linear single tensor inversion).' + ) + + trace = traits.Float( + argstr='-trace %G', + units='NA', + desc= + 'Trace of the diffusion tensor(s) used in the test function in the LUT generation. The default is 2100E-12 m^2 s^-1.' + ) + + scheme_file = File( + argstr='-schemefile %s', + mandatory=True, + position=2, + desc='The scheme file of the images to be processed using this LUT.') + + +class DTLUTGenOutputSpec(TraitedSpec): + dtLUT = File(exists=True, desc='Lookup Table') + + +class DTLUTGen(StdOutCommandLine): + """ + Calibrates the PDFs for PICo probabilistic tractography. + + This program needs to be run once for every acquisition scheme. + It outputs a lookup table that is used by the dtpicoparams program to find PICo PDF parameters for an image. + The default single tensor LUT contains parameters of the Bingham distribution and is generated by supplying + a scheme file and an estimated signal to noise in white matter regions of the (q=0) image. + The default inversion is linear (inversion index 1). + + Advanced users can control several options, including the extent and resolution of the LUT, + the inversion index, and the type of PDF. See dtlutgen(1) for details. 
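+
+    For two-tensor LUTs, supply frange instead of lrange; the step option
+    then defaults to 0.02 rather than 0.2 (see the option descriptions
+    above).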
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> dtl = cmon.DTLUTGen()
+    >>> dtl.inputs.snr = 16
+    >>> dtl.inputs.scheme_file = 'A.scheme'
+    >>> dtl.run()                 # doctest: +SKIP
+    """
+    _cmd = 'dtlutgen'
+    input_spec = DTLUTGenInputSpec
+    output_spec = DTLUTGenOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['dtLUT'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.scheme_file)
+        return name + '.dat'
+
+
+class PicoPDFsInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='< %s',
+        mandatory=True,
+        position=1,
+        desc='voxel-order data filename')
+
+    inputmodel = traits.Enum(
+        'dt',
+        'multitensor',
+        'pds',
+        argstr='-inputmodel %s',
+        position=2,
+        desc='input model type',
+        usedefault=True)
+
+    luts = InputMultiPath(
+        File(exists=True),
+        argstr='-luts %s',
+        mandatory=True,
+        desc='Files containing the lookup tables. '
+        'For tensor data, one lut must be specified for each type of '
+        'inversion used in the image (one-tensor, two-tensor, three-tensor). '
+        'For pds, the number of LUTs must match -numpds (it is acceptable to '
+        'use the same LUT several times - see example, above). '
+        'These LUTs may be generated with dtlutgen.')
+
+    pdf = traits.Enum(
+        'bingham',
+        'watson',
+        'acg',
+        argstr='-pdf %s',
+        position=4,
+        desc='Specifies the PDF to use. There are three choices: '
+        'watson - The Watson distribution. This distribution is '
+        'rotationally symmetric. '
+        'bingham - The Bingham distribution, which allows elliptical '
+        'probability density contours. '
+        'acg - The Angular Central Gaussian distribution, which also allows '
+        'elliptical probability density contours.',
+        usedefault=True)
+
+    directmap = traits.Bool(
+        argstr='-directmap',
+        desc='Only applicable when using pds as the inputmodel. Use direct '
+        'mapping between the eigenvalues and the distribution parameters '
+        'instead of the log of the eigenvalues.')
+
+    maxcomponents = traits.Int(
+        argstr='-maxcomponents %d',
+        units='NA',
+        desc='The maximum number of tensor components in a voxel (default 2) '
+        'for multitensor data. Currently, only the default is supported, '
+        'but future releases may allow the input of three-tensor data using '
+        'this option.')
+
+    numpds = traits.Int(
+        argstr='-numpds %d',
+        units='NA',
+        desc='The maximum number of PDs in a voxel (default 3) for PD data. '
+        'This option determines the size of the input and output voxels. '
+        'This means that the data file may be large enough to accommodate '
+        'three or more PDs, but does not mean that any of the voxels are '
+        'classified as containing three or more PDs.')
+
+
+class PicoPDFsOutputSpec(TraitedSpec):
+    pdfs = File(exists=True, desc='path/name of 4D volume in voxel order')
+
+
+class PicoPDFs(StdOutCommandLine):
+    """
+    Constructs a spherical PDF in each voxel for probabilistic tractography.
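+
+    The lookup tables passed via luts are typically generated beforehand
+    with DTLUTGen, one per inversion type present in the image (see the
+    luts option above).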
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> pdf = cmon.PicoPDFs()
+    >>> pdf.inputs.inputmodel = 'dt'
+    >>> pdf.inputs.luts = ['lut_file']
+    >>> pdf.inputs.in_file = 'voxel-order_data.Bfloat'
+    >>> pdf.run()                  # doctest: +SKIP
+    """
+    _cmd = 'picopdfs'
+    input_spec = PicoPDFsInputSpec
+    output_spec = PicoPDFsOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['pdfs'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_pdfs.Bdouble'
+
+
+class TrackInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='-inputfile %s',
+        position=1,
+        desc='input data file')
+
+    seed_file = File(
+        exists=True, argstr='-seedfile %s', position=2, desc='seed file')
+
+    inputmodel = traits.Enum(
+        'dt',
+        'multitensor',
+        'sfpeak',
+        'pico',
+        'repbs_dt',
+        'repbs_multitensor',
+        'ballstick',
+        'wildbs_dt',
+        'bayesdirac',
+        'bayesdirac_dt',
+        'bedpostx_dyad',
+        'bedpostx',
+        argstr='-inputmodel %s',
+        desc='input model type',
+        usedefault=True)
+
+    tracker = traits.Enum(
+        'fact',
+        'euler',
+        'rk4',
+        argstr='-tracker %s',
+        desc=("The tracking algorithm controls how streamlines are "
+              "generated from the data. The choices are: "
+              "- FACT, which follows the local fibre orientation "
+              "in each voxel. No interpolation is used. "
+              "- EULER, which uses a fixed step size along the "
+              "local fibre orientation. With nearest-neighbour "
+              "interpolation, this method may be very similar to "
+              "FACT, except that the step size is fixed, whereas "
+              "FACT steps extend to the boundary of the next voxel "
+              "(distance variable depending on the entry and exit "
+              "points to the voxel). "
+              "- RK4: Fourth-order Runge-Kutta method. The step "
+              "size is fixed, however the eventual direction of "
+              "the step is determined by taking and averaging a "
+              "series of partial steps."),
+        usedefault=True)
+
+    interpolator = traits.Enum(
+        'nn',
+        'prob_nn',
+        'linear',
+        argstr='-interpolator %s',
+        desc=("The interpolation algorithm determines how "
+              "the fiber orientation(s) are defined at a given "
+              "continuous point within the input image. "
+              "Interpolators are only used when the tracking "
+              "algorithm is not FACT. The choices are: "
+              "- NN: Nearest-neighbour interpolation, just "
+              "uses the local voxel data directly. "
+              "- PROB_NN: Probabilistic nearest-neighbor "
+              "interpolation, similar to the method proposed "
+              "by Behrens et al [Magnetic Resonance "
+              "in Medicine, 50:1077-1088, 2003]. The data "
+              "is not interpolated, but at each point we "
+              "randomly choose one of the 8 voxels "
+              "surrounding a point. The probability of choosing "
+              "a particular voxel is based on how close the "
+              "point is to the centre of that voxel. "
+              "- LINEAR: Linear interpolation of the vector "
+              "field containing the principal directions at "
+              "each point."))
+
+    stepsize = traits.Float(
+        argstr='-stepsize %f',
+        requires=['tracker'],
+        desc=('Step size for EULER and RK4 tracking. '
+              'The default is 1mm.'))
+
+    inputdatatype = traits.Enum(
+        'float', 'double', argstr='-inputdatatype %s', desc='input file type')
+
+    gzip = traits.Bool(
+        argstr='-gzip', desc="save the output image in gzip format")
+
+    maxcomponents = traits.Int(
+        argstr='-maxcomponents %d',
+        units='NA',
+        desc=("The maximum number of tensor components in a "
+              "voxel. This determines the size of the input "
+              "file and does not say anything about the "
+              "voxel classification. The default is 2 if "
+              "the input model is multitensor and 1 if the "
+              "input model is dt."))
+
+    numpds = traits.Int(
+        argstr='-numpds %d',
+        units='NA',
+        desc=("The maximum number of PDs in a voxel for input "
+              "models sfpeak and pico. The default is 3 for input "
+              "model sfpeak and 1 for input model pico. This option "
+              "determines the size of the voxels in the input file "
+              "and does not affect tracking. For tensor data, use "
+              "the -maxcomponents option."))
+
+    data_dims = traits.List(
+        traits.Int,
+        desc='data dimensions in voxels',
+        argstr='-datadims %s',
+        minlen=3,
+        maxlen=3,
+        units='voxels')
+
+    voxel_dims = traits.List(
+        traits.Float,
+        desc='voxel dimensions in mm',
+        argstr='-voxeldims %s',
+        minlen=3,
+        maxlen=3,
+        units='mm')
+
+    ipthresh = traits.Float(
+        argstr='-ipthresh %f',
+        desc=('Curvature threshold for tracking, expressed as '
+              'the minimum dot product between two streamline '
+              'orientations calculated over the length of a '
+              'voxel. If the dot product between the previous '
+              'and current directions is less than this '
+              'threshold, then the streamline terminates. The '
+              'default setting will terminate fibres that curve '
+              'by more than 80 degrees. Set this to -1.0 to '
+              'disable curvature checking completely.'))
+
+    curvethresh = traits.Float(
+        argstr='-curvethresh %f',
+        desc=('Curvature threshold for tracking, expressed '
+              'as the maximum angle (in degrees) between '
+              'two streamline orientations calculated over '
+              'the length of a voxel. If the angle is '
+              'greater than this, then the streamline '
+              'terminates.'))
+
+    curveinterval = traits.Float(
+        argstr='-curveinterval %f',
+        requires=['curvethresh'],
+        desc=('Interval over which the curvature threshold '
+              'should be evaluated, in mm. The default is '
+              '5mm. When using the default curvature '
+              'threshold of 90 degrees, this means that '
+              'streamlines will terminate if they curve by '
+              'more than 90 degrees over a path length '
+              'of 5mm.'))
+
+    anisthresh = traits.Float(
+        argstr='-anisthresh %f',
+        desc=('Terminate fibres that enter a voxel with lower '
+              'anisotropy than the threshold.'))
+
+    anisfile = File(
+        argstr='-anisfile %s',
+        exists=True,
+        desc=('File containing the anisotropy map. This is required to '
+              'apply an anisotropy threshold with non-tensor data. If '
+              'the map is supplied it is always used, even in tensor '
+              'data.'))
+
+    outputtracts = traits.Enum(
+        'float',
+        'double',
+        'oogl',
+        argstr='-outputtracts %s',
+        desc='output tract file type')
+
+    out_file = File(
+        argstr='-outputfile %s',
+        position=-1,
+        genfile=True,
+        desc='output data file')
+
+    output_root = File(
+        exists=False,
+        argstr='-outputroot %s',
+        position=-1,
+        desc='root directory for output')
+
+
+class TrackOutputSpec(TraitedSpec):
+    tracked = File(
+        exists=True, desc='output file containing reconstructed tracts')
+
+
+class Track(CommandLine):
+    """
+    Performs tractography using one of the following models:
+    'dt', 'multitensor', 'sfpeak', 'pico', 'repbs_dt', 'repbs_multitensor',
+    'ballstick', 'wildbs_dt', 'bayesdirac', 'bayesdirac_dt', 'bedpostx_dyad'
+    or 'bedpostx'.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> track = cmon.Track()
+    >>> track.inputs.inputmodel = 'dt'
+    >>> track.inputs.in_file = 'data.Bfloat'
+    >>> track.inputs.seed_file = 'seed_mask.nii'
+    >>> track.run()                  # doctest: +SKIP
+    """
+
+    _cmd = 'track'
+
+    input_spec = TrackInputSpec
+    output_spec = TrackOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if isdefined(self.inputs.out_file):
+            out_file_path = os.path.abspath(self.inputs.out_file)
+        else:
+            out_file_path = os.path.abspath(self._gen_outfilename())
+        outputs['tracked'] = out_file_path
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_file':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        # Currently in_file is only undefined for bedpostx input
+        if not isdefined(self.inputs.in_file):
+            name = 'bedpostx'
+        else:
+            _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_tracked'
+
+
+class TrackDT(Track):
+    """
+    Performs streamline tractography using tensor data
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> track = cmon.TrackDT()
+    >>> track.inputs.in_file = 'tensor_fitted_data.Bdouble'
+    >>> track.inputs.seed_file = 'seed_mask.nii'
+    >>> track.run()                  # doctest: +SKIP
+    """
+
+    def __init__(self, command=None, **inputs):
+        inputs["inputmodel"] = "dt"
+        return super(TrackDT, self).__init__(command, **inputs)
+
+
+class TrackPICoInputSpec(TrackInputSpec):
+    pdf = traits.Enum(
+        'bingham',
+        'watson',
+        'acg',
+        argstr='-pdf %s',
+        desc='Specifies the model for PICo parameters. The default is '
+        '"bingham".')
+
+    iterations = traits.Int(
+        argstr='-iterations %d',
+        units='NA',
+        desc='Number of streamlines to generate at each seed point. The '
+        'default is 5000.')
+
+
+class TrackPICo(Track):
+    """
+    Performs streamline tractography using the Probabilistic Index of
+    Connectivity (PICo) algorithm
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> track = cmon.TrackPICo()
+    >>> track.inputs.in_file = 'pdfs.Bfloat'
+    >>> track.inputs.seed_file = 'seed_mask.nii'
+    >>> track.run()                  # doctest: +SKIP
+    """
+
+    input_spec = TrackPICoInputSpec
+
+    def __init__(self, command=None, **inputs):
+        inputs["inputmodel"] = "pico"
+        return super(TrackPICo, self).__init__(command, **inputs)
+
+
+class TrackBedpostxDeterInputSpec(TrackInputSpec):
+    bedpostxdir = Directory(
+        argstr='-bedpostxdir %s',
+        mandatory=True,
+        exists=True,
+        desc=('Directory containing bedpostx output'))
+
+    min_vol_frac = traits.Float(
+        argstr='-bedpostxminf %f',
+        units='NA',
+        desc=("Zeros out compartments in bedpostx data "
+              "with a mean volume fraction f of less than "
+              "min_vol_frac. The default is 0.01."))
+
+
+class TrackBedpostxDeter(Track):
+    """
+    Data from FSL's bedpostx can be imported into Camino for deterministic tracking.
+    (Use TrackBedpostxProba for bedpostx probabilistic tractography.)
+
+    The tracking is based on the vector images dyads1.nii.gz, ... , dyadsN.nii.gz,
+    where there are a maximum of N compartments (corresponding to each fiber
+    population) in each voxel.
+
+    It also uses the N images mean_f1samples.nii.gz, ..., mean_fNsamples.nii.gz,
+    normalized such that the sum of all compartments is 1. Compartments where the
+    mean_f is less than a threshold are discarded and not used for tracking.
+    The default value is 0.01. This can be changed with the min_vol_frac option.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cam
+    >>> track = cam.TrackBedpostxDeter()
+    >>> track.inputs.bedpostxdir = 'bedpostxout'
+    >>> track.inputs.seed_file = 'seed_mask.nii'
+    >>> track.run()                  # doctest: +SKIP
+    """
+
+    input_spec = TrackBedpostxDeterInputSpec
+
+    def __init__(self, command=None, **inputs):
+        inputs["inputmodel"] = "bedpostx_dyad"
+        return super(TrackBedpostxDeter, self).__init__(command, **inputs)
+
+
+class TrackBedpostxProbaInputSpec(TrackInputSpec):
+    bedpostxdir = Directory(
+        argstr='-bedpostxdir %s',
+        mandatory=True,
+        exists=True,
+        desc=('Directory containing bedpostx output'))
+
+    min_vol_frac = traits.Float(
+        argstr='-bedpostxminf %f',
+        units='NA',
+        desc=("Zeros out compartments in bedpostx data "
+              "with a mean volume fraction f of less than "
+              "min_vol_frac. The default is 0.01."))
+
+    iterations = traits.Int(
+        argstr='-iterations %d',
+        units='NA',
+        desc=("Number of streamlines to generate at each "
+              "seed point. The default is 1."))
+
+
+class TrackBedpostxProba(Track):
+    """
+    Data from FSL's bedpostx can be imported into Camino for probabilistic tracking.
+    (Use TrackBedpostxDeter for bedpostx deterministic tractography.)
+
+    The tracking uses the files merged_th1samples.nii.gz, merged_ph1samples.nii.gz,
+    ... , merged_thNsamples.nii.gz, merged_phNsamples.nii.gz where there are a
+    maximum of N compartments (corresponding to each fiber population) in each
+    voxel. These images contain M samples of theta and phi, the polar coordinates
+    describing the "stick" for each compartment. At each iteration, a random number
+    X between 1 and M is drawn and the Xth samples of theta and phi become the
+    principal directions in the voxel.
+
+    It also uses the N images mean_f1samples.nii.gz, ..., mean_fNsamples.nii.gz,
+    normalized such that the sum of all compartments is 1. Compartments where the
+    mean_f is less than a threshold are discarded and not used for tracking.
+    The default value is 0.01. This can be changed with the min_vol_frac option.
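+
+    The number of probabilistic streamlines drawn from each seed point is
+    controlled with the iterations option (default 1).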
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cam
+    >>> track = cam.TrackBedpostxProba()
+    >>> track.inputs.bedpostxdir = 'bedpostxout'
+    >>> track.inputs.seed_file = 'seed_mask.nii'
+    >>> track.inputs.iterations = 100
+    >>> track.run()                  # doctest: +SKIP
+    """
+
+    input_spec = TrackBedpostxProbaInputSpec
+
+    def __init__(self, command=None, **inputs):
+        # Probabilistic bedpostx tracking uses the 'bedpostx' input model;
+        # 'bedpostx_dyad' is the deterministic variant (see TrackBedpostxDeter)
+        inputs["inputmodel"] = "bedpostx"
+        return super(TrackBedpostxProba, self).__init__(command, **inputs)
+
+
+class TrackBayesDiracInputSpec(TrackInputSpec):
+    scheme_file = File(
+        argstr='-schemefile %s',
+        mandatory=True,
+        exists=True,
+        desc=('The scheme file corresponding to the data being '
+              'processed.'))
+
+    iterations = traits.Int(
+        argstr='-iterations %d',
+        units='NA',
+        desc=("Number of streamlines to generate at each "
+              "seed point. The default is 5000."))
+
+    pdf = traits.Enum(
+        'bingham',
+        'watson',
+        'acg',
+        argstr='-pdf %s',
+        desc='Specifies the model for PICo priors (not the curvature '
+        'priors). The default is "bingham".')
+
+    pointset = traits.Int(
+        argstr='-pointset %s',
+        desc='Index to the point set to use for Bayesian likelihood '
+        'calculation. The index specifies a set of evenly distributed '
+        'points on the unit sphere, where each point x defines two possible '
+        'step directions (x or -x) for the streamline path. A larger number '
+        'indexes a larger point set, which gives higher angular resolution '
+        'at the expense of computation time. The default is index 1, which '
+        'gives 1922 points, index 0 gives 1082 points, index 2 gives 3002 '
+        'points.')
+
+    datamodel = traits.Enum(
+        'cylsymmdt',
+        'ballstick',
+        argstr='-datamodel %s',
+        desc='Model of the data for Bayesian tracking. The default model is '
+        '"cylsymmdt", a diffusion tensor with cylindrical symmetry about '
+        'e_1, i.e. L1 >= L2 = L3. The other model is "ballstick", the '
+        'partial volume model (see ballstickfit).')
+
+    curvepriork = traits.Float(
+        argstr='-curvepriork %G',
+        desc='Concentration parameter for the prior distribution on fibre '
+        'orientations given the fibre orientation at the previous step. '
+        'Larger values of k make curvature less likely.')
+
+    curvepriorg = traits.Float(
+        argstr='-curvepriorg %G',
+        desc='Concentration parameter for the prior distribution on fibre '
+        'orientations given the fibre orientation at the previous step. '
+        'Larger values of g make curvature less likely.')
+
+    extpriorfile = File(
+        exists=True,
+        argstr='-extpriorfile %s',
+        desc='Path to a PICo image produced by picopdfs. The PDF in each '
+        'voxel is used as a prior for the fibre orientation in Bayesian '
+        'tracking. The prior image must be in the same space as the '
+        'diffusion data.')
+
+    extpriordatatype = traits.Enum(
+        'float',
+        'double',
+        argstr='-extpriordatatype %s',
+        desc='Datatype of the prior image. 
The default is "double".') + + +class TrackBayesDirac(Track): + """ + Performs streamline tractography using a Bayesian tracking with Dirac priors + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> track = cmon.TrackBayesDirac() + >>> track.inputs.in_file = 'tensor_fitted_data.Bdouble' + >>> track.inputs.seed_file = 'seed_mask.nii' + >>> track.inputs.scheme_file = 'bvecs.scheme' + >>> track.run() # doctest: +SKIP + """ + + input_spec = TrackBayesDiracInputSpec + + def __init__(self, command=None, **inputs): + inputs["inputmodel"] = "bayesdirac" + return super(TrackBayesDirac, self).__init__(command, **inputs) + + +class TrackBallStick(Track): + """ + Performs streamline tractography using ball-stick fitted data + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> track = cmon.TrackBallStick() + >>> track.inputs.in_file = 'ballstickfit_data.Bfloat' + >>> track.inputs.seed_file = 'seed_mask.nii' + >>> track.run() # doctest: +SKIP + """ + + def __init__(self, command=None, **inputs): + inputs["inputmodel"] = "ballstick" + return super(TrackBallStick, self).__init__(command, **inputs) + + +class TrackBootstrapInputSpec(TrackInputSpec): + scheme_file = File( + argstr='-schemefile %s', + mandatory=True, + exists=True, + desc='The scheme file corresponding to the data being processed.') + + iterations = traits.Int( + argstr='-iterations %d', + units='NA', + desc="Number of streamlines to generate at each seed point.") + + inversion = traits.Int( + argstr='-inversion %s', + desc= + 'Tensor reconstruction algorithm for repetition bootstrapping. Default is 1 (linear reconstruction, single tensor).' + ) + + bsdatafiles = traits.List( + File(exists=True), + mandatory=True, + argstr='-bsdatafile %s', + desc= + 'Specifies files containing raw data for repetition bootstrapping. Use -inputfile for wild bootstrap data.' + ) + + bgmask = File( + argstr='-bgmask %s', + exists=True, + desc= + 'Provides the name of a file containing a background mask computed using, for example, FSL\'s bet2 program. The mask file contains zero in background voxels and non-zero in foreground.' + ) + + +class TrackBootstrap(Track): + """ + Performs bootstrap streamline tractography using mulitple scans of the same subject + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> track = cmon.TrackBootstrap() + >>> track.inputs.inputmodel='repbs_dt' + >>> track.inputs.scheme_file = 'bvecs.scheme' + >>> track.inputs.bsdatafiles = ['fitted_data1.Bfloat', 'fitted_data2.Bfloat'] + >>> track.inputs.seed_file = 'seed_mask.nii' + >>> track.run() # doctest: +SKIP + """ + + input_spec = TrackBootstrapInputSpec + + def __init__(self, command=None, **inputs): + return super(TrackBootstrap, self).__init__(command, **inputs) + + +class ComputeMeanDiffusivityInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=1, + desc='Tensor-fitted data filename') + + scheme_file = File( + exists=True, + argstr='%s', + position=2, + desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + + out_file = File(argstr="> %s", position=-1, genfile=True) + + inputmodel = traits.Enum( + 'dt', + 'twotensor', + 'threetensor', + argstr='-inputmodel %s', + desc= + 'Specifies the model that the input tensor data contains parameters for.' + 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' + '"threetensor" (three-tensor data). 
By default, the program assumes that the input data ' + 'contains a single diffusion tensor in each voxel.') + + inputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-inputdatatype %s', + desc= + 'Specifies the data type of the input file. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + outputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-outputdatatype %s', + desc= + 'Specifies the data type of the output data. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + +class ComputeMeanDiffusivityOutputSpec(TraitedSpec): + md = File(exists=True, desc='Mean Diffusivity Map') + + +class ComputeMeanDiffusivity(StdOutCommandLine): + """ + Computes the mean diffusivity (trace/3) from diffusion tensors. + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> md = cmon.ComputeMeanDiffusivity() + >>> md.inputs.in_file = 'tensor_fitted_data.Bdouble' + >>> md.inputs.scheme_file = 'A.scheme' + >>> md.run() # doctest: +SKIP + """ + _cmd = 'md' + input_spec = ComputeMeanDiffusivityInputSpec + output_spec = ComputeMeanDiffusivityOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["md"] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + "_MD.img" # Need to change to self.inputs.outputdatatype + + +class ComputeFractionalAnisotropyInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=1, + desc='Tensor-fitted data filename') + + scheme_file = File( + exists=True, + argstr='%s', + position=2, + desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + + inputmodel = traits.Enum( + 'dt', + 'twotensor', + 'threetensor', + 'multitensor', + argstr='-inputmodel %s', + desc= + 'Specifies the model that the input tensor data contains parameters for.' + 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' + '"threetensor" (three-tensor data). By default, the program assumes that the input data ' + 'contains a single diffusion tensor in each voxel.') + + inputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-inputdatatype %s', + desc= + 'Specifies the data type of the input file. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + outputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-outputdatatype %s', + desc= + 'Specifies the data type of the output data. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + +class ComputeFractionalAnisotropyOutputSpec(TraitedSpec): + fa = File(exists=True, desc='Fractional Anisotropy Map') + + +class ComputeFractionalAnisotropy(StdOutCommandLine): + """ + Computes the fractional anisotropy of tensors. + + Reads diffusion tensor (single, two-tensor or three-tensor) data from the standard input, + computes the fractional anisotropy (FA) of each tensor and outputs the results to the + standard output. 
For multiple-tensor data the program outputs the FA of each tensor, + so for three-tensor data, for example, the output contains three fractional anisotropy + values per voxel. + + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> fa = cmon.ComputeFractionalAnisotropy() + >>> fa.inputs.in_file = 'tensor_fitted_data.Bdouble' + >>> fa.inputs.scheme_file = 'A.scheme' + >>> fa.run() # doctest: +SKIP + """ + _cmd = 'fa' + input_spec = ComputeFractionalAnisotropyInputSpec + output_spec = ComputeFractionalAnisotropyOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['fa'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_FA.Bdouble' # Need to change to self.inputs.outputdatatype + + +class ComputeTensorTraceInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='< %s', + mandatory=True, + position=1, + desc='Tensor-fitted data filename') + + scheme_file = File( + exists=True, + argstr='%s', + position=2, + desc='Camino scheme file (b values / vectors, see camino.fsl2scheme)') + + inputmodel = traits.Enum( + 'dt', + 'twotensor', + 'threetensor', + 'multitensor', + argstr='-inputmodel %s', + desc= + 'Specifies the model that the input tensor data contains parameters for.' + 'Possible model types are: "dt" (diffusion-tensor data), "twotensor" (two-tensor data), ' + '"threetensor" (three-tensor data). By default, the program assumes that the input data ' + 'contains a single diffusion tensor in each voxel.') + + inputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-inputdatatype %s', + desc= + 'Specifies the data type of the input file. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + outputdatatype = traits.Enum( + 'char', + 'short', + 'int', + 'long', + 'float', + 'double', + argstr='-outputdatatype %s', + desc= + 'Specifies the data type of the output data. The data type can be any of the' + 'following strings: "char", "short", "int", "long", "float" or "double".' + ) + + +class ComputeTensorTraceOutputSpec(TraitedSpec): + trace = File(exists=True, desc='Trace of the diffusion tensor') + + +class ComputeTensorTrace(StdOutCommandLine): + """ + Computes the trace of tensors. + + Reads diffusion tensor (single, two-tensor or three-tensor) data from the standard input, + computes the trace of each tensor, i.e., three times the mean diffusivity, and outputs + the results to the standard output. For multiple-tensor data the program outputs the + trace of each tensor, so for three-tensor data, for example, the output contains three + values per voxel. + + Divide the output by three to get the mean diffusivity. 
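+    For example, a trace of 2100E-12 m^2 s^-1 (the default tensor trace used
+    elsewhere in these interfaces, e.g. by DTLUTGen) corresponds to a mean
+    diffusivity of 700E-12 m^2 s^-1 (an illustrative calculation only).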
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.camino as cmon
+    >>> trace = cmon.ComputeTensorTrace()
+    >>> trace.inputs.in_file = 'tensor_fitted_data.Bdouble'
+    >>> trace.inputs.scheme_file = 'A.scheme'
+    >>> trace.run()                 # doctest: +SKIP
+    """
+    _cmd = 'trd'
+    input_spec = ComputeTensorTraceInputSpec
+    output_spec = ComputeTensorTraceOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['trace'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_TrD.img'  # Need to change to self.inputs.outputdatatype
+
+
+class ComputeEigensystemInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='< %s',
+        mandatory=True,
+        position=1,
+        desc='Tensor-fitted data filename')
+
+    inputmodel = traits.Enum(
+        'dt',
+        'multitensor',
+        argstr='-inputmodel %s',
+        desc=('Specifies the model that the input data contains parameters '
+              'for. Possible model types are: "dt" (diffusion-tensor data) '
+              'and "multitensor".'))
+
+    maxcomponents = traits.Int(
+        argstr='-maxcomponents %d',
+        desc=('The maximum number of tensor components in a voxel of the '
+              'input data.'))
+
+    inputdatatype = traits.Enum(
+        'double',
+        'float',
+        'long',
+        'int',
+        'short',
+        'char',
+        argstr='-inputdatatype %s',
+        usedefault=True,
+        desc=('Specifies the data type of the input data. '
+              'The data type can be any of the following strings: '
+              '"char", "short", "int", "long", "float" or "double". '
+              'Default is double data type.'))
+
+    outputdatatype = traits.Enum(
+        'double',
+        'float',
+        'long',
+        'int',
+        'short',
+        'char',
+        argstr='-outputdatatype %s',
+        usedefault=True,
+        desc=('Specifies the data type of the output data. '
+              'The data type can be any of the following strings: '
+              '"char", "short", "int", "long", "float" or "double". '
+              'Default is double data type.'))
+
+
+class ComputeEigensystemOutputSpec(TraitedSpec):
+    eigen = File(
+        exists=True,
+        desc='Eigensystem (eigenvalues and eigenvectors) of the diffusion '
+        'tensor(s)')
+
+
+class ComputeEigensystem(StdOutCommandLine):
+    """
+    Computes the eigensystem from tensor fitted data.
+
+    Reads diffusion tensor (single, two-tensor, three-tensor or multitensor) data from the
+    standard input, computes the eigenvalues and eigenvectors of each tensor and outputs the
+    results to the standard output. For multiple-tensor data the program outputs the
+    eigensystem of each tensor. For each tensor the program outputs: {l_1, e_11, e_12, e_13,
+    l_2, e_21, e_22, e_23, l_3, e_31, e_32, e_33}, where l_1 >= l_2 >= l_3 and e_i = (e_i1,
+    e_i2, e_i3) is the eigenvector with eigenvalue l_i. For three-tensor data, for example,
+    the output contains thirty-six values per voxel.
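+
+    For single-tensor input the eigensystem can be read back into Python; a
+    minimal sketch (not part of the interface, assuming numpy and the
+    default big-endian double output, with twelve values per voxel as
+    described above):
+
+    >>> import numpy as np                                  # doctest: +SKIP
+    >>> eig = np.fromfile('tensor_fitted_data_eig.Bdouble',
+    ...                   dtype='>f8').reshape(-1, 12)      # doctest: +SKIP
+    >>> l1, e1 = eig[:, 0], eig[:, 1:4]                     # doctest: +SKIP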
+ + Example + ------- + + >>> import nipype.interfaces.camino as cmon + >>> dteig = cmon.ComputeEigensystem() + >>> dteig.inputs.in_file = 'tensor_fitted_data.Bdouble' + >>> dteig.run() # doctest: +SKIP + """ + _cmd = 'dteig' + input_spec = ComputeEigensystemInputSpec + output_spec = ComputeEigensystemOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["eigen"] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + datatype = self.inputs.outputdatatype + return name + '_eig.B' + datatype diff --git a/nipype/interfaces/camino/odf.py b/nipype/interfaces/camino/odf.py new file mode 100644 index 0000000000..6fea6fdcfd --- /dev/null +++ b/nipype/interfaces/camino/odf.py @@ -0,0 +1,544 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ...utils.filemanip import split_filename +from ..base import (traits, TraitedSpec, File, StdOutCommandLine, + StdOutCommandLineInputSpec) + + +class QBallMXInputSpec(StdOutCommandLineInputSpec): + basistype = traits.Enum( + 'rbf', + 'sh', + argstr='-basistype %s', + desc=('Basis function type. "rbf" to use radial basis functions ' + '"sh" to use spherical harmonics'), + usedefault=True) + scheme_file = File( + exists=True, + argstr='-schemefile %s', + mandatory=True, + desc='Specifies the scheme file for the diffusion MRI data') + order = traits.Int( + argstr='-order %d', + units='NA', + desc=( + 'Specific to sh. Maximum order of the spherical harmonic series. ' + 'Default is 4.')) + rbfpointset = traits.Int( + argstr='-rbfpointset %d', + units='NA', + desc=( + 'Specific to rbf. Sets the number of radial basis functions to use. ' + 'The value specified must be present in the Pointsets directory. ' + 'The default value is 246.')) + rbfsigma = traits.Float( + argstr='-rbfsigma %f', + units='NA', + desc= + ('Specific to rbf. Sets the width of the interpolating basis functions. ' + 'The default value is 0.2618 (15 degrees).')) + smoothingsigma = traits.Float( + argstr='-smoothingsigma %f', + units='NA', + desc=( + 'Specific to rbf. Sets the width of the smoothing basis functions. ' + 'The default value is 0.1309 (7.5 degrees).')) + + +class QBallMXOutputSpec(TraitedSpec): + qmat = File(exists=True, desc='Q-Ball reconstruction matrix') + + +class QBallMX(StdOutCommandLine): + """ + Generates a reconstruction matrix for Q-Ball. Used in LinRecon with + the same scheme file to reconstruct data. + + Example 1 + --------- + To create a linear transform matrix using Spherical Harmonics (sh). + + >>> import nipype.interfaces.camino as cam + >>> qballmx = cam.QBallMX() + >>> qballmx.inputs.scheme_file = 'A.scheme' + >>> qballmx.inputs.basistype = 'sh' + >>> qballmx.inputs.order = 6 + >>> qballmx.run() # doctest: +SKIP + + Example 2 + --------- + To create a linear transform matrix using Radial Basis Functions + (rbf). 
This command uses the default settings of rbf sigma = 0.2618
+    (15 degrees), data smoothing sigma = 0.1309 (7.5 degrees) and an rbf
+    pointset of 246.
+
+    >>> import nipype.interfaces.camino as cam
+    >>> qballmx = cam.QBallMX()
+    >>> qballmx.inputs.scheme_file = 'A.scheme'
+    >>> qballmx.run()              # doctest: +SKIP
+
+    The linear transform matrix from either of these two examples can then
+    be run over each voxel using LinRecon
+
+    >>> qballcoeffs = cam.LinRecon()
+    >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat'
+    >>> qballcoeffs.inputs.scheme_file = 'A.scheme'
+    >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble'
+    >>> qballcoeffs.inputs.normalize = True
+    >>> qballcoeffs.inputs.bgmask = 'brain_mask.nii'
+    >>> qballcoeffs.run()             # doctest: +SKIP
+    """
+    _cmd = 'qballmx'
+    input_spec = QBallMXInputSpec
+    output_spec = QBallMXOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['qmat'] = os.path.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.scheme_file)
+        return name + '_qmat.Bdouble'
+
+
+class LinReconInputSpec(StdOutCommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=1,
+        desc='voxel-order data filename')
+    scheme_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=2,
+        desc='Specifies the scheme file for the diffusion MRI data')
+    qball_mat = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=3,
+        desc='Linear transformation matrix.')
+    normalize = traits.Bool(
+        argstr='-normalize',
+        desc=('Normalize the measurements and discard '
+              'the zero measurements before the linear transform.'))
+    log = traits.Bool(
+        argstr='-log',
+        desc=('Transform the log measurements rather than the '
+              'measurements themselves'))
+    bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask')
+
+
+class LinReconOutputSpec(TraitedSpec):
+    recon_data = File(exists=True, desc='Transformed data')
+
+
+class LinRecon(StdOutCommandLine):
+    """
+    Runs a linear transformation in each voxel.
+
+    Reads a linear transformation from the matrix file assuming the
+    imaging scheme specified in the scheme file. Performs the linear
+    transformation on the data in every voxel and outputs the result to
+    the standard output. The output in every voxel is actually: ::
+
+        [exit code, ln(S(0)), p1, ..., pR]
+
+    where p1, ..., pR are the parameters of the reconstruction.
+    Possible exit codes are:
+
+    - 0. No problems.
+    - 6. Bad data replaced by substitution of zero.
+
+    The matrix must be R by N+M where N+M is the number of measurements
+    and R is the number of parameters of the reconstruction. The matrix
+    file contains binary double-precision floats. The matrix elements
+    are stored row by row.
+
+    Example
+    ---------
+    First run QBallMX and create a linear transform matrix using
+    Spherical Harmonics (sh).
+ + >>> import nipype.interfaces.camino as cam + >>> qballmx = cam.QBallMX() + >>> qballmx.inputs.scheme_file = 'A.scheme' + >>> qballmx.inputs.basistype = 'sh' + >>> qballmx.inputs.order = 4 + >>> qballmx.run() # doctest: +SKIP + + Then run it over each voxel using LinRecon + + >>> qballcoeffs = cam.LinRecon() + >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat' + >>> qballcoeffs.inputs.scheme_file = 'A.scheme' + >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble' + >>> qballcoeffs.inputs.normalize = True + >>> qballcoeffs.run() # doctest: +SKIP + """ + _cmd = 'linrecon' + input_spec = LinReconInputSpec + output_spec = LinReconOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['recon_data'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.scheme_file) + return name + '_recondata.Bdouble' + + +class MESDInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='-inputfile %s', + mandatory=True, + position=1, + desc='voxel-order data filename') + inverter = traits.Enum( + 'SPIKE', + 'PAS', + argstr='-filter %s', + position=2, + mandatory=True, + desc= + ('The inversion index specifies the type of inversion to perform on the data.' + 'The currently available choices are:' + 'Inverter name | Inverter parameters' + '---------------|------------------' + 'SPIKE | bd (b-value x diffusivity along the fibre.)' + 'PAS | r')) + inverter_param = traits.Float( + argstr='%f', + units='NA', + position=3, + mandatory=True, + desc= + ('Parameter associated with the inverter. Cf. inverter description for' + 'more information.')) + fastmesd = traits.Bool( + argstr='-fastmesd', + requires=['mepointset'], + desc= + ('Turns off numerical integration checks and fixes the integration point set size at that of' + 'the index specified by -basepointset..')) + mepointset = traits.Int( + argstr='-mepointset %d', + units='NA', + desc= + ('Use a set of directions other than those in the scheme file for the deconvolution kernel.' + 'The number refers to the number of directions on the unit sphere. For example, ' + '"-mepointset 54" uses the directions in "camino/PointSets/Elec054.txt".' + )) + scheme_file = File( + exists=True, + argstr='-schemefile %s', + mandatory=True, + desc='Specifies the scheme file for the diffusion MRI data') + bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask') + inputdatatype = traits.Enum( + 'float', + 'char', + 'short', + 'int', + 'long', + 'double', + argstr='-inputdatatype %s', + desc= + ('Specifies the data type of the input file: "char", "short", "int", "long",' + '"float" or "double". The input file must have BIG-ENDIAN ordering.' + 'By default, the input type is "float".')) + + +class MESDOutputSpec(TraitedSpec): + mesd_data = File(exists=True, desc='MESD data') + + +class MESD(StdOutCommandLine): + """ + MESD is a general program for maximum entropy spherical deconvolution. + It also runs PASMRI, which is a special case of spherical deconvolution. + The input data must be in voxel order. + + The format of the output in each voxel is: + { exitcode, ln(A^star(0)), lambda_0, lambda_1, ..., lambda_N } + + The exitcode contains the results of three tests. 
The first test thresholds
+ the maximum relative error between the numerical integrals computed at con-
+ vergence and those computed using a larger test point set; if the error is
+ greater than a threshold the exitcode is increased from zero to one as a
+ warning; if it is greater than a larger threshold the exitcode is increased to
+ two to suggest failure. The second test thresholds the predicted error in
+ numerical integrals computed using the test point set; if the predicted error
+ is greater than a threshold the exitcode is increased by 10. The third test
+ thresholds the RMS error between the measurements and their predictions from
+ the fitted deconvolution; if the errors are greater than a threshold, the exit
+ code is increased by 100. An exitcode of 112 means that all three tests were
+ failed and the result is likely to be unreliable. If all is well the exitcode
+ is zero. Results are often still reliable even if one or two of the tests are
+ failed.
+
+ Other possible exitcodes are:
+
+ - 5 - The optimization failed to converge
+ - -1 - Background
+ - -100 - Something wrong in the MRI data, e.g. negative or zero measurements,
+ so that the optimization could not run.
+
+ The standard MESD implementation is computationally demanding, particularly
+ as the number of measurements increases (computation is approximately O(N^2),
+ where N is the number of measurements). There are two ways to obtain significant
+ computational speed-up:
+
+ i) Turn off error checks and use a small point set for computing numerical
+ integrals in the algorithm by adding the flag -fastmesd. Sakaie CDMRI 2008
+ shows that using the smallest point set (-basepointset 0) with no
+ error checks usually has only a minor effect on the output of the algorithm,
+ but provides a major reduction in computation time. You can increase the point
+ set size using -basepointset with an argument higher than 0, which may produce
+ better results in some voxels, but will increase computation time, which
+ approximately doubles every time the point set index increases by 1.
+
+ ii) Reduce the complexity of the maximum entropy encoding using -mepointset <X>.
+ By default <X> = N, the number of measurements, and is the number of parameters
+ in the max. ent. representation of the output function, i.e. the number of
+ lambda parameters, as described in Jansons and Alexander Inverse Problems 2003.
+ However, we can represent the function using fewer components and here <X>
+ specifies the number of lambda parameters. To obtain speed-up, set
+ <X> < N; complexity becomes O(<X>^2) rather than O(N^2). Note that <X> must be chosen
+ so that the camino/PointSets directory contains a point set with that number
+ of elements. When <X> decreases, the numerical integration checks
+ make less and less of a difference and smaller point sets for numerical
+ integration (see -basepointset) become adequate. So when <X> is low -fastmesd is
+ worth using to get even more speed-up.
+
+ The choice of <X> is a parameter of the technique. Too low and you lose angular
+ resolution; too high and you see no computational benefit and may even suffer
+ from overfitting. Empirically, we have found that <X> = 16 often gives good
+ results and good speed-up, but it is worth trying a few values and comparing
+ performance. The reduced encoding is described in the following ISMRM abstract:
+ Sweet and Alexander "Reduced Encoding Persistent Angular Structure" 572 ISMRM 2010.
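+
+ As a rough sketch of enabling both speed-ups (assuming the fastmesd
+ and mepointset attributes described above, set on top of the
+ mandatory inputs shown in the example that follows; fastmesd
+ requires mepointset):
+
+ >>> import nipype.interfaces.camino as cam
+ >>> fast = cam.MESD()
+ >>> fast.inputs.mepointset = 54
+ >>> fast.inputs.fastmesd = True
+ >>> fast.run() # doctest: +SKIP
+
+ Example
+ ---------
+ Run MESD on every voxel of the data file SubjectA.Bfloat using the PASMRI kernel.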
+ + >>> import nipype.interfaces.camino as cam + >>> mesd = cam.MESD() + >>> mesd.inputs.in_file = 'SubjectA.Bfloat' + >>> mesd.inputs.scheme_file = 'A.scheme' + >>> mesd.inputs.inverter = 'PAS' + >>> mesd.inputs.inverter_param = 1.4 + >>> mesd.run() # doctest: +SKIP + """ + _cmd = 'mesd' + input_spec = MESDInputSpec + output_spec = MESDOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['mesd_data'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.scheme_file) + return name + '_MESD.Bdouble' + + +class SFPeaksInputSpec(StdOutCommandLineInputSpec): + in_file = File( + exists=True, + argstr='-inputfile %s', + mandatory=True, + desc='Voxel-order data of spherical functions') + inputmodel = traits.Enum( + 'sh', + 'maxent', + 'rbf', + argstr='-inputmodel %s', + mandatory=True, + desc= + ('Type of functions input via in_file. Currently supported options are: ' + ' sh - Spherical harmonic series. Specify the maximum order of the SH series ' + ' with the "order" attribute if different from the default of 4. ' + ' maxent - Maximum entropy representations output by MESD. The reconstruction ' + ' directions input to MESD must be specified. By default this is the ' + ' same set of gradient directions (excluding zero gradients) in the ' + ' scheme file, so specify the "schemefile" attribute unless the ' + ' "mepointset" attribute was set in MESD. ' + ' rbf - Sums of radial basis functions. Specify the pointset with the attribute ' + ' "rbfpointset" if different from the default. See QBallMX.')) + order = traits.Int( + argstr='-order %d', + units='NA', + desc='Specific to sh. Maximum order of the spherical harmonic series.') + scheme_file = File( + exists=True, + argstr='%s', + desc='Specific to maxent. Specifies the scheme file.') + rbfpointset = traits.Int( + argstr='-rbfpointset %d', + units='NA', + desc=( + 'Specific to rbf. Sets the number of radial basis functions to use. ' + 'The value specified must be present in the Pointsets directory. ' + 'The default value is 246.')) + mepointset = traits.Int( + argstr='-mepointset %d', + units='NA', + desc= + ('Use a set of directions other than those in the scheme file for the deconvolution ' + 'kernel. The number refers to the number of directions on the unit sphere. ' + 'For example, "mepointset = 54" uses the directions in "camino/PointSets/Elec054.txt" ' + 'Use this option only if you told MESD to use a custom set of directions with the same ' + 'option. Otherwise, specify the scheme file with the "schemefile" attribute.' + )) + numpds = traits.Int( + argstr='-numpds %d', + units='NA', + desc='The largest number of peak directions to output in each voxel.') + noconsistencycheck = traits.Bool( + argstr='-noconsistencycheck', + desc= + 'Turns off the consistency check. The output shows all consistencies as true.' + ) + searchradius = traits.Float( + argstr='-searchradius %f', + units='NA', + desc= + 'The search radius in the peak finding algorithm. The default is 0.4 (cf. "density")' + ) + density = traits.Int( + argstr='-density %d', + units='NA', + desc= + ('The number of randomly rotated icosahedra to use in constructing the set of points for ' + 'random sampling in the peak finding algorithm. Default is 1000, which works well for very ' + 'spiky maxent functions. For other types of function, it is reasonable to set the density ' + 'much lower and increase the search radius slightly, which speeds up the computation.' 
+ ))
+ pointset = traits.Int(
+ argstr='-pointset %d',
+ units='NA',
+ desc=
+ ('To sample using an evenly distributed set of points instead. The integer can be '
+ '0, 1, ..., 7. Index 0 gives 1082 points, 1 gives 1922, 2 gives 3002, 3 gives 4322, '
+ '4 gives 5882, 5 gives 8672, 6 gives 12002, 7 gives 15872.'))
+ pdthresh = traits.Float(
+ argstr='-pdthresh %f',
+ units='NA',
+ desc=
+ ('Base threshold on the actual peak direction strength divided by the mean of the '
+ 'function. The default is 1.0 (the peak must be equal to or greater than the mean).'
+ ))
+ stdsfrommean = traits.Float(
+ argstr='-stdsfrommean %f',
+ units='NA',
+ desc=
+ ('This is the number of standard deviations of the function to be added to the '
+ '"pdthresh" attribute in the peak directions pruning.'))
+
+
+class SFPeaksOutputSpec(TraitedSpec):
+ peaks = File(exists=True, desc='Peaks of the spherical functions.')
+
+
+class SFPeaks(StdOutCommandLine):
+ """
+ Finds the peaks of spherical functions.
+
+ This utility reads coefficients of the spherical functions and
+ outputs a list of peak directions of the function. It computes the
+ value of the function at each of a set of sample points. Then it
+ finds local maxima by finding all points at which the function is
+ larger than at any other point within a fixed search radius (the
+ default is 0.4). The utility then uses Powell's algorithm to
+ optimize the position of each local maximum. Finally the utility
+ removes duplicates and tiny peaks with function value smaller than
+ some threshold, which is the mean of the function plus some number
+ of standard deviations. By default the program checks for con-
+ sistency with a second set of starting points, but skips the
+ optimization step. To speed up execution, you can turn off the con-
+ sistency check by setting the noconsistencycheck flag to True.
+
+ By default, the utility constructs a set of sample points by
+ randomly rotating a unit icosahedron repeatedly (the default is 1000
+ times, which produces a set of 6000 points) and concatenating the
+ lists of vertices. The 'pointset = <index>' attribute can tell the
+ utility to use an evenly distributed set of points (index 0 gives
+ 1082 points, 1 gives 1922, 2 gives 4322, 3 gives 8672, 4 gives 15872,
+ 5 gives 32762, 6 gives 72032), which is quicker, because you can get
+ away with fewer points. We estimate that you can use a factor of 2.5
+ fewer evenly distributed points than randomly distributed points and
+ still expect similar performance levels.
+
+ The output for each voxel is:
+
+ - exitcode (inherited from the input data).
+ - ln(A(0))
+ - number of peaks found.
+ - flag for consistency with a repeated run (number of directions is
+ the same and the directions are the same to within a threshold.)
+ - mean(f).
+ - std(f).
+ - direction 1 (x, y, z, f, H00, H01, H10, H11).
+ - direction 2 (x, y, z, f, H00, H01, H10, H11).
+ - direction 3 (x, y, z, f, H00, H01, H10, H11).
+
+ H is the Hessian of f at the peak. It is the matrix: ::
+
+ [d^2f/ds^2 d^2f/dsdt]
+ [d^2f/dtds d^2f/dt^2]
+ = [H00 H01]
+ [H10 H11]
+
+ where s and t are orthogonal coordinates local to the peak.
+
+ By default the maximum number of peak directions output in each
+ voxel is three. If fewer than three directions are found, zeros are
+ output for later directions. The peaks are ordered by the value of
+ the function at the peak. If more than the maximum number of
+ directions are found only the strongest ones are output. The maximum
+ number can be changed by setting the 'numpds' attribute.
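+
+ As a minimal sketch (an illustration only, not part of the
+ interface), the per-voxel record above could be unpacked with numpy,
+ assuming the default of three peak directions, i.e. 6 + 3 * 8 = 30
+ big-endian doubles per voxel, and a hypothetical peaks file named
+ 'A_peaks.Bdouble'; columns 6:14 then hold the first direction's
+ (x, y, z, f, H00, H01, H10, H11):
+
+ >>> import numpy as np
+ >>> rec = np.fromfile('A_peaks.Bdouble', dtype='>f8').reshape(-1, 30) # doctest: +SKIP
+ >>> exitcode, ln_a0, npeaks = rec[:, 0], rec[:, 1], rec[:, 2] # doctest: +SKIP
+ >>> peak1 = rec[:, 6:14] # doctest: +SKIP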
+ + The utility can read various kinds of spherical function, but must + be told what kind of function is input using the 'inputmodel' + attribute. The description of the 'inputmodel' attribute lists + additional information required by SFPeaks for each input model. + + + Example + --------- + First run QBallMX and create a linear transform matrix using + Spherical Harmonics (sh). + + >>> import nipype.interfaces.camino as cam + >>> sf_peaks = cam.SFPeaks() + >>> sf_peaks.inputs.in_file = 'A_recon_params.Bdouble' + >>> sf_peaks.inputs.inputmodel = 'sh' + >>> sf_peaks.inputs.order = 4 + >>> sf_peaks.inputs.density = 100 + >>> sf_peaks.inputs.searchradius = 1.0 + >>> sf_peaks.run() # doctest: +SKIP + """ + _cmd = 'sfpeaks' + input_spec = SFPeaksInputSpec + output_spec = SFPeaksOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['peaks'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_peaks.Bdouble' diff --git a/nipype/interfaces/camino/tests/__init__.py b/nipype/interfaces/camino/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/interfaces/camino/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py new file mode 100644 index 0000000000..5d796126cc --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_AnalyzeHeader.py @@ -0,0 +1,106 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import AnalyzeHeader + + +def test_AnalyzeHeader_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + centre=dict( + argstr='-centre %s', + units='mm', + ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + datatype=dict( + argstr='-datatype %s', + mandatory=True, + ), + description=dict(argstr='-description %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + greylevels=dict( + argstr='-gl %s', + units='NA', + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + initfromheader=dict( + argstr='-initfromheader %s', + position=3, + ), + intelbyteorder=dict(argstr='-intelbyteorder', ), + networkbyteorder=dict(argstr='-networkbyteorder', ), + nimages=dict( + argstr='-nimages %d', + units='NA', + ), + offset=dict( + argstr='-offset %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + picoseed=dict( + argstr='-picoseed %s', + units='mm', + ), + printbigendian=dict( + argstr='-printbigendian %s', + position=3, + ), + printimagedims=dict( + argstr='-printimagedims %s', + position=3, + ), + printintelbyteorder=dict( + argstr='-printintelbyteorder %s', + position=3, + ), + printprogargs=dict( + argstr='-printprogargs %s', + position=3, + ), + readheader=dict( + argstr='-readheader %s', + position=3, + ), + scaleinter=dict( + argstr='-scaleinter %d', + units='NA', + ), + scaleslope=dict( + argstr='-scaleslope %d', + units='NA', + ), + scheme_file=dict( + argstr='%s', + position=2, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = AnalyzeHeader.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(inputs.traits()[key], metakey) == value +def test_AnalyzeHeader_outputs(): + output_map = dict(header=dict(), ) + outputs = AnalyzeHeader.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py new file mode 100644 index 0000000000..caee9e68cf --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ComputeEigensystem.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ComputeEigensystem + + +def test_ComputeEigensystem_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + inputdatatype=dict( + argstr='-inputdatatype %s', + usedefault=True, + ), + inputmodel=dict(argstr='-inputmodel %s', ), + maxcomponents=dict(argstr='-maxcomponents %d', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputdatatype=dict( + argstr='-outputdatatype %s', + usedefault=True, + ), + ) + inputs = ComputeEigensystem.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeEigensystem_outputs(): + output_map = dict(eigen=dict(), ) + outputs = ComputeEigensystem.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py new file mode 100644 index 0000000000..49cfdfd96a --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ComputeFractionalAnisotropy.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ComputeFractionalAnisotropy + + +def test_ComputeFractionalAnisotropy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict(argstr='-inputmodel %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputdatatype=dict(argstr='-outputdatatype %s', ), + scheme_file=dict( + argstr='%s', + position=2, + ), + ) + inputs = ComputeFractionalAnisotropy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeFractionalAnisotropy_outputs(): + output_map = dict(fa=dict(), ) + outputs = ComputeFractionalAnisotropy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py new file mode 100644 index 0000000000..212477d149 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ComputeMeanDiffusivity.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ComputeMeanDiffusivity + + +def test_ComputeMeanDiffusivity_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict(argstr='-inputmodel %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputdatatype=dict(argstr='-outputdatatype %s', ), + scheme_file=dict( + argstr='%s', + position=2, + ), + ) + inputs = ComputeMeanDiffusivity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeMeanDiffusivity_outputs(): + output_map = dict(md=dict(), ) + outputs = ComputeMeanDiffusivity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py new file mode 100644 index 0000000000..1112c7c743 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ComputeTensorTrace.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ComputeTensorTrace + + +def test_ComputeTensorTrace_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict(argstr='-inputmodel %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputdatatype=dict(argstr='-outputdatatype %s', ), + scheme_file=dict( + argstr='%s', + position=2, + ), + ) + inputs = ComputeTensorTrace.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeTensorTrace_outputs(): + output_map = dict(trace=dict(), ) + outputs = ComputeTensorTrace.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_Conmat.py b/nipype/interfaces/camino/tests/test_auto_Conmat.py new file mode 100644 index 0000000000..06a76fdf2b --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_Conmat.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..connectivity import Conmat + + +def test_Conmat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + ), + output_root=dict( + argstr='-outputroot %s', + genfile=True, + ), + scalar_file=dict( + argstr='-scalarfile %s', + requires=['tract_stat'], + ), + target_file=dict( + argstr='-targetfile %s', + mandatory=True, + ), + targetname_file=dict(argstr='-targetnamefile %s', ), + tract_prop=dict( + argstr='-tractstat %s', + units='NA', + xor=['tract_stat'], + ), + tract_stat=dict( + argstr='-tractstat %s', + requires=['scalar_file'], + units='NA', + xor=['tract_prop'], + 
), + ) + inputs = Conmat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Conmat_outputs(): + output_map = dict( + conmat_sc=dict(), + conmat_ts=dict(), + ) + outputs = Conmat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py new file mode 100644 index 0000000000..b20dcbf3bf --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_DT2NIfTI.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import DT2NIfTI + + +def test_DT2NIfTI_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + header_file=dict( + argstr='-header %s', + mandatory=True, + position=3, + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + position=1, + ), + output_root=dict( + argstr='-outputroot %s', + genfile=True, + position=2, + ), + ) + inputs = DT2NIfTI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DT2NIfTI_outputs(): + output_map = dict( + dt=dict(), + exitcode=dict(), + lns0=dict(), + ) + outputs = DT2NIfTI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_DTIFit.py b/nipype/interfaces/camino/tests/test_auto_DTIFit.py new file mode 100644 index 0000000000..a23da89552 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_DTIFit.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DTIFit + + +def test_DTIFit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + non_linear=dict( + argstr='-nonlinear', + position=3, + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + ) + inputs = DTIFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTIFit_outputs(): + output_map = dict(tensor_fitted=dict(), ) + outputs = DTIFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py new file mode 100644 index 0000000000..95e8e0bea5 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_DTLUTGen.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DTLUTGen + + +def test_DTLUTGen_inputs(): + input_map = dict( + acg=dict(argstr='-acg', ), + args=dict(argstr='%s', ), + bingham=dict(argstr='-bingham', ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + frange=dict( + argstr='-frange %s', + position=1, + units='NA', + ), + inversion=dict( + argstr='-inversion %d', + units='NA', + ), + lrange=dict( + argstr='-lrange %s', + position=1, + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + samples=dict( + argstr='-samples %d', + units='NA', + ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + position=2, + ), + snr=dict( + argstr='-snr %f', + units='NA', + ), + step=dict( + argstr='-step %f', + units='NA', + ), + trace=dict( + argstr='-trace %G', + units='NA', + ), + watson=dict(argstr='-watson', ), + ) + inputs = DTLUTGen.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTLUTGen_outputs(): + output_map = dict(dtLUT=dict(), ) + outputs = DTLUTGen.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_DTMetric.py b/nipype/interfaces/camino/tests/test_auto_DTMetric.py new file mode 100644 index 0000000000..12256a50d8 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_DTMetric.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DTMetric + + +def test_DTMetric_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + data_header=dict(argstr='-header %s', ), + eigen_data=dict( + argstr='-inputfile %s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputdatatype=dict( + argstr='-inputdatatype %s', + usedefault=True, + ), + metric=dict( + argstr='-stat %s', + mandatory=True, + ), + outputdatatype=dict( + argstr='-outputdatatype %s', + usedefault=True, + ), + outputfile=dict( + argstr='-outputfile %s', + genfile=True, + ), + ) + inputs = DTMetric.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTMetric_outputs(): + output_map = dict(metric_stats=dict(), ) + outputs = DTMetric.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py new file mode 100644 index 0000000000..a2ebfbf0a6 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_FSL2Scheme.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import FSL2Scheme + + +def test_FSL2Scheme_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bscale=dict( + argstr='-bscale %d', + units='NA', + ), + bval_file=dict( + argstr='-bvalfile %s', + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr='-bvecfile %s', + mandatory=True, + position=1, + ), + diffusiontime=dict( + argstr='-diffusiontime %f', + units='NA', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flipx=dict(argstr='-flipx', ), + flipy=dict(argstr='-flipy', ), + flipz=dict(argstr='-flipz', ), + interleave=dict(argstr='-interleave', ), + numscans=dict( + argstr='-numscans %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + 
position=-1, + ), + usegradmod=dict(argstr='-usegradmod', ), + ) + inputs = FSL2Scheme.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FSL2Scheme_outputs(): + output_map = dict(scheme=dict(), ) + outputs = FSL2Scheme.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py new file mode 100644 index 0000000000..adae10f0bb --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_Image2Voxel.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import Image2Voxel + + +def test_Image2Voxel_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-4dimage %s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + out_type=dict( + argstr='-outputdatatype %s', + position=2, + usedefault=True, + ), + ) + inputs = Image2Voxel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Image2Voxel_outputs(): + output_map = dict(voxel_order=dict(), ) + outputs = Image2Voxel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ImageStats.py b/nipype/interfaces/camino/tests/test_auto_ImageStats.py new file mode 100644 index 0000000000..3813051025 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ImageStats.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ImageStats + + +def test_ImageStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='-images %s', + mandatory=True, + position=-1, + ), + out_type=dict( + argstr='-outputdatatype %s', + usedefault=True, + ), + output_root=dict( + argstr='-outputroot %s', + mandatory=True, + ), + stat=dict( + argstr='-stat %s', + mandatory=True, + units='NA', + ), + ) + inputs = ImageStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageStats_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ImageStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_LinRecon.py b/nipype/interfaces/camino/tests/test_auto_LinRecon.py new file mode 100644 index 0000000000..8998f42602 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_LinRecon.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import LinRecon + + +def test_LinRecon_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + log=dict(argstr='-log', ), + normalize=dict(argstr='-normalize', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + qball_mat=dict( + argstr='%s', + mandatory=True, + position=3, + ), + scheme_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + ) + inputs = LinRecon.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LinRecon_outputs(): + output_map = dict(recon_data=dict(), ) + outputs = LinRecon.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_MESD.py b/nipype/interfaces/camino/tests/test_auto_MESD.py new file mode 100644 index 0000000000..73089a3395 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_MESD.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import MESD + + +def test_MESD_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fastmesd=dict( + argstr='-fastmesd', + requires=['mepointset'], + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inverter=dict( + argstr='-filter %s', + mandatory=True, + position=2, + ), + inverter_param=dict( + argstr='%f', + mandatory=True, + position=3, + units='NA', + ), + mepointset=dict( + argstr='-mepointset %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + ) + inputs = MESD.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MESD_outputs(): + output_map = dict(mesd_data=dict(), ) + outputs = MESD.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ModelFit.py b/nipype/interfaces/camino/tests/test_auto_ModelFit.py new file mode 100644 index 0000000000..04d22bbe59 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ModelFit.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ModelFit + + +def test_ModelFit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + bgthresh=dict(argstr='-bgthresh %G', ), + cfthresh=dict(argstr='-csfthresh %G', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedbvalue=dict(argstr='-fixedbvalue %s', ), + fixedmodq=dict(argstr='-fixedmod %s', ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + model=dict( + argstr='-model %s', + mandatory=True, + ), + noisemap=dict(argstr='-noisemap %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outlier=dict(argstr='-outliermap %s', ), + outputfile=dict(argstr='-outputfile %s', ), + 
residualmap=dict(argstr='-residualmap %s', ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + sigma=dict(argstr='-sigma %G', ), + tau=dict(argstr='-tau %G', ), + ) + inputs = ModelFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ModelFit_outputs(): + output_map = dict(fitted_data=dict(), ) + outputs = ModelFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py new file mode 100644 index 0000000000..9a4867a73c --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_NIfTIDT2Camino.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import NIfTIDT2Camino + + +def test_NIfTIDT2Camino_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + position=1, + ), + lns0_file=dict(argstr='-lns0 %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + s0_file=dict(argstr='-s0 %s', ), + scaleinter=dict(argstr='-scaleinter %s', ), + scaleslope=dict(argstr='-scaleslope %s', ), + uppertriangular=dict(argstr='-uppertriangular %s', ), + ) + inputs = NIfTIDT2Camino.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NIfTIDT2Camino_outputs(): + output_map = dict(out_file=dict(), ) + outputs = NIfTIDT2Camino.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py new file mode 100644 index 0000000000..f2b59666a2 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_PicoPDFs.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import PicoPDFs + + +def test_PicoPDFs_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + directmap=dict(argstr='-directmap', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr='-inputmodel %s', + position=2, + usedefault=True, + ), + luts=dict( + argstr='-luts %s', + mandatory=True, + ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + pdf=dict( + argstr='-pdf %s', + position=4, + usedefault=True, + ), + ) + inputs = PicoPDFs.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PicoPDFs_outputs(): + output_map = dict(pdfs=dict(), ) + outputs = PicoPDFs.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], 
metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py new file mode 100644 index 0000000000..8838bb72cf --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_ProcStreamlines.py @@ -0,0 +1,119 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import ProcStreamlines + + +def test_ProcStreamlines_inputs(): + input_map = dict( + allowmultitargets=dict(argstr='-allowmultitargets', ), + args=dict(argstr='%s', ), + datadims=dict( + argstr='-datadims %s', + units='voxels', + ), + directional=dict( + argstr='-directional %s', + units='NA', + ), + discardloops=dict(argstr='-discardloops', ), + endpointfile=dict(argstr='-endpointfile %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclusionfile=dict(argstr='-exclusionfile %s', ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + position=1, + ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + iterations=dict( + argstr='-iterations %d', + units='NA', + ), + maxtractlength=dict( + argstr='-maxtractlength %d', + units='mm', + ), + maxtractpoints=dict( + argstr='-maxtractpoints %d', + units='NA', + ), + mintractlength=dict( + argstr='-mintractlength %d', + units='mm', + ), + mintractpoints=dict( + argstr='-mintractpoints %d', + units='NA', + ), + noresample=dict(argstr='-noresample', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputacm=dict( + argstr='-outputacm', + requires=['outputroot', 'seedfile'], + ), + outputcbs=dict( + argstr='-outputcbs', + requires=['outputroot', 'targetfile', 'seedfile'], + ), + outputcp=dict( + argstr='-outputcp', + requires=['outputroot', 'seedfile'], + ), + outputroot=dict(argstr='-outputroot %s', ), + outputsc=dict( + argstr='-outputsc', + requires=['outputroot', 'seedfile'], + ), + outputtracts=dict(argstr='-outputtracts', ), + regionindex=dict( + argstr='-regionindex %d', + units='mm', + ), + resamplestepsize=dict( + argstr='-resamplestepsize %d', + units='NA', + ), + seedfile=dict(argstr='-seedfile %s', ), + seedpointmm=dict( + argstr='-seedpointmm %s', + units='mm', + ), + seedpointvox=dict( + argstr='-seedpointvox %s', + units='voxels', + ), + targetfile=dict(argstr='-targetfile %s', ), + truncateinexclusion=dict(argstr='-truncateinexclusion', ), + truncateloops=dict(argstr='-truncateloops', ), + voxeldims=dict( + argstr='-voxeldims %s', + units='mm', + ), + waypointfile=dict(argstr='-waypointfile %s', ), + ) + inputs = ProcStreamlines.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProcStreamlines_outputs(): + output_map = dict( + outputroot_files=dict(), + proc=dict(), + ) + outputs = ProcStreamlines.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_QBallMX.py b/nipype/interfaces/camino/tests/test_auto_QBallMX.py new file mode 100644 index 0000000000..e965cb606a --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_QBallMX.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import QBallMX + + +def test_QBallMX_inputs(): + input_map = dict( + 
args=dict(argstr='%s', ), + basistype=dict( + argstr='-basistype %s', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + order=dict( + argstr='-order %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + rbfpointset=dict( + argstr='-rbfpointset %d', + units='NA', + ), + rbfsigma=dict( + argstr='-rbfsigma %f', + units='NA', + ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + smoothingsigma=dict( + argstr='-smoothingsigma %f', + units='NA', + ), + ) + inputs = QBallMX.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_QBallMX_outputs(): + output_map = dict(qmat=dict(), ) + outputs = QBallMX.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py new file mode 100644 index 0000000000..c52ca04929 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_SFLUTGen.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..calib import SFLUTGen + + +def test_SFLUTGen_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + binincsize=dict( + argstr='-binincsize %d', + units='NA', + ), + directmap=dict(argstr='-directmap', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + ), + info_file=dict( + argstr='-infofile %s', + mandatory=True, + ), + minvectsperbin=dict( + argstr='-minvectsperbin %d', + units='NA', + ), + order=dict( + argstr='-order %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + outputstem=dict( + argstr='-outputstem %s', + usedefault=True, + ), + pdf=dict( + argstr='-pdf %s', + usedefault=True, + ), + ) + inputs = SFLUTGen.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SFLUTGen_outputs(): + output_map = dict( + lut_one_fibre=dict(), + lut_two_fibres=dict(), + ) + outputs = SFLUTGen.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py new file mode 100644 index 0000000000..e83a01e2c3 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_SFPICOCalibData.py @@ -0,0 +1,87 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..calib import SFPICOCalibData + + +def test_SFPICOCalibData_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + info_file=dict( + argstr='-infooutputfile %s', + genfile=True, + hash_files=False, + mandatory=True, + ), + onedtfarange=dict( + argstr='-onedtfarange %s', + units='NA', + ), + onedtfastep=dict( + argstr='-onedtfastep %f', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + seed=dict( + argstr='-seed %f', + units='NA', + 
), + snr=dict( + argstr='-snr %f', + units='NA', + ), + trace=dict( + argstr='-trace %f', + units='NA', + ), + twodtanglerange=dict( + argstr='-twodtanglerange %s', + units='NA', + ), + twodtanglestep=dict( + argstr='-twodtanglestep %f', + units='NA', + ), + twodtfarange=dict( + argstr='-twodtfarange %s', + units='NA', + ), + twodtfastep=dict( + argstr='-twodtfastep %f', + units='NA', + ), + twodtmixmax=dict( + argstr='-twodtmixmax %f', + units='NA', + ), + twodtmixstep=dict( + argstr='-twodtmixstep %f', + units='NA', + ), + ) + inputs = SFPICOCalibData.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SFPICOCalibData_outputs(): + output_map = dict( + PICOCalib=dict(), + calib_info=dict(), + ) + outputs = SFPICOCalibData.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_SFPeaks.py b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py new file mode 100644 index 0000000000..959545d042 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_SFPeaks.py @@ -0,0 +1,76 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import SFPeaks + + +def test_SFPeaks_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + density=dict( + argstr='-density %d', + units='NA', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-inputfile %s', + mandatory=True, + ), + inputmodel=dict( + argstr='-inputmodel %s', + mandatory=True, + ), + mepointset=dict( + argstr='-mepointset %d', + units='NA', + ), + noconsistencycheck=dict(argstr='-noconsistencycheck', ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + order=dict( + argstr='-order %d', + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + pdthresh=dict( + argstr='-pdthresh %f', + units='NA', + ), + pointset=dict( + argstr='-pointset %d', + units='NA', + ), + rbfpointset=dict( + argstr='-rbfpointset %d', + units='NA', + ), + scheme_file=dict(argstr='%s', ), + searchradius=dict( + argstr='-searchradius %f', + units='NA', + ), + stdsfrommean=dict( + argstr='-stdsfrommean %f', + units='NA', + ), + ) + inputs = SFPeaks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SFPeaks_outputs(): + output_map = dict(peaks=dict(), ) + outputs = SFPeaks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_Shredder.py b/nipype/interfaces/camino/tests/test_auto_Shredder.py new file mode 100644 index 0000000000..1099693874 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_Shredder.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import Shredder + + +def test_Shredder_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + chunksize=dict( + argstr='%d', + position=2, + units='NA', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=-2, + ), + offset=dict( + argstr='%d', + 
position=1, + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + space=dict( + argstr='%d', + position=3, + units='NA', + ), + ) + inputs = Shredder.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Shredder_outputs(): + output_map = dict(shredded=dict(), ) + outputs = Shredder.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_Track.py b/nipype/interfaces/camino/tests/test_auto_Track.py new file mode 100644 index 0000000000..ad8d3ff2bb --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_Track.py @@ -0,0 +1,82 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import Track + + +def test_Track_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = Track.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Track_outputs(): + output_map = dict(tracked=dict(), ) + outputs = Track.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py new file mode 100644 index 0000000000..89515cef55 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackBallStick.py @@ -0,0 +1,82 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackBallStick + + +def test_TrackBallStick_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackBallStick.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackBallStick_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackBallStick.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py new file mode 100644 index 0000000000..2815ef18ee --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackBayesDirac.py @@ -0,0 +1,97 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackBayesDirac + + +def test_TrackBayesDirac_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvepriorg=dict(argstr='-curvepriorg %G', ), + curvepriork=dict(argstr='-curvepriork %G', ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + datamodel=dict(argstr='-datamodel %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + extpriordatatype=dict(argstr='-extpriordatatype %s', ), + extpriorfile=dict(argstr='-extpriorfile %s', ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + iterations=dict( + argstr='-iterations %d', + units='NA', + ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + pdf=dict(argstr='-pdf %s', ), + pointset=dict(argstr='-pointset %s', ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + 
argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackBayesDirac.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackBayesDirac_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackBayesDirac.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py new file mode 100644 index 0000000000..827dbb2f27 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxDeter.py @@ -0,0 +1,90 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackBedpostxDeter + + +def test_TrackBedpostxDeter_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + bedpostxdir=dict( + argstr='-bedpostxdir %s', + mandatory=True, + ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + min_vol_frac=dict( + argstr='-bedpostxminf %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackBedpostxDeter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackBedpostxDeter_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackBedpostxDeter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py new file mode 100644 index 0000000000..e250ec9c40 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackBedpostxProba.py @@ -0,0 +1,94 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackBedpostxProba + + +def test_TrackBedpostxProba_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + bedpostxdir=dict( + argstr='-bedpostxdir %s', + 
mandatory=True, + ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + iterations=dict( + argstr='-iterations %d', + units='NA', + ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + min_vol_frac=dict( + argstr='-bedpostxminf %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackBedpostxProba.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackBedpostxProba_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackBedpostxProba.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py new file mode 100644 index 0000000000..90ffd05a2d --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackBootstrap.py @@ -0,0 +1,96 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackBootstrap + + +def test_TrackBootstrap_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + bgmask=dict(argstr='-bgmask %s', ), + bsdatafiles=dict( + argstr='-bsdatafile %s', + mandatory=True, + ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + inversion=dict(argstr='-inversion %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + iterations=dict( + argstr='-iterations %d', + units='NA', + ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + scheme_file=dict( + argstr='-schemefile %s', + mandatory=True, + ), + 
seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackBootstrap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackBootstrap_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackBootstrap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackDT.py b/nipype/interfaces/camino/tests/test_auto_TrackDT.py new file mode 100644 index 0000000000..7ab3772468 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackDT.py @@ -0,0 +1,82 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackDT + + +def test_TrackDT_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackDT.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackDT_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackDT.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TrackPICo.py b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py new file mode 100644 index 0000000000..30ff12db9f --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TrackPICo.py @@ -0,0 +1,87 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TrackPICo + + +def test_TrackPICo_inputs(): + input_map = dict( + anisfile=dict(argstr='-anisfile %s', ), + anisthresh=dict(argstr='-anisthresh %f', ), + args=dict(argstr='%s', ), + curveinterval=dict( + argstr='-curveinterval %f', + requires=['curvethresh'], + ), + 
curvethresh=dict(argstr='-curvethresh %f', ), + data_dims=dict( + argstr='-datadims %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gzip=dict(argstr='-gzip', ), + in_file=dict( + argstr='-inputfile %s', + position=1, + ), + inputdatatype=dict(argstr='-inputdatatype %s', ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolator=dict(argstr='-interpolator %s', ), + ipthresh=dict(argstr='-ipthresh %f', ), + iterations=dict( + argstr='-iterations %d', + units='NA', + ), + maxcomponents=dict( + argstr='-maxcomponents %d', + units='NA', + ), + numpds=dict( + argstr='-numpds %d', + units='NA', + ), + out_file=dict( + argstr='-outputfile %s', + genfile=True, + position=-1, + ), + output_root=dict( + argstr='-outputroot %s', + position=-1, + ), + outputtracts=dict(argstr='-outputtracts %s', ), + pdf=dict(argstr='-pdf %s', ), + seed_file=dict( + argstr='-seedfile %s', + position=2, + ), + stepsize=dict( + argstr='-stepsize %f', + requires=['tracker'], + ), + tracker=dict( + argstr='-tracker %s', + usedefault=True, + ), + voxel_dims=dict( + argstr='-voxeldims %s', + units='mm', + ), + ) + inputs = TrackPICo.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackPICo_outputs(): + output_map = dict(tracked=dict(), ) + outputs = TrackPICo.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_TractShredder.py b/nipype/interfaces/camino/tests/test_auto_TractShredder.py new file mode 100644 index 0000000000..c9cf40e1c3 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_TractShredder.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import TractShredder + + +def test_TractShredder_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bunchsize=dict( + argstr='%d', + position=2, + units='NA', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='< %s', + mandatory=True, + position=-2, + ), + offset=dict( + argstr='%d', + position=1, + units='NA', + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + space=dict( + argstr='%d', + position=3, + units='NA', + ), + ) + inputs = TractShredder.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TractShredder_outputs(): + output_map = dict(shredded=dict(), ) + outputs = TractShredder.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py new file mode 100644 index 0000000000..8dc8bd03e9 --- /dev/null +++ b/nipype/interfaces/camino/tests/test_auto_VtkStreamlines.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import VtkStreamlines + + +def test_VtkStreamlines_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + colourorient=dict(argstr='-colourorient', ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + in_file=dict( + argstr=' < %s', + mandatory=True, + position=-2, + ), + inputmodel=dict( + argstr='-inputmodel %s', + usedefault=True, + ), + interpolate=dict(argstr='-interpolate', ), + interpolatescalars=dict(argstr='-interpolatescalars', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + scalar_file=dict( + argstr='-scalarfile %s', + position=3, + ), + seed_file=dict( + argstr='-seedfile %s', + position=1, + ), + target_file=dict( + argstr='-targetfile %s', + position=2, + ), + voxeldims=dict( + argstr='-voxeldims %s', + position=4, + units='mm', + ), + ) + inputs = VtkStreamlines.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VtkStreamlines_outputs(): + output_map = dict(vtk=dict(), ) + outputs = VtkStreamlines.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino/utils.py b/nipype/interfaces/camino/utils.py new file mode 100644 index 0000000000..57fcd58d9a --- /dev/null +++ b/nipype/interfaces/camino/utils.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os + +from ..base import (traits, TraitedSpec, File, CommandLine, + CommandLineInputSpec, InputMultiPath) +from ...utils.filemanip import split_filename + + +class ImageStatsInputSpec(CommandLineInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr='-images %s', + mandatory=True, + position=-1, + desc=('List of images to process. They must ' + 'be in the same space and have the same ' + 'dimensions.')) + stat = traits.Enum( + "min", + "max", + "mean", + "median", + "sum", + "std", + "var", + argstr='-stat %s', + units='NA', + mandatory=True, + desc="The statistic to compute.") + + out_type = traits.Enum( + "float", + "char", + "short", + "int", + "long", + "double", + argstr='-outputdatatype %s', + usedefault=True, + desc=('A Camino data type string, default is "float". ' + 'Type must be signed.')) + output_root = File( + argstr='-outputroot %s', + mandatory=True, + desc=('Filename root prepended onto the names of the output ' + ' files. The extension will be determined from the input.')) + + +class ImageStatsOutputSpec(TraitedSpec): + out_file = File( + exists=True, + desc='Path of the file computed with the statistic chosen') + + +class ImageStats(CommandLine): + """ + This program computes voxelwise statistics on a series of 3D images. The images + must be in the same space; the operation is performed voxelwise and one output + is produced per voxel. 
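+ + The output image is written to ``output_root`` with the extension of the first input file appended (see ``_gen_outfilename`` below).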
+ + Examples + -------- + + >>> import nipype.interfaces.camino as cam + >>> imstats = cam.ImageStats() + >>> imstats.inputs.in_files = ['im1.nii','im2.nii','im3.nii'] + >>> imstats.inputs.stat = 'max' + >>> imstats.run() # doctest: +SKIP + """ + _cmd = 'imagestats' + input_spec = ImageStatsInputSpec + output_spec = ImageStatsOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + output_root = self.inputs.output_root + first_file = self.inputs.in_files[0] + _, _, ext = split_filename(first_file) + return output_root + ext diff --git a/nipype/interfaces/camino2trackvis/__init__.py b/nipype/interfaces/camino2trackvis/__init__.py new file mode 100644 index 0000000000..94d3e458a7 --- /dev/null +++ b/nipype/interfaces/camino2trackvis/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Camino2Trackvis top level namespace +""" + +from .convert import Camino2Trackvis, Trackvis2Camino diff --git a/nipype/interfaces/camino2trackvis/convert.py b/nipype/interfaces/camino2trackvis/convert.py new file mode 100644 index 0000000000..573ddffe2d --- /dev/null +++ b/nipype/interfaces/camino2trackvis/convert.py @@ -0,0 +1,181 @@ +# -*- coding: utf-8 -*- +""" +Provides interfaces to various commands provided by Camino-Trackvis +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ...utils.filemanip import split_filename +from ..base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File + + +class Camino2TrackvisInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='-i %s', + mandatory=True, + position=1, + desc='The input .Bfloat (camino) file.') + + out_file = File( + argstr='-o %s', + genfile=True, + position=2, + desc='The filename to which to write the .trk (trackvis) file.') + + min_length = traits.Float( + argstr='-l %d', + position=3, + units='mm', + desc='The minimum length of tracts to output') + + data_dims = traits.List( + traits.Int, + argstr='-d %s', + sep=',', + mandatory=True, + position=4, + minlen=3, + maxlen=3, + desc= + 'Three comma-separated integers giving the number of voxels along each dimension of the source scans.' + ) + + voxel_dims = traits.List( + traits.Float, + argstr='-x %s', + sep=',', + mandatory=True, + position=5, + minlen=3, + maxlen=3, + desc= + 'Three comma-separated numbers giving the size of each voxel in mm.') + + # Change to enum with all combinations? i.e. LAS, LPI, RAS, etc.. + voxel_order = File( + argstr='--voxel-order %s', + mandatory=True, + position=6, + desc='Set the order in which various directions were stored.\ + Specify with three letters consisting of one each \ + from the pairs LR, AP, and SI. These stand for Left-Right, \ + Anterior-Posterior, and Superior-Inferior. \ + Whichever is specified in each position will \ + be the direction of increasing order. 
') + + nifti_file = File( + argstr='--nifti %s', + exists=True, + position=7, + desc='Read coordinate system from a NIfTI file.') + + +class Camino2TrackvisOutputSpec(TraitedSpec): + trackvis = File( + exists=True, + desc='The filename to which to write the .trk (trackvis) file.') + + +class Camino2Trackvis(CommandLine): + """ Wraps camino_to_trackvis from Camino-Trackvis + + Convert files from camino .Bfloat format to trackvis .trk format. + + Example + ------- + + >>> import nipype.interfaces.camino2trackvis as cam2trk + >>> c2t = cam2trk.Camino2Trackvis() + >>> c2t.inputs.in_file = 'data.Bfloat' + >>> c2t.inputs.out_file = 'streamlines.trk' + >>> c2t.inputs.min_length = 30 + >>> c2t.inputs.data_dims = [128, 104, 64] + >>> c2t.inputs.voxel_dims = [2.0, 2.0, 2.0] + >>> c2t.inputs.voxel_order = 'LAS' + >>> c2t.run() # doctest: +SKIP + """ + + _cmd = 'camino_to_trackvis' + input_spec = Camino2TrackvisInputSpec + output_spec = Camino2TrackvisOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['trackvis'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '.trk' + + +class Trackvis2CaminoInputSpec(CommandLineInputSpec): + """ Wraps trackvis_to_camino from Camino-Trackvis + + Convert files from trackvis .trk format to camino .Bfloat format. + + Example + ------- + + >>> import nipype.interfaces.camino2trackvis as cam2trk + >>> t2c = cam2trk.Trackvis2Camino() + >>> t2c.inputs.in_file = 'streamlines.trk' + >>> t2c.inputs.out_file = 'streamlines.Bfloat' + >>> t2c.run() # doctest: +SKIP + """ + + in_file = File( + exists=True, + argstr='-i %s', + mandatory=True, + position=1, + desc='The input .trk (trackvis) file.') + + out_file = File( + argstr='-o %s', + genfile=True, + position=2, + desc='The filename to which to write the .Bfloat (camino).') + + append_file = File( + exists=True, + argstr='-a %s', + position=2, + desc='A file to which to append the .Bfloat data. 
') + + +class Trackvis2CaminoOutputSpec(TraitedSpec): + camino = File( + exists=True, + desc='The filename to which to write the .Bfloat (camino).') + + +class Trackvis2Camino(CommandLine): + _cmd = 'trackvis_to_camino' + input_spec = Trackvis2CaminoInputSpec + output_spec = Trackvis2CaminoOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['camino'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '.Bfloat' diff --git a/nipype/interfaces/camino2trackvis/tests/__init__.py b/nipype/interfaces/camino2trackvis/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/camino2trackvis/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py new file mode 100644 index 0000000000..b4d5092dab --- /dev/null +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Camino2Trackvis.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import Camino2Trackvis + + +def test_Camino2Trackvis_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + data_dims=dict( + argstr='-d %s', + mandatory=True, + position=4, + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + min_length=dict( + argstr='-l %d', + position=3, + units='mm', + ), + nifti_file=dict( + argstr='--nifti %s', + position=7, + ), + out_file=dict( + argstr='-o %s', + genfile=True, + position=2, + ), + voxel_dims=dict( + argstr='-x %s', + mandatory=True, + position=5, + sep=',', + ), + voxel_order=dict( + argstr='--voxel-order %s', + mandatory=True, + position=6, + ), + ) + inputs = Camino2Trackvis.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Camino2Trackvis_outputs(): + output_map = dict(trackvis=dict(), ) + outputs = Camino2Trackvis.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py new file mode 100644 index 0000000000..978b2439a2 --- /dev/null +++ b/nipype/interfaces/camino2trackvis/tests/test_auto_Trackvis2Camino.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import Trackvis2Camino + + +def test_Trackvis2Camino_inputs(): + input_map = dict( + append_file=dict( + argstr='-a %s', + position=2, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='-o %s', + genfile=True, + position=2, + ), + ) + inputs = Trackvis2Camino.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_Trackvis2Camino_outputs(): + output_map = dict(camino=dict(), ) + outputs = Trackvis2Camino.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/__init__.py b/nipype/interfaces/cmtk/__init__.py new file mode 100644 index 0000000000..60c7d636d5 --- /dev/null +++ b/nipype/interfaces/cmtk/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from .cmtk import ROIGen, CreateMatrix, CreateNodes +from .nx import NetworkXMetrics, AverageNetworks +from .parcellation import Parcellate +from .convert import CFFConverter, MergeCNetworks +from .nbs import NetworkBasedStatistic diff --git a/nipype/interfaces/cmtk/base.py b/nipype/interfaces/cmtk/base.py new file mode 100644 index 0000000000..4aedd56bdb --- /dev/null +++ b/nipype/interfaces/cmtk/base.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Base interface for cmtk """ + +from ..base import LibraryBaseInterface +from ...utils.misc import package_check + + +class CFFBaseInterface(LibraryBaseInterface): + _pkg = 'cfflib' + + +# Originally set in convert, nbs, nx, parcellation +# Set here to be imported, in case anybody depends on its presence +# Remove in 2.0 +have_cmp = True +try: + package_check('cmp') +except ImportError: + have_cmp = False + +have_cfflib = True +try: + package_check('cfflib') +except ImportError: + have_cfflib = False + +have_cv = True +try: + package_check('cviewer') +except ImportError: + have_cv = False diff --git a/nipype/interfaces/cmtk/cmtk.py b/nipype/interfaces/cmtk/cmtk.py new file mode 100644 index 0000000000..c26220c6b0 --- /dev/null +++ b/nipype/interfaces/cmtk/cmtk.py @@ -0,0 +1,1008 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +import pickle +import os.path as op + +import numpy as np +import nibabel as nb +import networkx as nx +import scipy.io as sio + +from ... import logging +from ...utils.filemanip import split_filename +from ...utils import NUMPY_MMAP + +from ..base import (BaseInterface, BaseInterfaceInputSpec, traits, File, + TraitedSpec, Directory, OutputMultiPath, isdefined) +iflogger = logging.getLogger('nipype.interface') + + +def length(xyz, along=False): + """ + Euclidean length of track line + + Parameters + ---------- + xyz : array-like shape (N,3) + array representing x,y,z of N points in a track + along : bool, optional + If True, return array giving cumulative length along track, + otherwise (default) return scalar giving total length. + + Returns + ------- + L : scalar or array shape (N-1,) + scalar in case of `along` == False, giving total length, array if + `along` == True, giving cumulative lengths. 
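+ + Notes + ----- + Segment lengths are computed as ``np.sqrt((np.diff(xyz, axis=0) ** 2).sum(axis=1))``; the return value is their sum, or their cumulative sum when ``along`` is True.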
+ + Examples + -------- + >>> xyz = np.array([[1,1,1],[2,3,4],[0,0,0]]) + >>> expected_lens = np.sqrt([1+2**2+3**2, 2**2+3**2+4**2]) + >>> length(xyz) == expected_lens.sum() + True + >>> len_along = length(xyz, along=True) + >>> np.allclose(len_along, expected_lens.cumsum()) + True + >>> length([]) + 0 + >>> length([[1, 2, 3]]) + 0 + >>> length([], along=True) + array([0]) + """ + xyz = np.asarray(xyz) + if xyz.shape[0] < 2: + if along: + return np.array([0]) + return 0 + dists = np.sqrt((np.diff(xyz, axis=0)**2).sum(axis=1)) + if along: + return np.cumsum(dists) + return np.sum(dists) + + +def get_rois_crossed(pointsmm, roiData, voxelSize): + n_points = len(pointsmm) + rois_crossed = [] + for j in range(0, n_points): + # store point + x = int(pointsmm[j, 0] / float(voxelSize[0])) + y = int(pointsmm[j, 1] / float(voxelSize[1])) + z = int(pointsmm[j, 2] / float(voxelSize[2])) + if not roiData[x, y, z] == 0: + rois_crossed.append(roiData[x, y, z]) + rois_crossed = list( + dict.fromkeys(rois_crossed).keys()) # Removed duplicates from the list + return rois_crossed + + +def get_connectivity_matrix(n_rois, list_of_roi_crossed_lists): + connectivity_matrix = np.zeros((n_rois, n_rois), dtype=np.uint) + for rois_crossed in list_of_roi_crossed_lists: + for idx_i, roi_i in enumerate(rois_crossed): + for idx_j, roi_j in enumerate(rois_crossed): + if idx_i > idx_j: + if not roi_i == roi_j: + connectivity_matrix[roi_i - 1, roi_j - 1] += 1 + connectivity_matrix = connectivity_matrix + connectivity_matrix.T + return connectivity_matrix + + +def create_allpoints_cmat(streamlines, roiData, voxelSize, n_rois): + """ Create the intersection arrays for each fiber + """ + n_fib = len(streamlines) + pc = -1 + # Computation for each fiber + final_fiber_ids = [] + list_of_roi_crossed_lists = [] + for i, fiber in enumerate(streamlines): + pcN = int(round(float(100 * i) / n_fib)) + if pcN > pc and pcN % 1 == 0: + pc = pcN + print('%4.0f%%' % (pc)) + rois_crossed = get_rois_crossed(fiber[0], roiData, voxelSize) + if len(rois_crossed) > 0: + list_of_roi_crossed_lists.append(list(rois_crossed)) + final_fiber_ids.append(i) + + connectivity_matrix = get_connectivity_matrix(n_rois, + list_of_roi_crossed_lists) + dis = n_fib - len(final_fiber_ids) + iflogger.info( + 'Found %i (%f percent out of %i fibers) fibers that start or ' + 'terminate in a voxel which is not labeled. 
(orphans)', dis, + dis * 100.0 / n_fib, n_fib) + iflogger.info('Valid fibers: %i (%f percent)', n_fib - dis, + 100 - dis * 100.0 / n_fib) + iflogger.info('Returning the intersecting point connectivity matrix') + return connectivity_matrix, final_fiber_ids + + +def create_endpoints_array(fib, voxelSize): + """ Create the endpoints arrays for each fiber + Parameters + ---------- + fib: the fibers data + voxelSize: 3-tuple containing the voxel size of the ROI image + Returns + ------- + (endpoints, endpointsmm) : endpoints is a matrix of size [#fibers, 2, 3] + containing for each fiber the voxel index of its first and last point; + endpointsmm contains the same endpoints in millimeter coordinates + """ + + # Init + n = len(fib) + endpoints = np.zeros((n, 2, 3)) + endpointsmm = np.zeros((n, 2, 3)) + + # Computation for each fiber + for i, fi in enumerate(fib): + f = fi[0] + + # store startpoint + endpoints[i, 0, :] = f[0, :] + # store endpoint + endpoints[i, 1, :] = f[-1, :] + + # store startpoint (in mm) + endpointsmm[i, 0, :] = f[0, :] + # store endpoint (in mm) + endpointsmm[i, 1, :] = f[-1, :] + + # Translate from mm to index + endpoints[i, 0, 0] = int(endpoints[i, 0, 0] / float(voxelSize[0])) + endpoints[i, 0, 1] = int(endpoints[i, 0, 1] / float(voxelSize[1])) + endpoints[i, 0, 2] = int(endpoints[i, 0, 2] / float(voxelSize[2])) + endpoints[i, 1, 0] = int(endpoints[i, 1, 0] / float(voxelSize[0])) + endpoints[i, 1, 1] = int(endpoints[i, 1, 1] / float(voxelSize[1])) + endpoints[i, 1, 2] = int(endpoints[i, 1, 2] / float(voxelSize[2])) + + # Return the matrices + iflogger.info('Returning the endpoint matrix') + return (endpoints, endpointsmm) + + +def cmat(track_file, + roi_file, + resolution_network_file, + matrix_name, + matrix_mat_name, + endpoint_name, + intersections=False): + """ Create the connection matrix for each resolution using fibers and ROIs. 
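+ + Parameters + ---------- + track_file : TrackVis (.trk) streamline file + roi_file : label volume whose voxel size and labels define the nodes + resolution_network_file : parcellation graph (.pck or .graphml) supplying node metadata + matrix_name : filename for the gpickled NetworkX output graph + matrix_mat_name : filename for the MATLAB (.mat) connectivity matrix + endpoint_name : root for the saved endpoint, fiber label and fiber length arrays + intersections : if True, additionally count every region/fiber traversal + + A hypothetical call, with placeholder filenames for illustration only: + + >>> cmat('fibers.trk', 'fsLUT_aparc+aseg.nii', 'nodes.graphml', 'cmat.pck', 'cmatrix.mat', 'fibers') # doctest: +SKIP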
""" + + stats = {} + iflogger.info('Running cmat function') + # Identify the endpoints of each fiber + en_fname = op.abspath(endpoint_name + '_endpoints.npy') + en_fnamemm = op.abspath(endpoint_name + '_endpointsmm.npy') + + iflogger.info('Reading Trackvis file %s', track_file) + fib, hdr = nb.trackvis.read(track_file, False) + stats['orig_n_fib'] = len(fib) + + roi = nb.load(roi_file, mmap=NUMPY_MMAP) + roiData = roi.get_data() + roiVoxelSize = roi.header.get_zooms() + (endpoints, endpointsmm) = create_endpoints_array(fib, roiVoxelSize) + + # Output endpoint arrays + iflogger.info('Saving endpoint array: %s', en_fname) + np.save(en_fname, endpoints) + iflogger.info('Saving endpoint array in mm: %s', en_fnamemm) + np.save(en_fnamemm, endpointsmm) + + n = len(fib) + iflogger.info('Number of fibers: %i', n) + + # Create empty fiber label array + fiberlabels = np.zeros((n, 2)) + final_fiberlabels = [] + final_fibers_idx = [] + + # Add node information from specified parcellation scheme + path, name, ext = split_filename(resolution_network_file) + if ext == '.pck': + gp = nx.read_gpickle(resolution_network_file) + elif ext == '.graphml': + gp = nx.read_graphml(resolution_network_file) + else: + raise TypeError("Unable to read file:", resolution_network_file) + nROIs = len(gp.nodes()) + + # add node information from parcellation + if 'dn_position' in gp.nodes[list(gp.nodes())[0]]: + G = gp.copy() + else: + G = nx.Graph() + for u, d in gp.nodes(data=True): + G.add_node(int(u), **d) + # compute a position for the node based on the mean position of the + # ROI in voxel coordinates (segmentation volume ) + xyz = tuple( + np.mean( + np.where( + np.flipud(roiData) == int(d["dn_correspondence_id"])), + axis=1)) + G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + + if intersections: + iflogger.info("Filtering tractography from intersections") + intersection_matrix, final_fiber_ids = create_allpoints_cmat( + fib, roiData, roiVoxelSize, nROIs) + finalfibers_fname = op.abspath( + endpoint_name + '_intersections_streamline_final.trk') + stats['intersections_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, + final_fiber_ids) + intersection_matrix = np.matrix(intersection_matrix) + I = G.copy() + H = nx.from_numpy_matrix(np.matrix(intersection_matrix)) + H = nx.relabel_nodes( + H, lambda x: x + 1) # relabel nodes so they start at 1 + I.add_weighted_edges_from( + ((u, v, d['weight']) for u, v, d in H.edges(data=True))) + + dis = 0 + for i in range(endpoints.shape[0]): + + # ROI start => ROI end + try: + startROI = int(roiData[endpoints[i, 0, 0], endpoints[i, 0, 1], + endpoints[i, 0, 2]]) + endROI = int(roiData[endpoints[i, 1, 0], endpoints[i, 1, 1], + endpoints[i, 1, 2]]) + except IndexError: + iflogger.error('AN INDEXERROR EXCEPTION OCCURED FOR FIBER %s. 
' + 'PLEASE CHECK ENDPOINT GENERATION', i) + break + + # Filter + if startROI == 0 or endROI == 0: + dis += 1 + fiberlabels[i, 0] = -1 + continue + + if startROI > nROIs or endROI > nROIs: + iflogger.error( + "Start or endpoint of fiber terminates in a voxel which is labeled higher" + ) + iflogger.error( + "than is expected by the parcellation node information.") + iflogger.error("Start ROI: %i, End ROI: %i", startROI, endROI) + iflogger.error("This needs bugfixing!") + continue + + # Update fiber label + # switch the rois in order to enforce startROI < endROI + if endROI < startROI: + tmp = startROI + startROI = endROI + endROI = tmp + + fiberlabels[i, 0] = startROI + fiberlabels[i, 1] = endROI + + final_fiberlabels.append([startROI, endROI]) + final_fibers_idx.append(i) + + # Add edge to graph + if G.has_edge(startROI, + endROI) and 'fiblist' in G.edge[startROI][endROI]: + G.edge[startROI][endROI]['fiblist'].append(i) + else: + G.add_edge(startROI, endROI, fiblist=[i]) + + # create a final fiber length array + finalfiberlength = [] + if intersections: + final_fibers_indices = final_fiber_ids + else: + final_fibers_indices = final_fibers_idx + + for idx in final_fibers_indices: + # compute length of fiber + finalfiberlength.append(length(fib[idx][0])) + + # convert to array + final_fiberlength_array = np.array(finalfiberlength) + + # make final fiber labels as array + final_fiberlabels_array = np.array(final_fiberlabels, dtype=int) + + iflogger.info( + 'Found %i (%f percent out of %i fibers) fibers that start or ' + 'terminate in a voxel which is not labeled. (orphans)', dis, + dis * 100.0 / n, n) + iflogger.info('Valid fibers: %i (%f%%)', n - dis, 100 - dis * 100.0 / n) + + numfib = nx.Graph() + numfib.add_nodes_from(G) + fibmean = numfib.copy() + fibmedian = numfib.copy() + fibdev = numfib.copy() + for u, v, d in G.edges(data=True): + G.remove_edge(u, v) + di = {} + if 'fiblist' in d: + di['number_of_fibers'] = len(d['fiblist']) + idx = np.where((final_fiberlabels_array[:, 0] == int(u)) & + (final_fiberlabels_array[:, 1] == int(v)))[0] + di['fiber_length_mean'] = float( + np.mean(final_fiberlength_array[idx])) + di['fiber_length_median'] = float( + np.median(final_fiberlength_array[idx])) + di['fiber_length_std'] = float( + np.std(final_fiberlength_array[idx])) + else: + di['number_of_fibers'] = 0 + di['fiber_length_mean'] = 0 + di['fiber_length_median'] = 0 + di['fiber_length_std'] = 0 + if not u == v: # Fix for self loop problem + G.add_edge(u, v, **di) + if 'fiblist' in d: + numfib.add_edge(u, v, weight=di['number_of_fibers']) + fibmean.add_edge(u, v, weight=di['fiber_length_mean']) + fibmedian.add_edge(u, v, weight=di['fiber_length_median']) + fibdev.add_edge(u, v, weight=di['fiber_length_std']) + + iflogger.info('Writing network as %s', matrix_name) + nx.write_gpickle(G, op.abspath(matrix_name)) + + numfib_mlab = nx.to_numpy_matrix(numfib, dtype=int) + numfib_dict = {'number_of_fibers': numfib_mlab} + fibmean_mlab = nx.to_numpy_matrix(fibmean, dtype=np.float64) + fibmean_dict = {'mean_fiber_length': fibmean_mlab} + fibmedian_mlab = nx.to_numpy_matrix(fibmedian, dtype=np.float64) + fibmedian_dict = {'median_fiber_length': fibmedian_mlab} + fibdev_mlab = nx.to_numpy_matrix(fibdev, dtype=np.float64) + fibdev_dict = {'fiber_length_std': fibdev_mlab} + + if intersections: + path, name, ext = split_filename(matrix_name) + intersection_matrix_name = op.abspath(name + '_intersections') + ext + iflogger.info('Writing intersection network as %s', + intersection_matrix_name) + 
nx.write_gpickle(I, intersection_matrix_name) + + path, name, ext = split_filename(matrix_mat_name) + if not ext == '.mat': + ext = '.mat' + matrix_mat_name = matrix_mat_name + ext + + iflogger.info('Writing matlab matrix as %s', matrix_mat_name) + sio.savemat(matrix_mat_name, numfib_dict) + + if intersections: + intersect_dict = {'intersections': intersection_matrix} + intersection_matrix_mat_name = op.abspath(name + '_intersections') + ext + iflogger.info('Writing intersection matrix as %s', + intersection_matrix_mat_name) + sio.savemat(intersection_matrix_mat_name, intersect_dict) + + mean_fiber_length_matrix_name = op.abspath( + name + '_mean_fiber_length') + ext + iflogger.info('Writing matlab mean fiber length matrix as %s', + mean_fiber_length_matrix_name) + sio.savemat(mean_fiber_length_matrix_name, fibmean_dict) + + median_fiber_length_matrix_name = op.abspath( + name + '_median_fiber_length') + ext + iflogger.info('Writing matlab median fiber length matrix as %s', + median_fiber_length_matrix_name) + sio.savemat(median_fiber_length_matrix_name, fibmedian_dict) + + fiber_length_std_matrix_name = op.abspath(name + '_fiber_length_std') + ext + iflogger.info('Writing matlab fiber length deviation matrix as %s', + fiber_length_std_matrix_name) + sio.savemat(fiber_length_std_matrix_name, fibdev_dict) + + fiberlengths_fname = op.abspath(endpoint_name + '_final_fiberslength.npy') + iflogger.info('Storing final fiber length array as %s', fiberlengths_fname) + np.save(fiberlengths_fname, final_fiberlength_array) + + fiberlabels_fname = op.abspath(endpoint_name + '_filtered_fiberslabel.npy') + iflogger.info('Storing all fiber labels (with orphans) as %s', + fiberlabels_fname) + np.save( + fiberlabels_fname, + np.array(fiberlabels, dtype=np.int32), + ) + + fiberlabels_noorphans_fname = op.abspath( + endpoint_name + '_final_fiberslabels.npy') + iflogger.info('Storing final fiber labels (no orphans) as %s', + fiberlabels_noorphans_fname) + np.save(fiberlabels_noorphans_fname, final_fiberlabels_array) + + iflogger.info("Filtering tractography - keeping only no orphan fibers") + finalfibers_fname = op.abspath(endpoint_name + '_streamline_final.trk') + stats['endpoint_n_fib'] = save_fibers(hdr, fib, finalfibers_fname, + final_fibers_idx) + stats['endpoints_percent'] = float(stats['endpoint_n_fib']) / float( + stats['orig_n_fib']) * 100 + stats['intersections_percent'] = float( + stats['intersections_n_fib']) / float(stats['orig_n_fib']) * 100 + + out_stats_file = op.abspath(endpoint_name + '_statistics.mat') + iflogger.info('Saving matrix creation statistics as %s', out_stats_file) + sio.savemat(out_stats_file, stats) + + +def save_fibers(oldhdr, oldfib, fname, indices): + """ Stores a new trackvis file fname using only given indices """ + hdrnew = oldhdr.copy() + outstreams = [] + for i in indices: + outstreams.append(oldfib[i]) + n_fib_out = len(outstreams) + hdrnew['n_count'] = n_fib_out + iflogger.info('Writing final non-orphan fibers as %s', fname) + nb.trackvis.write(fname, outstreams, hdrnew) + return n_fib_out + + +class CreateMatrixInputSpec(TraitedSpec): + roi_file = File( + exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') + tract_file = File(exists=True, mandatory=True, desc='Trackvis tract file') + resolution_network_file = File( + exists=True, + mandatory=True, + desc='Parcellation files from Connectome Mapping Toolkit') + count_region_intersections = traits.Bool( + False, + usedefault=True, + desc= + 'Counts all of the fiber-region traversals in the connectivity 
matrix (requires significantly more computational time)' + ) + out_matrix_file = File( + genfile=True, desc='NetworkX graph describing the connectivity') + out_matrix_mat_file = File( + 'cmatrix.mat', + usedefault=True, + desc='Matlab matrix describing the connectivity') + out_mean_fiber_length_matrix_mat_file = File( + genfile=True, + desc= + 'Matlab matrix describing the mean fiber lengths between each node.') + out_median_fiber_length_matrix_mat_file = File( + genfile=True, + desc= + 'Matlab matrix describing the median fiber lengths between each node.') + out_fiber_length_std_matrix_mat_file = File( + genfile=True, + desc= + 'Matlab matrix describing the deviation in fiber lengths connecting each node.' + ) + out_intersection_matrix_mat_file = File( + genfile=True, + desc= + 'Matlab connectivity matrix if all region/fiber intersections are counted.' + ) + out_endpoint_array_name = File( + genfile=True, desc='Name for the generated endpoint arrays') + + +class CreateMatrixOutputSpec(TraitedSpec): + matrix_file = File( + desc='NetworkX graph describing the connectivity', exists=True) + intersection_matrix_file = File( + desc='NetworkX graph describing the connectivity', exists=True) + matrix_files = OutputMultiPath( + File( + desc='All of the gpickled network files output by this interface', + exists=True)) + matlab_matrix_files = OutputMultiPath( + File( + desc='All of the MATLAB .mat files output by this interface', + exists=True)) + matrix_mat_file = File( + desc='Matlab matrix describing the connectivity', exists=True) + intersection_matrix_mat_file = File( + desc= + 'Matlab connectivity matrix of all region/fiber intersections.', + exists=True) + mean_fiber_length_matrix_mat_file = File( + desc= + 'Matlab matrix describing the mean fiber lengths between each node.', + exists=True) + median_fiber_length_matrix_mat_file = File( + desc= + 'Matlab matrix describing the median fiber lengths between each node.', + exists=True) + fiber_length_std_matrix_mat_file = File( + desc= + 'Matlab matrix describing the deviation in fiber lengths connecting each node.', + exists=True) + endpoint_file = File( + desc='Saved Numpy array with the endpoints of each fiber', exists=True) + endpoint_file_mm = File( + desc= + 'Saved Numpy array with the endpoints of each fiber (in millimeters)', + exists=True) + fiber_length_file = File( + desc='Saved Numpy array with the lengths of each fiber', exists=True) + fiber_label_file = File( + desc='Saved Numpy array with the labels for each fiber', exists=True) + fiber_labels_noorphans = File( + desc='Saved Numpy array with the labels for each non-orphan fiber', + exists=True) + filtered_tractography = File( + desc= + 'TrackVis file containing only those fibers that originate in one region and terminate in another', + exists=True) + filtered_tractography_by_intersections = File( + desc='TrackVis file containing all fibers which connect two regions', + exists=True) + filtered_tractographies = OutputMultiPath( + File( + desc= + 'TrackVis file containing only those fibers that originate in one region and terminate in another', + exists=True)) + stats_file = File( + desc= + 'Saved Matlab .mat file with the number of fibers saved at each stage', + exists=True) + + +class CreateMatrix(BaseInterface): + """ + Performs connectivity mapping and outputs the result as a NetworkX graph and a Matlab matrix + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> conmap = cmtk.CreateMatrix() + >>> conmap.inputs.roi_file = 'fsLUT_aparc+aseg.nii' + >>> conmap.inputs.tract_file = 
'fibers.trk' + >>> conmap.run() # doctest: +SKIP + """ + + input_spec = CreateMatrixInputSpec + output_spec = CreateMatrixOutputSpec + + def _run_interface(self, runtime): + if isdefined(self.inputs.out_matrix_file): + path, name, _ = split_filename(self.inputs.out_matrix_file) + matrix_file = op.abspath(name + '.pck') + else: + matrix_file = self._gen_outfilename('.pck') + + matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) + path, name, ext = split_filename(matrix_mat_file) + if not ext == '.mat': + ext = '.mat' + matrix_mat_file = matrix_mat_file + ext + + if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): + mean_fiber_length_matrix_mat_file = op.abspath( + self.inputs.out_mean_fiber_length_matrix_mat_file) + else: + mean_fiber_length_matrix_name = op.abspath( + self._gen_outfilename('_mean_fiber_length.mat')) + + if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): + median_fiber_length_matrix_mat_file = op.abspath( + self.inputs.out_median_fiber_length_matrix_mat_file) + else: + median_fiber_length_matrix_name = op.abspath( + self._gen_outfilename('_median_fiber_length.mat')) + + if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): + fiber_length_std_matrix_mat_file = op.abspath( + self.inputs.out_fiber_length_std_matrix_mat_file) + else: + fiber_length_std_matrix_name = op.abspath( + self._gen_outfilename('_fiber_length_std.mat')) + + if not isdefined(self.inputs.out_endpoint_array_name): + _, endpoint_name, _ = split_filename(self.inputs.tract_file) + endpoint_name = op.abspath(endpoint_name) + else: + endpoint_name = op.abspath(self.inputs.out_endpoint_array_name) + + cmat(self.inputs.tract_file, self.inputs.roi_file, + self.inputs.resolution_network_file, matrix_file, matrix_mat_file, + endpoint_name, self.inputs.count_region_intersections) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_matrix_file): + path, name, _ = split_filename(self.inputs.out_matrix_file) + out_matrix_file = op.abspath(name + '.pck') + out_intersection_matrix_file = op.abspath( + name + '_intersections.pck') + else: + out_matrix_file = op.abspath(self._gen_outfilename('.pck')) + out_intersection_matrix_file = op.abspath( + self._gen_outfilename('_intersections.pck')) + + outputs['matrix_file'] = out_matrix_file + outputs['intersection_matrix_file'] = out_intersection_matrix_file + + matrix_mat_file = op.abspath(self.inputs.out_matrix_mat_file) + path, name, ext = split_filename(matrix_mat_file) + if not ext == '.mat': + ext = '.mat' + matrix_mat_file = matrix_mat_file + ext + + outputs['matrix_mat_file'] = matrix_mat_file + if isdefined(self.inputs.out_mean_fiber_length_matrix_mat_file): + outputs['mean_fiber_length_matrix_mat_file'] = op.abspath( + self.inputs.out_mean_fiber_length_matrix_mat_file) + else: + outputs['mean_fiber_length_matrix_mat_file'] = op.abspath( + self._gen_outfilename('_mean_fiber_length.mat')) + + if isdefined(self.inputs.out_median_fiber_length_matrix_mat_file): + outputs['median_fiber_length_matrix_mat_file'] = op.abspath( + self.inputs.out_median_fiber_length_matrix_mat_file) + else: + outputs['median_fiber_length_matrix_mat_file'] = op.abspath( + self._gen_outfilename('_median_fiber_length.mat')) + + if isdefined(self.inputs.out_fiber_length_std_matrix_mat_file): + outputs['fiber_length_std_matrix_mat_file'] = op.abspath( + self.inputs.out_fiber_length_std_matrix_mat_file) + else: + outputs['fiber_length_std_matrix_mat_file'] = op.abspath( + 
self._gen_outfilename('_fiber_length_std.mat')) + + if isdefined(self.inputs.out_intersection_matrix_mat_file): + outputs['intersection_matrix_mat_file'] = op.abspath( + self.inputs.out_intersection_matrix_mat_file) + else: + outputs['intersection_matrix_mat_file'] = op.abspath( + self._gen_outfilename('_intersections.mat')) + + if isdefined(self.inputs.out_endpoint_array_name): + endpoint_name = self.inputs.out_endpoint_array_name + outputs['endpoint_file'] = op.abspath( + self.inputs.out_endpoint_array_name + '_endpoints.npy') + outputs['endpoint_file_mm'] = op.abspath( + self.inputs.out_endpoint_array_name + '_endpointsmm.npy') + outputs['fiber_length_file'] = op.abspath( + self.inputs.out_endpoint_array_name + '_final_fiberslength.npy' + ) + outputs['fiber_label_file'] = op.abspath( + self.inputs.out_endpoint_array_name + + '_filtered_fiberslabel.npy') + outputs['fiber_labels_noorphans'] = op.abspath( + self.inputs.out_endpoint_array_name + '_final_fiberslabels.npy' + ) + else: + _, endpoint_name, _ = split_filename(self.inputs.tract_file) + outputs['endpoint_file'] = op.abspath( + endpoint_name + '_endpoints.npy') + outputs['endpoint_file_mm'] = op.abspath( + endpoint_name + '_endpointsmm.npy') + outputs['fiber_length_file'] = op.abspath( + endpoint_name + '_final_fiberslength.npy') + outputs['fiber_label_file'] = op.abspath( + endpoint_name + '_filtered_fiberslabel.npy') + outputs['fiber_labels_noorphans'] = op.abspath( + endpoint_name + '_final_fiberslabels.npy') + + if self.inputs.count_region_intersections: + outputs['matrix_files'] = [ + out_matrix_file, out_intersection_matrix_file + ] + outputs['matlab_matrix_files'] = [ + outputs['matrix_mat_file'], + outputs['mean_fiber_length_matrix_mat_file'], + outputs['median_fiber_length_matrix_mat_file'], + outputs['fiber_length_std_matrix_mat_file'], + outputs['intersection_matrix_mat_file'] + ] + else: + outputs['matrix_files'] = [out_matrix_file] + outputs['matlab_matrix_files'] = [ + outputs['matrix_mat_file'], + outputs['mean_fiber_length_matrix_mat_file'], + outputs['median_fiber_length_matrix_mat_file'], + outputs['fiber_length_std_matrix_mat_file'] + ] + + outputs['filtered_tractography'] = op.abspath( + endpoint_name + '_streamline_final.trk') + outputs['filtered_tractography_by_intersections'] = op.abspath( + endpoint_name + '_intersections_streamline_final.trk') + outputs['filtered_tractographies'] = [ + outputs['filtered_tractography'], + outputs['filtered_tractography_by_intersections'] + ] + outputs['stats_file'] = op.abspath(endpoint_name + '_statistics.mat') + return outputs + + def _gen_outfilename(self, ext): + if ext.endswith("mat") and isdefined(self.inputs.out_matrix_mat_file): + _, name, _ = split_filename(self.inputs.out_matrix_mat_file) + elif isdefined(self.inputs.out_matrix_file): + _, name, _ = split_filename(self.inputs.out_matrix_file) + else: + _, name, _ = split_filename(self.inputs.tract_file) + return name + ext + + +class ROIGenInputSpec(BaseInterfaceInputSpec): + aparc_aseg_file = File( + exists=True, mandatory=True, desc='Freesurfer aparc+aseg file') + LUT_file = File( + exists=True, + xor=['use_freesurfer_LUT'], + desc='Custom lookup table (cf. 
FreeSurferColorLUT.txt)') + use_freesurfer_LUT = traits.Bool( + xor=['LUT_file'], + desc= + 'Boolean value; Set to True to use default Freesurfer LUT, False for custom LUT' + ) + freesurfer_dir = Directory( + requires=['use_freesurfer_LUT'], desc='Freesurfer main directory') + out_roi_file = File( + genfile=True, desc='Region of Interest file for connectivity mapping') + out_dict_file = File( + genfile=True, desc='Label dictionary saved in Pickle format') + + +class ROIGenOutputSpec(TraitedSpec): + roi_file = File(desc='Region of Interest file for connectivity mapping') + dict_file = File(desc='Label dictionary saved in Pickle format') + + +class ROIGen(BaseInterface): + """ + Generates a ROI file for connectivity mapping and a dictionary file containing relevant node information + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> rg = cmtk.ROIGen() + >>> rg.inputs.aparc_aseg_file = 'aparc+aseg.nii' + >>> rg.inputs.use_freesurfer_LUT = True + >>> rg.inputs.freesurfer_dir = '/usr/local/freesurfer' + >>> rg.run() # doctest: +SKIP + + The label dictionary is written to disk using Pickle. Resulting data can be loaded using: + + >>> file = open("FreeSurferColorLUT_adapted_aparc+aseg_out.pck", "r") + >>> file = open("fsLUT_aparc+aseg.pck", "r") + >>> labelDict = pickle.load(file) # doctest: +SKIP + >>> labelDict # doctest: +SKIP + """ + + input_spec = ROIGenInputSpec + output_spec = ROIGenOutputSpec + + def _run_interface(self, runtime): + aparc_aseg_file = self.inputs.aparc_aseg_file + aparcpath, aparcname, aparcext = split_filename(aparc_aseg_file) + iflogger.info('Using Aparc+Aseg file: %s', aparcname + aparcext) + niiAPARCimg = nb.load(aparc_aseg_file, mmap=NUMPY_MMAP) + niiAPARCdata = niiAPARCimg.get_data() + niiDataLabels = np.unique(niiAPARCdata) + numDataLabels = np.size(niiDataLabels) + iflogger.info('Number of labels in image: %s', numDataLabels) + + write_dict = True + if self.inputs.use_freesurfer_LUT: + self.LUT_file = self.inputs.freesurfer_dir + '/FreeSurferColorLUT.txt' + iflogger.info('Using Freesurfer LUT: %s', self.LUT_file) + prefix = 'fsLUT' + elif not self.inputs.use_freesurfer_LUT and isdefined( + self.inputs.LUT_file): + self.LUT_file = op.abspath(self.inputs.LUT_file) + lutpath, lutname, lutext = split_filename(self.LUT_file) + iflogger.info('Using Custom LUT file: %s', lutname + lutext) + prefix = lutname + else: + prefix = 'hardcoded' + write_dict = False + + if isdefined(self.inputs.out_roi_file): + roi_file = op.abspath(self.inputs.out_roi_file) + else: + roi_file = op.abspath(prefix + '_' + aparcname + '.nii') + + if isdefined(self.inputs.out_dict_file): + dict_file = op.abspath(self.inputs.out_dict_file) + else: + dict_file = op.abspath(prefix + '_' + aparcname + '.pck') + + if write_dict: + iflogger.info('Lookup table: %s', op.abspath(self.LUT_file)) + LUTlabelsRGBA = np.loadtxt( + self.LUT_file, + skiprows=4, + usecols=[0, 1, 2, 3, 4, 5], + comments='#', + dtype={ + 'names': ('index', 'label', 'R', 'G', 'B', 'A'), + 'formats': ('int', '|S30', 'int', 'int', 'int', 'int') + }) + numLUTLabels = np.size(LUTlabelsRGBA) + if numLUTLabels < numDataLabels: + iflogger.error( + 'LUT file provided does not contain all of the regions in the image' + ) + iflogger.error('Removing unmapped regions') + iflogger.info('Number of labels in LUT: %s', numLUTLabels) + LUTlabelDict = {} + """ Create dictionary for input LUT table""" + for labels in range(0, numLUTLabels): + LUTlabelDict[LUTlabelsRGBA[labels][0]] = [ + LUTlabelsRGBA[labels][1], 
LUTlabelsRGBA[labels][2], + LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], + LUTlabelsRGBA[labels][5] + ] + + iflogger.info('Printing LUT label dictionary') + iflogger.info(LUTlabelDict) + + mapDict = {} + MAPPING = [[1, 2012], [2, 2019], [3, 2032], [4, 2014], [5, 2020], [ + 6, 2018 + ], [7, 2027], [8, 2028], [9, 2003], [10, 2024], [11, 2017], [12, 2026], + [13, 2002], [14, 2023], [15, 2010], [16, 2022], [17, 2031], + [18, 2029], [19, 2008], [20, 2025], [21, 2005], [22, 2021], + [23, 2011], [24, 2013], [25, 2007], [26, 2016], [27, 2006], + [28, 2033], [29, 2009], [30, 2015], [31, 2001], [32, 2030], + [33, 2034], [34, 2035], [35, 49], [36, 50], [37, 51], [ + 38, 52 + ], [39, 58], [40, 53], [41, 54], [42, 1012], [43, 1019], [ + 44, 1032 + ], [45, 1014], [46, 1020], [47, 1018], [48, 1027], [ + 49, 1028 + ], [50, 1003], [51, 1024], [52, 1017], [53, 1026], [ + 54, 1002 + ], [55, 1023], [56, 1010], [57, 1022], [58, 1031], [ + 59, 1029 + ], [60, 1008], [61, 1025], [62, 1005], [63, 1021], [ + 64, 1011 + ], [65, 1013], [66, 1007], [67, 1016], [68, 1006], [ + 69, 1033 + ], [70, 1009], [71, 1015], [72, 1001], [73, 1030], [ + 74, 1034 + ], [75, 1035], [76, 10], [77, 11], [78, 12], [79, 13], [ + 80, 26 + ], [81, 17], [82, 18], [83, 16]] + """ Create empty grey matter mask, Populate with only those regions defined in the mapping.""" + niiGM = np.zeros(niiAPARCdata.shape, dtype=np.uint) + for ma in MAPPING: + niiGM[niiAPARCdata == ma[1]] = ma[0] + mapDict[ma[0]] = ma[1] + iflogger.info('Grey matter mask created') + greyMaskLabels = np.unique(niiGM) + numGMLabels = np.size(greyMaskLabels) + iflogger.info('Number of grey matter labels: %s', numGMLabels) + + labelDict = {} + GMlabelDict = {} + for label in greyMaskLabels: + try: + mapDict[label] + if write_dict: + GMlabelDict['originalID'] = mapDict[label] + except: + iflogger.info('Label %s not in provided mapping', label) + if write_dict: + del GMlabelDict + GMlabelDict = {} + GMlabelDict['labels'] = LUTlabelDict[label][0] + GMlabelDict['colors'] = [ + LUTlabelDict[label][1], LUTlabelDict[label][2], + LUTlabelDict[label][3] + ] + GMlabelDict['a'] = LUTlabelDict[label][4] + labelDict[label] = GMlabelDict + + roi_image = nb.Nifti1Image(niiGM, niiAPARCimg.affine, + niiAPARCimg.header) + iflogger.info('Saving ROI File to %s', roi_file) + nb.save(roi_image, roi_file) + + if write_dict: + iflogger.info('Saving Dictionary File to %s in Pickle format', + dict_file) + with open(dict_file, 'w') as f: + pickle.dump(labelDict, f) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.out_roi_file): + outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) + else: + outputs['roi_file'] = op.abspath(self._gen_outfilename('nii')) + if isdefined(self.inputs.out_dict_file): + outputs['dict_file'] = op.abspath(self.inputs.out_dict_file) + else: + outputs['dict_file'] = op.abspath(self._gen_outfilename('pck')) + return outputs + + def _gen_outfilename(self, ext): + _, name, _ = split_filename(self.inputs.aparc_aseg_file) + if self.inputs.use_freesurfer_LUT: + prefix = 'fsLUT' + elif not self.inputs.use_freesurfer_LUT and isdefined( + self.inputs.LUT_file): + lutpath, lutname, lutext = split_filename(self.inputs.LUT_file) + prefix = lutname + else: + prefix = 'hardcoded' + return prefix + '_' + name + '.' 
+ ext + + +def create_nodes(roi_file, resolution_network_file, out_filename): + G = nx.Graph() + gp = nx.read_graphml(resolution_network_file) + roi_image = nb.load(roi_file, mmap=NUMPY_MMAP) + roiData = roi_image.get_data() + for u, d in gp.nodes(data=True): + G.add_node(int(u), **d) + xyz = tuple( + np.mean( + np.where(np.flipud(roiData) == int(d["dn_correspondence_id"])), + axis=1)) + G.nodes[int(u)]['dn_position'] = tuple([xyz[0], xyz[2], -xyz[1]]) + nx.write_gpickle(G, out_filename) + return out_filename + + +class CreateNodesInputSpec(BaseInterfaceInputSpec): + roi_file = File( + exists=True, mandatory=True, desc='Region of interest file') + resolution_network_file = File( + exists=True, + mandatory=True, + desc='Parcellation file from Connectome Mapping Toolkit') + out_filename = File( + 'nodenetwork.pck', + usedefault=True, + desc='Output gpickled network with the nodes defined.') + + +class CreateNodesOutputSpec(TraitedSpec): + node_network = File(desc='Output gpickled network with the nodes defined.') + + +class CreateNodes(BaseInterface): + """ + Generates a NetworkX graph containing nodes at the centroid of each region in the input ROI file. + Node data is added from the resolution network file. + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> mknode = cmtk.CreateNodes() + >>> mknode.inputs.roi_file = 'ROI_scale500.nii.gz' + >>> mknode.run() # doctest: +SKIP + """ + + input_spec = CreateNodesInputSpec + output_spec = CreateNodesOutputSpec + + def _run_interface(self, runtime): + iflogger.info('Creating nodes...') + create_nodes(self.inputs.roi_file, self.inputs.resolution_network_file, + self.inputs.out_filename) + iflogger.info('Saving node network to %s', + op.abspath(self.inputs.out_filename)) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['node_network'] = op.abspath(self.inputs.out_filename) + return outputs diff --git a/nipype/interfaces/cmtk/convert.py b/nipype/interfaces/cmtk/convert.py new file mode 100644 index 0000000000..93802d5eb8 --- /dev/null +++ b/nipype/interfaces/cmtk/convert.py @@ -0,0 +1,280 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import os.path as op +import datetime +import string +import networkx as nx + +from ...utils.filemanip import split_filename +from ..base import (BaseInterfaceInputSpec, traits, File, + TraitedSpec, InputMultiPath, isdefined) +from .base import CFFBaseInterface, have_cfflib + + +class CFFConverterInputSpec(BaseInterfaceInputSpec): + graphml_networks = InputMultiPath( + File(exists=True), desc='list of graphML networks') + gpickled_networks = InputMultiPath( + File(exists=True), desc='list of gpickled Networkx graphs') + + gifti_surfaces = InputMultiPath( + File(exists=True), desc='list of GIFTI surfaces') + gifti_labels = InputMultiPath( + File(exists=True), desc='list of GIFTI labels') + nifti_volumes = InputMultiPath( + File(exists=True), desc='list of NIFTI volumes') + tract_files = InputMultiPath( + File(exists=True), desc='list of Trackvis fiber files') + + timeseries_files = InputMultiPath( + File(exists=True), desc='list of HDF5 timeseries files') + script_files = InputMultiPath( + File(exists=True), desc='list of script files to include') + data_files = InputMultiPath( + File(exists=True), + desc='list of external data files (i.e. 
Numpy, HD5, XML) ') + + title = traits.Str(desc='Connectome Title') + creator = traits.Str(desc='Creator') + email = traits.Str(desc='Email address') + publisher = traits.Str(desc='Publisher') + license = traits.Str(desc='License') + rights = traits.Str(desc='Rights') + references = traits.Str(desc='References') + relation = traits.Str(desc='Relation') + species = traits.Str('Homo sapiens', desc='Species', usedefault=True) + description = traits.Str( + 'Created with the Nipype CFF converter', + desc='Description', + usedefault=True) + + out_file = File( + 'connectome.cff', usedefault=True, desc='Output connectome file') + + +class CFFConverterOutputSpec(TraitedSpec): + connectome_file = File(exists=True, desc='Output connectome file') + + +class CFFConverter(CFFBaseInterface): + """ + Creates a Connectome File Format (CFF) file from input networks, surfaces, volumes, tracts, etcetera.... + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> cvt = cmtk.CFFConverter() + >>> cvt.inputs.title = 'subject 1' + >>> cvt.inputs.gifti_surfaces = ['lh.pial_converted.gii', 'rh.pial_converted.gii'] + >>> cvt.inputs.tract_files = ['streamlines.trk'] + >>> cvt.inputs.gpickled_networks = ['network0.gpickle'] + >>> cvt.run() # doctest: +SKIP + """ + + input_spec = CFFConverterInputSpec + output_spec = CFFConverterOutputSpec + + def _run_interface(self, runtime): + import cfflib as cf + a = cf.connectome() + + if isdefined(self.inputs.title): + a.connectome_meta.set_title(self.inputs.title) + else: + a.connectome_meta.set_title(self.inputs.out_file) + + if isdefined(self.inputs.creator): + a.connectome_meta.set_creator(self.inputs.creator) + else: + # Probably only works on some OSes... + a.connectome_meta.set_creator(os.getenv('USER')) + + if isdefined(self.inputs.email): + a.connectome_meta.set_email(self.inputs.email) + + if isdefined(self.inputs.publisher): + a.connectome_meta.set_publisher(self.inputs.publisher) + + if isdefined(self.inputs.license): + a.connectome_meta.set_license(self.inputs.license) + + if isdefined(self.inputs.rights): + a.connectome_meta.set_rights(self.inputs.rights) + + if isdefined(self.inputs.references): + a.connectome_meta.set_references(self.inputs.references) + + if isdefined(self.inputs.relation): + a.connectome_meta.set_relation(self.inputs.relation) + + if isdefined(self.inputs.species): + a.connectome_meta.set_species(self.inputs.species) + + if isdefined(self.inputs.description): + a.connectome_meta.set_description(self.inputs.description) + + a.connectome_meta.set_created(datetime.date.today()) + + count = 0 + if isdefined(self.inputs.graphml_networks): + for ntwk in self.inputs.graphml_networks: + # There must be a better way to deal with the unique name problem + # (i.e. 
tracks and networks can't use the same name,
+                # and previously we were pulling them both from the input files)
+                ntwk_name = 'Network {cnt}'.format(cnt=count)
+                a.add_connectome_network_from_graphml(ntwk_name, ntwk)
+                count += 1
+
+        if isdefined(self.inputs.gpickled_networks):
+            unpickled = []
+            for ntwk in self.inputs.gpickled_networks:
+                _, ntwk_name, _ = split_filename(ntwk)
+                unpickled = nx.read_gpickle(ntwk)
+                cnet = cf.CNetwork(name=ntwk_name)
+                cnet.set_with_nxgraph(unpickled)
+                a.add_connectome_network(cnet)
+                count += 1
+
+        count = 0
+        if isdefined(self.inputs.tract_files):
+            for trk in self.inputs.tract_files:
+                _, trk_name, _ = split_filename(trk)
+                ctrack = cf.CTrack(trk_name, trk)
+                a.add_connectome_track(ctrack)
+                count += 1
+
+        count = 0
+        if isdefined(self.inputs.gifti_surfaces):
+            for surf in self.inputs.gifti_surfaces:
+                _, surf_name, _ = split_filename(surf)
+                csurf = cf.CSurface.create_from_gifti(
+                    "Surface %d - %s" % (count, surf_name), surf)
+                csurf.fileformat = 'Gifti'
+                csurf.dtype = 'Surfaceset'
+                a.add_connectome_surface(csurf)
+                count += 1
+
+        count = 0
+        if isdefined(self.inputs.gifti_labels):
+            for label in self.inputs.gifti_labels:
+                _, label_name, _ = split_filename(label)
+                csurf = cf.CSurface.create_from_gifti(
+                    "Surface Label %d - %s" % (count, label_name), label)
+                csurf.fileformat = 'Gifti'
+                csurf.dtype = 'Labels'
+                a.add_connectome_surface(csurf)
+                count += 1
+
+        if isdefined(self.inputs.nifti_volumes):
+            for vol in self.inputs.nifti_volumes:
+                _, vol_name, _ = split_filename(vol)
+                cvol = cf.CVolume.create_from_nifti(vol_name, vol)
+                a.add_connectome_volume(cvol)
+
+        if isdefined(self.inputs.script_files):
+            for script in self.inputs.script_files:
+                _, script_name, _ = split_filename(script)
+                cscript = cf.CScript.create_from_file(script_name, script)
+                a.add_connectome_script(cscript)
+
+        if isdefined(self.inputs.data_files):
+            for data in self.inputs.data_files:
+                _, data_name, _ = split_filename(data)
+                cda = cf.CData(name=data_name, src=data, fileformat='NumPy')
+                if 'lengths' in data_name:
+                    cda.dtype = 'FinalFiberLengthArray'
+                if 'endpoints' in data_name:
+                    cda.dtype = 'FiberEndpoints'
+                if 'labels' in data_name:
+                    cda.dtype = 'FinalFiberLabels'
+                a.add_connectome_data(cda)
+
+        a.print_summary()
+        _, name, ext = split_filename(self.inputs.out_file)
+        if not ext == '.cff':
+            ext = '.cff'
+        cf.save_to_cff(a, op.abspath(name + ext))
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        _, name, ext = split_filename(self.inputs.out_file)
+        if not ext == '.cff':
+            ext = '.cff'
+        outputs['connectome_file'] = op.abspath(name + ext)
+        return outputs
+
+
+class MergeCNetworksInputSpec(BaseInterfaceInputSpec):
+    in_files = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc='List of CFF files to extract networks from')
+    out_file = File(
+        'merged_network_connectome.cff',
+        usedefault=True,
+        desc='Output CFF file with all the networks added')
+
+
+class MergeCNetworksOutputSpec(TraitedSpec):
+    connectome_file = File(
+        exists=True, desc='Output CFF file with all the networks added')
+
+
+class MergeCNetworks(CFFBaseInterface):
+    """ Merges networks from multiple CFF files into one new CFF file.
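+    Each extracted network is renamed with the title of its source
+    connectome file, so the provenance of every network remains visible
+    in the merged file.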
+ + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> mrg = cmtk.MergeCNetworks() + >>> mrg.inputs.in_files = ['subj1.cff','subj2.cff'] + >>> mrg.run() # doctest: +SKIP + + """ + input_spec = MergeCNetworksInputSpec + output_spec = MergeCNetworksOutputSpec + + def _run_interface(self, runtime): + import cfflib as cf + extracted_networks = [] + + for i, con in enumerate(self.inputs.in_files): + mycon = cf.load(con) + nets = mycon.get_connectome_network() + for ne in nets: + # here, you might want to skip networks with a given + # metadata information + ne.load() + contitle = mycon.get_connectome_meta().get_title() + ne.set_name(str(i) + ': ' + contitle + ' - ' + ne.get_name()) + ne.set_src(ne.get_name()) + extracted_networks.append(ne) + + # Add networks to new connectome + newcon = cf.connectome( + title='All CNetworks', connectome_network=extracted_networks) + # Setting additional metadata + metadata = newcon.get_connectome_meta() + metadata.set_creator('My Name') + metadata.set_email('My Email') + + _, name, ext = split_filename(self.inputs.out_file) + if not ext == '.cff': + ext = '.cff' + cf.save_to_cff(newcon, op.abspath(name + ext)) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + _, name, ext = split_filename(self.inputs.out_file) + if not ext == '.cff': + ext = '.cff' + outputs['connectome_file'] = op.abspath(name + ext) + return outputs diff --git a/nipype/interfaces/cmtk/nbs.py b/nipype/interfaces/cmtk/nbs.py new file mode 100644 index 0000000000..f21f2b33ea --- /dev/null +++ b/nipype/interfaces/cmtk/nbs.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op + +import numpy as np +import networkx as nx + +from ... 
import logging
+from ..base import (LibraryBaseInterface, BaseInterfaceInputSpec, traits, File,
+                    TraitedSpec, InputMultiPath, OutputMultiPath, isdefined)
+from .base import have_cv
+iflogger = logging.getLogger('nipype.interface')
+
+
+def ntwks_to_matrices(in_files, edge_key):
+    first = nx.read_gpickle(in_files[0])
+    files = len(in_files)
+    nodes = len(first.nodes())
+    matrix = np.zeros((nodes, nodes, files))
+    for idx, name in enumerate(in_files):
+        graph = nx.read_gpickle(name)
+        for u, v, d in graph.edges(data=True):
+            try:
+                # Set the requested edge attribute as the weight value
+                graph[u][v]['weight'] = d[edge_key]
+            except KeyError:
+                raise KeyError(
+                    "the graph edges do not have {} attribute".format(
+                        edge_key))
+        matrix[:, :, idx] = nx.to_numpy_matrix(graph)  # Retrieve the matrix
+    return matrix
+
+
+class NetworkBasedStatisticInputSpec(BaseInterfaceInputSpec):
+    in_group1 = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc='Networks for the first group of subjects')
+    in_group2 = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc='Networks for the second group of subjects')
+    node_position_network = File(
+        desc='An optional network used to position the nodes for the '
+        'output networks')
+    number_of_permutations = traits.Int(
+        1000, usedefault=True, desc='Number of permutations to perform')
+    threshold = traits.Float(3, usedefault=True, desc='T-statistic threshold')
+    t_tail = traits.Enum(
+        'left',
+        'right',
+        'both',
+        usedefault=True,
+        desc='Can be one of "left", "right", or "both"')
+    edge_key = traits.Str(
+        'number_of_fibers',
+        usedefault=True,
+        desc='Usually "number_of_fibers", "fiber_length_mean", or '
+        '"fiber_length_std" for matrices made with CMTK; sometimes '
+        '"weight" or "value" for functional networks.')
+    out_nbs_network = File(
+        desc='Output network with edges identified by the NBS')
+    out_nbs_pval_network = File(
+        desc='Output network with p-values to weight the edges identified '
+        'by the NBS')
+
+
+class NetworkBasedStatisticOutputSpec(TraitedSpec):
+    nbs_network = File(
+        exists=True, desc='Output network with edges identified by the NBS')
+    nbs_pval_network = File(
+        exists=True,
+        desc='Output network with p-values to weight the edges identified '
+        'by the NBS')
+    network_files = OutputMultiPath(
+        File(exists=True),
+        desc='Output network with edges identified by the NBS')
+
+
+class NetworkBasedStatistic(LibraryBaseInterface):
+    """
+    Performs the Network-Based Statistic (NBS), a permutation test that
+    identifies connected sets of edges differing between two groups of
+    input NetworkX gpickle networks
+
+    For documentation of Network-based statistic parameters:
+
+    https://github.com/LTS5/connectomeviewer/blob/master/cviewer/libs/pyconto/groupstatistics/nbs/_nbs.py
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.cmtk as cmtk
+    >>> nbs = cmtk.NetworkBasedStatistic()
+    >>> nbs.inputs.in_group1 = ['subj1.pck', 'subj2.pck'] # doctest: +SKIP
+    >>> nbs.inputs.in_group2 = ['pat1.pck', 'pat2.pck'] # doctest: +SKIP
+    >>> nbs.run() # doctest: +SKIP
+    """
+    input_spec = NetworkBasedStatisticInputSpec
+    output_spec = NetworkBasedStatisticOutputSpec
+    _pkg = 'cviewer'
+
+    def _run_interface(self, runtime):
+        from cviewer.libs.pyconto.groupstatistics import nbs
+
+        THRESH = self.inputs.threshold
+        K = self.inputs.number_of_permutations
+        TAIL = self.inputs.t_tail
+        edge_key = self.inputs.edge_key
+        details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(
+            K) + '-tail-' + TAIL + '.pck'
+
+        # Fill in the data from the networks
+        X = ntwks_to_matrices(self.inputs.in_group1, edge_key)
+        Y = ntwks_to_matrices(self.inputs.in_group2, edge_key)
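+        # X and Y are node-by-node-by-subject arrays: one adjacency matrix
+        # per subject, stacked along the third axis, with the requested
+        # edge attribute as the connection weight.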
+
+        PVAL, ADJ, _ = nbs.compute_nbs(X, Y, THRESH, K, TAIL)
+
+        iflogger.info('p-values:')
+        iflogger.info(PVAL)
+
+        pADJ = ADJ.copy()
+        for idx, _ in enumerate(PVAL):
+            x, y = np.where(ADJ == idx + 1)
+            pADJ[x, y] = PVAL[idx]
+
+        # Create networkx graphs from the adjacency matrix
+        nbsgraph = nx.from_numpy_matrix(ADJ)
+        nbs_pval_graph = nx.from_numpy_matrix(pADJ)
+
+        # Relabel nodes because they should not start at zero for our convention
+        nbsgraph = nx.relabel_nodes(nbsgraph, lambda x: x + 1)
+        nbs_pval_graph = nx.relabel_nodes(nbs_pval_graph, lambda x: x + 1)
+
+        if isdefined(self.inputs.node_position_network):
+            node_ntwk_name = self.inputs.node_position_network
+        else:
+            node_ntwk_name = self.inputs.in_group1[0]
+
+        node_network = nx.read_gpickle(node_ntwk_name)
+        iflogger.info('Populating node dictionaries with attributes from %s',
+                      node_ntwk_name)
+
+        for nid, ndata in node_network.nodes(data=True):
+            nbsgraph.nodes[nid].update(ndata)
+            nbs_pval_graph.nodes[nid].update(ndata)
+
+        path = op.abspath('NBS_Result_' + details)
+        iflogger.info(path)
+        nx.write_gpickle(nbsgraph, path)
+        iflogger.info('Saving output NBS edge network as %s', path)
+
+        pval_path = op.abspath('NBS_P_vals_' + details)
+        iflogger.info(pval_path)
+        nx.write_gpickle(nbs_pval_graph, pval_path)
+        iflogger.info('Saving output p-value network as %s', pval_path)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+
+        THRESH = self.inputs.threshold
+        K = self.inputs.number_of_permutations
+        TAIL = self.inputs.t_tail
+        edge_key = self.inputs.edge_key
+        details = edge_key + '-thresh-' + str(THRESH) + '-k-' + str(
+            K) + '-tail-' + TAIL + '.pck'
+        path = op.abspath('NBS_Result_' + details)
+        pval_path = op.abspath('NBS_P_vals_' + details)
+
+        outputs['nbs_network'] = path
+        outputs['nbs_pval_network'] = pval_path
+        outputs['network_files'] = [path, pval_path]
+        return outputs
+
+    def _gen_outfilename(self, name, ext):
+        return name + '.' + ext
diff --git a/nipype/interfaces/cmtk/nx.py b/nipype/interfaces/cmtk/nx.py
new file mode 100644
index 0000000000..1b58494f2c
--- /dev/null
+++ b/nipype/interfaces/cmtk/nx.py
@@ -0,0 +1,656 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import str, open, range
+
+import os.path as op
+import pickle
+
+import numpy as np
+import networkx as nx
+import scipy.io as sio
+
+from ...
import logging +from ...utils.filemanip import split_filename +from ..base import (BaseInterface, BaseInterfaceInputSpec, traits, File, + TraitedSpec, InputMultiPath, OutputMultiPath, isdefined) +from .base import have_cmp + +iflogger = logging.getLogger('nipype.interface') + + +def read_unknown_ntwk(ntwk): + if not isinstance(ntwk, nx.classes.graph.Graph): + _, _, ext = split_filename(ntwk) + if ext == '.pck': + ntwk = nx.read_gpickle(ntwk) + elif ext == '.graphml': + ntwk = nx.read_graphml(ntwk) + return ntwk + + +def remove_all_edges(ntwk): + ntwktmp = ntwk.copy() + edges = list(ntwktmp.edges()) + for edge in edges: + ntwk.remove_edge(edge[0], edge[1]) + return ntwk + + +def fix_keys_for_gexf(orig): + """ + GEXF Networks can be read in Gephi, however, the keys for the node and edge IDs must be converted to strings + """ + import networkx as nx + ntwk = nx.Graph() + nodes = list(orig.nodes()) + edges = list(orig.edges()) + for node in nodes: + newnodedata = {} + newnodedata.update(orig.nodes[node]) + if 'dn_fsname' in orig.nodes[node]: + newnodedata['label'] = orig.nodes[node]['dn_fsname'] + ntwk.add_node(str(node), **newnodedata) + if 'dn_position' in ntwk.nodes[str( + node)] and 'dn_position' in newnodedata: + ntwk.nodes[str(node)]['dn_position'] = str( + newnodedata['dn_position']) + for edge in edges: + data = {} + data = orig.edge[edge[0]][edge[1]] + ntwk.add_edge(str(edge[0]), str(edge[1]), **data) + if 'fiber_length_mean' in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_mean'] = str( + data['fiber_length_mean']) + if 'fiber_length_std' in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]['fiber_length_std'] = str( + data['fiber_length_std']) + if 'number_of_fibers' in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]['number_of_fibers'] = str( + data['number_of_fibers']) + if 'value' in ntwk.edge[str(edge[0])][str(edge[1])]: + ntwk.edge[str(edge[0])][str(edge[1])]['value'] = str(data['value']) + return ntwk + + +def add_dicts_by_key(in_dict1, in_dict2): + """ + Combines two dictionaries and adds the values for those keys that are shared + """ + both = {} + for key1 in in_dict1: + for key2 in in_dict2: + if key1 == key2: + both[key1] = in_dict1[key1] + in_dict2[key2] + return both + + +def average_networks(in_files, ntwk_res_file, group_id): + """ + Sums the edges of input networks and divides by the number of networks + Writes the average network as .pck and .gexf and returns the name of the written networks + """ + import networkx as nx + import os.path as op + iflogger.info('Creating average network for group: %s', group_id) + matlab_network_list = [] + if len(in_files) == 1: + avg_ntwk = read_unknown_ntwk(in_files[0]) + else: + count_to_keep_edge = np.round(len(in_files) / 2.0) + iflogger.info('Number of networks: %i, an edge must occur in at ' + 'least %i to remain in the average network', + len(in_files), count_to_keep_edge) + ntwk_res_file = read_unknown_ntwk(ntwk_res_file) + iflogger.info('%i nodes found in network resolution file', + ntwk_res_file.number_of_nodes()) + ntwk = remove_all_edges(ntwk_res_file) + counting_ntwk = ntwk.copy() + # Sums all the relevant variables + for index, subject in enumerate(in_files): + tmp = nx.read_gpickle(subject) + iflogger.info('File %s has %i edges', subject, + tmp.number_of_edges()) + edges = list(tmp.edges()) + for edge in edges: + data = {} + data = tmp.edge[edge[0]][edge[1]] + data['count'] = 1 + if ntwk.has_edge(edge[0], 
edge[1]): + current = {} + current = ntwk.edge[edge[0]][edge[1]] + data = add_dicts_by_key(current, data) + ntwk.add_edge(edge[0], edge[1], **data) + nodes = list(tmp.nodes()) + for node in nodes: + data = {} + data = ntwk.nodes[node] + if 'value' in tmp.nodes[node]: + data['value'] = data['value'] + tmp.nodes[node]['value'] + ntwk.add_node(node, **data) + + # Divides each value by the number of files + nodes = list(ntwk.nodes()) + edges = list(ntwk.edges()) + iflogger.info('Total network has %i edges', ntwk.number_of_edges()) + avg_ntwk = nx.Graph() + newdata = {} + for node in nodes: + data = ntwk.nodes[node] + newdata = data + if 'value' in data: + newdata['value'] = data['value'] / len(in_files) + ntwk.nodes[node]['value'] = newdata + avg_ntwk.add_node(node, **newdata) + + edge_dict = {} + edge_dict['count'] = np.zeros((avg_ntwk.number_of_nodes(), + avg_ntwk.number_of_nodes())) + for edge in edges: + data = ntwk.edge[edge[0]][edge[1]] + if ntwk.edge[edge[0]][edge[1]]['count'] >= count_to_keep_edge: + for key in list(data.keys()): + if not key == 'count': + data[key] = data[key] / len(in_files) + ntwk.edge[edge[0]][edge[1]] = data + avg_ntwk.add_edge(edge[0], edge[1], **data) + edge_dict['count'][edge[0] - 1][edge[1] - 1] = ntwk.edge[edge[0]][ + edge[1]]['count'] + + iflogger.info('After thresholding, the average network has %i edges', + avg_ntwk.number_of_edges()) + + avg_edges = avg_ntwk.edges() + for edge in avg_edges: + data = avg_ntwk.edge[edge[0]][edge[1]] + for key in list(data.keys()): + if not key == 'count': + edge_dict[key] = np.zeros((avg_ntwk.number_of_nodes(), + avg_ntwk.number_of_nodes())) + edge_dict[key][edge[0] - 1][edge[1] - 1] = data[key] + + for key in list(edge_dict.keys()): + tmp = {} + network_name = group_id + '_' + key + '_average.mat' + matlab_network_list.append(op.abspath(network_name)) + tmp[key] = edge_dict[key] + sio.savemat(op.abspath(network_name), tmp) + iflogger.info('Saving average network for key: %s as %s', key, + op.abspath(network_name)) + + # Writes the networks and returns the name + network_name = group_id + '_average.pck' + nx.write_gpickle(avg_ntwk, op.abspath(network_name)) + iflogger.info('Saving average network as %s', op.abspath(network_name)) + avg_ntwk = fix_keys_for_gexf(avg_ntwk) + network_name = group_id + '_average.gexf' + nx.write_gexf(avg_ntwk, op.abspath(network_name)) + iflogger.info('Saving average network as %s', op.abspath(network_name)) + return network_name, matlab_network_list + + +def compute_node_measures(ntwk, calculate_cliques=False): + """ + These return node-based measures + """ + iflogger.info('Computing node measures:') + measures = {} + iflogger.info('...Computing degree...') + measures['degree'] = np.array(list(ntwk.degree().values())) + iflogger.info('...Computing load centrality...') + measures['load_centrality'] = np.array( + list(nx.load_centrality(ntwk).values())) + iflogger.info('...Computing betweenness centrality...') + measures['betweenness_centrality'] = np.array( + list(nx.betweenness_centrality(ntwk).values())) + iflogger.info('...Computing degree centrality...') + measures['degree_centrality'] = np.array( + list(nx.degree_centrality(ntwk).values())) + iflogger.info('...Computing closeness centrality...') + measures['closeness_centrality'] = np.array( + list(nx.closeness_centrality(ntwk).values())) + # iflogger.info('...Computing eigenvector centrality...') + # measures['eigenvector_centrality'] = np.array(nx.eigenvector_centrality(ntwk, max_iter=100000).values()) + iflogger.info('...Computing 
triangles...') + measures['triangles'] = np.array(list(nx.triangles(ntwk).values())) + iflogger.info('...Computing clustering...') + measures['clustering'] = np.array(list(nx.clustering(ntwk).values())) + iflogger.info('...Computing k-core number') + measures['core_number'] = np.array(list(nx.core_number(ntwk).values())) + iflogger.info('...Identifying network isolates...') + isolate_list = nx.isolates(ntwk) + binarized = np.zeros((ntwk.number_of_nodes(), 1)) + for value in isolate_list: + value = value - 1 # Zero indexing + binarized[value] = 1 + measures['isolates'] = binarized + if calculate_cliques: + iflogger.info('...Calculating node clique number') + measures['node_clique_number'] = np.array( + list(nx.node_clique_number(ntwk).values())) + iflogger.info('...Computing number of cliques for each node...') + measures['number_of_cliques'] = np.array( + list(nx.number_of_cliques(ntwk).values())) + return measures + + +def compute_edge_measures(ntwk): + """ + These return edge-based measures + """ + iflogger.info('Computing edge measures:') + measures = {} + # iflogger.info('...Computing google matrix...' #Makes really large networks (500k+ edges)) + # measures['google_matrix'] = nx.google_matrix(ntwk) + # iflogger.info('...Computing hub matrix...') + # measures['hub_matrix'] = nx.hub_matrix(ntwk) + # iflogger.info('...Computing authority matrix...') + # measures['authority_matrix'] = nx.authority_matrix(ntwk) + return measures + + +def compute_dict_measures(ntwk): + """ + Returns a dictionary + """ + iflogger.info('Computing measures which return a dictionary:') + measures = {} + iflogger.info('...Computing rich club coefficient...') + measures['rich_club_coef'] = nx.rich_club_coefficient(ntwk) + return measures + + +def compute_singlevalued_measures(ntwk, weighted=True, + calculate_cliques=False): + """ + Returns a single value per network + """ + iflogger.info('Computing single valued measures:') + measures = {} + iflogger.info('...Computing degree assortativity (pearson number) ...') + try: + measures['degree_pearsonr'] = nx.degree_pearsonr(ntwk) + except AttributeError: # For NetworkX 1.6 + measures[ + 'degree_pearsonr'] = nx.degree_pearson_correlation_coefficient( + ntwk) + iflogger.info('...Computing degree assortativity...') + try: + measures['degree_assortativity'] = nx.degree_assortativity(ntwk) + except AttributeError: + measures['degree_assortativity'] = nx.degree_assortativity_coefficient( + ntwk) + iflogger.info('...Computing transitivity...') + measures['transitivity'] = nx.transitivity(ntwk) + iflogger.info('...Computing number of connected_components...') + measures['number_connected_components'] = nx.number_connected_components( + ntwk) + iflogger.info('...Computing graph density...') + measures['graph_density'] = nx.density(ntwk) + iflogger.info('...Recording number of edges...') + measures['number_of_edges'] = nx.number_of_edges(ntwk) + iflogger.info('...Recording number of nodes...') + measures['number_of_nodes'] = nx.number_of_nodes(ntwk) + iflogger.info('...Computing average clustering...') + measures['average_clustering'] = nx.average_clustering(ntwk) + if nx.is_connected(ntwk): + iflogger.info('...Calculating average shortest path length...') + measures[ + 'average_shortest_path_length'] = nx.average_shortest_path_length( + ntwk, weighted) + else: + iflogger.info('...Calculating average shortest path length...') + measures[ + 'average_shortest_path_length'] = nx.average_shortest_path_length( + nx.connected_component_subgraphs(ntwk)[0], weighted) + if 
calculate_cliques: + iflogger.info('...Computing graph clique number...') + measures['graph_clique_number'] = nx.graph_clique_number( + ntwk) # out of memory error + return measures + + +def compute_network_measures(ntwk): + measures = {} + # iflogger.info('Identifying k-core') + # measures['k_core'] = nx.k_core(ntwk) + # iflogger.info('Identifying k-shell') + # measures['k_shell'] = nx.k_shell(ntwk) + # iflogger.info('Identifying k-crust') + # measures['k_crust'] = nx.k_crust(ntwk) + return measures + + +def add_node_data(node_array, ntwk): + node_ntwk = nx.Graph() + newdata = {} + for idx, data in ntwk.nodes(data=True): + if not int(idx) == 0: + newdata['value'] = node_array[int(idx) - 1] + data.update(newdata) + node_ntwk.add_node(int(idx), **data) + return node_ntwk + + +def add_edge_data(edge_array, ntwk, above=0, below=0): + edge_ntwk = ntwk.copy() + data = {} + for x, row in enumerate(edge_array): + for y in range(0, np.max(np.shape(edge_array[x]))): + if not edge_array[x, y] == 0: + data['value'] = edge_array[x, y] + if data['value'] <= below or data['value'] >= above: + if edge_ntwk.has_edge(x + 1, y + 1): + old_edge_dict = edge_ntwk.edge[x + 1][y + 1] + edge_ntwk.remove_edge(x + 1, y + 1) + data.update(old_edge_dict) + edge_ntwk.add_edge(x + 1, y + 1, **data) + return edge_ntwk + + +class NetworkXMetricsInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc='Input network') + out_k_core = File( + 'k_core', + usedefault=True, + desc='Computed k-core network stored as a NetworkX pickle.') + out_k_shell = File( + 'k_shell', + usedefault=True, + desc='Computed k-shell network stored as a NetworkX pickle.') + out_k_crust = File( + 'k_crust', + usedefault=True, + desc='Computed k-crust network stored as a NetworkX pickle.') + treat_as_weighted_graph = traits.Bool( + True, + usedefault=True, + desc= + 'Some network metrics can be calculated while considering only a binarized version of the graph' + ) + compute_clique_related_measures = traits.Bool( + False, + usedefault=True, + desc= + 'Computing clique-related measures (e.g. node clique number) can be very time consuming' + ) + out_global_metrics_matlab = File( + genfile=True, desc='Output node metrics in MATLAB .mat format') + out_node_metrics_matlab = File( + genfile=True, desc='Output node metrics in MATLAB .mat format') + out_edge_metrics_matlab = File( + genfile=True, desc='Output edge metrics in MATLAB .mat format') + out_pickled_extra_measures = File( + 'extra_measures', + usedefault=True, + desc= + 'Network measures for group 1 that return dictionaries stored as a Pickle.' 
+ ) + + +class NetworkXMetricsOutputSpec(TraitedSpec): + gpickled_network_files = OutputMultiPath( + File(desc='Output gpickled network files')) + matlab_matrix_files = OutputMultiPath( + File(desc='Output network metrics in MATLAB .mat format')) + global_measures_matlab = File( + desc='Output global metrics in MATLAB .mat format') + node_measures_matlab = File( + desc='Output node metrics in MATLAB .mat format') + edge_measures_matlab = File( + desc='Output edge metrics in MATLAB .mat format') + node_measure_networks = OutputMultiPath( + File(desc='Output gpickled network files for all node-based measures')) + edge_measure_networks = OutputMultiPath( + File(desc='Output gpickled network files for all edge-based measures')) + k_networks = OutputMultiPath( + File( + desc= + 'Output gpickled network files for the k-core, k-shell, and k-crust networks' + )) + k_core = File(desc='Computed k-core network stored as a NetworkX pickle.') + k_shell = File( + desc='Computed k-shell network stored as a NetworkX pickle.') + k_crust = File( + desc='Computed k-crust network stored as a NetworkX pickle.') + pickled_extra_measures = File( + desc= + 'Network measures for the group that return dictionaries, stored as a Pickle.' + ) + matlab_dict_measures = OutputMultiPath( + File( + desc= + 'Network measures for the group that return dictionaries, stored as matlab matrices.' + )) + + +class NetworkXMetrics(BaseInterface): + """ + Calculates and outputs NetworkX-based measures for an input network + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> nxmetrics = cmtk.NetworkXMetrics() + >>> nxmetrics.inputs.in_file = 'subj1.pck' + >>> nxmetrics.run() # doctest: +SKIP + """ + input_spec = NetworkXMetricsInputSpec + output_spec = NetworkXMetricsOutputSpec + + def _run_interface(self, runtime): + global gpickled, nodentwks, edgentwks, kntwks, matlab + gpickled = list() + nodentwks = list() + edgentwks = list() + kntwks = list() + matlab = list() + ntwk = nx.read_gpickle(self.inputs.in_file) + + # Each block computes, writes, and saves a measure + # The names are then added to the output .pck file list + # In the case of the degeneracy networks, they are given specified output names + + calculate_cliques = self.inputs.compute_clique_related_measures + weighted = self.inputs.treat_as_weighted_graph + + global_measures = compute_singlevalued_measures( + ntwk, weighted, calculate_cliques) + if isdefined(self.inputs.out_global_metrics_matlab): + global_out_file = op.abspath(self.inputs.out_global_metrics_matlab) + else: + global_out_file = op.abspath( + self._gen_outfilename('globalmetrics', 'mat')) + sio.savemat(global_out_file, global_measures, oned_as='column') + matlab.append(global_out_file) + + node_measures = compute_node_measures(ntwk, calculate_cliques) + for key in list(node_measures.keys()): + newntwk = add_node_data(node_measures[key], ntwk) + out_file = op.abspath(self._gen_outfilename(key, 'pck')) + nx.write_gpickle(newntwk, out_file) + nodentwks.append(out_file) + if isdefined(self.inputs.out_node_metrics_matlab): + node_out_file = op.abspath(self.inputs.out_node_metrics_matlab) + else: + node_out_file = op.abspath( + self._gen_outfilename('nodemetrics', 'mat')) + sio.savemat(node_out_file, node_measures, oned_as='column') + matlab.append(node_out_file) + gpickled.extend(nodentwks) + + edge_measures = compute_edge_measures(ntwk) + for key in list(edge_measures.keys()): + newntwk = add_edge_data(edge_measures[key], ntwk) + out_file = op.abspath(self._gen_outfilename(key, 'pck')) + 
nx.write_gpickle(newntwk, out_file)
+            edgentwks.append(out_file)
+        if isdefined(self.inputs.out_edge_metrics_matlab):
+            edge_out_file = op.abspath(self.inputs.out_edge_metrics_matlab)
+        else:
+            edge_out_file = op.abspath(
+                self._gen_outfilename('edgemetrics', 'mat'))
+        sio.savemat(edge_out_file, edge_measures, oned_as='column')
+        matlab.append(edge_out_file)
+        gpickled.extend(edgentwks)
+
+        ntwk_measures = compute_network_measures(ntwk)
+        for key in list(ntwk_measures.keys()):
+            if key == 'k_core':
+                out_file = op.abspath(
+                    self._gen_outfilename(self.inputs.out_k_core, 'pck'))
+            if key == 'k_shell':
+                out_file = op.abspath(
+                    self._gen_outfilename(self.inputs.out_k_shell, 'pck'))
+            if key == 'k_crust':
+                out_file = op.abspath(
+                    self._gen_outfilename(self.inputs.out_k_crust, 'pck'))
+            nx.write_gpickle(ntwk_measures[key], out_file)
+            kntwks.append(out_file)
+        gpickled.extend(kntwks)
+
+        out_pickled_extra_measures = op.abspath(
+            self._gen_outfilename(self.inputs.out_pickled_extra_measures,
+                                  'pck'))
+        dict_measures = compute_dict_measures(ntwk)
+        iflogger.info('Saving extra measure file to %s in Pickle format',
+                      op.abspath(out_pickled_extra_measures))
+        # Pickle requires a binary file handle
+        with open(out_pickled_extra_measures, 'wb') as fo:
+            pickle.dump(dict_measures, fo)
+
+        iflogger.info('Saving MATLAB measures as %s', matlab)
+
+        # Loops through the measures which return a dictionary,
+        # converts the keys and values to a Numpy array,
+        # stacks them together, and saves them in a MATLAB .mat file via Scipy
+        global dicts
+        dicts = list()
+        for idx, key in enumerate(dict_measures.keys()):
+            for idxd, keyd in enumerate(dict_measures[key].keys()):
+                if idxd == 0:
+                    nparraykeys = np.array(keyd)
+                    nparrayvalues = np.array(dict_measures[key][keyd])
+                else:
+                    nparraykeys = np.append(nparraykeys, np.array(keyd))
+                    values = np.array(dict_measures[key][keyd])
+                    nparrayvalues = np.append(nparrayvalues, values)
+            nparray = np.vstack((nparraykeys, nparrayvalues))
+            out_file = op.abspath(self._gen_outfilename(key, 'mat'))
+            npdict = {}
+            npdict[key] = nparray
+            sio.savemat(out_file, npdict, oned_as='column')
+            dicts.append(out_file)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs["k_core"] = op.abspath(
+            self._gen_outfilename(self.inputs.out_k_core, 'pck'))
+        outputs["k_shell"] = op.abspath(
+            self._gen_outfilename(self.inputs.out_k_shell, 'pck'))
+        outputs["k_crust"] = op.abspath(
+            self._gen_outfilename(self.inputs.out_k_crust, 'pck'))
+        outputs["gpickled_network_files"] = gpickled
+        outputs["k_networks"] = kntwks
+        outputs["node_measure_networks"] = nodentwks
+        outputs["edge_measure_networks"] = edgentwks
+        outputs["matlab_dict_measures"] = dicts
+        outputs["global_measures_matlab"] = op.abspath(
+            self._gen_outfilename('globalmetrics', 'mat'))
+        outputs["node_measures_matlab"] = op.abspath(
+            self._gen_outfilename('nodemetrics', 'mat'))
+        outputs["edge_measures_matlab"] = op.abspath(
+            self._gen_outfilename('edgemetrics', 'mat'))
+        outputs["matlab_matrix_files"] = [
+            outputs["global_measures_matlab"], outputs["node_measures_matlab"],
+            outputs["edge_measures_matlab"]
+        ]
+        outputs["pickled_extra_measures"] = op.abspath(
+            self._gen_outfilename(self.inputs.out_pickled_extra_measures,
+                                  'pck'))
+        return outputs
+
+    def _gen_outfilename(self, name, ext):
+        return name + '.'
+ ext + + +class AverageNetworksInputSpec(BaseInterfaceInputSpec): + in_files = InputMultiPath( + File(exists=True), + mandatory=True, + desc='Networks for a group of subjects') + resolution_network_file = File( + exists=True, + desc= + 'Parcellation files from Connectome Mapping Toolkit. This is not necessary' + ', but if included, the interface will output the statistical maps as networkx graphs.' + ) + group_id = traits.Str('group1', usedefault=True, desc='ID for group') + out_gpickled_groupavg = File( + desc='Average network saved as a NetworkX .pck') + out_gexf_groupavg = File(desc='Average network saved as a .gexf file') + + +class AverageNetworksOutputSpec(TraitedSpec): + gpickled_groupavg = File(desc='Average network saved as a NetworkX .pck') + gexf_groupavg = File(desc='Average network saved as a .gexf file') + matlab_groupavgs = OutputMultiPath( + File(desc='Average network saved as a .gexf file')) + + +class AverageNetworks(BaseInterface): + """ + Calculates and outputs the average network given a set of input NetworkX gpickle files + + This interface will only keep an edge in the averaged network if that edge is present in + at least half of the input networks. + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> avg = cmtk.AverageNetworks() + >>> avg.inputs.in_files = ['subj1.pck', 'subj2.pck'] + >>> avg.run() # doctest: +SKIP + + """ + input_spec = AverageNetworksInputSpec + output_spec = AverageNetworksOutputSpec + + def _run_interface(self, runtime): + if isdefined(self.inputs.resolution_network_file): + ntwk_res_file = self.inputs.resolution_network_file + else: + ntwk_res_file = self.inputs.in_files[0] + + global matlab_network_list + network_name, matlab_network_list = average_networks( + self.inputs.in_files, ntwk_res_file, self.inputs.group_id) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + if not isdefined(self.inputs.out_gpickled_groupavg): + outputs["gpickled_groupavg"] = op.abspath( + self._gen_outfilename(self.inputs.group_id + '_average', + 'pck')) + else: + outputs["gpickled_groupavg"] = op.abspath( + self.inputs.out_gpickled_groupavg) + + if not isdefined(self.inputs.out_gexf_groupavg): + outputs["gexf_groupavg"] = op.abspath( + self._gen_outfilename(self.inputs.group_id + '_average', + 'gexf')) + else: + outputs["gexf_groupavg"] = op.abspath( + self.inputs.out_gexf_groupavg) + + outputs["matlab_groupavgs"] = matlab_network_list + return outputs + + def _gen_outfilename(self, name, ext): + return name + '.' + ext diff --git a/nipype/interfaces/cmtk/parcellation.py b/nipype/interfaces/cmtk/parcellation.py new file mode 100644 index 0000000000..0e25e8eb10 --- /dev/null +++ b/nipype/interfaces/cmtk/parcellation.py @@ -0,0 +1,616 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +import os +import os.path as op +import shutil + +import numpy as np +import nibabel as nb +import networkx as nx + +from ... 
import logging +from ..base import (BaseInterface, LibraryBaseInterface, + BaseInterfaceInputSpec, traits, File, + TraitedSpec, Directory, isdefined) +from .base import have_cmp +iflogger = logging.getLogger('nipype.interface') + + +def create_annot_label(subject_id, subjects_dir, fs_dir, parcellation_name): + import cmp + from cmp.util import runCmd + iflogger.info("Create the cortical labels necessary for our ROIs") + iflogger.info("=================================================") + fs_label_dir = op.join(op.join(subjects_dir, subject_id), 'label') + output_dir = op.abspath(op.curdir) + paths = [] + cmp_config = cmp.configuration.PipelineConfiguration() + cmp_config.parcellation_scheme = "Lausanne2008" + for hemi in ['lh', 'rh']: + spath = cmp_config._get_lausanne_parcellation('Lausanne2008')[ + parcellation_name]['fs_label_subdir_name'] % hemi + paths.append(spath) + for p in paths: + try: + os.makedirs(op.join('.', p)) + except: + pass + if '33' in parcellation_name: + comp = [ + ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', + 'regenerated_rh_36', 'myaparc_36'), + ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', + 'regenerated_rh_60', 'myaparc_60'), + ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', + 'regenerated_lh_36', 'myaparc_36'), + ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', + 'regenerated_lh_60', 'myaparc_60'), + ] + elif '60' in parcellation_name: + comp = [ + ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', + 'regenerated_rh_60', 'myaparc_60'), + ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', + 'regenerated_lh_60', 'myaparc_60'), + ] + elif '125' in parcellation_name: + comp = [ + ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', + 'regenerated_rh_125', 'myaparc_125'), + ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', + 'regenerated_rh_60', 'myaparc_60'), + ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', + 'regenerated_lh_125', 'myaparc_125'), + ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', + 'regenerated_lh_60', 'myaparc_60'), + ] + elif '250' in parcellation_name: + comp = [ + ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', + 'regenerated_rh_250', 'myaparc_250'), + ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', + 'regenerated_rh_60', 'myaparc_60'), + ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', + 'regenerated_lh_250', 'myaparc_250'), + ('lh', 'myatlas_60_lh.gcs', 'lh.myaparc_60.annot', + 'regenerated_lh_60', 'myaparc_60'), + ] + else: + comp = [ + ('rh', 'myatlas_36_rh.gcs', 'rh.myaparc_36.annot', + 'regenerated_rh_36', 'myaparc_36'), + ('rh', 'myatlasP1_16_rh.gcs', 'rh.myaparcP1_16.annot', + 'regenerated_rh_500', 'myaparcP1_16'), + ('rh', 'myatlasP17_28_rh.gcs', 'rh.myaparcP17_28.annot', + 'regenerated_rh_500', 'myaparcP17_28'), + ('rh', 'myatlasP29_36_rh.gcs', 'rh.myaparcP29_36.annot', + 'regenerated_rh_500', 'myaparcP29_36'), + ('rh', 'myatlas_60_rh.gcs', 'rh.myaparc_60.annot', + 'regenerated_rh_60', 'myaparc_60'), + ('rh', 'myatlas_125_rh.gcs', 'rh.myaparc_125.annot', + 'regenerated_rh_125', 'myaparc_125'), + ('rh', 'myatlas_250_rh.gcs', 'rh.myaparc_250.annot', + 'regenerated_rh_250', 'myaparc_250'), + ('lh', 'myatlas_36_lh.gcs', 'lh.myaparc_36.annot', + 'regenerated_lh_36', 'myaparc_36'), + ('lh', 'myatlasP1_16_lh.gcs', 'lh.myaparcP1_16.annot', + 'regenerated_lh_500', 'myaparcP1_16'), + ('lh', 'myatlasP17_28_lh.gcs', 'lh.myaparcP17_28.annot', + 'regenerated_lh_500', 'myaparcP17_28'), + ('lh', 'myatlasP29_36_lh.gcs', 'lh.myaparcP29_36.annot', + 'regenerated_lh_500', 'myaparcP29_36'), + ('lh', 'myatlas_60_lh.gcs', 
'lh.myaparc_60.annot', + 'regenerated_lh_60', 'myaparc_60'), + ('lh', 'myatlas_125_lh.gcs', 'lh.myaparc_125.annot', + 'regenerated_lh_125', 'myaparc_125'), + ('lh', 'myatlas_250_lh.gcs', 'lh.myaparc_250.annot', + 'regenerated_lh_250', 'myaparc_250'), + ] + + log = cmp_config.get_logger() + + for out in comp: + mris_cmd = 'mris_ca_label %s %s "%s/surf/%s.sphere.reg" "%s" "%s" ' % ( + subject_id, out[0], op.join(subjects_dir, subject_id), out[0], + cmp_config.get_lausanne_atlas(out[1]), + op.join(fs_label_dir, out[2])) + runCmd(mris_cmd, log) + iflogger.info('-----------') + + annot = '--annotation "%s"' % out[4] + + mri_an_cmd = 'mri_annotation2label --subject %s --hemi %s --outdir "%s" %s' % ( + subject_id, out[0], op.join(output_dir, out[3]), annot) + iflogger.info(mri_an_cmd) + runCmd(mri_an_cmd, log) + iflogger.info('-----------') + iflogger.info(os.environ['SUBJECTS_DIR']) + # extract cc and unknown to add to tractography mask, we do not want this as a region of interest + # in FS 5.0, unknown and corpuscallosum are not available for the 35 scale (why?), + # but for the other scales only, take the ones from _60 + rhun = op.join(output_dir, 'rh.unknown.label') + lhun = op.join(output_dir, 'lh.unknown.label') + rhco = op.join(output_dir, 'rh.corpuscallosum.label') + lhco = op.join(output_dir, 'lh.corpuscallosum.label') + shutil.copy( + op.join(output_dir, 'regenerated_rh_60', 'rh.unknown.label'), rhun) + shutil.copy( + op.join(output_dir, 'regenerated_lh_60', 'lh.unknown.label'), lhun) + shutil.copy( + op.join(output_dir, 'regenerated_rh_60', 'rh.corpuscallosum.label'), + rhco) + shutil.copy( + op.join(output_dir, 'regenerated_lh_60', 'lh.corpuscallosum.label'), + lhco) + + mri_cmd = """mri_label2vol --label "%s" --label "%s" --label "%s" --label "%s" --temp "%s" --o "%s" --identity """ % ( + rhun, lhun, rhco, lhco, + op.join(op.join(subjects_dir, subject_id), 'mri', 'orig.mgz'), + op.join(fs_label_dir, 'cc_unknown.nii.gz')) + runCmd(mri_cmd, log) + runCmd('mris_volmask %s' % subject_id, log) + mri_cmd = 'mri_convert -i "%s/mri/ribbon.mgz" -o "%s/mri/ribbon.nii.gz"' % ( + op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) + runCmd(mri_cmd, log) + mri_cmd = 'mri_convert -i "%s/mri/aseg.mgz" -o "%s/mri/aseg.nii.gz"' % ( + op.join(subjects_dir, subject_id), op.join(subjects_dir, subject_id)) + runCmd(mri_cmd, log) + + iflogger.info("[ DONE ]") + + +def create_roi(subject_id, subjects_dir, fs_dir, parcellation_name, dilation): + """ Creates the ROI_%s.nii.gz files using the given parcellation information + from networks. Iteratively create volume. """ + import cmp + from cmp.util import runCmd + iflogger.info("Create the ROIs:") + output_dir = op.abspath(op.curdir) + fs_dir = op.join(subjects_dir, subject_id) + cmp_config = cmp.configuration.PipelineConfiguration() + cmp_config.parcellation_scheme = "Lausanne2008" + log = cmp_config.get_logger() + parval = cmp_config._get_lausanne_parcellation('Lausanne2008')[ + parcellation_name] + pgpath = parval['node_information_graphml'] + aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) + asegd = aseg.get_data() + + # identify cortical voxels, right (3) and left (42) hemispheres + idxr = np.where(asegd == 3) + idxl = np.where(asegd == 42) + xx = np.concatenate((idxr[0], idxl[0])) + yy = np.concatenate((idxr[1], idxl[1])) + zz = np.concatenate((idxr[2], idxl[2])) + + # initialize variables necessary for cortical ROIs dilation + # dimensions of the neighbourhood for rois labels assignment (choose odd dimensions!) 
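+    # The kernel below stores, for each voxel of this 25-voxel window, its
+    # Euclidean distance to the centre voxel; during dilation every
+    # unlabelled grey-matter voxel is then assigned the label of the nearest
+    # labelled voxel in the window (e.g. with shape (25, 25, 25) the centre
+    # is (12, 12, 12) and dist[0, 0, 0] = sqrt(3 * 12**2) ~= 20.8).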
+ shape = (25, 25, 25) + center = np.array(shape) // 2 + # dist: distances from the center of the neighbourhood + dist = np.zeros(shape, dtype='float32') + for x in range(shape[0]): + for y in range(shape[1]): + for z in range(shape[2]): + distxyz = center - [x, y, z] + dist[x, y, z] = np.sqrt(np.sum(np.multiply(distxyz, distxyz))) + + iflogger.info("Working on parcellation: ") + iflogger.info( + cmp_config._get_lausanne_parcellation('Lausanne2008')[ + parcellation_name]) + iflogger.info("========================") + pg = nx.read_graphml(pgpath) + # each node represents a brain region + # create a big 256^3 volume for storage of all ROIs + rois = np.zeros((256, 256, 256), dtype=np.int16) + + count = 0 + for brk, brv in pg.nodes(data=True): + count = count + 1 + iflogger.info(brv) + iflogger.info(brk) + if brv['dn_hemisphere'] == 'left': + hemi = 'lh' + elif brv['dn_hemisphere'] == 'right': + hemi = 'rh' + if brv['dn_region'] == 'subcortical': + iflogger.info(brv) + iflogger.info('---------------------') + iflogger.info('Work on brain region: %s', brv['dn_region']) + iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) + iflogger.info('Region %s of %s', count, pg.number_of_nodes()) + iflogger.info('---------------------') + # if it is subcortical, retrieve roi from aseg + idx = np.where(asegd == int(brv['dn_fs_aseg_val'])) + rois[idx] = int(brv['dn_correspondence_id']) + + elif brv['dn_region'] == 'cortical': + iflogger.info(brv) + iflogger.info('---------------------') + iflogger.info('Work on brain region: %s', brv['dn_region']) + iflogger.info('Freesurfer Name: %s', brv['dn_fsname']) + iflogger.info('Region %s of %s', count, pg.number_of_nodes()) + iflogger.info('---------------------') + + labelpath = op.join(output_dir, + parval['fs_label_subdir_name'] % hemi) + # construct .label file name + + fname = '%s.%s.label' % (hemi, brv['dn_fsname']) + + # execute fs mri_label2vol to generate volume roi from the label file + # store it in temporary file to be overwritten for each region + + mri_cmd = 'mri_label2vol --label "%s" --temp "%s" --o "%s" --identity' % ( + op.join(labelpath, fname), op.join(fs_dir, 'mri', 'orig.mgz'), + op.join(output_dir, 'tmp.nii.gz')) + runCmd(mri_cmd, log) + + tmp = nb.load(op.join(output_dir, 'tmp.nii.gz')) + tmpd = tmp.get_data() + + # find voxel and set them to intensityvalue in rois + idx = np.where(tmpd == 1) + rois[idx] = int(brv['dn_correspondence_id']) + + # store volume eg in ROI_scale33.nii.gz + out_roi = op.abspath('ROI_%s.nii.gz' % parcellation_name) + + # update the header + hdr = aseg.header + hdr2 = hdr.copy() + hdr2.set_data_dtype(np.uint16) + + log.info("Save output image to %s" % out_roi) + img = nb.Nifti1Image(rois, aseg.affine, hdr2) + nb.save(img, out_roi) + + iflogger.info("[ DONE ]") + # dilate cortical regions + if dilation is True: + iflogger.info("Dilating cortical regions...") + # loop throughout all the voxels belonging to the aseg GM volume + for j in range(xx.size): + if rois[xx[j], yy[j], zz[j]] == 0: + local = extract( + rois, shape, position=(xx[j], yy[j], zz[j]), fill=0) + mask = local.copy() + mask[np.nonzero(local > 0)] = 1 + thisdist = np.multiply(dist, mask) + thisdist[np.nonzero(thisdist == 0)] = np.amax(thisdist) + value = np.int_( + local[np.nonzero(thisdist == np.amin(thisdist))]) + if value.size > 1: + counts = np.bincount(value) + value = np.argmax(counts) + rois[xx[j], yy[j], zz[j]] = value + + # store volume eg in ROIv_scale33.nii.gz + out_roi = op.abspath('ROIv_%s.nii.gz' % parcellation_name) + iflogger.info('Save 
output image to %s', out_roi) + img = nb.Nifti1Image(rois, aseg.affine, hdr2) + nb.save(img, out_roi) + + iflogger.info("[ DONE ]") + + +def create_wm_mask(subject_id, subjects_dir, fs_dir, parcellation_name): + import cmp + import scipy.ndimage.morphology as nd + iflogger.info("Create white matter mask") + fs_dir = op.join(subjects_dir, subject_id) + cmp_config = cmp.configuration.PipelineConfiguration() + cmp_config.parcellation_scheme = "Lausanne2008" + pgpath = cmp_config._get_lausanne_parcellation('Lausanne2008')[ + parcellation_name]['node_information_graphml'] + # load ribbon as basis for white matter mask + fsmask = nb.load(op.join(fs_dir, 'mri', 'ribbon.nii.gz')) + fsmaskd = fsmask.get_data() + + wmmask = np.zeros(fsmaskd.shape) + # extract right and left white matter + idx_lh = np.where(fsmaskd == 120) + idx_rh = np.where(fsmaskd == 20) + + wmmask[idx_lh] = 1 + wmmask[idx_rh] = 1 + + # remove subcortical nuclei from white matter mask + aseg = nb.load(op.join(fs_dir, 'mri', 'aseg.nii.gz')) + asegd = aseg.get_data() + + # need binary erosion function + imerode = nd.binary_erosion + + # ventricle erosion + csfA = np.zeros(asegd.shape) + csfB = np.zeros(asegd.shape) + + # structuring elements for erosion + se1 = np.zeros((3, 3, 5)) + se1[1, :, 2] = 1 + se1[:, 1, 2] = 1 + se1[1, 1, :] = 1 + se = np.zeros((3, 3, 3)) + se[1, :, 1] = 1 + se[:, 1, 1] = 1 + se[1, 1, :] = 1 + + # lateral ventricles, thalamus proper and caudate + # the latter two removed for better erosion, but put back afterwards + idx = np.where((asegd == 4) | (asegd == 43) | (asegd == 11) | (asegd == 50) + | (asegd == 31) | (asegd == 63) | (asegd == 10) + | (asegd == 49)) + csfA[idx] = 1 + csfA = imerode(imerode(csfA, se1), se) + + # thalmus proper and cuadate are put back because they are not lateral ventricles + idx = np.where((asegd == 11) | (asegd == 50) | (asegd == 10) + | (asegd == 49)) + csfA[idx] = 0 + + # REST CSF, IE 3RD AND 4TH VENTRICULE AND EXTRACEREBRAL CSF + idx = np.where((asegd == 5) | (asegd == 14) | (asegd == 15) | (asegd == 24) + | (asegd == 44) | (asegd == 72) | (asegd == 75) + | (asegd == 76) | (asegd == 213) | (asegd == 221)) + # 43 ??, 4?? 213?, 221? + # more to discuss. + for i in [5, 14, 15, 24, 44, 72, 75, 76, 213, 221]: + idx = np.where(asegd == i) + csfB[idx] = 1 + + # do not remove the subthalamic nucleus for now from the wm mask + # 23, 60 + # would stop the fiber going to the segmented "brainstem" + + # grey nuclei, either with or without erosion + gr_ncl = np.zeros(asegd.shape) + + # with erosion + for i in [10, 11, 12, 49, 50, 51]: + idx = np.where(asegd == i) + # temporary volume + tmp = np.zeros(asegd.shape) + tmp[idx] = 1 + tmp = imerode(tmp, se) + idx = np.where(tmp == 1) + gr_ncl[idx] = 1 + + # without erosion + for i in [13, 17, 18, 26, 52, 53, 54, 58]: + idx = np.where(asegd == i) + gr_ncl[idx] = 1 + + # remove remaining structure, e.g. 
brainstem + remaining = np.zeros(asegd.shape) + idx = np.where(asegd == 16) + remaining[idx] = 1 + + # now remove all the structures from the white matter + idx = np.where((csfA != 0) | (csfB != 0) | (gr_ncl != 0) + | (remaining != 0)) + wmmask[idx] = 0 + iflogger.info( + "Removing lateral ventricles and eroded grey nuclei and brainstem from white matter mask" + ) + + # ADD voxels from 'cc_unknown.nii.gz' dataset + ccun = nb.load(op.join(fs_dir, 'label', 'cc_unknown.nii.gz')) + ccund = ccun.get_data() + idx = np.where(ccund != 0) + iflogger.info("Add corpus callosum and unknown to wm mask") + wmmask[idx] = 1 + + # check if we should subtract the cortical rois from this parcellation + iflogger.info('Loading ROI_%s.nii.gz to subtract cortical ROIs from white ' + 'matter mask', parcellation_name) + roi = nb.load(op.join(op.curdir, 'ROI_%s.nii.gz' % parcellation_name)) + roid = roi.get_data() + assert roid.shape[0] == wmmask.shape[0] + pg = nx.read_graphml(pgpath) + for brk, brv in pg.nodes(data=True): + if brv['dn_region'] == 'cortical': + iflogger.info('Subtracting region %s with intensity value %s', + brv['dn_region'], brv['dn_correspondence_id']) + idx = np.where(roid == int(brv['dn_correspondence_id'])) + wmmask[idx] = 0 + + # output white matter mask. crop and move it afterwards + wm_out = op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz') + img = nb.Nifti1Image(wmmask, fsmask.affine, fsmask.header) + iflogger.info('Save white matter mask: %s', wm_out) + nb.save(img, wm_out) + + +def crop_and_move_datasets(subject_id, subjects_dir, fs_dir, parcellation_name, + out_roi_file, dilation): + from cmp.util import runCmd + fs_dir = op.join(subjects_dir, subject_id) + cmp_config = cmp.configuration.PipelineConfiguration() + cmp_config.parcellation_scheme = "Lausanne2008" + log = cmp_config.get_logger() + output_dir = op.abspath(op.curdir) + + iflogger.info('Cropping and moving datasets to %s', output_dir) + ds = [(op.join(fs_dir, 'mri', 'aseg.nii.gz'), + op.abspath('aseg.nii.gz')), (op.join(fs_dir, 'mri', + 'ribbon.nii.gz'), + op.abspath('ribbon.nii.gz')), + (op.join(fs_dir, 'mri', 'fsmask_1mm.nii.gz'), + op.abspath('fsmask_1mm.nii.gz')), (op.join(fs_dir, 'label', + 'cc_unknown.nii.gz'), + op.abspath('cc_unknown.nii.gz'))] + + ds.append((op.abspath('ROI_%s.nii.gz' % parcellation_name), + op.abspath('ROI_HR_th.nii.gz'))) + if dilation is True: + ds.append((op.abspath('ROIv_%s.nii.gz' % parcellation_name), + op.abspath('ROIv_HR_th.nii.gz'))) + orig = op.join(fs_dir, 'mri', 'orig', '001.mgz') + for d in ds: + iflogger.info('Processing %s:', d[0]) + if not op.exists(d[0]): + raise Exception('File %s does not exist.' 
% d[0]) + # reslice to original volume because the roi creation with freesurfer + # changed to 256x256x256 resolution + mri_cmd = 'mri_convert -rl "%s" -rt nearest "%s" -nc "%s"' % (orig, + d[0], + d[1]) + runCmd(mri_cmd, log) + + +def extract(Z, shape, position, fill): + """ Extract voxel neighbourhood +Parameters +---------- +Z: the original data +shape: tuple containing neighbourhood dimensions +position: tuple containing central point indexes +fill: value for the padding of Z +Returns +------- +R: the neighbourhood of the specified point in Z +""" + R = np.ones(shape, dtype=Z.dtype) * \ + fill # initialize output block to the fill value + P = np.array(list(position)).astype( + int) # position coordinates(numpy array) + Rs = np.array(list(R.shape)).astype( + int) # output block dimensions (numpy array) + Zs = np.array(list(Z.shape)).astype( + int) # original volume dimensions (numpy array) + + R_start = np.zeros(len(shape)).astype(int) + R_stop = np.array(list(shape)).astype(int) + Z_start = (P - Rs // 2) + Z_start_cor = (np.maximum(Z_start, 0)).tolist() # handle borders + R_start = R_start + (Z_start_cor - Z_start) + Z_stop = (P + Rs // 2) + Rs % 2 + Z_stop_cor = (np.minimum(Z_stop, Zs)).tolist() # handle borders + R_stop = R_stop - (Z_stop - Z_stop_cor) + + R[R_start[0]:R_stop[0], R_start[1]:R_stop[1], R_start[2]:R_stop[ + 2]] = Z[Z_start_cor[0]:Z_stop_cor[0], Z_start_cor[1]:Z_stop_cor[1], + Z_start_cor[2]:Z_stop_cor[2]] + + return R + + +class ParcellateInputSpec(BaseInterfaceInputSpec): + subject_id = traits.String(mandatory=True, desc='Subject ID') + parcellation_name = traits.Enum( + 'scale500', ['scale33', 'scale60', 'scale125', 'scale250', 'scale500'], + usedefault=True) + freesurfer_dir = Directory(exists=True, desc='Freesurfer main directory') + subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory') + out_roi_file = File( + genfile=True, desc='Region of Interest file for connectivity mapping') + dilation = traits.Bool( + False, + usedefault=True, + desc='Dilate cortical parcels? Useful for fMRI connectivity') + + +class ParcellateOutputSpec(TraitedSpec): + roi_file = File( + exists=True, desc='Region of Interest file for connectivity mapping') + roiv_file = File( + desc='Region of Interest file for fMRI connectivity mapping') + white_matter_mask_file = File(exists=True, desc='White matter mask file') + cc_unknown_file = File( + desc='Image file with regions labelled as unknown cortical structures', + exists=True) + ribbon_file = File( + desc='Image file detailing the cortical ribbon', exists=True) + aseg_file = File( + desc= + 'Automated segmentation file converted from Freesurfer "subjects" directory', + exists=True) + roi_file_in_structural_space = File( + desc= + 'ROI image resliced to the dimensions of the original structural image', + exists=True) + dilated_roi_file_in_structural_space = File( + desc= + 'dilated ROI image resliced to the dimensions of the original structural image' + ) + + +class Parcellate(LibraryBaseInterface): + """Subdivides segmented ROI file into smaller subregions + + This interface implements the same procedure as in the ConnectomeMapper's + parcellation stage (cmp/stages/parcellation/maskcreation.py) for a single + parcellation scheme (e.g. 'scale500'). + + Example + ------- + + >>> import nipype.interfaces.cmtk as cmtk + >>> parcellate = cmtk.Parcellate() + >>> parcellate.inputs.freesurfer_dir = '.' + >>> parcellate.inputs.subjects_dir = '.' 
+ >>> parcellate.inputs.subject_id = 'subj1' + >>> parcellate.inputs.dilation = True + >>> parcellate.inputs.parcellation_name = 'scale500' + >>> parcellate.run() # doctest: +SKIP + """ + + input_spec = ParcellateInputSpec + output_spec = ParcellateOutputSpec + _pkg = 'cmp' + imports = ('scipy', ) + + def _run_interface(self, runtime): + if self.inputs.subjects_dir: + os.environ.update({'SUBJECTS_DIR': self.inputs.subjects_dir}) + + if not os.path.exists( + op.join(self.inputs.subjects_dir, self.inputs.subject_id)): + raise Exception + iflogger.info("ROI_HR_th.nii.gz / fsmask_1mm.nii.gz CREATION") + iflogger.info("=============================================") + create_annot_label(self.inputs.subject_id, self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name) + create_roi(self.inputs.subject_id, self.inputs.subjects_dir, + self.inputs.freesurfer_dir, self.inputs.parcellation_name, + self.inputs.dilation) + create_wm_mask(self.inputs.subject_id, self.inputs.subjects_dir, + self.inputs.freesurfer_dir, + self.inputs.parcellation_name) + crop_and_move_datasets( + self.inputs.subject_id, self.inputs.subjects_dir, + self.inputs.freesurfer_dir, self.inputs.parcellation_name, + self.inputs.out_roi_file, self.inputs.dilation) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.out_roi_file): + outputs['roi_file'] = op.abspath(self.inputs.out_roi_file) + else: + outputs['roi_file'] = op.abspath( + self._gen_outfilename('nii.gz', 'ROI')) + if self.inputs.dilation is True: + outputs['roiv_file'] = op.abspath( + self._gen_outfilename('nii.gz', 'ROIv')) + outputs['white_matter_mask_file'] = op.abspath('fsmask_1mm.nii.gz') + outputs['cc_unknown_file'] = op.abspath('cc_unknown.nii.gz') + outputs['ribbon_file'] = op.abspath('ribbon.nii.gz') + outputs['aseg_file'] = op.abspath('aseg.nii.gz') + outputs['roi_file_in_structural_space'] = op.abspath( + 'ROI_HR_th.nii.gz') + if self.inputs.dilation is True: + outputs['dilated_roi_file_in_structural_space'] = op.abspath( + 'ROIv_HR_th.nii.gz') + return outputs + + def _gen_outfilename(self, ext, prefix='ROI'): + return prefix + '_' + self.inputs.parcellation_name + '.' 
+ ext diff --git a/nipype/interfaces/cmtk/tests/__init__.py b/nipype/interfaces/cmtk/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/cmtk/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py new file mode 100644 index 0000000000..a80bbe757a --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_AverageNetworks.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..nx import AverageNetworks + + +def test_AverageNetworks_inputs(): + input_map = dict( + group_id=dict(usedefault=True, ), + in_files=dict(mandatory=True, ), + out_gexf_groupavg=dict(), + out_gpickled_groupavg=dict(), + resolution_network_file=dict(), + ) + inputs = AverageNetworks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AverageNetworks_outputs(): + output_map = dict( + gexf_groupavg=dict(), + gpickled_groupavg=dict(), + matlab_groupavgs=dict(), + ) + outputs = AverageNetworks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py b/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py new file mode 100644 index 0000000000..942f477518 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import CFFBaseInterface + + +def test_CFFBaseInterface_inputs(): + input_map = dict() + inputs = CFFBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py new file mode 100644 index 0000000000..00a1acea98 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_CFFConverter.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import CFFConverter + + +def test_CFFConverter_inputs(): + input_map = dict( + creator=dict(), + data_files=dict(), + description=dict(usedefault=True, ), + email=dict(), + gifti_labels=dict(), + gifti_surfaces=dict(), + gpickled_networks=dict(), + graphml_networks=dict(), + license=dict(), + nifti_volumes=dict(), + out_file=dict(usedefault=True, ), + publisher=dict(), + references=dict(), + relation=dict(), + rights=dict(), + script_files=dict(), + species=dict(usedefault=True, ), + timeseries_files=dict(), + title=dict(), + tract_files=dict(), + ) + inputs = CFFConverter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CFFConverter_outputs(): + output_map = dict(connectome_file=dict(), ) + outputs = CFFConverter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py new file mode 100644 index 0000000000..60e8596f5e --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateMatrix.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..cmtk import CreateMatrix + + +def test_CreateMatrix_inputs(): + input_map = dict( + count_region_intersections=dict(usedefault=True, ), + out_endpoint_array_name=dict(genfile=True, ), + out_fiber_length_std_matrix_mat_file=dict(genfile=True, ), + out_intersection_matrix_mat_file=dict(genfile=True, ), + out_matrix_file=dict(genfile=True, ), + out_matrix_mat_file=dict(usedefault=True, ), + out_mean_fiber_length_matrix_mat_file=dict(genfile=True, ), + out_median_fiber_length_matrix_mat_file=dict(genfile=True, ), + resolution_network_file=dict(mandatory=True, ), + roi_file=dict(mandatory=True, ), + tract_file=dict(mandatory=True, ), + ) + inputs = CreateMatrix.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateMatrix_outputs(): + output_map = dict( + endpoint_file=dict(), + endpoint_file_mm=dict(), + fiber_label_file=dict(), + fiber_labels_noorphans=dict(), + fiber_length_file=dict(), + fiber_length_std_matrix_mat_file=dict(), + filtered_tractographies=dict(), + filtered_tractography=dict(), + filtered_tractography_by_intersections=dict(), + intersection_matrix_file=dict(), + intersection_matrix_mat_file=dict(), + matlab_matrix_files=dict(), + matrix_file=dict(), + matrix_files=dict(), + matrix_mat_file=dict(), + mean_fiber_length_matrix_mat_file=dict(), + median_fiber_length_matrix_mat_file=dict(), + stats_file=dict(), + ) + outputs = CreateMatrix.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py new file mode 100644 index 0000000000..3635f21e59 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_CreateNodes.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..cmtk import CreateNodes + + +def test_CreateNodes_inputs(): + input_map = dict( + out_filename=dict(usedefault=True, ), + resolution_network_file=dict(mandatory=True, ), + roi_file=dict(mandatory=True, ), + ) + inputs = CreateNodes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateNodes_outputs(): + output_map = dict(node_network=dict(), ) + outputs = CreateNodes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py new file mode 100644 index 0000000000..ceaa6d8dea --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_MergeCNetworks.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import MergeCNetworks + + +def test_MergeCNetworks_inputs(): + input_map = dict( + 
in_files=dict(mandatory=True, ), + out_file=dict(usedefault=True, ), + ) + inputs = MergeCNetworks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MergeCNetworks_outputs(): + output_map = dict(connectome_file=dict(), ) + outputs = MergeCNetworks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py new file mode 100644 index 0000000000..e3220e4790 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkBasedStatistic.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..nbs import NetworkBasedStatistic + + +def test_NetworkBasedStatistic_inputs(): + input_map = dict( + edge_key=dict(usedefault=True, ), + in_group1=dict(mandatory=True, ), + in_group2=dict(mandatory=True, ), + node_position_network=dict(), + number_of_permutations=dict(usedefault=True, ), + out_nbs_network=dict(), + out_nbs_pval_network=dict(), + t_tail=dict(usedefault=True, ), + threshold=dict(usedefault=True, ), + ) + inputs = NetworkBasedStatistic.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NetworkBasedStatistic_outputs(): + output_map = dict( + nbs_network=dict(), + nbs_pval_network=dict(), + network_files=dict(), + ) + outputs = NetworkBasedStatistic.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py new file mode 100644 index 0000000000..d9a3f0c740 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_NetworkXMetrics.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..nx import NetworkXMetrics + + +def test_NetworkXMetrics_inputs(): + input_map = dict( + compute_clique_related_measures=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + out_edge_metrics_matlab=dict(genfile=True, ), + out_global_metrics_matlab=dict(genfile=True, ), + out_k_core=dict(usedefault=True, ), + out_k_crust=dict(usedefault=True, ), + out_k_shell=dict(usedefault=True, ), + out_node_metrics_matlab=dict(genfile=True, ), + out_pickled_extra_measures=dict(usedefault=True, ), + treat_as_weighted_graph=dict(usedefault=True, ), + ) + inputs = NetworkXMetrics.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NetworkXMetrics_outputs(): + output_map = dict( + edge_measure_networks=dict(), + edge_measures_matlab=dict(), + global_measures_matlab=dict(), + gpickled_network_files=dict(), + k_core=dict(), + k_crust=dict(), + k_networks=dict(), + k_shell=dict(), + matlab_dict_measures=dict(), + matlab_matrix_files=dict(), + node_measure_networks=dict(), + node_measures_matlab=dict(), + pickled_extra_measures=dict(), + ) + outputs = NetworkXMetrics.output_spec() + + for key, metadata in list(output_map.items()): + 
for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py new file mode 100644 index 0000000000..edcdf2e7a1 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_Parcellate.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..parcellation import Parcellate + + +def test_Parcellate_inputs(): + input_map = dict( + dilation=dict(usedefault=True, ), + freesurfer_dir=dict(), + out_roi_file=dict(genfile=True, ), + parcellation_name=dict(usedefault=True, ), + subject_id=dict(mandatory=True, ), + subjects_dir=dict(), + ) + inputs = Parcellate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Parcellate_outputs(): + output_map = dict( + aseg_file=dict(), + cc_unknown_file=dict(), + dilated_roi_file_in_structural_space=dict(), + ribbon_file=dict(), + roi_file=dict(), + roi_file_in_structural_space=dict(), + roiv_file=dict(), + white_matter_mask_file=dict(), + ) + outputs = Parcellate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py new file mode 100644 index 0000000000..dd2ce50aec --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_auto_ROIGen.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..cmtk import ROIGen + + +def test_ROIGen_inputs(): + input_map = dict( + LUT_file=dict(xor=['use_freesurfer_LUT'], ), + aparc_aseg_file=dict(mandatory=True, ), + freesurfer_dir=dict(requires=['use_freesurfer_LUT'], ), + out_dict_file=dict(genfile=True, ), + out_roi_file=dict(genfile=True, ), + use_freesurfer_LUT=dict(xor=['LUT_file'], ), + ) + inputs = ROIGen.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ROIGen_outputs(): + output_map = dict( + dict_file=dict(), + roi_file=dict(), + ) + outputs = ROIGen.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/cmtk/tests/test_nbs.py b/nipype/interfaces/cmtk/tests/test_nbs.py new file mode 100644 index 0000000000..be3008fb09 --- /dev/null +++ b/nipype/interfaces/cmtk/tests/test_nbs.py @@ -0,0 +1,60 @@ +from __future__ import unicode_literals +from ..nbs import NetworkBasedStatistic +from ....utils.misc import package_check +import numpy as np +import networkx as nx +import pytest + +have_cv = True +try: + package_check('cviewer') +except Exception as e: + have_cv = False + + +@pytest.fixture() +def creating_graphs(tmpdir): + graphlist = [] + graphnames = ["name" + str(i) for i in range(6)] + for idx, name in enumerate(graphnames): + graph = np.random.rand(10, 10) + G = nx.from_numpy_matrix(graph) + out_file = tmpdir.strpath + graphnames[idx] + '.pck' + # Save as pck file + nx.write_gpickle(G, out_file) + graphlist.append(out_file) + return graphlist + + +@pytest.mark.skipif( + have_cv, reason="tests for import error, 
cviewer available") +def test_importerror(creating_graphs, tmpdir): + tmpdir.chdir() + graphlist = creating_graphs + group1 = graphlist[:3] + group2 = graphlist[3:] + + nbs = NetworkBasedStatistic() + nbs.inputs.in_group1 = group1 + nbs.inputs.in_group2 = group2 + nbs.inputs.edge_key = "weight" + + with pytest.raises(ImportError) as e: + nbs.run() + + +@pytest.mark.skipif(not have_cv, reason="cviewer has to be available") +def test_keyerror(creating_graphs): + graphlist = creating_graphs + + group1 = graphlist[:3] + group2 = graphlist[3:] + + nbs = NetworkBasedStatistic() + nbs.inputs.in_group1 = group1 + nbs.inputs.in_group2 = group2 + nbs.inputs.edge_key = "Your_edge" + + with pytest.raises(KeyError) as e: + nbs.run() + assert "the graph edges do not have Your_edge attribute" in str(e.value) diff --git a/nipype/interfaces/dcm2nii.py b/nipype/interfaces/dcm2nii.py new file mode 100644 index 0000000000..db0c0ec4cf --- /dev/null +++ b/nipype/interfaces/dcm2nii.py @@ -0,0 +1,460 @@ +# -*- coding: utf-8 -*- +"""The dcm2nii module provides basic functions for dicom conversion +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, open +import os +import re +from copy import deepcopy + +from ..utils.filemanip import split_filename +from .base import (CommandLine, CommandLineInputSpec, InputMultiPath, traits, + TraitedSpec, OutputMultiPath, isdefined, File, Directory, + PackageInfo) + + +class Info(PackageInfo): + """Handle dcm2niix version information""" + + version_cmd = 'dcm2niix' + + @staticmethod + def parse_version(raw_info): + m = re.search(r'version (\S+)', raw_info) + return m.groups()[0] if m else None + + +class Dcm2niiInputSpec(CommandLineInputSpec): + source_names = InputMultiPath( + File(exists=True), + argstr="%s", + position=-1, + copyfile=False, + mandatory=True, + xor=['source_dir']) + source_dir = Directory( + exists=True, + argstr="%s", + position=-1, + mandatory=True, + xor=['source_names']) + anonymize = traits.Bool( + True, + argstr='-a', + usedefault=True, + desc="Remove identifying information") + config_file = File( + exists=True, + argstr="-b %s", + genfile=True, + desc="Load settings from specified inifile") + collapse_folders = traits.Bool( + True, argstr='-c', usedefault=True, desc="Collapse input folders") + date_in_filename = traits.Bool( + True, argstr='-d', usedefault=True, desc="Date in filename") + events_in_filename = traits.Bool( + True, + argstr='-e', + usedefault=True, + desc="Events (series/acq) in filename") + source_in_filename = traits.Bool( + False, argstr='-f', usedefault=True, desc="Source filename") + gzip_output = traits.Bool( + False, argstr='-g', usedefault=True, desc="Gzip output (.gz)") + id_in_filename = traits.Bool( + False, argstr='-i', usedefault=True, desc="ID in filename") + nii_output = traits.Bool( + True, + argstr='-n', + usedefault=True, + desc="Save as .nii - if no, create .hdr/.img pair") + output_dir = Directory( + exists=True, + argstr='-o %s', + genfile=True, + desc="Output dir - if unspecified, source directory is used") + protocol_in_filename = traits.Bool( + True, argstr='-p', usedefault=True, desc="Protocol in filename") + reorient = traits.Bool( + argstr='-r', desc="Reorient image to nearest orthogonal") + spm_analyze = traits.Bool( + argstr='-s', xor=['nii_output'], desc="SPM2/Analyze not SPM5/NIfTI") + convert_all_pars = traits.Bool( + True, + argstr='-v', + usedefault=True, + desc="Convert every image in directory") + reorient_and_crop = traits.Bool( + 
False, + argstr='-x', + usedefault=True, + desc="Reorient and crop 3D images") + + +class Dcm2niiOutputSpec(TraitedSpec): + converted_files = OutputMultiPath(File(exists=True)) + reoriented_files = OutputMultiPath(File(exists=True)) + reoriented_and_cropped_files = OutputMultiPath(File(exists=True)) + bvecs = OutputMultiPath(File(exists=True)) + bvals = OutputMultiPath(File(exists=True)) + + +class Dcm2nii(CommandLine): + """Uses MRIcron's dcm2nii to convert dicom files + + Examples + ======== + + >>> from nipype.interfaces.dcm2nii import Dcm2nii + >>> converter = Dcm2nii() + >>> converter.inputs.source_names = ['functional_1.dcm', 'functional_2.dcm'] + >>> converter.inputs.gzip_output = True + >>> converter.inputs.output_dir = '.' + >>> converter.cmdline # doctest: +ELLIPSIS + 'dcm2nii -a y -c y -b config.ini -v y -d y -e y -g y -i n -n y -o . -p y -x n -f n functional_1.dcm' +""" + + input_spec = Dcm2niiInputSpec + output_spec = Dcm2niiOutputSpec + _cmd = 'dcm2nii' + + def _format_arg(self, opt, spec, val): + if opt in [ + 'anonymize', 'collapse_folders', 'date_in_filename', + 'events_in_filename', 'source_in_filename', 'gzip_output', + 'id_in_filename', 'nii_output', 'protocol_in_filename', + 'reorient', 'spm_analyze', 'convert_all_pars', + 'reorient_and_crop' + ]: + spec = deepcopy(spec) + if val: + spec.argstr += ' y' + else: + spec.argstr += ' n' + val = True + if opt == 'source_names': + return spec.argstr % val[0] + return super(Dcm2nii, self)._format_arg(opt, spec, val) + + def _run_interface(self, runtime): + self._config_created = False + new_runtime = super(Dcm2nii, self)._run_interface(runtime) + (self.output_files, self.reoriented_files, + self.reoriented_and_cropped_files, self.bvecs, + self.bvals) = self._parse_stdout(new_runtime.stdout) + if self._config_created: + os.remove('config.ini') + return new_runtime + + def _parse_stdout(self, stdout): + files = [] + reoriented_files = [] + reoriented_and_cropped_files = [] + bvecs = [] + bvals = [] + skip = False + last_added_file = None + for line in stdout.split("\n"): + if not skip: + out_file = None + if line.startswith("Saving "): + out_file = line[len("Saving "):] + elif line.startswith("GZip..."): + # for gzipped output files are not absolute + fname = line[len("GZip..."):] + if len(files) and os.path.basename( + files[-1]) == fname[:-3]: + # we are seeing a previously reported conversion + # as being saved in gzipped form -- remove the + # obsolete, uncompressed file + files.pop() + if isdefined(self.inputs.output_dir): + output_dir = self.inputs.output_dir + else: + output_dir = self._gen_filename('output_dir') + out_file = os.path.abspath(os.path.join(output_dir, fname)) + elif line.startswith("Number of diffusion directions "): + if last_added_file: + base, filename, ext = split_filename(last_added_file) + bvecs.append(os.path.join(base, filename + ".bvec")) + bvals.append(os.path.join(base, filename + ".bval")) + elif line.startswith("Removed DWI from DTI scan"): + # such line can only follow the 'diffusion' case handled + # just above + for l in (bvecs, bvals): + l[-1] = os.path.join( + os.path.dirname(l[-1]), + 'x%s' % (os.path.basename(l[-1]), )) + elif re.search('.*->(.*)', line): + val = re.search('.*->(.*)', line) + val = val.groups()[0] + if isdefined(self.inputs.output_dir): + output_dir = self.inputs.output_dir + else: + output_dir = self._gen_filename('output_dir') + val = os.path.join(output_dir, val) + if os.path.exists(val): + out_file = val + + if out_file: + if out_file not in files: + 
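+                        # Guard against duplicates: the same output path can
+                        # be reported on more than one stdout line (e.g. once
+                        # on "Saving" and again after gzipping).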
files.append(out_file) + last_added_file = out_file + continue + + if line.startswith("Reorienting as "): + reoriented_files.append(line[len("Reorienting as "):]) + skip = True + continue + elif line.startswith("Cropping NIfTI/Analyze image "): + base, filename = os.path.split( + line[len("Cropping NIfTI/Analyze image "):]) + filename = "c" + filename + if os.path.exists(os.path.join( + base, filename)) or self.inputs.reorient_and_crop: + # if reorient&crop is true but the file doesn't exist, this errors when setting outputs + reoriented_and_cropped_files.append( + os.path.join(base, filename)) + skip = True + continue + + skip = False + return files, reoriented_files, reoriented_and_cropped_files, bvecs, bvals + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['converted_files'] = self.output_files + outputs['reoriented_files'] = self.reoriented_files + outputs[ + 'reoriented_and_cropped_files'] = self.reoriented_and_cropped_files + outputs['bvecs'] = self.bvecs + outputs['bvals'] = self.bvals + return outputs + + def _gen_filename(self, name): + if name == 'output_dir': + return os.getcwd() + elif name == 'config_file': + self._config_created = True + config_file = "config.ini" + with open(config_file, "w") as f: + # disable interactive mode + f.write("[BOOL]\nManualNIfTIConv=0\n") + return config_file + return None + + +class Dcm2niixInputSpec(CommandLineInputSpec): + source_names = InputMultiPath( + File(exists=True), + argstr="%s", + position=-1, + copyfile=False, + mandatory=True, + desc=('A set of filenames to be converted. Note that the current ' + 'version (1.0.20180328) of dcm2niix converts any files in the ' + 'directory. To only convert specific files they should be in an ' + 'isolated directory'), + xor=['source_dir']) + source_dir = Directory( + exists=True, + argstr="%s", + position=-1, + mandatory=True, + desc='A directory containing dicom files to be converted', + xor=['source_names']) + out_filename = traits.Str( + argstr="-f %s", + desc="Output filename template (" + "%a=antenna (coil) number, " + "%c=comments, " + "%d=description, " + "%e=echo number, " + "%f=folder name, " + "%i=ID of patient, " + "%j=seriesInstanceUID, " + "%k=studyInstanceUID, " + "%m=manufacturer, " + "%n=name of patient, " + "%p=protocol, " + "%s=series number, " + "%t=time, " + "%u=acquisition number, " + "%v=vendor, " + "%x=study ID; " + "%z=sequence name)") + output_dir = Directory( + ".", + usedefault=True, + exists=True, + argstr='-o %s', + desc="Output directory") + bids_format = traits.Bool( + True, + argstr='-b', + usedefault=True, + desc="Create a BIDS sidecar file") + anon_bids = traits.Bool( + argstr='-ba', + requires=["bids_format"], + desc="Anonymize BIDS") + compress = traits.Enum( + 'y', 'i', 'n', '3', + argstr='-z %s', + usedefault=True, + desc="Gzip compress images - [y=pigz, i=internal, n=no, 3=no,3D]") + merge_imgs = traits.Bool( + False, + argstr='-m', + usedefault=True, + desc="merge 2D slices from same series") + single_file = traits.Bool( + False, + argstr='-s', + usedefault=True, + desc="Single file mode") + verbose = traits.Bool( + False, + argstr='-v', + usedefault=True, + desc="Verbose output") + crop = traits.Bool( + False, + argstr='-x', + usedefault=True, + desc="Crop 3D T1 acquisitions") + has_private = traits.Bool( + False, + argstr='-t', + usedefault=True, + desc="Flag if text notes include private patient details") + compression = traits.Enum( + 1, 2, 3, 4, 5, 6, 7, 8, 9, + argstr='-%d', + desc="Gz compression level (1=fastest, 
9=smallest)") + comment = traits.Str( + argstr='-c %s', + desc="Comment stored as NIfTI aux_file") + ignore_deriv = traits.Bool( + argstr='-i', + desc="Ignore derived, localizer and 2D images") + series_numbers = InputMultiPath( + traits.Str(), + argstr='-n %s...', + desc="Selectively convert by series number - can be used up to 16 times") + philips_float = traits.Bool( + argstr='-p', + desc="Philips precise float (not display) scaling") + + +class Dcm2niixOutputSpec(TraitedSpec): + converted_files = OutputMultiPath(File(exists=True)) + bvecs = OutputMultiPath(File(exists=True)) + bvals = OutputMultiPath(File(exists=True)) + bids = OutputMultiPath(File(exists=True)) + + +class Dcm2niix(CommandLine): + """Uses Chris Rorden's dcm2niix to convert dicom files + + Examples + ======== + + >>> from nipype.interfaces.dcm2nii import Dcm2niix + >>> converter = Dcm2niix() + >>> converter.inputs.source_dir = 'dicomdir' + >>> converter.inputs.compression = 5 + >>> converter.inputs.output_dir = 'ds005' + >>> converter.cmdline + 'dcm2niix -b y -z y -5 -x n -t n -m n -o ds005 -s n -v n dicomdir' + >>> converter.run() # doctest: +SKIP + + # In the example below, we note that the current version of dcm2niix + # converts any files in the directory containing the files in the list. We + # also do not support nested filenames with this option. Thus all files + # should have a common root directory. + >>> converter = Dcm2niix() + >>> converter.inputs.source_names = ['functional_1.dcm', 'functional_2.dcm'] + >>> converter.inputs.compression = 5 + >>> converter.inputs.output_dir = 'ds005' + >>> converter.cmdline + 'dcm2niix -b y -z y -5 -x n -t n -m n -o ds005 -s n -v n .' + >>> converter.run() # doctest: +SKIP + """ + + input_spec = Dcm2niixInputSpec + output_spec = Dcm2niixOutputSpec + _cmd = 'dcm2niix' + + @property + def version(self): + return Info.version() + + def _format_arg(self, opt, spec, val): + bools = ['bids_format', 'merge_imgs', 'single_file', 'verbose', 'crop', + 'has_private', 'anon_bids', 'ignore_deriv', 'philips_float'] + if opt in bools: + spec = deepcopy(spec) + if val: + spec.argstr += ' y' + else: + spec.argstr += ' n' + val = True + if opt == 'source_names': + return spec.argstr % (os.path.dirname(val[0]) or '.') + return super(Dcm2niix, self)._format_arg(opt, spec, val) + + def _run_interface(self, runtime): + # may use return code 1 despite conversion + runtime = super(Dcm2niix, self)._run_interface( + runtime, correct_return_codes=(0, 1, )) + if self.inputs.bids_format: + (self.output_files, self.bvecs, self.bvals, + self.bids) = self._parse_stdout(runtime.stdout) + else: + (self.output_files, self.bvecs, self.bvals) = self._parse_stdout( + runtime.stdout) + return runtime + + def _parse_stdout(self, stdout): + files = [] + bvecs = [] + bvals = [] + bids = [] + skip = False + find_b = False + for line in stdout.split("\n"): + if not skip: + out_file = None + if line.startswith("Convert "): # output + fname = str(re.search('\S+/\S+', line).group(0)) + out_file = os.path.abspath(fname) + # extract bvals + if find_b: + bvecs.append(out_file + ".bvec") + bvals.append(out_file + ".bval") + find_b = False + # next scan will have bvals/bvecs + elif 'DTI gradients' in line or 'DTI gradient directions' in line or 'DTI vectors' in line: + find_b = True + if out_file: + ext = '.nii' if self.inputs.compress == 'n' else '.nii.gz' + files.append(out_file + ext) + if self.inputs.bids_format: + bids.append(out_file + ".json") + skip = False + # just return what was done + if not bids: + return files, 
bvecs, bvals
+        else:
+            return files, bvecs, bvals, bids
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['converted_files'] = self.output_files
+        outputs['bvecs'] = self.bvecs
+        outputs['bvals'] = self.bvals
+        if self.inputs.bids_format:
+            outputs['bids'] = self.bids
+        return outputs
diff --git a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py
new file mode 100644
index 0000000000..626cabe6cf
--- /dev/null
+++ b/nipype/interfaces/dcmstack.py
@@ -0,0 +1,418 @@
+# -*- coding: utf-8 -*-
+"""Provides interfaces to various commands provided by dcmstack
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+from os import path as op
+import string
+import errno
+from glob import glob
+
+import nibabel as nb
+import imghdr
+
+from .base import (TraitedSpec, DynamicTraitedSpec, InputMultiPath, File,
+                   Directory, traits, BaseInterface, isdefined, Undefined)
+from ..utils import NUMPY_MMAP
+
+have_dcmstack = True
+try:
+    import dicom
+    import dcmstack
+    from dcmstack.dcmmeta import NiftiWrapper
+except ImportError:
+    have_dcmstack = False
+
+
+def sanitize_path_comp(path_comp):
+    result = []
+    for char in path_comp:
+        # string.letters is Python 2 only; ascii_letters works on 2 and 3
+        if char not in string.ascii_letters + string.digits + '-_.':
+            result.append('_')
+        else:
+            result.append(char)
+    return ''.join(result)
+
+
+class NiftiGeneratorBaseInputSpec(TraitedSpec):
+    out_format = traits.Str(desc="String which can be formatted with "
+                            "meta data to create the output filename(s)")
+    out_ext = traits.Str(
+        '.nii.gz', usedefault=True, desc="Determines output file type")
+    out_path = Directory(
+        desc='output path, current working directory if not set')
+
+
+class NiftiGeneratorBase(BaseInterface):
+    '''Base class for interfaces that produce Nifti files, potentially with
+    embedded meta data.'''
+
+    def _get_out_path(self, meta, idx=None):
+        '''Return the output path for the generated Nifti.'''
+        if self.inputs.out_format:
+            out_fmt = self.inputs.out_format
+        else:
+            # If no out_format is specified, use a sane default that will work
+            # with the provided meta data.
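+            # The pieces collected below are joined with '-'; 'sequence' is
+            # the last resort when no usable meta data keys are present.
+            # A hypothetical example: idx=2 with SeriesNumber 5 and
+            # ProtocolName 'ep2d_bold' yields '002-005-ep2d_bold' + out_ext.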
+            out_fmt = []
+            if idx is not None:
+                out_fmt.append('%03d' % idx)
+            if 'SeriesNumber' in meta:
+                out_fmt.append('%(SeriesNumber)03d')
+            if 'ProtocolName' in meta:
+                out_fmt.append('%(ProtocolName)s')
+            elif 'SeriesDescription' in meta:
+                out_fmt.append('%(SeriesDescription)s')
+            else:
+                out_fmt.append('sequence')
+            out_fmt = '-'.join(out_fmt)
+        out_fn = (out_fmt % meta) + self.inputs.out_ext
+        out_fn = sanitize_path_comp(out_fn)
+
+        out_path = os.getcwd()
+        if isdefined(self.inputs.out_path):
+            out_path = op.abspath(self.inputs.out_path)
+
+        # now, mkdir -p $out_path
+        try:
+            os.makedirs(out_path)
+        except OSError as exc:  # Python >2.5
+            if exc.errno == errno.EEXIST and op.isdir(out_path):
+                pass
+            else:
+                raise
+
+        return op.join(out_path, out_fn)
+
+
+class DcmStackInputSpec(NiftiGeneratorBaseInputSpec):
+    dicom_files = traits.Either(
+        InputMultiPath(File(exists=True)),
+        Directory(exists=True),
+        traits.Str(),
+        mandatory=True)
+    embed_meta = traits.Bool(desc="Embed DICOM meta data into result")
+    exclude_regexes = traits.List(desc="Meta data to exclude, supplementing "
+                                  "any default exclude filters")
+    include_regexes = traits.List(desc="Meta data to include, overriding any "
+                                  "exclude filters")
+    force_read = traits.Bool(
+        True,
+        usedefault=True,
+        desc=('Force reading files without DICM marker'))
+
+
+class DcmStackOutputSpec(TraitedSpec):
+    out_file = File(exists=True)
+
+
+class DcmStack(NiftiGeneratorBase):
+    '''Create one Nifti file from a set of DICOM files. Can optionally embed
+    meta data.
+
+    Example
+    -------
+
+    >>> from nipype.interfaces.dcmstack import DcmStack
+    >>> stacker = DcmStack()
+    >>> stacker.inputs.dicom_files = 'path/to/series/'
+    >>> result = stacker.run() # doctest: +SKIP
+    >>> result.outputs.out_file # doctest: +SKIP
+    '/path/to/cwd/sequence.nii.gz'
+    '''
+    input_spec = DcmStackInputSpec
+    output_spec = DcmStackOutputSpec
+
+    def _get_filelist(self, trait_input):
+        if isinstance(trait_input, (str, bytes)):
+            if op.isdir(trait_input):
+                return glob(op.join(trait_input, '*.dcm'))
+            else:
+                return glob(trait_input)
+
+        return trait_input
+
+    def _run_interface(self, runtime):
+        src_paths = self._get_filelist(self.inputs.dicom_files)
+        include_regexes = dcmstack.default_key_incl_res
+        if isdefined(self.inputs.include_regexes):
+            include_regexes += self.inputs.include_regexes
+        exclude_regexes = dcmstack.default_key_excl_res
+        if isdefined(self.inputs.exclude_regexes):
+            exclude_regexes += self.inputs.exclude_regexes
+        meta_filter = dcmstack.make_key_regex_filter(exclude_regexes,
+                                                     include_regexes)
+        stack = dcmstack.DicomStack(meta_filter=meta_filter)
+        for src_path in src_paths:
+            if not imghdr.what(src_path) == "gif":
+                src_dcm = dicom.read_file(
+                    src_path, force=self.inputs.force_read)
+                stack.add_dcm(src_dcm)
+        nii = stack.to_nifti(embed_meta=True)
+        nw = NiftiWrapper(nii)
+        self.out_path = \
+            self._get_out_path(nw.meta_ext.get_class_dict(('global', 'const')))
+        if not self.inputs.embed_meta:
+            nw.remove_extension()
+        nb.save(nii, self.out_path)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs["out_file"] = self.out_path
+        return outputs
+
+
+class GroupAndStackOutputSpec(TraitedSpec):
+    out_list = traits.List(desc="List of output nifti files")
+
+
+class GroupAndStack(DcmStack):
+    '''Create (potentially) multiple Nifti files for a set of DICOM files.
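+
+    Example
+    -------
+
+    A minimal usage sketch, assuming a directory of DICOM files; output
+    names are derived from each group's meta data:
+
+    >>> from nipype.interfaces.dcmstack import GroupAndStack
+    >>> grouper = GroupAndStack()
+    >>> grouper.inputs.dicom_files = 'path/to/series/'
+    >>> grouper.run() # doctest: +SKIP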
+    '''
+    input_spec = DcmStackInputSpec
+    output_spec = GroupAndStackOutputSpec
+
+    def _run_interface(self, runtime):
+        src_paths = self._get_filelist(self.inputs.dicom_files)
+        stacks = dcmstack.parse_and_stack(src_paths)
+
+        self.out_list = []
+        for key, stack in list(stacks.items()):
+            nw = NiftiWrapper(stack.to_nifti(embed_meta=True))
+            const_meta = nw.meta_ext.get_class_dict(('global', 'const'))
+            out_path = self._get_out_path(const_meta)
+            if not self.inputs.embed_meta:
+                nw.remove_extension()
+            nb.save(nw.nii_img, out_path)
+            self.out_list.append(out_path)
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs["out_list"] = self.out_list
+        return outputs
+
+
+class LookupMetaInputSpec(TraitedSpec):
+    in_file = File(mandatory=True, exists=True, desc='The input Nifti file')
+    meta_keys = traits.Either(
+        traits.List(),
+        traits.Dict(),
+        mandatory=True,
+        desc=("List of meta data keys to lookup, or a "
+              "dict where keys specify the meta data "
+              "keys to lookup and the values specify "
+              "the output names"))
+
+
+class LookupMeta(BaseInterface):
+    '''Lookup meta data values from a Nifti with embedded meta data.
+
+    Example
+    -------
+
+    >>> from nipype.interfaces import dcmstack
+    >>> lookup = dcmstack.LookupMeta()
+    >>> lookup.inputs.in_file = 'functional.nii'
+    >>> lookup.inputs.meta_keys = {'RepetitionTime' : 'TR', \
+    'EchoTime' : 'TE'}
+    >>> result = lookup.run() # doctest: +SKIP
+    >>> result.outputs.TR # doctest: +SKIP
+    9500.0
+    >>> result.outputs.TE # doctest: +SKIP
+    95.0
+    '''
+    input_spec = LookupMetaInputSpec
+    output_spec = DynamicTraitedSpec
+
+    def _make_name_map(self):
+        if isinstance(self.inputs.meta_keys, list):
+            self._meta_keys = {}
+            for key in self.inputs.meta_keys:
+                self._meta_keys[key] = key
+        else:
+            self._meta_keys = self.inputs.meta_keys
+
+    def _outputs(self):
+        self._make_name_map()
+        outputs = super(LookupMeta, self)._outputs()
+        undefined_traits = {}
+        for out_name in list(self._meta_keys.values()):
+            outputs.add_trait(out_name, traits.Any)
+            undefined_traits[out_name] = Undefined
+        outputs.trait_set(trait_change_notify=False, **undefined_traits)
+        # Not sure why this is needed
+        for out_name in list(self._meta_keys.values()):
+            _ = getattr(outputs, out_name)
+        return outputs
+
+    def _run_interface(self, runtime):
+        # If the 'meta_keys' input is a list, convert it to a dict
+        self._make_name_map()
+        nw = NiftiWrapper.from_filename(self.inputs.in_file)
+        self.result = {}
+        for meta_key, out_name in list(self._meta_keys.items()):
+            self.result[out_name] = nw.meta_ext.get_values(meta_key)
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs.update(self.result)
+        return outputs
+
+
+class CopyMetaInputSpec(TraitedSpec):
+    src_file = File(mandatory=True, exists=True)
+    dest_file = File(mandatory=True, exists=True)
+    include_classes = traits.List(desc="List of specific meta data "
+                                  "classifications to include. If not "
+                                  "specified include everything.")
+    exclude_classes = traits.List(desc="List of meta data "
+                                  "classifications to exclude")
+
+
+class CopyMetaOutputSpec(TraitedSpec):
+    dest_file = File(exists=True)
+
+
+class CopyMeta(BaseInterface):
+    '''Copy meta data from one Nifti file to another.
+    Useful for preserving meta data after some processing steps.'''
+    input_spec = CopyMetaInputSpec
+    output_spec = CopyMetaOutputSpec
+
+    def _run_interface(self, runtime):
+        src_nii = nb.load(self.inputs.src_file)
+        src = NiftiWrapper(src_nii, make_empty=True)
+        dest_nii = nb.load(self.inputs.dest_file)
+        dest = NiftiWrapper(dest_nii, make_empty=True)
+        classes = src.meta_ext.get_valid_classes()
+        if self.inputs.include_classes:
+            classes = [
+                cls for cls in classes if cls in self.inputs.include_classes
+            ]
+        if self.inputs.exclude_classes:
+            classes = [
+                cls for cls in classes
+                if cls not in self.inputs.exclude_classes
+            ]
+
+        for cls in classes:
+            src_dict = src.meta_ext.get_class_dict(cls)
+            dest_dict = dest.meta_ext.get_class_dict(cls)
+            dest_dict.update(src_dict)
+        # Update the shape and slice dimension to reflect the meta extension
+        # update.
+        dest.meta_ext.slice_dim = src.meta_ext.slice_dim
+        dest.meta_ext.shape = src.meta_ext.shape
+
+        self.out_path = op.join(os.getcwd(), op.basename(
+            self.inputs.dest_file))
+        dest.to_filename(self.out_path)
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['dest_file'] = self.out_path
+        return outputs
+
+
+class MergeNiftiInputSpec(NiftiGeneratorBaseInputSpec):
+    in_files = traits.List(mandatory=True, desc="List of Nifti files to merge")
+    sort_order = traits.Either(
+        traits.Str(),
+        traits.List(),
+        desc="One or more meta data keys to "
+        "sort files by.")
+    merge_dim = traits.Int(desc="Dimension to merge along. If not "
+                           "specified, the last singular or "
+                           "non-existent dimension is used.")
+
+
+class MergeNiftiOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc="Merged Nifti file")
+
+
+def make_key_func(meta_keys, index=None):
+    def key_func(src_nii):
+        result = [src_nii.get_meta(key, index) for key in meta_keys]
+        return result
+
+    return key_func
+
+
+class MergeNifti(NiftiGeneratorBase):
+    '''Merge multiple Nifti files into one. Merges together meta data
+    extensions as well.'''
+    input_spec = MergeNiftiInputSpec
+    output_spec = MergeNiftiOutputSpec
+
+    def _run_interface(self, runtime):
+        niis = [nb.load(fn, mmap=NUMPY_MMAP) for fn in self.inputs.in_files]
+        nws = [NiftiWrapper(nii, make_empty=True) for nii in niis]
+        if self.inputs.sort_order:
+            sort_order = self.inputs.sort_order
+            if isinstance(sort_order, (str, bytes)):
+                sort_order = [sort_order]
+            nws.sort(key=make_key_func(sort_order))
+        if self.inputs.merge_dim == traits.Undefined:
+            merge_dim = None
+        else:
+            merge_dim = self.inputs.merge_dim
+        merged = NiftiWrapper.from_sequence(nws, merge_dim)
+        const_meta = merged.meta_ext.get_class_dict(('global', 'const'))
+        self.out_path = self._get_out_path(const_meta)
+        nb.save(merged.nii_img, self.out_path)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_file'] = self.out_path
+        return outputs
+
+
+class SplitNiftiInputSpec(NiftiGeneratorBaseInputSpec):
+    in_file = File(exists=True, mandatory=True, desc="Nifti file to split")
+    split_dim = traits.Int(desc="Dimension to split along. If not "
+                           "specified, the last dimension is used.")
+
+
+class SplitNiftiOutputSpec(TraitedSpec):
+    out_list = traits.List(File(exists=True), desc="Split Nifti files")
+
+
+class SplitNifti(NiftiGeneratorBase):
+    '''
+    Split one Nifti file into many along the specified dimension. Each
+    result has an updated meta data extension as well.
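+
+    Example
+    -------
+
+    A minimal usage sketch, assuming a 4D input; one file per volume is
+    written when splitting along the last dimension:
+
+    >>> from nipype.interfaces.dcmstack import SplitNifti
+    >>> splitter = SplitNifti()
+    >>> splitter.inputs.in_file = 'functional.nii'
+    >>> splitter.run() # doctest: +SKIP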
+ ''' + input_spec = SplitNiftiInputSpec + output_spec = SplitNiftiOutputSpec + + def _run_interface(self, runtime): + self.out_list = [] + nii = nb.load(self.inputs.in_file) + nw = NiftiWrapper(nii, make_empty=True) + split_dim = None + if self.inputs.split_dim == traits.Undefined: + split_dim = None + else: + split_dim = self.inputs.split_dim + for split_idx, split_nw in enumerate(nw.split(split_dim)): + const_meta = split_nw.meta_ext.get_class_dict(('global', 'const')) + out_path = self._get_out_path(const_meta, idx=split_idx) + nb.save(split_nw.nii_img, out_path) + self.out_list.append(out_path) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_list'] = self.out_list + return outputs diff --git a/nipype/interfaces/diffusion_toolkit/__init__.py b/nipype/interfaces/diffusion_toolkit/__init__.py new file mode 100644 index 0000000000..cef13227c4 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from .base import Info +from .postproc import SplineFilter, TrackMerge +from .dti import DTIRecon, DTITracker +from .odf import HARDIMat, ODFRecon, ODFTracker diff --git a/nipype/interfaces/diffusion_toolkit/base.py b/nipype/interfaces/diffusion_toolkit/base.py new file mode 100644 index 0000000000..c8e3a17c61 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/base.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The dtk module provides basic functions for interfacing with +Diffusion Toolkit tools. + +Currently these tools are supported: + + * TODO + +Examples +-------- +See the docstrings for the individual classes for 'working' examples. + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object +import re +from ..base import CommandLine + +__docformat__ = 'restructuredtext' + + +class Info(object): + """ Handle dtk output type and version information. 
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.diffusion_toolkit import Info
+    >>> Info.version() # doctest: +SKIP
+
+    """
+
+    @staticmethod
+    def version():
+        """Check for dtk version on system
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        version : str
+            Version number as string, or None if dtk was not found
+
+        """
+        clout = CommandLine(
+            command='dti_recon', terminal_output='allatonce').run()
+
+        if clout.runtime.returncode != 0:
+            return None
+
+        dtirecon = clout.runtime.stdout
+        result = re.search('dti_recon (.*)\n', dtirecon)
+        version = result.group(0).split()[1]
+        return version
diff --git a/nipype/interfaces/diffusion_toolkit/dti.py b/nipype/interfaces/diffusion_toolkit/dti.py
new file mode 100644
index 0000000000..570ae55df5
--- /dev/null
+++ b/nipype/interfaces/diffusion_toolkit/dti.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Provides interfaces to various commands provided by diffusion toolkit
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import range, open
+
+import os
+import re
+
+from ...utils.filemanip import fname_presuffix, split_filename, copyfile
+from ..base import (TraitedSpec, File, traits, CommandLine,
+                    CommandLineInputSpec, isdefined)
+
+__docformat__ = 'restructuredtext'
+
+
+class DTIReconInputSpec(CommandLineInputSpec):
+    DWI = File(
+        desc='Input diffusion volume',
+        argstr='%s',
+        exists=True,
+        mandatory=True,
+        position=1)
+    out_prefix = traits.Str(
+        "dti",
+        desc='Output file prefix',
+        argstr='%s',
+        usedefault=True,
+        position=2)
+    output_type = traits.Enum(
+        'nii',
+        'analyze',
+        'ni1',
+        'nii.gz',
+        argstr='-ot %s',
+        desc='output file type',
+        usedefault=True)
+    bvecs = File(
+        exists=True, desc='b vectors file', argstr='-gm %s', mandatory=True)
+    bvals = File(exists=True, desc='b values file', mandatory=True)
+    n_averages = traits.Int(desc='Number of averages', argstr='-nex %s')
+    image_orientation_vectors = traits.List(
+        traits.Float(),
+        minlen=6,
+        maxlen=6,
+        desc="""specify image orientation vectors. if just one argument given,
+        will treat it as filename and read the orientation vectors from
+        the file. if 6 arguments are given, will treat them as 6 float
+        numbers and construct the 1st and 2nd vector and calculate the 3rd
+        one automatically.
+        this information will be used to determine image orientation,
+        as well as to adjust gradient vectors with oblique angle when""",
+        argstr="-iop %f")
+    oblique_correction = traits.Bool(
+        desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not
+        adjust gradient accordingly, thus it requires adjustment for correct
+        diffusion tensor calculation""",
+        argstr="-oc")
+    b0_threshold = traits.Float(
+        desc="""program will use b0 image with the given threshold to mask out high
+        background of fa/adc maps. by default it will calculate threshold
+        automatically.
but if it failed, you need to set it manually.""", + argstr="-b0_th") + + +class DTIReconOutputSpec(TraitedSpec): + ADC = File(exists=True) + B0 = File(exists=True) + L1 = File(exists=True) + L2 = File(exists=True) + L3 = File(exists=True) + exp = File(exists=True) + FA = File(exists=True) + FA_color = File(exists=True) + tensor = File(exists=True) + V1 = File(exists=True) + V2 = File(exists=True) + V3 = File(exists=True) + + +class DTIRecon(CommandLine): + """Use dti_recon to generate tensors and other maps + """ + + input_spec = DTIReconInputSpec + output_spec = DTIReconOutputSpec + + _cmd = 'dti_recon' + + def _create_gradient_matrix(self, bvecs_file, bvals_file): + _gradient_matrix_file = 'gradient_matrix.txt' + with open(bvals_file) as fbvals: + bvals = [val for val in re.split('\s+', fbvals.readline().strip())] + with open(bvecs_file) as fbvecs: + bvecs_x = fbvecs.readline().split() + bvecs_y = fbvecs.readline().split() + bvecs_z = fbvecs.readline().split() + + with open(_gradient_matrix_file, 'w') as gradient_matrix_f: + for i in range(len(bvals)): + gradient_matrix_f.write("%s, %s, %s, %s\n" % + (bvecs_x[i], bvecs_y[i], bvecs_z[i], + bvals[i])) + return _gradient_matrix_file + + def _format_arg(self, name, spec, value): + if name == "bvecs": + new_val = self._create_gradient_matrix(self.inputs.bvecs, + self.inputs.bvals) + return super(DTIRecon, self)._format_arg("bvecs", spec, new_val) + return super(DTIRecon, self)._format_arg(name, spec, value) + + def _list_outputs(self): + out_prefix = self.inputs.out_prefix + output_type = self.inputs.output_type + + outputs = self.output_spec().get() + outputs['ADC'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_adc.' + output_type)) + outputs['B0'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_b0.' + output_type)) + outputs['L1'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_e1.' + output_type)) + outputs['L2'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_e2.' + output_type)) + outputs['L3'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_e3.' + output_type)) + outputs['exp'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_exp.' + output_type)) + outputs['FA'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_fa.' + output_type)) + outputs['FA_color'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_fa_color.' + output_type)) + outputs['tensor'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_tensor.' + output_type)) + outputs['V1'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_v1.' + output_type)) + outputs['V2'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_v2.' + output_type)) + outputs['V3'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_v3.' + output_type)) + + return outputs + + +class DTITrackerInputSpec(CommandLineInputSpec): + tensor_file = File(exists=True, desc="reconstructed tensor file") + input_type = traits.Enum( + 'nii', + 'analyze', + 'ni1', + 'nii.gz', + desc="""input and output file type. 
accepted values are:
+        analyze -> analyze format 7.5
+        ni1 -> nifti format saved in separate .hdr and .img file
+        nii -> nifti format with one .nii file
+        nii.gz -> nifti format with compression
+        default type is 'nii'""",
+        argstr="-it %s")
+    tracking_method = traits.Enum(
+        'fact',
+        'rk2',
+        'tl',
+        'sl',
+        desc="""fact -> use FACT method for tracking. this is the default method.
+        rk2 -> use 2nd order runge-kutta method for tracking.
+        tl -> use tensorline method for tracking.
+        sl -> use interpolated streamline method with fixed step-length""",
+        argstr="-%s")
+    step_length = traits.Float(
+        desc="""set step length, in the unit of minimum voxel size.
+        default value is 0.5 for interpolated streamline method
+        and 0.1 for other methods""",
+        argstr="-l %f")
+    angle_threshold = traits.Float(
+        desc="set angle threshold. default value is 35 degree",
+        argstr="-at %f")
+    angle_threshold_weight = traits.Float(
+        desc="set angle threshold weighting factor. weighting will be applied "
+        "on top of the angle_threshold",
+        argstr="-atw %f")
+    random_seed = traits.Int(
+        desc="use random location in a voxel instead of the center of the voxel "
+        "to seed. can also define number of seed per voxel. default is 1",
+        argstr="-rseed %d")
+    invert_x = traits.Bool(
+        desc="invert x component of the vector", argstr="-ix")
+    invert_y = traits.Bool(
+        desc="invert y component of the vector", argstr="-iy")
+    invert_z = traits.Bool(
+        desc="invert z component of the vector", argstr="-iz")
+    swap_xy = traits.Bool(
+        desc="swap x & y vectors while tracking", argstr="-sxy")
+    swap_yz = traits.Bool(
+        desc="swap y & z vectors while tracking", argstr="-syz")
+    swap_zx = traits.Bool(
+        desc="swap x & z vectors while tracking", argstr="-szx")
+    mask1_file = File(
+        desc="first mask image", mandatory=True, argstr="-m %s", position=2)
+    mask1_threshold = traits.Float(
+        desc="threshold value for the first mask image, if not given, the "
+        "program will try to automatically find the threshold",
+        position=3)
+    mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4)
+    mask2_threshold = traits.Float(
+        desc="threshold value for the second mask image, if not given, the "
+        "program will try to automatically find the threshold",
+        position=5)
+    input_data_prefix = traits.Str(
+        "dti",
+        desc="for internal naming use only",
+        position=0,
+        argstr="%s",
+        usedefault=True)
+    output_file = File(
+        "tracks.trk",
+        desc="file containing tracks",
+        argstr="%s",
+        position=1,
+        usedefault=True)
+    output_mask = File(
+        desc="output a binary mask file in analyze format", argstr="-om %s")
+    primary_vector = traits.Enum(
+        'v2',
+        'v3',
+        desc="which vector to use for fibre tracking: v2 or v3.
If not set use v1", + argstr="-%s") + + +class DTITrackerOutputSpec(TraitedSpec): + track_file = File(exists=True) + mask_file = File(exists=True) + + +class DTITracker(CommandLine): + input_spec = DTITrackerInputSpec + output_spec = DTITrackerOutputSpec + + _cmd = 'dti_tracker' + + def _run_interface(self, runtime): + _, _, ext = split_filename(self.inputs.tensor_file) + copyfile( + self.inputs.tensor_file, + os.path.abspath(self.inputs.input_data_prefix + "_tensor" + ext), + copy=False) + + return super(DTITracker, self)._run_interface(runtime) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['track_file'] = os.path.abspath(self.inputs.output_file) + if isdefined(self.inputs.output_mask) and self.inputs.output_mask: + outputs['mask_file'] = os.path.abspath(self.inputs.output_mask) + + return outputs diff --git a/nipype/interfaces/diffusion_toolkit/odf.py b/nipype/interfaces/diffusion_toolkit/odf.py new file mode 100644 index 0000000000..cf4eb683a2 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/odf.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provides interfaces to various commands provided by diffusion toolkit +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +import os +import re + +from ...utils.filemanip import fname_presuffix, split_filename, copyfile +from ..base import (TraitedSpec, File, traits, CommandLine, + CommandLineInputSpec, isdefined) + +__docformat__ = 'restructuredtext' + + +class HARDIMatInputSpec(CommandLineInputSpec): + bvecs = File( + exists=True, + desc='b vectors file', + argstr='%s', + position=1, + mandatory=True) + bvals = File(exists=True, desc='b values file', mandatory=True) + out_file = File( + "recon_mat.dat", + desc='output matrix file', + argstr='%s', + usedefault=True, + position=2) + order = traits.Int( + argstr='-order %s', + desc= + """maximum order of spherical harmonics. must be even number. default + is 4""") + odf_file = File( + exists=True, + argstr='-odf %s', + desc= + """filename that contains the reconstruction points on a HEMI-sphere. + use the pre-set 181 points by default""") + reference_file = File( + exists=True, + argstr='-ref %s', + desc= + """provide a dicom or nifti image as the reference for the program to + figure out the image orientation information. if no such info was + found in the given image header, the next 5 options -info, etc., + will be used if provided. if image orientation info can be found + in the given reference, all other 5 image orientation options will + be IGNORED""") + image_info = File( + exists=True, + argstr='-info %s', + desc="""specify image information file. the image info file is generated + from original dicom image by diff_unpack program and contains image + orientation and other information needed for reconstruction and + tracking. by default will look into the image folder for .info file""") + image_orientation_vectors = traits.List( + traits.Float(), + minlen=6, + maxlen=6, + desc="""specify image orientation vectors. if just one argument given, + will treat it as filename and read the orientation vectors from + the file. if 6 arguments are given, will treat them as 6 float + numbers and construct the 1st and 2nd vector and calculate the 3rd + one automatically. 
+        this information will be used to determine image orientation,
+        as well as to adjust gradient vectors with oblique angle when""",
+        argstr="-iop %f")
+    oblique_correction = traits.Bool(
+        desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not
+        adjust gradient accordingly, thus it requires adjustment for correct
+        diffusion tensor calculation""",
+        argstr="-oc")
+
+
+class HARDIMatOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='output matrix file')
+
+
+class HARDIMat(CommandLine):
+    """Use hardi_mat to calculate a reconstruction matrix from a gradient table
+    """
+    input_spec = HARDIMatInputSpec
+    output_spec = HARDIMatOutputSpec
+
+    _cmd = 'hardi_mat'
+
+    def _create_gradient_matrix(self, bvecs_file, bvals_file):
+        _gradient_matrix_file = 'gradient_matrix.txt'
+        bvals = [
+            val for val in re.split(r'\s+',
+                                    open(bvals_file).readline().strip())
+        ]
+        bvecs_f = open(bvecs_file)
+        bvecs_x = [val for val in re.split(r'\s+', bvecs_f.readline().strip())]
+        bvecs_y = [val for val in re.split(r'\s+', bvecs_f.readline().strip())]
+        bvecs_z = [val for val in re.split(r'\s+', bvecs_f.readline().strip())]
+        bvecs_f.close()
+        gradient_matrix_f = open(_gradient_matrix_file, 'w')
+        for i in range(len(bvals)):
+            if int(bvals[i]) == 0:
+                continue
+            gradient_matrix_f.write("%s %s %s\n" % (bvecs_x[i], bvecs_y[i],
+                                                    bvecs_z[i]))
+        gradient_matrix_f.close()
+        return _gradient_matrix_file
+
+    def _format_arg(self, name, spec, value):
+        if name == "bvecs":
+            new_val = self._create_gradient_matrix(self.inputs.bvecs,
+                                                   self.inputs.bvals)
+            return super(HARDIMat, self)._format_arg("bvecs", spec, new_val)
+        return super(HARDIMat, self)._format_arg(name, spec, value)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+
+class ODFReconInputSpec(CommandLineInputSpec):
+    DWI = File(
+        desc='Input raw data',
+        argstr='%s',
+        exists=True,
+        mandatory=True,
+        position=1)
+    n_directions = traits.Int(
+        desc='Number of directions', argstr='%s', mandatory=True, position=2)
+    n_output_directions = traits.Int(
+        desc='Number of output directions',
+        argstr='%s',
+        mandatory=True,
+        position=3)
+    out_prefix = traits.Str(
+        "odf",
+        desc='Output file prefix',
+        argstr='%s',
+        usedefault=True,
+        position=4)
+    matrix = File(
+        argstr='-mat %s',
+        exists=True,
+        desc="""use given file as reconstruction matrix.""",
+        mandatory=True)
+    n_b0 = traits.Int(
+        argstr='-b0 %s',
+        desc="""number of b0 scans. by default the program gets this information
+        from the number of directions and number of volumes in
+        the raw data. useful when dealing with incomplete raw
+        data set or only using part of raw data set to reconstruct""",
+        mandatory=True)
+    output_type = traits.Enum(
+        'nii',
+        'analyze',
+        'ni1',
+        'nii.gz',
+        argstr='-ot %s',
+        desc='output file type',
+        usedefault=True)
+    sharpness = traits.Float(
+        desc="""smooth or sharpen the raw data. factor > 0 is smoothing.
+        factor < 0 is sharpening. default value is 0
+        NOTE: this option applies to DSI study only""",
+        argstr='-s %f')
+    filter = traits.Bool(
+        desc="""apply a filter (e.g.
high pass) to the raw image""", + argstr='-f') + subtract_background = traits.Bool( + desc="""subtract the background value before reconstruction""", + argstr='-bg') + dsi = traits.Bool(desc="""indicates that the data is dsi""", argstr='-dsi') + output_entropy = traits.Bool(desc="""output entropy map""", argstr='-oe') + image_orientation_vectors = traits.List( + traits.Float(), + minlen=6, + maxlen=6, + desc="""specify image orientation vectors. if just one argument given, + will treat it as filename and read the orientation vectors from + the file. if 6 arguments are given, will treat them as 6 float + numbers and construct the 1st and 2nd vector and calculate the 3rd + one automatically. + this information will be used to determine image orientation, + as well as to adjust gradient vectors with oblique angle when""", + argstr="-iop %f") + oblique_correction = traits.Bool( + desc="""when oblique angle(s) applied, some SIEMENS dti protocols do not + adjust gradient accordingly, thus it requires adjustment for correct + diffusion tensor calculation""", + argstr="-oc") + + +class ODFReconOutputSpec(TraitedSpec): + B0 = File(exists=True) + DWI = File(exists=True) + max = File(exists=True) + ODF = File(exists=True) + entropy = File() + + +class ODFRecon(CommandLine): + """Use odf_recon to generate tensors and other maps + """ + + input_spec = ODFReconInputSpec + output_spec = ODFReconOutputSpec + + _cmd = 'odf_recon' + + def _list_outputs(self): + out_prefix = self.inputs.out_prefix + output_type = self.inputs.output_type + + outputs = self.output_spec().get() + outputs['B0'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_b0.' + output_type)) + outputs['DWI'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_dwi.' + output_type)) + outputs['max'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_max.' + output_type)) + outputs['ODF'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_odf.' + output_type)) + if isdefined(self.inputs.output_entropy): + outputs['entropy'] = os.path.abspath( + fname_presuffix( + "", prefix=out_prefix, suffix='_entropy.' + output_type)) + + return outputs + + +class ODFTrackerInputSpec(CommandLineInputSpec): + max = File(exists=True, mandatory=True) + ODF = File(exists=True, mandatory=True) + input_data_prefix = traits.Str( + "odf", + desc='recon data prefix', + argstr='%s', + usedefault=True, + position=0) + out_file = File( + "tracks.trk", + desc='output track file', + argstr='%s', + usedefault=True, + position=1) + input_output_type = traits.Enum( + 'nii', + 'analyze', + 'ni1', + 'nii.gz', + argstr='-it %s', + desc='input and output file type', + usedefault=True) + runge_kutta2 = traits.Bool( + argstr='-rk2', + desc="""use 2nd order runge-kutta method for tracking. + default tracking method is non-interpolate streamline""") + step_length = traits.Float( + argstr='-l %f', + desc="""set step length, in the unit of minimum voxel size. + default value is 0.1.""") + angle_threshold = traits.Float( + argstr='-at %f', + desc="""set angle threshold. default value is 35 degree for + default tracking method and 25 for rk2""") + random_seed = traits.Int( + argstr='-rseed %s', + desc="""use random location in a voxel instead of the center of the voxel + to seed. can also define number of seed per voxel. 
default is 1""")
+    invert_x = traits.Bool(
+        argstr='-ix', desc='invert x component of the vector')
+    invert_y = traits.Bool(
+        argstr='-iy', desc='invert y component of the vector')
+    invert_z = traits.Bool(
+        argstr='-iz', desc='invert z component of the vector')
+    swap_xy = traits.Bool(
+        argstr='-sxy', desc='swap x and y vectors while tracking')
+    swap_yz = traits.Bool(
+        argstr='-syz', desc='swap y and z vectors while tracking')
+    swap_zx = traits.Bool(
+        argstr='-szx', desc='swap x and z vectors while tracking')
+    disc = traits.Bool(argstr='-disc', desc='use disc tracking')
+    mask1_file = File(
+        desc="first mask image", mandatory=True, argstr="-m %s", position=2)
+    mask1_threshold = traits.Float(
+        desc=
+        "threshold value for the first mask image, if not given, the program will \
+        try to automatically find the threshold",
+        position=3)
+    mask2_file = File(desc="second mask image", argstr="-m2 %s", position=4)
+    mask2_threshold = traits.Float(
+        desc=
+        "threshold value for the second mask image, if not given, the program will \
+        try to automatically find the threshold",
+        position=5)
+    limit = traits.Int(
+        argstr='-limit %d',
+        desc="""in some special cases, such as heart data, a track may go into
+        an infinite loop and take a long time to stop. this option allows
+        setting a limit for the longest tracking steps (voxels)""")
+    dsi = traits.Bool(
+        argstr='-dsi',
+        desc=""" specify the input odf data is dsi. because dsi recon uses fixed
+        pre-calculated matrix, some special orientation patch needs to
+        be applied to keep dti/dsi/q-ball consistent.""")
+    image_orientation_vectors = traits.List(
+        traits.Float(),
+        minlen=6,
+        maxlen=6,
+        desc="""specify image orientation vectors. if just one argument given,
+        will treat it as filename and read the orientation vectors from
+        the file. if 6 arguments are given, will treat them as 6 float
+        numbers and construct the 1st and 2nd vector and calculate the 3rd
+        one automatically.
+        this information will be used to determine image orientation,
+        as well as to adjust gradient vectors with oblique angle when""",
+        argstr="-iop %f")
+    slice_order = traits.Int(
+        argstr='-sorder %d',
+        desc=
+        'set the slice order. 1 means normal, -1 means reversed. default value is 1'
+    )
+    voxel_order = traits.Enum(
+        'RAS',
+        'RPS',
+        'RAI',
+        'RPI',
+        'LAI',
+        'LAS',
+        'LPS',
+        'LPI',
+        argstr='-vorder %s',
+        desc=
+        """specify the voxel order in RL/AP/IS (human brain) reference. must be
+        3 letters with no space in between.
+        for example, RAS means the voxel row is from L->R, the column
+        is from P->A and the slice order is from I->S.
+        by default voxel order is determined by the image orientation
+        (but NOT guaranteed to be correct because of various standards).
+        for example, siemens axial image is LPS, coronal image is LIP and
+        sagittal image is PIL.
this information is also NOT needed for tracking but will be saved
+        in the track file and is essential for track display to map onto
+        the right coordinates""")
+
+
+class ODFTrackerOutputSpec(TraitedSpec):
+    track_file = File(exists=True, desc='output track file')
+
+
+class ODFTracker(CommandLine):
+    """Use odf_tracker to generate track file
+    """
+
+    input_spec = ODFTrackerInputSpec
+    output_spec = ODFTrackerOutputSpec
+
+    _cmd = 'odf_tracker'
+
+    def _run_interface(self, runtime):
+        _, _, ext = split_filename(self.inputs.max)
+        copyfile(
+            self.inputs.max,
+            os.path.abspath(self.inputs.input_data_prefix + "_max" + ext),
+            copy=False)
+
+        _, _, ext = split_filename(self.inputs.ODF)
+        copyfile(
+            self.inputs.ODF,
+            os.path.abspath(self.inputs.input_data_prefix + "_odf" + ext),
+            copy=False)
+
+        return super(ODFTracker, self)._run_interface(runtime)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['track_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
diff --git a/nipype/interfaces/diffusion_toolkit/postproc.py b/nipype/interfaces/diffusion_toolkit/postproc.py
new file mode 100644
index 0000000000..20aaeea927
--- /dev/null
+++ b/nipype/interfaces/diffusion_toolkit/postproc.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Provides interfaces to various commands provided by diffusion toolkit
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+from ..base import (TraitedSpec, File, traits, CommandLine, InputMultiPath,
+                    CommandLineInputSpec)
+
+__docformat__ = 'restructuredtext'
+
+
+class SplineFilterInputSpec(CommandLineInputSpec):
+    track_file = File(
+        exists=True,
+        desc="file containing tracks to be filtered",
+        position=0,
+        argstr="%s",
+        mandatory=True)
+    step_length = traits.Float(
+        desc="in the unit of minimum voxel size",
+        position=1,
+        argstr="%f",
+        mandatory=True)
+    output_file = File(
+        "spline_tracks.trk",
+        desc="target file for smoothed tracks",
+        position=2,
+        argstr="%s",
+        usedefault=True)
+
+
+class SplineFilterOutputSpec(TraitedSpec):
+    smoothed_track_file = File(exists=True)
+
+
+class SplineFilter(CommandLine):
+    """
+    Smoothes TrackVis track files with a B-Spline filter.
+
+    Helps remove redundant track points and segments
+    (thus reducing the size of the track file) and also
+    makes tracks nicely smoothed. It will NOT change the
+    quality of the tracks or lose any original information.
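+
+    In a pipeline, the filter is typically chained straight after one of the
+    trackers above. A minimal sketch (node names and file names are
+    illustrative only)::
+
+        from nipype import Node, Workflow
+        import nipype.interfaces.diffusion_toolkit as dtk
+
+        track = Node(dtk.DTITracker(mask1_file='mask.nii'), name='track')
+        smooth = Node(dtk.SplineFilter(step_length=0.5), name='smooth')
+        wf = Workflow(name='dtk_tracking')
+        # the tracker's track_file output feeds the filter's track_file input
+        wf.connect(track, 'track_file', smooth, 'track_file')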
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.diffusion_toolkit as dtk
+    >>> filt = dtk.SplineFilter()
+    >>> filt.inputs.track_file = 'tracks.trk'
+    >>> filt.inputs.step_length = 0.5
+    >>> filt.run() # doctest: +SKIP
+    """
+    input_spec = SplineFilterInputSpec
+    output_spec = SplineFilterOutputSpec
+
+    _cmd = "spline_filter"
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['smoothed_track_file'] = os.path.abspath(
+            self.inputs.output_file)
+        return outputs
+
+
+class TrackMergeInputSpec(CommandLineInputSpec):
+    track_files = InputMultiPath(
+        File(exists=True),
+        desc="files containing tracks to be merged",
+        position=0,
+        argstr="%s...",
+        mandatory=True)
+    output_file = File(
+        "merged_tracks.trk",
+        desc="target file for merged tracks",
+        position=-1,
+        argstr="%s",
+        usedefault=True)
+
+
+class TrackMergeOutputSpec(TraitedSpec):
+    track_file = File(exists=True)
+
+
+class TrackMerge(CommandLine):
+    """
+    Merges several TrackVis track files into a single track
+    file.
+
+    An id type property tag is added to each track in the
+    newly merged file, with each unique id representing where
+    the track was originally from. When the merged file is
+    loaded in TrackVis, a property filter will show up in
+    the Track Property panel. Users can adjust that to distinguish
+    and sub-group tracks by their id (origin).
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.diffusion_toolkit as dtk
+    >>> mrg = dtk.TrackMerge()
+    >>> mrg.inputs.track_files = ['track1.trk','track2.trk']
+    >>> mrg.run() # doctest: +SKIP
+    """
+    input_spec = TrackMergeInputSpec
+    output_spec = TrackMergeOutputSpec
+
+    _cmd = "track_merge"
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['track_file'] = os.path.abspath(self.inputs.output_file)
+        return outputs
diff --git a/nipype/interfaces/diffusion_toolkit/tests/__init__.py b/nipype/interfaces/diffusion_toolkit/tests/__init__.py
new file mode 100644
index 0000000000..40a96afc6f
--- /dev/null
+++ b/nipype/interfaces/diffusion_toolkit/tests/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py
new file mode 100644
index 0000000000..a39dbf6c3b
--- /dev/null
+++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTIRecon.py
@@ -0,0 +1,61 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..dti import DTIRecon
+
+
+def test_DTIRecon_inputs():
+    input_map = dict(
+        DWI=dict(
+            argstr='%s',
+            mandatory=True,
+            position=1,
+        ),
+        args=dict(argstr='%s', ),
+        b0_threshold=dict(argstr='-b0_th', ),
+        bvals=dict(mandatory=True, ),
+        bvecs=dict(
+            argstr='-gm %s',
+            mandatory=True,
+        ),
+        environ=dict(
+            nohash=True,
+            usedefault=True,
+        ),
+        image_orientation_vectors=dict(argstr='-iop %f', ),
+        n_averages=dict(argstr='-nex %s', ),
+        oblique_correction=dict(argstr='-oc', ),
+        out_prefix=dict(
+            argstr='%s',
+            position=2,
+            usedefault=True,
+        ),
+        output_type=dict(
+            argstr='-ot %s',
+            usedefault=True,
+        ),
+    )
+    inputs = DTIRecon.input_spec()
+
+    for key, metadata in list(input_map.items()):
+        for metakey, value in list(metadata.items()):
+            assert getattr(inputs.traits()[key], metakey) == value
+def test_DTIRecon_outputs():
+    output_map = dict(
+        ADC=dict(),
+        B0=dict(),
+        FA=dict(),
+        FA_color=dict(),
+        L1=dict(),
+        L2=dict(),
+        L3=dict(),
+        V1=dict(),
+        V2=dict(),
+        V3=dict(),
+        exp=dict(),
+        tensor=dict(),
+    )
+    outputs =
DTIRecon.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py new file mode 100644 index 0000000000..cf483d00d0 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_DTITracker.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DTITracker + + +def test_DTITracker_inputs(): + input_map = dict( + angle_threshold=dict(argstr='-at %f', ), + angle_threshold_weight=dict(argstr='-atw %f', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_data_prefix=dict( + argstr='%s', + position=0, + usedefault=True, + ), + input_type=dict(argstr='-it %s', ), + invert_x=dict(argstr='-ix', ), + invert_y=dict(argstr='-iy', ), + invert_z=dict(argstr='-iz', ), + mask1_file=dict( + argstr='-m %s', + mandatory=True, + position=2, + ), + mask1_threshold=dict(position=3, ), + mask2_file=dict( + argstr='-m2 %s', + position=4, + ), + mask2_threshold=dict(position=5, ), + output_file=dict( + argstr='%s', + position=1, + usedefault=True, + ), + output_mask=dict(argstr='-om %s', ), + primary_vector=dict(argstr='-%s', ), + random_seed=dict(argstr='-rseed %d', ), + step_length=dict(argstr='-l %f', ), + swap_xy=dict(argstr='-sxy', ), + swap_yz=dict(argstr='-syz', ), + swap_zx=dict(argstr='-szx', ), + tensor_file=dict(), + tracking_method=dict(argstr='-%s', ), + ) + inputs = DTITracker.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTITracker_outputs(): + output_map = dict( + mask_file=dict(), + track_file=dict(), + ) + outputs = DTITracker.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py new file mode 100644 index 0000000000..59bc8c25a5 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_HARDIMat.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import HARDIMat + + +def test_HARDIMat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bvals=dict(mandatory=True, ), + bvecs=dict( + argstr='%s', + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_info=dict(argstr='-info %s', ), + image_orientation_vectors=dict(argstr='-iop %f', ), + oblique_correction=dict(argstr='-oc', ), + odf_file=dict(argstr='-odf %s', ), + order=dict(argstr='-order %s', ), + out_file=dict( + argstr='%s', + position=2, + usedefault=True, + ), + reference_file=dict(argstr='-ref %s', ), + ) + inputs = HARDIMat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_HARDIMat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = HARDIMat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], 
metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py new file mode 100644 index 0000000000..1e66b93bec --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFRecon.py @@ -0,0 +1,70 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import ODFRecon + + +def test_ODFRecon_inputs(): + input_map = dict( + DWI=dict( + argstr='%s', + mandatory=True, + position=1, + ), + args=dict(argstr='%s', ), + dsi=dict(argstr='-dsi', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter=dict(argstr='-f', ), + image_orientation_vectors=dict(argstr='-iop %f', ), + matrix=dict( + argstr='-mat %s', + mandatory=True, + ), + n_b0=dict( + argstr='-b0 %s', + mandatory=True, + ), + n_directions=dict( + argstr='%s', + mandatory=True, + position=2, + ), + n_output_directions=dict( + argstr='%s', + mandatory=True, + position=3, + ), + oblique_correction=dict(argstr='-oc', ), + out_prefix=dict( + argstr='%s', + position=4, + usedefault=True, + ), + output_entropy=dict(argstr='-oe', ), + output_type=dict( + argstr='-ot %s', + usedefault=True, + ), + sharpness=dict(argstr='-s %f', ), + subtract_background=dict(argstr='-bg', ), + ) + inputs = ODFRecon.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ODFRecon_outputs(): + output_map = dict( + B0=dict(), + DWI=dict(), + ODF=dict(), + entropy=dict(), + max=dict(), + ) + outputs = ODFRecon.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py new file mode 100644 index 0000000000..41b2d530f6 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_ODFTracker.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..odf import ODFTracker + + +def test_ODFTracker_inputs(): + input_map = dict( + ODF=dict(mandatory=True, ), + angle_threshold=dict(argstr='-at %f', ), + args=dict(argstr='%s', ), + disc=dict(argstr='-disc', ), + dsi=dict(argstr='-dsi', ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_orientation_vectors=dict(argstr='-iop %f', ), + input_data_prefix=dict( + argstr='%s', + position=0, + usedefault=True, + ), + input_output_type=dict( + argstr='-it %s', + usedefault=True, + ), + invert_x=dict(argstr='-ix', ), + invert_y=dict(argstr='-iy', ), + invert_z=dict(argstr='-iz', ), + limit=dict(argstr='-limit %d', ), + mask1_file=dict( + argstr='-m %s', + mandatory=True, + position=2, + ), + mask1_threshold=dict(position=3, ), + mask2_file=dict( + argstr='-m2 %s', + position=4, + ), + mask2_threshold=dict(position=5, ), + max=dict(mandatory=True, ), + out_file=dict( + argstr='%s', + position=1, + usedefault=True, + ), + random_seed=dict(argstr='-rseed %s', ), + runge_kutta2=dict(argstr='-rk2', ), + slice_order=dict(argstr='-sorder %d', ), + step_length=dict(argstr='-l %f', ), + swap_xy=dict(argstr='-sxy', ), + swap_yz=dict(argstr='-syz', ), + swap_zx=dict(argstr='-szx', ), + voxel_order=dict(argstr='-vorder %s', ), + ) + inputs = ODFTracker.input_spec() + + for key, metadata in 
list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ODFTracker_outputs(): + output_map = dict(track_file=dict(), ) + outputs = ODFTracker.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py new file mode 100644 index 0000000000..8648a1f1f9 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_SplineFilter.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..postproc import SplineFilter + + +def test_SplineFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr='%s', + position=2, + usedefault=True, + ), + step_length=dict( + argstr='%f', + mandatory=True, + position=1, + ), + track_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + ) + inputs = SplineFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SplineFilter_outputs(): + output_map = dict(smoothed_track_file=dict(), ) + outputs = SplineFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py new file mode 100644 index 0000000000..b004678175 --- /dev/null +++ b/nipype/interfaces/diffusion_toolkit/tests/test_auto_TrackMerge.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..postproc import TrackMerge + + +def test_TrackMerge_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_file=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + track_files=dict( + argstr='%s...', + mandatory=True, + position=0, + ), + ) + inputs = TrackMerge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackMerge_outputs(): + output_map = dict(track_file=dict(), ) + outputs = TrackMerge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/__init__.py b/nipype/interfaces/dipy/__init__.py new file mode 100644 index 0000000000..1bd5dcb217 --- /dev/null +++ b/nipype/interfaces/dipy/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +from .tracks import StreamlineTractography, TrackDensityMap +from .tensors import TensorMode, DTI +from .preprocess import Resample, Denoise +from .reconstruction import RESTORE, EstimateResponseSH, CSD +from .simulate import SimulateMultiTensor +from .anisotropic_power import APMQball diff --git a/nipype/interfaces/dipy/anisotropic_power.py b/nipype/interfaces/dipy/anisotropic_power.py new file mode 100644 index 0000000000..e28ae2bd19 --- /dev/null +++ 
b/nipype/interfaces/dipy/anisotropic_power.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import nibabel as nb + +from ... import logging +from ..base import TraitedSpec, File, isdefined +from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec + +IFLOGGER = logging.getLogger('nipype.interface') + + +class APMQballInputSpec(DipyBaseInterfaceInputSpec): + mask_file = File(exists=True, desc='An optional brain mask') + + +class APMQballOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class APMQball(DipyDiffusionInterface): + """ + Calculates the anisotropic power map + + Example + ------- + + >>> import nipype.interfaces.dipy as dipy + >>> apm = dipy.APMQball() + >>> apm.inputs.in_file = 'diffusion.nii' + >>> apm.inputs.in_bvec = 'bvecs' + >>> apm.inputs.in_bval = 'bvals' + >>> apm.run() # doctest: +SKIP + """ + input_spec = APMQballInputSpec + output_spec = APMQballOutputSpec + + def _run_interface(self, runtime): + from dipy.reconst import shm + from dipy.data import get_sphere + from dipy.reconst.peaks import peaks_from_model + + gtab = self._get_gradient_table() + + img = nb.load(self.inputs.in_file) + data = img.get_data() + affine = img.affine + mask = None + if isdefined(self.inputs.mask_file): + mask = nb.load(self.inputs.mask_file).get_data() + + # Fit it + model = shm.QballModel(gtab, 8) + sphere = get_sphere('symmetric724') + peaks = peaks_from_model( + model=model, + data=data, + relative_peak_threshold=.5, + min_separation_angle=25, + sphere=sphere, + mask=mask) + apm = shm.anisotropic_power(peaks.shm_coeff) + out_file = self._gen_filename('apm') + nb.Nifti1Image(apm.astype("float32"), affine).to_filename(out_file) + IFLOGGER.info('APM qball image saved as %s', out_file) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._gen_filename('apm') + + return outputs diff --git a/nipype/interfaces/dipy/base.py b/nipype/interfaces/dipy/base.py new file mode 100644 index 0000000000..7a9221e3d1 --- /dev/null +++ b/nipype/interfaces/dipy/base.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +""" Base interfaces for dipy """ +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op +import numpy as np +from ... 
import logging +from ..base import (traits, File, isdefined, LibraryBaseInterface, + BaseInterfaceInputSpec) + +HAVE_DIPY = True +try: + import dipy +except ImportError: + HAVE_DIPY = False + + +def no_dipy(): + """ Check if dipy is available """ + global HAVE_DIPY + return not HAVE_DIPY + + +def dipy_version(): + """ Check dipy version """ + if no_dipy(): + return None + + return dipy.__version__ + + +class DipyBaseInterface(LibraryBaseInterface): + """ + A base interface for py:mod:`dipy` computations + """ + _pkg = 'dipy' + + +class DipyBaseInterfaceInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) + in_bval = File(exists=True, mandatory=True, desc=('input b-values table')) + in_bvec = File(exists=True, mandatory=True, desc=('input b-vectors table')) + b0_thres = traits.Int(700, usedefault=True, desc=('b0 threshold')) + out_prefix = traits.Str(desc=('output prefix for file names')) + + +class DipyDiffusionInterface(DipyBaseInterface): + """ + A base interface for py:mod:`dipy` computations + """ + input_spec = DipyBaseInterfaceInputSpec + + def _get_gradient_table(self): + bval = np.loadtxt(self.inputs.in_bval) + bvec = np.loadtxt(self.inputs.in_bvec).T + from dipy.core.gradients import gradient_table + gtab = gradient_table(bval, bvec) + + gtab.b0_threshold = self.inputs.b0_thres + return gtab + + def _gen_filename(self, name, ext=None): + fname, fext = op.splitext(op.basename(self.inputs.in_file)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + + if not isdefined(self.inputs.out_prefix): + out_prefix = op.abspath(fname) + else: + out_prefix = self.inputs.out_prefix + + if ext is None: + ext = fext + + return out_prefix + '_' + name + ext diff --git a/nipype/interfaces/dipy/preprocess.py b/nipype/interfaces/dipy/preprocess.py new file mode 100644 index 0000000000..cc589f6579 --- /dev/null +++ b/nipype/interfaces/dipy/preprocess.py @@ -0,0 +1,318 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op +import nibabel as nb +import numpy as np + +from ...utils import NUMPY_MMAP + +from ... import logging +from ..base import (traits, TraitedSpec, File, isdefined) +from .base import DipyBaseInterface + +IFLOGGER = logging.getLogger('nipype.interface') + + +class ResampleInputSpec(TraitedSpec): + in_file = File( + exists=True, + mandatory=True, + desc='The input 4D diffusion-weighted image file') + vox_size = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + desc=('specify the new voxel zooms. If no vox_size' + ' is set, then isotropic regridding will ' + 'be performed, with spacing equal to the ' + 'smallest current zoom.')) + interp = traits.Int( + 1, + mandatory=True, + usedefault=True, + desc=('order of the interpolator (0 = nearest, 1 = linear, etc.')) + + +class ResampleOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class Resample(DipyBaseInterface): + """ + An interface to reslicing diffusion datasets. + See + http://nipy.org/dipy/examples_built/reslice_datasets.html#example-reslice-datasets. 
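+
+    If ``vox_size`` is left undefined, the image is regridded to isotropic
+    voxels matching the smallest current zoom; an explicit spacing can be
+    requested instead (the value below is illustrative only)::
+
+        import nipype.interfaces.dipy as dipy
+        reslice = dipy.Resample(vox_size=(2., 2., 2.))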
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.dipy as dipy
+    >>> reslice = dipy.Resample()
+    >>> reslice.inputs.in_file = 'diffusion.nii'
+    >>> reslice.run() # doctest: +SKIP
+    """
+    input_spec = ResampleInputSpec
+    output_spec = ResampleOutputSpec
+
+    def _run_interface(self, runtime):
+        order = self.inputs.interp
+        vox_size = None
+
+        if isdefined(self.inputs.vox_size):
+            vox_size = self.inputs.vox_size
+
+        out_file = op.abspath(self._gen_outfilename())
+        resample_proxy(
+            self.inputs.in_file,
+            order=order,
+            new_zooms=vox_size,
+            out_file=out_file)
+
+        IFLOGGER.info('Resliced image saved as %s', out_file)
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_file'] = op.abspath(self._gen_outfilename())
+        return outputs
+
+    def _gen_outfilename(self):
+        fname, fext = op.splitext(op.basename(self.inputs.in_file))
+        if fext == '.gz':
+            fname, fext2 = op.splitext(fname)
+            fext = fext2 + fext
+        return op.abspath('%s_reslice%s' % (fname, fext))
+
+
+class DenoiseInputSpec(TraitedSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc='The input 4D diffusion-weighted image file')
+    in_mask = File(exists=True, desc='brain mask')
+    noise_model = traits.Enum(
+        'rician',
+        'gaussian',
+        mandatory=True,
+        usedefault=True,
+        desc=('noise distribution model'))
+    signal_mask = File(
+        desc=('mask in which the mean signal '
+              'will be computed'),
+        exists=True)
+    noise_mask = File(
+        desc=('mask in which the standard deviation of noise '
+              'will be computed'),
+        exists=True)
+    patch_radius = traits.Int(1, usedefault=True, desc='patch radius')
+    block_radius = traits.Int(5, usedefault=True, desc='block_radius')
+    snr = traits.Float(desc='manually set an SNR')
+
+
+class DenoiseOutputSpec(TraitedSpec):
+    out_file = File(exists=True)
+
+
+class Denoise(DipyBaseInterface):
+    """
+    An interface for denoising diffusion datasets [Coupe2008]_.
+    See
+    http://nipy.org/dipy/examples_built/denoise_nlmeans.html#example-denoise-nlmeans.
+
+    .. [Coupe2008] Coupe P et al., An Optimized Blockwise Non Local Means
+      Denoising Filter for 3D Magnetic Resonance Images,
+      IEEE Transactions on Medical Imaging, 27(4):425-441, 2008.
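+
+    The noise model defaults to ``rician``; the masks and an explicit SNR
+    are optional. A sketch of a non-default configuration (file names are
+    placeholders)::
+
+        import nipype.interfaces.dipy as dipy
+
+        denoise = dipy.Denoise()
+        denoise.inputs.in_file = 'diffusion.nii'
+        denoise.inputs.noise_model = 'gaussian'
+        denoise.inputs.in_mask = 'brainmask.nii'  # brain mask passed to nlmeans
+        denoise.inputs.snr = 25.0                 # skips automatic SNR estimation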
+ + + Example + ------- + + >>> import nipype.interfaces.dipy as dipy + >>> denoise = dipy.Denoise() + >>> denoise.inputs.in_file = 'diffusion.nii' + >>> denoise.run() # doctest: +SKIP + """ + input_spec = DenoiseInputSpec + output_spec = DenoiseOutputSpec + + def _run_interface(self, runtime): + out_file = op.abspath(self._gen_outfilename()) + + settings = dict( + mask=None, rician=(self.inputs.noise_model == 'rician')) + + if isdefined(self.inputs.in_mask): + settings['mask'] = nb.load(self.inputs.in_mask).get_data() + + if isdefined(self.inputs.patch_radius): + settings['patch_radius'] = self.inputs.patch_radius + + if isdefined(self.inputs.block_radius): + settings['block_radius'] = self.inputs.block_radius + + snr = None + if isdefined(self.inputs.snr): + snr = self.inputs.snr + + signal_mask = None + if isdefined(self.inputs.signal_mask): + signal_mask = nb.load(self.inputs.signal_mask).get_data() + noise_mask = None + if isdefined(self.inputs.noise_mask): + noise_mask = nb.load(self.inputs.noise_mask).get_data() + + _, s = nlmeans_proxy( + self.inputs.in_file, + settings, + snr=snr, + smask=signal_mask, + nmask=noise_mask, + out_file=out_file) + IFLOGGER.info('Denoised image saved as %s, estimated SNR=%s', out_file, + str(s)) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = op.abspath(self._gen_outfilename()) + return outputs + + def _gen_outfilename(self): + fname, fext = op.splitext(op.basename(self.inputs.in_file)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + return op.abspath('%s_denoise%s' % (fname, fext)) + + +def resample_proxy(in_file, order=3, new_zooms=None, out_file=None): + """ + Performs regridding of an image to set isotropic voxel sizes using dipy. 
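+
+    When ``new_zooms`` is None, the target spacing falls back to the smallest
+    current zoom on all three axes, so a volume with zooms (2.0, 2.0, 3.6)
+    is regridded to (2.0, 2.0, 2.0); if the zooms already match the target,
+    the input file is returned untouched.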
+ """ + from dipy.align.reslice import reslice + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + out_file = op.abspath('./%s_reslice%s' % (fname, fext)) + + img = nb.load(in_file, mmap=NUMPY_MMAP) + hdr = img.header.copy() + data = img.get_data().astype(np.float32) + affine = img.affine + im_zooms = hdr.get_zooms()[:3] + + if new_zooms is None: + minzoom = np.array(im_zooms).min() + new_zooms = tuple(np.ones((3, )) * minzoom) + + if np.all(im_zooms == new_zooms): + return in_file + + data2, affine2 = reslice(data, affine, im_zooms, new_zooms, order=order) + tmp_zooms = np.array(hdr.get_zooms()) + tmp_zooms[:3] = new_zooms[0] + hdr.set_zooms(tuple(tmp_zooms)) + hdr.set_data_shape(data2.shape) + hdr.set_xyzt_units('mm') + nb.Nifti1Image(data2.astype(hdr.get_data_dtype()), affine2, + hdr).to_filename(out_file) + return out_file, new_zooms + + +def nlmeans_proxy(in_file, + settings, + snr=None, + smask=None, + nmask=None, + out_file=None): + """ + Uses non-local means to denoise 4D datasets + """ + from dipy.denoise.nlmeans import nlmeans + from scipy.ndimage.morphology import binary_erosion + from scipy import ndimage + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + out_file = op.abspath('./%s_denoise%s' % (fname, fext)) + + img = nb.load(in_file, mmap=NUMPY_MMAP) + hdr = img.header + data = img.get_data() + aff = img.affine + + if data.ndim < 4: + data = data[..., np.newaxis] + + data = np.nan_to_num(data) + + if data.max() < 1.0e-4: + raise RuntimeError('There is no signal in the image') + + df = 1.0 + if data.max() < 1000.0: + df = 1000. / data.max() + data *= df + + b0 = data[..., 0] + + if smask is None: + smask = np.zeros_like(b0) + smask[b0 > np.percentile(b0, 85.)] = 1 + + smask = binary_erosion( + smask.astype(np.uint8), iterations=2).astype(np.uint8) + + if nmask is None: + nmask = np.ones_like(b0, dtype=np.uint8) + bmask = settings['mask'] + if bmask is None: + bmask = np.zeros_like(b0) + bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1 + label_im, nb_labels = ndimage.label(bmask) + sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1)) + maxidx = np.argmax(sizes) + bmask = np.zeros_like(b0, dtype=np.uint8) + bmask[label_im == maxidx] = 1 + nmask[bmask > 0] = 0 + else: + nmask = np.squeeze(nmask) + nmask[nmask > 0.0] = 1 + nmask[nmask < 1] = 0 + nmask = nmask.astype(bool) + + nmask = binary_erosion(nmask, iterations=1).astype(np.uint8) + + den = np.zeros_like(data) + + est_snr = True + if snr is not None: + snr = [snr] * data.shape[-1] + est_snr = False + else: + snr = [] + + for i in range(data.shape[-1]): + d = data[..., i] + if est_snr: + s = np.mean(d[smask > 0]) + n = np.std(d[nmask > 0]) + snr.append(s / n) + + den[..., i] = nlmeans(d, snr[i], **settings) + + den = np.squeeze(den) + den /= df + + nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff, + hdr).to_filename(out_file) + return out_file, snr diff --git a/nipype/interfaces/dipy/reconstruction.py b/nipype/interfaces/dipy/reconstruction.py new file mode 100644 index 0000000000..26a7014f79 --- /dev/null +++ b/nipype/interfaces/dipy/reconstruction.py @@ -0,0 +1,373 @@ +# -*- coding: utf-8 -*- +""" +Interfaces to the reconstruction algorithms in dipy + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from future import standard_library +standard_library.install_aliases() 
+from builtins import str, open + +import os.path as op + +import numpy as np +import nibabel as nb + +from ... import logging +from ..base import TraitedSpec, File, traits, isdefined +from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec + +IFLOGGER = logging.getLogger('nipype.interface') + + +class RESTOREInputSpec(DipyBaseInterfaceInputSpec): + in_mask = File(exists=True, desc=('input mask in which compute tensors')) + noise_mask = File( + exists=True, desc=('input mask in which compute noise variance')) + + +class RESTOREOutputSpec(TraitedSpec): + fa = File(desc='output fractional anisotropy (FA) map computed from ' + 'the fitted DTI') + md = File(desc='output mean diffusivity (MD) map computed from the ' + 'fitted DTI') + rd = File(desc='output radial diffusivity (RD) map computed from ' + 'the fitted DTI') + mode = File(desc=('output mode (MO) map computed from the fitted DTI')) + trace = File( + desc=('output the tensor trace map computed from the ' + 'fitted DTI')) + evals = File(desc=('output the eigenvalues of the fitted DTI')) + evecs = File(desc=('output the eigenvectors of the fitted DTI')) + + +class RESTORE(DipyDiffusionInterface): + """ + Uses RESTORE [Chang2005]_ to perform DTI fitting with outlier detection. + The interface uses :py:mod:`dipy`, as explained in `dipy's documentation`_. + + .. [Chang2005] Chang, LC, Jones, DK and Pierpaoli, C. RESTORE: robust \ + estimation of tensors by outlier rejection. MRM, 53:1088-95, (2005). + + .. _dipy's documentation: \ + http://nipy.org/dipy/examples_built/restore_dti.html + + + Example + ------- + + >>> from nipype.interfaces import dipy as ndp + >>> dti = ndp.RESTORE() + >>> dti.inputs.in_file = '4d_dwi.nii' + >>> dti.inputs.in_bval = 'bvals' + >>> dti.inputs.in_bvec = 'bvecs' + >>> res = dti.run() # doctest: +SKIP + + + """ + input_spec = RESTOREInputSpec + output_spec = RESTOREOutputSpec + + def _run_interface(self, runtime): + from scipy.special import gamma + from dipy.reconst.dti import TensorModel + import gc + + img = nb.load(self.inputs.in_file) + hdr = img.header.copy() + affine = img.affine + data = img.get_data() + gtab = self._get_gradient_table() + + if isdefined(self.inputs.in_mask): + msk = nb.load(self.inputs.in_mask).get_data().astype(np.uint8) + else: + msk = np.ones(data.shape[:3], dtype=np.uint8) + + try_b0 = True + if isdefined(self.inputs.noise_mask): + noise_msk = nb.load(self.inputs.noise_mask).get_data().reshape(-1) + noise_msk[noise_msk > 0.5] = 1 + noise_msk[noise_msk < 1.0] = 0 + noise_msk = noise_msk.astype(np.uint8) + try_b0 = False + elif np.all(data[msk == 0, 0] == 0): + IFLOGGER.info('Input data are masked.') + noise_msk = msk.reshape(-1).astype(np.uint8) + else: + noise_msk = (1 - msk).reshape(-1).astype(np.uint8) + + nb0 = np.sum(gtab.b0s_mask) + dsample = data.reshape(-1, data.shape[-1]) + + if try_b0 and (nb0 > 1): + noise_data = dsample.take( + np.where(gtab.b0s_mask), axis=-1)[noise_msk == 0, ...] + n = nb0 + else: + nodiff = np.where(~gtab.b0s_mask) + nodiffidx = nodiff[0].tolist() + n = 20 if len(nodiffidx) >= 20 else len(nodiffidx) + idxs = np.random.choice(nodiffidx, size=n, replace=False) + noise_data = dsample.take(idxs, axis=-1)[noise_msk == 1, ...] + + # Estimate sigma required by RESTORE + mean_std = np.median(noise_data.std(-1)) + try: + bias = (1. - np.sqrt(2. / (n - 1)) * (gamma(n / 2.) 
/ gamma(
+                (n - 1) / 2.)))
+        except Exception:
+            bias = .0
+
+        sigma = mean_std * (1 + bias)
+
+        if sigma == 0:
+            IFLOGGER.warn('Noise std is 0.0, looks like data was masked and '
+                          'noise cannot be estimated correctly. Using default '
+                          'tensor model instead of RESTORE.')
+            dti = TensorModel(gtab)
+        else:
+            IFLOGGER.info('Performing RESTORE with noise std=%.4f.', sigma)
+            dti = TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
+
+        try:
+            fit_restore = dti.fit(data, msk)
+        except TypeError:
+            dti = TensorModel(gtab)
+            fit_restore = dti.fit(data, msk)
+
+        hdr.set_data_dtype(np.float32)
+        hdr['data_type'] = 16
+
+        for k in self._outputs().get():
+            scalar = getattr(fit_restore, k)
+            hdr.set_data_shape(np.shape(scalar))
+            nb.Nifti1Image(scalar.astype(np.float32), affine, hdr).to_filename(
+                self._gen_filename(k))
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        for k in list(outputs.keys()):
+            outputs[k] = self._gen_filename(k)
+        return outputs
+
+
+class EstimateResponseSHInputSpec(DipyBaseInterfaceInputSpec):
+    in_evals = File(
+        exists=True, mandatory=True, desc=('input eigenvalues file'))
+    in_mask = File(
+        exists=True, desc=('input mask in which we find single fibers'))
+    fa_thresh = traits.Float(0.7, usedefault=True, desc=('FA threshold'))
+    roi_radius = traits.Int(
+        10, usedefault=True, desc=('ROI radius to be used in auto_response'))
+    auto = traits.Bool(
+        xor=['recursive'], desc='use the auto_response estimator from dipy')
+    recursive = traits.Bool(
+        xor=['auto'], desc='use the recursive response estimator from dipy')
+    response = File(
+        'response.txt', usedefault=True, desc=('the output response file'))
+    out_mask = File('wm_mask.nii.gz', usedefault=True, desc='computed wm mask')
+
+
+class EstimateResponseSHOutputSpec(TraitedSpec):
+    response = File(exists=True, desc=('the response file'))
+    out_mask = File(exists=True, desc=('output wm mask'))
+
+
+class EstimateResponseSH(DipyDiffusionInterface):
+    """
+    Uses dipy to compute the single fiber response to be used in spherical
+    deconvolution methods, in a similar way to MRTrix's command
+    ``estimate_response``.
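+
+    Exactly one estimation strategy applies per run: ``auto`` and
+    ``recursive`` are mutually exclusive (declared ``xor`` above), and when
+    neither is set the response is derived directly from the eigenvalues of
+    voxels whose FA exceeds ``fa_thresh``. A sketch of the auto variant
+    (file names are placeholders; the thresholds restate the defaults)::
+
+        from nipype.interfaces import dipy as ndp
+
+        resp = ndp.EstimateResponseSH()
+        resp.inputs.in_file = '4d_dwi.nii'
+        resp.inputs.in_bval = 'bvals'
+        resp.inputs.in_bvec = 'bvecs'
+        resp.inputs.in_evals = 'dwi_evals.nii'
+        resp.inputs.auto = True
+        resp.inputs.fa_thresh = 0.7
+        resp.inputs.roi_radius = 10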
+
+
+    Example
+    -------
+
+    >>> from nipype.interfaces import dipy as ndp
+    >>> dti = ndp.EstimateResponseSH()
+    >>> dti.inputs.in_file = '4d_dwi.nii'
+    >>> dti.inputs.in_bval = 'bvals'
+    >>> dti.inputs.in_bvec = 'bvecs'
+    >>> dti.inputs.in_evals = 'dwi_evals.nii'
+    >>> res = dti.run() # doctest: +SKIP
+
+
+    """
+    input_spec = EstimateResponseSHInputSpec
+    output_spec = EstimateResponseSHOutputSpec
+
+    def _run_interface(self, runtime):
+        from dipy.core.gradients import GradientTable
+        from dipy.reconst.dti import fractional_anisotropy, mean_diffusivity
+        from dipy.reconst.csdeconv import recursive_response, auto_response
+
+        img = nb.load(self.inputs.in_file)
+        imref = nb.four_to_three(img)[0]
+        affine = img.affine
+
+        if isdefined(self.inputs.in_mask):
+            msk = nb.load(self.inputs.in_mask).get_data()
+            msk[msk > 0] = 1
+            msk[msk < 0] = 0
+        else:
+            msk = np.ones(imref.shape)
+
+        data = img.get_data().astype(np.float32)
+        gtab = self._get_gradient_table()
+
+        evals = np.nan_to_num(nb.load(self.inputs.in_evals).get_data())
+        FA = np.nan_to_num(fractional_anisotropy(evals)) * msk
+        indices = np.where(FA > self.inputs.fa_thresh)
+        S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]
+        S0 = np.mean(S0s)
+
+        if self.inputs.auto:
+            response, ratio = auto_response(
+                gtab,
+                data,
+                roi_radius=self.inputs.roi_radius,
+                fa_thr=self.inputs.fa_thresh)
+            response = response[0].tolist() + [S0]
+        elif self.inputs.recursive:
+            MD = np.nan_to_num(mean_diffusivity(evals)) * msk
+            indices = np.logical_or(FA >= 0.4,
+                                    (np.logical_and(FA >= 0.15, MD >= 0.0011)))
+            data = nb.load(self.inputs.in_file).get_data()
+            response = recursive_response(
+                gtab,
+                data,
+                mask=indices,
+                sh_order=8,
+                peak_thr=0.01,
+                init_fa=0.08,
+                init_trace=0.0021,
+                iter=8,
+                convergence=0.001,
+                parallel=True)
+            ratio = abs(response[1] / response[0])
+        else:
+            lambdas = evals[indices]
+            l01 = np.sort(np.mean(lambdas, axis=0))
+
+            response = np.array([l01[-1], l01[-2], l01[-2], S0])
+            ratio = abs(response[1] / response[0])
+
+        if ratio > 0.25:
+            IFLOGGER.warn('Estimated response is not prolate enough. '
+                          'Ratio=%0.3f.', ratio)
+        elif ratio < 1.e-5 or np.any(np.isnan(response)):
+            response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0])
+            IFLOGGER.warn(
+                'Estimated response is not valid, using a default one')
+        else:
+            IFLOGGER.info('Estimated response: %s', str(response[:3]))
+
+        np.savetxt(op.abspath(self.inputs.response), response)
+
+        wm_mask = np.zeros_like(FA)
+        wm_mask[indices] = 1
+        nb.Nifti1Image(wm_mask.astype(np.uint8), affine, None).to_filename(
+            op.abspath(self.inputs.out_mask))
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['response'] = op.abspath(self.inputs.response)
+        outputs['out_mask'] = op.abspath(self.inputs.out_mask)
+        return outputs
+
+
+class CSDInputSpec(DipyBaseInterfaceInputSpec):
+    in_mask = File(exists=True, desc=('input mask in which compute tensors'))
+    response = File(exists=True, desc=('single fiber estimated response'))
+    sh_order = traits.Int(
+        8, usedefault=True, desc=('maximum spherical harmonics order'))
+    save_fods = traits.Bool(True, usedefault=True, desc=('save fODFs in file'))
+    out_fods = File(desc=('fODFs output file name'))
+
+
+class CSDOutputSpec(TraitedSpec):
+    model = File(desc='Python pickled object of the CSD model fitted.')
+    out_fods = File(desc=('fODFs output file name'))
+
+
+class CSD(DipyDiffusionInterface):
+    """
+    Uses CSD [Tournier2007]_ to generate the fODF of DWIs.
The interface uses :py:mod:`dipy`, as explained in dipy's CSD example.
+
+    .. [Tournier2007] Tournier, J.D., et al. NeuroImage 2007.
+      Robust determination of the fibre orientation distribution in diffusion
+      MRI: Non-negativity constrained super-resolved spherical deconvolution
+
+
+    Example
+    -------
+
+    >>> from nipype.interfaces import dipy as ndp
+    >>> csd = ndp.CSD()
+    >>> csd.inputs.in_file = '4d_dwi.nii'
+    >>> csd.inputs.in_bval = 'bvals'
+    >>> csd.inputs.in_bvec = 'bvecs'
+    >>> res = csd.run() # doctest: +SKIP
+    """
+    input_spec = CSDInputSpec
+    output_spec = CSDOutputSpec
+
+    def _run_interface(self, runtime):
+        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
+        from dipy.data import get_sphere
+        # import marshal as pickle
+        import pickle
+        import gzip
+
+        img = nb.load(self.inputs.in_file)
+        imref = nb.four_to_three(img)[0]
+
+        if isdefined(self.inputs.in_mask):
+            msk = nb.load(self.inputs.in_mask).get_data()
+        else:
+            msk = np.ones(imref.shape)
+
+        data = img.get_data().astype(np.float32)
+
+        gtab = self._get_gradient_table()
+        resp_file = np.loadtxt(self.inputs.response)
+
+        response = (np.array(resp_file[0:3]), resp_file[-1])
+        ratio = response[0][1] / response[0][0]
+
+        if abs(ratio - 0.2) > 0.1:
+            IFLOGGER.warn('Estimated response is not prolate enough. '
+                          'Ratio=%0.3f.', ratio)
+
+        csd_model = ConstrainedSphericalDeconvModel(
+            gtab, response, sh_order=self.inputs.sh_order)
+
+        IFLOGGER.info('Fitting CSD model')
+        csd_fit = csd_model.fit(data, msk)
+
+        f = gzip.open(self._gen_filename('csdmodel', ext='.pklz'), 'wb')
+        pickle.dump(csd_model, f, -1)
+        f.close()
+
+        if self.inputs.save_fods:
+            sphere = get_sphere('symmetric724')
+            fods = csd_fit.odf(sphere)
+            nb.Nifti1Image(fods.astype(np.float32), img.affine,
+                           None).to_filename(self._gen_filename('fods'))
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['model'] = self._gen_filename('csdmodel', ext='.pklz')
+        if self.inputs.save_fods:
+            outputs['out_fods'] = self._gen_filename('fods')
+        return outputs
diff --git a/nipype/interfaces/dipy/setup.py b/nipype/interfaces/dipy/setup.py
new file mode 100644
index 0000000000..e6c8f000b2
--- /dev/null
+++ b/nipype/interfaces/dipy/setup.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('dipy', parent_package, top_path)
+
+    # config.add_data_dir('tests')
+    return config
+
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(**configuration(top_path='').todict())
diff --git a/nipype/interfaces/dipy/simulate.py b/nipype/interfaces/dipy/simulate.py
new file mode 100644
index 0000000000..4a995c8fa9
--- /dev/null
+++ b/nipype/interfaces/dipy/simulate.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from multiprocessing import (Pool, cpu_count)
+import os.path as op
+from builtins import range
+
+import numpy as np
+import nibabel as nb
+
+from ...
import logging
+from ...utils import NUMPY_MMAP
+from ..base import (traits, TraitedSpec, BaseInterfaceInputSpec, File,
+                    InputMultiPath, isdefined)
+from .base import DipyBaseInterface
+IFLOGGER = logging.getLogger('nipype.interface')
+
+
+class SimulateMultiTensorInputSpec(BaseInterfaceInputSpec):
+    in_dirs = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc='list of fibers (principal directions)')
+    in_frac = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc=('volume fraction of each fiber'))
+    in_vfms = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        desc=('volume fractions of isotropic '
+              'compartments'))
+    in_mask = File(exists=True, desc='mask to simulate data')
+
+    diff_iso = traits.List(
+        [3000e-6, 960e-6, 680e-6],
+        traits.Float,
+        usedefault=True,
+        desc='Diffusivity of isotropic compartments')
+    diff_sf = traits.Tuple(
+        (1700e-6, 200e-6, 200e-6),
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        usedefault=True,
+        desc='Single fiber tensor')
+
+    n_proc = traits.Int(0, usedefault=True, desc='number of processes')
+    baseline = File(exists=True, mandatory=True, desc='baseline T2 signal')
+    gradients = File(exists=True, desc='gradients file')
+    in_bvec = File(exists=True, desc='input bvecs file')
+    in_bval = File(exists=True, desc='input bvals file')
+    num_dirs = traits.Int(
+        32,
+        usedefault=True,
+        desc=('number of gradient directions (when table '
+              'is automatically generated)'))
+    bvalues = traits.List(
+        traits.Int,
+        value=[1000, 3000],
+        usedefault=True,
+        desc=('list of b-values (when table '
+              'is automatically generated)'))
+    out_file = File(
+        'sim_dwi.nii.gz',
+        usedefault=True,
+        desc='output file with the simulated DWIs')
+    out_mask = File(
+        'sim_msk.nii.gz', usedefault=True, desc='file with the simulated mask')
+    out_bvec = File('bvec.sim', usedefault=True, desc='simulated b vectors')
+    out_bval = File('bval.sim', usedefault=True, desc='simulated b values')
+    snr = traits.Int(0, usedefault=True, desc='signal-to-noise ratio (dB)')
+
+
+class SimulateMultiTensorOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='simulated DWIs')
+    out_mask = File(exists=True, desc='mask file')
+    out_bvec = File(exists=True, desc='simulated b vectors')
+    out_bval = File(exists=True, desc='simulated b values')
+
+
+class SimulateMultiTensor(DipyBaseInterface):
+    """
+    Interface to MultiTensor model simulator in dipy
+    http://nipy.org/dipy/examples_built/simulate_multi_tensor.html
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.dipy as dipy
+    >>> sim = dipy.SimulateMultiTensor()
+    >>> sim.inputs.in_dirs = ['fdir00.nii', 'fdir01.nii']
+    >>> sim.inputs.in_frac = ['ffra00.nii', 'ffra01.nii']
+    >>> sim.inputs.in_vfms = ['tpm_00.nii.gz', 'tpm_01.nii.gz',
+    ...
'tpm_02.nii.gz'] + >>> sim.inputs.baseline = 'b0.nii' + >>> sim.inputs.in_bvec = 'bvecs' + >>> sim.inputs.in_bval = 'bvals' + >>> sim.run() # doctest: +SKIP + """ + input_spec = SimulateMultiTensorInputSpec + output_spec = SimulateMultiTensorOutputSpec + + def _run_interface(self, runtime): + from dipy.core.gradients import gradient_table + + # Gradient table + if isdefined(self.inputs.in_bval) and isdefined(self.inputs.in_bvec): + # Load the gradient strengths and directions + bvals = np.loadtxt(self.inputs.in_bval) + bvecs = np.loadtxt(self.inputs.in_bvec).T + gtab = gradient_table(bvals, bvecs) + else: + gtab = _generate_gradients(self.inputs.num_dirs, + self.inputs.bvalues) + ndirs = len(gtab.bvals) + np.savetxt(op.abspath(self.inputs.out_bvec), gtab.bvecs.T) + np.savetxt(op.abspath(self.inputs.out_bval), gtab.bvals) + + # Load the baseline b0 signal + b0_im = nb.load(self.inputs.baseline) + hdr = b0_im.header + shape = b0_im.shape + aff = b0_im.affine + + # Check and load sticks and their volume fractions + nsticks = len(self.inputs.in_dirs) + if len(self.inputs.in_frac) != nsticks: + raise RuntimeError(('Number of sticks and their volume fractions' + ' must match.')) + + # Volume fractions of isotropic compartments + nballs = len(self.inputs.in_vfms) + vfs = np.squeeze( + nb.concat_images([ + nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_vfms + ]).get_data()) + if nballs == 1: + vfs = vfs[..., np.newaxis] + total_vf = np.sum(vfs, axis=3) + + # Generate a mask + if isdefined(self.inputs.in_mask): + msk = nb.load(self.inputs.in_mask).get_data() + msk[msk > 0.0] = 1.0 + msk[msk < 1.0] = 0.0 + else: + msk = np.zeros(shape) + msk[total_vf > 0.0] = 1.0 + + msk = np.clip(msk, 0.0, 1.0) + nvox = len(msk[msk > 0]) + + # Fiber fractions + ffsim = nb.concat_images( + [nb.load(f, mmap=NUMPY_MMAP) for f in self.inputs.in_frac]) + ffs = np.nan_to_num(np.squeeze(ffsim.get_data())) # fiber fractions + ffs = np.clip(ffs, 0., 1.) + if nsticks == 1: + ffs = ffs[..., np.newaxis] + + for i in range(nsticks): + ffs[..., i] *= msk + + total_ff = np.sum(ffs, axis=3) + + # Fix incongruencies in fiber fractions + for i in range(1, nsticks): + if np.any(total_ff > 1.0): + errors = np.zeros_like(total_ff) + errors[total_ff > 1.0] = total_ff[total_ff > 1.0] - 1.0 + ffs[..., i] -= errors + ffs[ffs < 0.0] = 0.0 + total_ff = np.sum(ffs, axis=3) + + for i in range(vfs.shape[-1]): + vfs[..., i] -= total_ff + vfs = np.clip(vfs, 0., 1.) + + fractions = np.concatenate((ffs, vfs), axis=3) + + nb.Nifti1Image(fractions, aff, None).to_filename('fractions.nii.gz') + nb.Nifti1Image(np.sum(fractions, axis=3), aff, + None).to_filename('total_vf.nii.gz') + + mhdr = hdr.copy() + mhdr.set_data_dtype(np.uint8) + mhdr.set_xyzt_units('mm', 'sec') + nb.Nifti1Image(msk, aff, mhdr).to_filename( + op.abspath(self.inputs.out_mask)) + + # Initialize stack of args + fracs = fractions[msk > 0] + + # Stack directions + dirs = None + for i in range(nsticks): + f = self.inputs.in_dirs[i] + fd = np.nan_to_num(nb.load(f, mmap=NUMPY_MMAP).get_data()) + w = np.linalg.norm(fd, axis=3)[..., np.newaxis] + w[w < np.finfo(float).eps] = 1.0 + fd /= w + if dirs is None: + dirs = fd[msk > 0].copy() + else: + dirs = np.hstack((dirs, fd[msk > 0])) + + # Add random directions for isotropic components + for d in range(nballs): + fd = np.random.randn(nvox, 3) + w = np.linalg.norm(fd, axis=1) + fd[w < np.finfo(float).eps, ...] 
= np.array([1., 0., 0.]) + w[w < np.finfo(float).eps] = 1.0 + fd /= w[..., np.newaxis] + dirs = np.hstack((dirs, fd)) + + sf_evals = list(self.inputs.diff_sf) + ba_evals = list(self.inputs.diff_iso) + + mevals = [sf_evals] * nsticks + \ + [[ba_evals[d]] * 3 for d in range(nballs)] + + b0 = b0_im.get_data()[msk > 0] + args = [] + for i in range(nvox): + args.append({ + 'fractions': + fracs[i, ...].tolist(), + 'sticks': + [tuple(dirs[i, 3 * j:3 * j + 3]) + for j in range(nsticks + nballs)], + 'gradients': + gtab, + 'mevals': + mevals, + 'S0': + b0[i], + 'snr': + self.inputs.snr + }) + + n_proc = self.inputs.n_proc + if n_proc == 0: + n_proc = cpu_count() + + try: + pool = Pool(processes=n_proc, maxtasksperchild=50) + except TypeError: + pool = Pool(processes=n_proc) + + # Simulate sticks using dipy + IFLOGGER.info( + 'Starting simulation of %d voxels, %d diffusion directions.', + len(args), ndirs) + result = np.array(pool.map(_compute_voxel, args)) + if np.shape(result)[1] != ndirs: + raise RuntimeError(('Computed directions do not match number ' + 'of b-values.')) + + signal = np.zeros((shape[0], shape[1], shape[2], ndirs)) + signal[msk > 0] = result + + simhdr = hdr.copy() + simhdr.set_data_dtype(np.float32) + simhdr.set_xyzt_units('mm', 'sec') + nb.Nifti1Image(signal.astype(np.float32), aff, simhdr).to_filename( + op.abspath(self.inputs.out_file)) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + outputs['out_mask'] = op.abspath(self.inputs.out_mask) + outputs['out_bvec'] = op.abspath(self.inputs.out_bvec) + outputs['out_bval'] = op.abspath(self.inputs.out_bval) + + return outputs + + +def _compute_voxel(args): + """ + Simulate DW signal for one voxel. Uses the multi-tensor model and + three isotropic compartments. + + Apparent diffusivity tensors are taken from [Alexander2002]_ + and [Pierpaoli1996]_. + + .. [Alexander2002] Alexander et al., Detection and modeling of non-Gaussian + apparent diffusion coefficient profiles in human brain data, MRM + 48(2):331-340, 2002, doi: `10.1002/mrm.10209 + <https://doi.org/10.1002/mrm.10209>`_. + .. [Pierpaoli1996] Pierpaoli et al., Diffusion tensor MR imaging + of the human brain, Radiology 201:637-648. 1996.
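+ + Example + ------- + + A minimal sketch of the ``args`` payload this helper expects; every + value below is illustrative, not taken from real data: + + >>> import numpy as np + >>> from dipy.core.gradients import gradient_table + >>> gtab = gradient_table(np.array([0., 1000.]), + ... np.array([[0., 0., 0.], [1., 0., 0.]])) # doctest: +SKIP + >>> args = {'fractions': [100.0], 'sticks': [(1., 0., 0.)], + ... 'gradients': gtab, 'mevals': [[1700e-6, 200e-6, 200e-6]], + ... 'S0': 100.0, 'snr': 0} # doctest: +SKIP + >>> signal = _compute_voxel(args) # doctest: +SKIP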
+ """ + from dipy.sims.voxel import multi_tensor + + ffs = args['fractions'] + gtab = args['gradients'] + signal = np.zeros_like(gtab.bvals, dtype=np.float32) + + # Simulate dwi signal + sf_vf = np.sum(ffs) + if sf_vf > 0.0: + ffs = ((np.array(ffs) / sf_vf) * 100) + snr = args['snr'] if args['snr'] > 0 else None + + try: + signal, _ = multi_tensor( + gtab, + args['mevals'], + S0=args['S0'], + angles=args['sticks'], + fractions=ffs, + snr=snr) + except Exception: + pass + + return signal.tolist() + + +def _generate_gradients(ndirs=64, values=[1000, 3000], nb0s=1): + """ + Automatically generate a `gradient table + `_ + + """ + import numpy as np + from dipy.core.sphere import (disperse_charges, Sphere, HemiSphere) + from dipy.core.gradients import gradient_table + + theta = np.pi * np.random.rand(ndirs) + phi = 2 * np.pi * np.random.rand(ndirs) + hsph_initial = HemiSphere(theta=theta, phi=phi) + hsph_updated, potential = disperse_charges(hsph_initial, 5000) + + values = np.atleast_1d(values).tolist() + vertices = hsph_updated.vertices + bvecs = vertices.copy() + bvals = np.ones(vertices.shape[0]) * values[0] + + for v in values[1:]: + bvecs = np.vstack((bvecs, vertices)) + bvals = np.hstack((bvals, v * np.ones(vertices.shape[0]))) + + for i in range(0, nb0s): + bvals = bvals.tolist() + bvals.insert(0, 0) + + bvecs = bvecs.tolist() + bvecs.insert(0, np.zeros(3)) + + return gradient_table(bvals, bvecs) diff --git a/nipype/interfaces/dipy/tensors.py b/nipype/interfaces/dipy/tensors.py new file mode 100644 index 0000000000..c79e063683 --- /dev/null +++ b/nipype/interfaces/dipy/tensors.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import nibabel as nb + +from ... import logging +from ..base import TraitedSpec, File, isdefined +from .base import DipyDiffusionInterface, DipyBaseInterfaceInputSpec + +IFLOGGER = logging.getLogger('nipype.interface') + + +class DTIInputSpec(DipyBaseInterfaceInputSpec): + mask_file = File(exists=True, desc='An optional white matter mask') + + +class DTIOutputSpec(TraitedSpec): + out_file = File(exists=True) + fa_file = File(exists=True) + md_file = File(exists=True) + rd_file = File(exists=True) + ad_file = File(exists=True) + + +class DTI(DipyDiffusionInterface): + """ + Calculates the diffusion tensor model parameters + + Example + ------- + + >>> import nipype.interfaces.dipy as dipy + >>> dti = dipy.DTI() + >>> dti.inputs.in_file = 'diffusion.nii' + >>> dti.inputs.in_bvec = 'bvecs' + >>> dti.inputs.in_bval = 'bvals' + >>> dti.run() # doctest: +SKIP + """ + input_spec = DTIInputSpec + output_spec = DTIOutputSpec + + def _run_interface(self, runtime): + from dipy.reconst import dti + from dipy.io.utils import nifti1_symmat + gtab = self._get_gradient_table() + + img = nb.load(self.inputs.in_file) + data = img.get_data() + affine = img.affine + mask = None + if isdefined(self.inputs.mask_file): + mask = nb.load(self.inputs.mask_file).get_data() + + # Fit it + tenmodel = dti.TensorModel(gtab) + ten_fit = tenmodel.fit(data, mask) + lower_triangular = ten_fit.lower_triangular() + img = nifti1_symmat(lower_triangular, affine) + out_file = self._gen_filename('dti') + nb.save(img, out_file) + IFLOGGER.info('DTI parameters image saved as %s', out_file) + + # FA MD RD and AD + for metric in ["fa", "md", "rd", "ad"]: + data = getattr(ten_fit, metric).astype("float32") + out_name = self._gen_filename(metric) + nb.Nifti1Image(data, affine).to_filename(out_name) + IFLOGGER.info('DTI %s 
image saved as %s', metric, out_name) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._gen_filename('dti') + + for metric in ["fa", "md", "rd", "ad"]: + outputs["{}_file".format(metric)] = self._gen_filename(metric) + + return outputs + + +class TensorModeInputSpec(DipyBaseInterfaceInputSpec): + mask_file = File(exists=True, desc='An optional white matter mask') + + +class TensorModeOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class TensorMode(DipyDiffusionInterface): + """ + Creates a map of the mode of the diffusion tensors given a set of + diffusion-weighted images, as well as their associated b-values and + b-vectors. Fits the diffusion tensors and calculates tensor mode + with Dipy. + + .. [1] Daniel B. Ennis and G. Kindlmann, "Orthogonal Tensor + Invariants and the Analysis of Diffusion Tensor Magnetic Resonance + Images", Magnetic Resonance in Medicine, vol. 55, no. 1, pp. 136-146, + 2006. + + Example + ------- + + >>> import nipype.interfaces.dipy as dipy + >>> mode = dipy.TensorMode() + >>> mode.inputs.in_file = 'diffusion.nii' + >>> mode.inputs.in_bvec = 'bvecs' + >>> mode.inputs.in_bval = 'bvals' + >>> mode.run() # doctest: +SKIP + """ + input_spec = TensorModeInputSpec + output_spec = TensorModeOutputSpec + + def _run_interface(self, runtime): + from dipy.reconst import dti + + # Load the 4D image files + img = nb.load(self.inputs.in_file) + data = img.get_data() + affine = img.affine + + # Load the gradient strengths and directions + gtab = self._get_gradient_table() + + # Mask the data so that tensors are not fit for + # unnecessary voxels + mask = data[..., 0] > 50 + + # Fit the tensors to the data + tenmodel = dti.TensorModel(gtab) + tenfit = tenmodel.fit(data, mask) + + # Calculate the mode of each voxel's tensor + mode_data = tenfit.mode + + # Write as a 3D Nifti image with the original affine + img = nb.Nifti1Image(mode_data, affine) + out_file = self._gen_filename('mode') + nb.save(img, out_file) + IFLOGGER.info('Tensor mode image saved as %s', out_file) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._gen_filename('mode') + return outputs diff --git a/nipype/interfaces/dipy/tests/__init__.py b/nipype/interfaces/dipy/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/dipy/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/dipy/tests/test_auto_APMQball.py b/nipype/interfaces/dipy/tests/test_auto_APMQball.py new file mode 100644 index 0000000000..f6f3f2e4c6 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_APMQball.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..anisotropic_power import APMQball + + +def test_APMQball_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + mask_file=dict(), + out_prefix=dict(), + ) + inputs = APMQball.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_APMQball_outputs(): + output_map = dict(out_file=dict(), ) + outputs = APMQball.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == 
value diff --git a/nipype/interfaces/dipy/tests/test_auto_CSD.py b/nipype/interfaces/dipy/tests/test_auto_CSD.py new file mode 100644 index 0000000000..a30efaa3cc --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_CSD.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reconstruction import CSD + + +def test_CSD_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + in_mask=dict(), + out_fods=dict(), + out_prefix=dict(), + response=dict(), + save_fods=dict(usedefault=True, ), + sh_order=dict(usedefault=True, ), + ) + inputs = CSD.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CSD_outputs(): + output_map = dict( + model=dict(), + out_fods=dict(), + ) + outputs = CSD.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_DTI.py b/nipype/interfaces/dipy/tests/test_auto_DTI.py new file mode 100644 index 0000000000..68c9ae1437 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_DTI.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import DTI + + +def test_DTI_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + mask_file=dict(), + out_prefix=dict(), + ) + inputs = DTI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTI_outputs(): + output_map = dict( + ad_file=dict(), + fa_file=dict(), + md_file=dict(), + out_file=dict(), + rd_file=dict(), + ) + outputs = DTI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_Denoise.py b/nipype/interfaces/dipy/tests/test_auto_Denoise.py new file mode 100644 index 0000000000..88a1bc5314 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_Denoise.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Denoise + + +def test_Denoise_inputs(): + input_map = dict( + block_radius=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + in_mask=dict(), + noise_mask=dict(), + noise_model=dict( + mandatory=True, + usedefault=True, + ), + patch_radius=dict(usedefault=True, ), + signal_mask=dict(), + snr=dict(), + ) + inputs = Denoise.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Denoise_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Denoise.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py new file 
mode 100644 index 0000000000..e133e266c4 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_DipyBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import DipyBaseInterface + + +def test_DipyBaseInterface_inputs(): + input_map = dict() + inputs = DipyBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py new file mode 100644 index 0000000000..ee9022ca58 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_DipyDiffusionInterface.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import DipyDiffusionInterface + + +def test_DipyDiffusionInterface_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + out_prefix=dict(), + ) + inputs = DipyDiffusionInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py new file mode 100644 index 0000000000..1270d94b13 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_EstimateResponseSH.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reconstruction import EstimateResponseSH + + +def test_EstimateResponseSH_inputs(): + input_map = dict( + auto=dict(xor=['recursive'], ), + b0_thres=dict(usedefault=True, ), + fa_thresh=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_evals=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + in_mask=dict(), + out_mask=dict(usedefault=True, ), + out_prefix=dict(), + recursive=dict(xor=['auto'], ), + response=dict(usedefault=True, ), + roi_radius=dict(usedefault=True, ), + ) + inputs = EstimateResponseSH.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateResponseSH_outputs(): + output_map = dict( + out_mask=dict(), + response=dict(), + ) + outputs = EstimateResponseSH.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_RESTORE.py b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py new file mode 100644 index 0000000000..9f62d05e0c --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_RESTORE.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reconstruction import RESTORE + + +def test_RESTORE_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + in_mask=dict(), + noise_mask=dict(), + out_prefix=dict(), + ) + inputs = RESTORE.input_spec() + + for key, metadata in list(input_map.items()): + for 
metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RESTORE_outputs(): + output_map = dict( + evals=dict(), + evecs=dict(), + fa=dict(), + md=dict(), + mode=dict(), + rd=dict(), + trace=dict(), + ) + outputs = RESTORE.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_Resample.py b/nipype/interfaces/dipy/tests/test_auto_Resample.py new file mode 100644 index 0000000000..ff51e0efe4 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_Resample.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Resample + + +def test_Resample_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + interp=dict( + mandatory=True, + usedefault=True, + ), + vox_size=dict(), + ) + inputs = Resample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Resample_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Resample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py new file mode 100644 index 0000000000..db46a3b982 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_SimulateMultiTensor.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..simulate import SimulateMultiTensor + + +def test_SimulateMultiTensor_inputs(): + input_map = dict( + baseline=dict(mandatory=True, ), + bvalues=dict(usedefault=True, ), + diff_iso=dict(usedefault=True, ), + diff_sf=dict(usedefault=True, ), + gradients=dict(), + in_bval=dict(), + in_bvec=dict(), + in_dirs=dict(mandatory=True, ), + in_frac=dict(mandatory=True, ), + in_mask=dict(), + in_vfms=dict(mandatory=True, ), + n_proc=dict(usedefault=True, ), + num_dirs=dict(usedefault=True, ), + out_bval=dict(usedefault=True, ), + out_bvec=dict(usedefault=True, ), + out_file=dict(usedefault=True, ), + out_mask=dict(usedefault=True, ), + snr=dict(usedefault=True, ), + ) + inputs = SimulateMultiTensor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SimulateMultiTensor_outputs(): + output_map = dict( + out_bval=dict(), + out_bvec=dict(), + out_file=dict(), + out_mask=dict(), + ) + outputs = SimulateMultiTensor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py new file mode 100644 index 0000000000..1bc1a2ea97 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_StreamlineTractography.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracks import StreamlineTractography + + +def test_StreamlineTractography_inputs(): + input_map = dict( 
+ gfa_thresh=dict( + mandatory=True, + usedefault=True, + ), + in_file=dict(mandatory=True, ), + in_model=dict(), + in_peaks=dict(), + min_angle=dict( + mandatory=True, + usedefault=True, + ), + multiprocess=dict( + mandatory=True, + usedefault=True, + ), + num_seeds=dict( + mandatory=True, + usedefault=True, + ), + out_prefix=dict(), + peak_threshold=dict( + mandatory=True, + usedefault=True, + ), + save_seeds=dict( + mandatory=True, + usedefault=True, + ), + seed_coord=dict(), + seed_mask=dict(), + tracking_mask=dict(), + ) + inputs = StreamlineTractography.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_StreamlineTractography_outputs(): + output_map = dict( + gfa=dict(), + odf_peaks=dict(), + out_seeds=dict(), + tracks=dict(), + ) + outputs = StreamlineTractography.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_TensorMode.py b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py new file mode 100644 index 0000000000..d072af78fc --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_TensorMode.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import TensorMode + + +def test_TensorMode_inputs(): + input_map = dict( + b0_thres=dict(usedefault=True, ), + in_bval=dict(mandatory=True, ), + in_bvec=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + mask_file=dict(), + out_prefix=dict(), + ) + inputs = TensorMode.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TensorMode_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TensorMode.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py new file mode 100644 index 0000000000..79af3b5940 --- /dev/null +++ b/nipype/interfaces/dipy/tests/test_auto_TrackDensityMap.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracks import TrackDensityMap + + +def test_TrackDensityMap_inputs(): + input_map = dict( + data_dims=dict(), + in_file=dict(mandatory=True, ), + out_filename=dict(usedefault=True, ), + points_space=dict(usedefault=True, ), + reference=dict(), + voxel_dims=dict(), + ) + inputs = TrackDensityMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrackDensityMap_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TrackDensityMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dipy/tracks.py b/nipype/interfaces/dipy/tracks.py new file mode 100644 index 0000000000..b7d965ece1 --- /dev/null +++ b/nipype/interfaces/dipy/tracks.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, 
division, unicode_literals, + absolute_import) + +import os.path as op +import numpy as np +import nibabel as nb +import nibabel.trackvis as nbt + +from ... import logging +from ..base import (TraitedSpec, BaseInterfaceInputSpec, File, isdefined, + traits) +from .base import DipyBaseInterface +IFLOGGER = logging.getLogger('nipype.interface') + + +class TrackDensityMapInputSpec(BaseInterfaceInputSpec): + in_file = File( + exists=True, mandatory=True, desc='The input TrackVis track file') + reference = File( + exists=True, desc='A reference file to define RAS coordinates space') + points_space = traits.Enum( + 'rasmm', + 'voxel', + None, + usedefault=True, + desc='coordinates of trk file') + voxel_dims = traits.List( + traits.Float, minlen=3, maxlen=3, desc='The size of each voxel in mm.') + data_dims = traits.List( + traits.Int, + minlen=3, + maxlen=3, + desc='The size of the image in voxels.') + out_filename = File( + 'tdi.nii', + usedefault=True, + desc='The output filename for the tracks in TrackVis ' + '(.trk) format') + + +class TrackDensityMapOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class TrackDensityMap(DipyBaseInterface): + """ + Creates a tract density image from a TrackVis track file using functions + from dipy + + Example + ------- + + >>> import nipype.interfaces.dipy as dipy + >>> trk2tdi = dipy.TrackDensityMap() + >>> trk2tdi.inputs.in_file = 'converted.trk' + >>> trk2tdi.run() # doctest: +SKIP + + """ + input_spec = TrackDensityMapInputSpec + output_spec = TrackDensityMapOutputSpec + + def _run_interface(self, runtime): + from numpy import min_scalar_type + from dipy.tracking.utils import density_map + + tracks, header = nbt.read(self.inputs.in_file) + streams = ((ii[0]) for ii in tracks) + + if isdefined(self.inputs.reference): + refnii = nb.load(self.inputs.reference) + affine = refnii.affine + data_dims = refnii.shape[:3] + kwargs = dict(affine=affine) + else: + IFLOGGER.warn('voxel_dims and data_dims are deprecated as of dipy ' + '0.7.1. 
Please use reference input instead') + + if not isdefined(self.inputs.data_dims): + data_dims = header['dim'] + else: + data_dims = self.inputs.data_dims + if not isdefined(self.inputs.voxel_dims): + voxel_size = header['voxel_size'] + else: + voxel_size = self.inputs.voxel_dims + + affine = header['vox_to_ras'] + kwargs = dict(voxel_size=voxel_size) + + data = density_map(streams, data_dims, **kwargs) + data = data.astype(min_scalar_type(data.max())) + img = nb.Nifti1Image(data, affine) + out_file = op.abspath(self.inputs.out_filename) + nb.save(img, out_file) + + IFLOGGER.info('Track density map saved as %s, size=%s, dimensions=%s', + out_file, img.shape, img.header.get_zooms()) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = op.abspath(self.inputs.out_filename) + return outputs + + +class StreamlineTractographyInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc=('input diffusion data')) + in_model = File(exists=True, desc=('input f/d-ODF model (gzip-pickled file)')) + tracking_mask = File( + exists=True, desc=('input mask within which to perform tracking')) + seed_mask = File( + exists=True, desc=('input mask within which to perform seeding')) + in_peaks = File(exists=True, desc=('peaks computed from the ODF')) + seed_coord = File( + exists=True, + desc=('file containing the list of seed voxel ' + 'coordinates (N,3)')) + gfa_thresh = traits.Float( + 0.2, + mandatory=True, + usedefault=True, + desc=('GFA threshold to compute tracking mask')) + peak_threshold = traits.Float( + 0.5, + mandatory=True, + usedefault=True, + desc=('threshold to consider peaks from model')) + min_angle = traits.Float( + 25.0, + mandatory=True, + usedefault=True, + desc=('minimum separation angle (in degrees)')) + multiprocess = traits.Bool( + True, mandatory=True, usedefault=True, desc=('use multiprocessing')) + save_seeds = traits.Bool( + False, + mandatory=True, + usedefault=True, + desc=('save seeding voxel coordinates')) + num_seeds = traits.Int( + 10000, + mandatory=True, + usedefault=True, + desc=('desired number of tracks in tractography')) + out_prefix = traits.Str(desc=('output prefix for file names')) + + +class StreamlineTractographyOutputSpec(TraitedSpec): + tracks = File(desc='TrackVis file containing extracted streamlines') + gfa = File( + desc=('The resulting GFA (generalized FA) computed using the ' + 'peaks of the ODF')) + odf_peaks = File(desc=('peaks computed from the ODF')) + out_seeds = File( + desc=('file containing the (N,3) *voxel* coordinates used' + ' in seeding.')) + + +class StreamlineTractography(DipyBaseInterface): + """ + Streamline tractography using EuDX [Garyfallidis12]_. + + .. 
[Garyfallidis12] Garyfallidis E., “Towards an accurate brain + tractography”, PhD thesis, University of Cambridge, 2012 + + Example + ------- + + >>> from nipype.interfaces import dipy as ndp + >>> track = ndp.StreamlineTractography() + >>> track.inputs.in_file = '4d_dwi.nii' + >>> track.inputs.in_model = 'model.pklz' + >>> track.inputs.tracking_mask = 'dilated_wm_mask.nii' + >>> res = track.run() # doctest: +SKIP + """ + input_spec = StreamlineTractographyInputSpec + output_spec = StreamlineTractographyOutputSpec + + def _run_interface(self, runtime): + from dipy.reconst.peaks import peaks_from_model + from dipy.tracking.eudx import EuDX + from dipy.data import get_sphere + # import marshal as pickle + import pickle as pickle + import gzip + + if (not (isdefined(self.inputs.in_model) + or isdefined(self.inputs.in_peaks))): + raise RuntimeError(('At least one of in_model or in_peaks should ' + 'be supplied')) + + img = nb.load(self.inputs.in_file) + imref = nb.four_to_three(img)[0] + affine = img.affine + + data = img.get_data().astype(np.float32) + hdr = imref.header.copy() + hdr.set_data_dtype(np.float32) + hdr['data_type'] = 16 + + sphere = get_sphere('symmetric724') + + self._save_peaks = False + if isdefined(self.inputs.in_peaks): + IFLOGGER.info('Peaks file found, skipping ODF peaks search...') + f = gzip.open(self.inputs.in_peaks, 'rb') + peaks = pickle.load(f) + f.close() + else: + self._save_peaks = True + IFLOGGER.info('Loading model and computing ODF peaks') + f = gzip.open(self.inputs.in_model, 'rb') + odf_model = pickle.load(f) + f.close() + + peaks = peaks_from_model( + model=odf_model, + data=data, + sphere=sphere, + relative_peak_threshold=self.inputs.peak_threshold, + min_separation_angle=self.inputs.min_angle, + parallel=self.inputs.multiprocess) + + f = gzip.open(self._gen_filename('peaks', ext='.pklz'), 'wb') + pickle.dump(peaks, f, -1) + f.close() + + hdr.set_data_shape(peaks.gfa.shape) + nb.Nifti1Image(peaks.gfa.astype(np.float32), affine, hdr).to_filename( + self._gen_filename('gfa')) + + IFLOGGER.info('Performing tractography') + + if isdefined(self.inputs.tracking_mask): + msk = nb.load(self.inputs.tracking_mask).get_data() + msk[msk > 0] = 1 + msk[msk < 0] = 0 + else: + msk = np.ones(imref.shape) + + gfa = peaks.gfa * msk + seeds = self.inputs.num_seeds + + if isdefined(self.inputs.seed_coord): + seeds = np.loadtxt(self.inputs.seed_coord) + + elif isdefined(self.inputs.seed_mask): + seedmsk = nb.load(self.inputs.seed_mask).get_data() + assert (seedmsk.shape == data.shape[:3]) + seedmsk[seedmsk > 0] = 1 + seedmsk[seedmsk < 1] = 0 + seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T + vseeds = seedps.shape[0] + nsperv = (seeds // vseeds) + 1 + IFLOGGER.info('Seed mask is provided (%d voxels inside ' + 'mask), computing seeds (%d seeds/voxel).', vseeds, + nsperv) + if nsperv > 1: + IFLOGGER.info('Needed %d seeds per selected voxel (total %d).', + nsperv, vseeds) + seedps = np.vstack(np.array([seedps] * nsperv)) + voxcoord = seedps + np.random.uniform(-1, 1, size=seedps.shape) + nseeds = voxcoord.shape[0] + seeds = affine.dot( + np.vstack((voxcoord.T, np.ones((1, nseeds)))))[:3, :].T + + if self.inputs.save_seeds: + np.savetxt(self._gen_filename('seeds', ext='.txt'), seeds) + + if isdefined(self.inputs.tracking_mask): + tmask = msk + a_low = 0.1 + else: + tmask = gfa + a_low = self.inputs.gfa_thresh + + eu = EuDX( + tmask, + peaks.peak_indices[..., 0], + seeds=seeds, + affine=affine, + odf_vertices=sphere.vertices, + a_low=a_low) + + ss_mm = [np.array(s) 
for s in eu] + + trkfilev = nb.trackvis.TrackvisFile( + [(s, None, None) for s in ss_mm], + points_space='rasmm', + affine=np.eye(4)) + trkfilev.to_file(self._gen_filename('tracked', ext='.trk')) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['tracks'] = self._gen_filename('tracked', ext='.trk') + outputs['gfa'] = self._gen_filename('gfa') + if self._save_peaks: + outputs['odf_peaks'] = self._gen_filename('peaks', ext='.pklz') + if self.inputs.save_seeds: + if isdefined(self.inputs.seed_coord): + outputs['out_seeds'] = self.inputs.seed_coord + else: + outputs['out_seeds'] = self._gen_filename('seeds', ext='.txt') + + return outputs + + def _gen_filename(self, name, ext=None): + fname, fext = op.splitext(op.basename(self.inputs.in_file)) + if fext == '.gz': + fname, fext2 = op.splitext(fname) + fext = fext2 + fext + + if not isdefined(self.inputs.out_prefix): + out_prefix = op.abspath(fname) + else: + out_prefix = self.inputs.out_prefix + + if ext is None: + ext = fext + + return out_prefix + '_' + name + ext diff --git a/nipype/interfaces/dtitk/__init__.py b/nipype/interfaces/dtitk/__init__.py new file mode 100644 index 0000000000..a41c09e588 --- /dev/null +++ b/nipype/interfaces/dtitk/__init__.py @@ -0,0 +1,13 @@ +"""The dtitk module provides classes for interfacing with the `Diffusion +Tensor Imaging Toolkit (DTI-TK) +<http://dti-tk.sourceforge.net>`_ command line tools. + +Top-level namespace for dti-tk. +""" + +# from .base import () +from .registration import (Rigid, Affine, Diffeo, + ComposeXfm, DiffeoSymTensor3DVol, AffSymTensor3DVol, + AffScalarVol, DiffeoScalarVol) +from .utils import (TVAdjustVoxSp, SVAdjustVoxSp, TVResample, SVResample, + TVtool, BinThresh) diff --git a/nipype/interfaces/dtitk/base.py b/nipype/interfaces/dtitk/base.py new file mode 100644 index 0000000000..5cfb81d9dd --- /dev/null +++ b/nipype/interfaces/dtitk/base.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The dtitk module provides classes for interfacing with the `DTITK +<http://dti-tk.sourceforge.net>`_ command line tools. + +These are the base tools for working with DTITK. +Preprocessing tools are found in dtitk/utils.py +Registration tools are found in dtitk/registration.py + +Currently these tools are supported: + +* Rigid Tensor Registration +* Affine Tensor Registration +* Diffeomorphic Tensor Registration +* Combine affine and diffeomorphic transforms +* Application of transform to tensor and scalar volumes +* Threshold and Binarize +* Adjusting the voxel space of tensor and scalar volumes +* Resampling tensor and scalar volumes +* Calculation of tensor metrics from tensor volume + +Examples +-------- +See the docstrings of the individual classes for examples. + +""" +from __future__ import print_function, division, unicode_literals, \ + absolute_import + +import os + +from ... 
import logging +from ...utils.filemanip import fname_presuffix +from ..base import CommandLine +from nipype.interfaces.fsl.base import Info +import warnings + +LOGGER = logging.getLogger('nipype.interface') + + +class DTITKRenameMixin(object): + def __init__(self, *args, **kwargs): + classes = [cls.__name__ for cls in self.__class__.mro()] + dep_name = classes[0] + rename_idx = classes.index('DTITKRenameMixin') + new_name = classes[rename_idx + 1] + warnings.warn('The {} interface has been renamed to {}\n' + 'Please see the documentation for DTI-TK ' + 'interfaces, as some inputs have been ' + 'added or renamed for clarity.' + ''.format(dep_name, new_name), + DeprecationWarning) + super(DTITKRenameMixin, self).__init__(*args, **kwargs) + + +class CommandLineDtitk(CommandLine): + + def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, + ext=None): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename<suffix><ext>. + If change_ext is True, it will use the extension specified in + inputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (default is '') + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.output_type) + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix(basename, suffix=suffix, + use_ext=False, newpath=cwd) + return fname diff --git a/nipype/interfaces/dtitk/registration.py b/nipype/interfaces/dtitk/registration.py new file mode 100644 index 0000000000..6aa40d4201 --- /dev/null +++ b/nipype/interfaces/dtitk/registration.py @@ -0,0 +1,489 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""DTITK registration interfaces + +DTI-TK developed by Gary Hui Zhang, gary.zhang@ucl.ac.uk +For additional help, visit http://dti-tk.sf.net + +The high-dimensional tensor-based DTI registration algorithm + +Zhang, H., Avants, B.B, Yushkevich, P.A., Woo, J.H., Wang, S., McCluskey, L.H., + Elman, L.B., Melhem, E.R., Gee, J.C., High-dimensional spatial normalization + of diffusion tensor images improves the detection of white matter differences + in amyotrophic lateral sclerosis, IEEE Transactions on Medical Imaging, + 26(11):1585-1597, November 2007. PMID: 18041273. + +The original piecewise-affine tensor-based DTI registration algorithm at the +core of DTI-TK + +Zhang, H., Yushkevich, P.A., Alexander, D.C., Gee, J.C., Deformable + registration of diffusion tensor MR images with explicit orientation + optimization, Medical Image Analysis, 10(5):764-785, October 2006. PMID: + 16899392.
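+ +Example +------- + +A typical tensor-based spatial normalization chain with these interfaces is +sketched below; this is illustrative only, and every file name (including the +derived transform names) is a placeholder rather than a value produced here: + +>>> from nipype.interfaces import dtitk +>>> rigid = dtitk.Rigid(fixed_file='tmpl.nii', moving_file='subj.nii') # doctest: +SKIP +>>> affine = dtitk.Affine(fixed_file='tmpl.nii', moving_file='subj.nii') # doctest: +SKIP +>>> diffeo = dtitk.Diffeo(fixed_file='tmpl.nii', +... moving_file='subj_aff.nii', +... mask_file='tmpl_mask.nii') # doctest: +SKIP +>>> compose = dtitk.ComposeXfm(in_df='subj_aff_diffeo.df.nii', +... in_aff='subj.aff') # doctest: +SKIP +>>> warp = dtitk.DiffeoSymTensor3DVol(in_file='subj.nii', +... transform='subj_aff_diffeo_affdf.df.nii') # doctest: +SKIP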
+ +""" + +from ..base import TraitedSpec, CommandLineInputSpec, traits, File, isdefined +from ...utils.filemanip import fname_presuffix, split_filename +from .base import CommandLineDtitk, DTITKRenameMixin +import os + +__docformat__ = 'restructuredtext' + + +class RigidInputSpec(CommandLineInputSpec): + fixed_file = File(desc="fixed tensor volume", exists=True, + mandatory=True, position=0, argstr="%s", copyfile=False) + moving_file = File(desc="moving tensor volume", exists=True, + mandatory=True, position=1, argstr="%s", copyfile=False) + similarity_metric = traits.Enum('EDS', 'GDS', 'DDS', 'NMI', + mandatory=True, position=2, argstr="%s", + desc="similarity metric", usedefault=True) + sampling_xyz = traits.Tuple((4, 4, 4), mandatory=True, position=3, + argstr="%g %g %g", usedefault=True, + desc="dist between samp points (mm) (x,y,z)") + ftol = traits.Float(mandatory=True, position=4, argstr="%g", + desc="cost function tolerance", default_value=0.01, + usedefault=True) + initialize_xfm = File(copyfile=True, desc="Initialize w/DTITK-FORMAT" + "affine", position=5, argstr="%s", exists=True) + + +class RigidOutputSpec(TraitedSpec): + out_file = File(exists=True) + out_file_xfm = File(exists=True) + + +class Rigid(CommandLineDtitk): + """Performs rigid registration between two tensor volumes + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.Rigid() + >>> node.inputs.fixed_file = 'im1.nii' + >>> node.inputs.moving_file = 'im2.nii' + >>> node.inputs.similarity_metric = 'EDS' + >>> node.inputs.sampling_xyz = (4,4,4) + >>> node.inputs.ftol = 0.01 + >>> node.cmdline + 'dti_rigid_reg im1.nii im2.nii EDS 4 4 4 0.01' + >>> node.run() # doctest: +SKIP + """ + input_spec = RigidInputSpec + output_spec = RigidOutputSpec + _cmd = 'dti_rigid_reg' + + '''def _format_arg(self, name, spec, value): + if name == 'initialize_xfm': + value = 1 + return super(Rigid, self)._format_arg(name, spec, value)''' + + def _run_interface(self, runtime): + runtime = super(Rigid, self)._run_interface(runtime) + if '''.aff doesn't exist or can't be opened''' in runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + moving = self.inputs.moving_file + outputs['out_file_xfm'] = fname_presuffix(moving, suffix='.aff', + use_ext=False) + outputs['out_file'] = fname_presuffix(moving, suffix='_aff') + return outputs + + +class Affine(Rigid): + """Performs affine registration between two tensor volumes + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.Affine() + >>> node.inputs.fixed_file = 'im1.nii' + >>> node.inputs.moving_file = 'im2.nii' + >>> node.inputs.similarity_metric = 'EDS' + >>> node.inputs.sampling_xyz = (4,4,4) + >>> node.inputs.ftol = 0.01 + >>> node.inputs.initialize_xfm = 'im_affine.aff' + >>> node.cmdline + 'dti_affine_reg im1.nii im2.nii EDS 4 4 4 0.01 im_affine.aff' + >>> node.run() # doctest: +SKIP + """ + _cmd = 'dti_affine_reg' + + +class DiffeoInputSpec(CommandLineInputSpec): + fixed_file = File(desc="fixed tensor volume", + exists=True, position=0, argstr="%s") + moving_file = File(desc="moving tensor volume", + exists=True, position=1, argstr="%s", copyfile=False) + mask_file = File(desc="mask", exists=True, position=2, argstr="%s") + legacy = traits.Enum(1, desc="legacy parameter; always set to 1", + usedefault=True, mandatory=True, + position=3, argstr="%d") + n_iters = traits.Int(6, desc="number of iterations", + mandatory=True, + position=4, argstr="%d", 
usedefault=True) + ftol = traits.Float(0.002, desc="iteration for the optimization to stop", + mandatory=True, position=5, argstr="%g", + usedefault=True) + + +class DiffeoOutputSpec(TraitedSpec): + out_file = File(exists=True) + out_file_xfm = File(exists=True) + + +class Diffeo(CommandLineDtitk): + """Performs diffeomorphic registration between two tensor volumes + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.Diffeo() + >>> node.inputs.fixed_file = 'im1.nii' + >>> node.inputs.moving_file = 'im2.nii' + >>> node.inputs.mask_file = 'mask.nii' + >>> node.inputs.legacy = 1 + >>> node.inputs.n_iters = 6 + >>> node.inputs.ftol = 0.002 + >>> node.cmdline + 'dti_diffeomorphic_reg im1.nii im2.nii mask.nii 1 6 0.002' + >>> node.run() # doctest: +SKIP + """ + input_spec = DiffeoInputSpec + output_spec = DiffeoOutputSpec + _cmd = 'dti_diffeomorphic_reg' + + def _list_outputs(self): + outputs = self.output_spec().get() + moving = self.inputs.moving_file + outputs['out_file_xfm'] = fname_presuffix(moving, suffix='_diffeo.df') + outputs['out_file'] = fname_presuffix(moving, suffix='_diffeo') + return outputs + + +class ComposeXfmInputSpec(CommandLineInputSpec): + in_df = File(desc='diffeomorphic warp file', exists=True, + argstr="-df %s", mandatory=True) + in_aff = File(desc='affine transform file', exists=True, + argstr="-aff %s", mandatory=True) + out_file = File(desc='output path', + argstr="-out %s", genfile=True) + + +class ComposeXfmOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class ComposeXfm(CommandLineDtitk): + """ + Combines diffeomorphic and affine transforms + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.ComposeXfm() + >>> node.inputs.in_df = 'im_warp.df.nii' + >>> node.inputs.in_aff= 'im_affine.aff' + >>> node.cmdline + 'dfRightComposeAffine -aff im_affine.aff -df im_warp.df.nii -out + im_warp_affdf.df.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = ComposeXfmInputSpec + output_spec = ComposeXfmOutputSpec + _cmd = 'dfRightComposeAffine' + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + out_file = self._gen_filename('out_file') + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name != 'out_file': + return + path, base, ext = split_filename(self.inputs.in_df) + suffix = '_affdf' + if base.endswith('.df'): + suffix += '.df' + base = base[:-3] + return fname_presuffix(base, suffix=suffix + ext, use_ext=False) + + +class AffSymTensor3DVolInputSpec(CommandLineInputSpec): + in_file = File(desc='moving tensor volume', exists=True, + argstr="-in %s", mandatory=True) + out_file = File(desc='output filename', + argstr="-out %s", name_source="in_file", + name_template="%s_affxfmd", keep_extension=True) + transform = File(exists=True, argstr="-trans %s", + xor=['target', 'translation', 'euler', 'deformation'], + desc='transform to apply: specify an input transformation' + ' file; parameters input will be ignored',) + interpolation = traits.Enum('LEI', 'EI', usedefault=True, + argstr="-interp %s", + desc='Log Euclidean/Euclidean Interpolation') + reorient = traits.Enum('PPD', 'NO', 'FS', argstr='-reorient %s', + usedefault=True, desc='Reorientation strategy: ' + 'preservation of principal direction, no ' + 'reorientation, or finite strain') + target = File(exists=True, argstr="-target %s", xor=['transform'], + desc='output volume specification read from the target ' + 
'volume if specified') + translation = traits.Tuple((traits.Float(), traits.Float(), + traits.Float()), + desc='translation (x,y,z) in mm', + argstr='-translation %g %g %g', + xor=['transform']) + euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='(theta, phi, psi) in degrees', + xor=['transform'], argstr='-euler %g %g %g') + deformation = traits.Tuple((traits.Float(),) * 6, + desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'], + argstr='-deformation %g %g %g %g %g %g') + + +class AffSymTensor3DVolOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class AffSymTensor3DVol(CommandLineDtitk): + """ + Applies affine transform to a tensor volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.AffSymTensor3DVol() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.transform = 'im_affine.aff' + >>> node.cmdline + 'affineSymTensor3DVolume -in im1.nii -interp LEI -out im1_affxfmd.nii + -reorient PPD -trans im_affine.aff' + >>> node.run() # doctest: +SKIP + """ + input_spec = AffSymTensor3DVolInputSpec + output_spec = AffSymTensor3DVolOutputSpec + _cmd = 'affineSymTensor3DVolume' + + +class AffScalarVolInputSpec(CommandLineInputSpec): + in_file = File(desc='moving scalar volume', exists=True, + argstr="-in %s", mandatory=True) + out_file = File(desc='output filename', + argstr="-out %s", name_source="in_file", + name_template="%s_affxfmd", keep_extension=True) + transform = File(exists=True, argstr="-trans %s", + xor=['target', 'translation', 'euler', 'deformation'], + desc='transform to apply: specify an input transformation' + ' file; parameters input will be ignored',) + interpolation = traits.Enum('trilinear', 'NN', + usedefault=True, argstr="-interp %s", + desc='trilinear or nearest neighbor' + ' interpolation') + target = File(exists=True, argstr="-target %s", xor=['transform'], + desc='output volume specification read from the target ' + 'volume if specified') + translation = traits.Tuple((traits.Float(), traits.Float(), + traits.Float()), + desc='translation (x,y,z) in mm', + argstr='-translation %g %g %g', + xor=['transform']) + euler = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='(theta, phi, psi) in degrees', + xor=['transform'], argstr='-euler %g %g %g') + deformation = traits.Tuple((traits.Float(),) * 6, + desc='(xx,yy,zz,xy,yz,xz)', xor=['transform'], + argstr='-deformation %g %g %g %g %g %g') + + +class AffScalarVolOutputSpec(TraitedSpec): + out_file = File(desc='moved volume', exists=True) + + +class AffScalarVol(CommandLineDtitk): + """ + Applies affine transform to a scalar volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.AffScalarVol() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.transform = 'im_affine.aff' + >>> node.cmdline + 'affineScalarVolume -in im1.nii -interp 0 -out im1_affxfmd.nii -trans + im_affine.aff' + >>> node.run() # doctest: +SKIP + """ + input_spec = AffScalarVolInputSpec + output_spec = AffScalarVolOutputSpec + _cmd = 'affineScalarVolume' + + def _format_arg(self, name, spec, value): + if name == 'interpolation': + value = {'trilinear': 0, 'NN': 1}[value] + return super(AffScalarVol, self)._format_arg(name, spec, value) + + +class DiffeoSymTensor3DVolInputSpec(CommandLineInputSpec): + in_file = File(desc='moving tensor volume', exists=True, + argstr="-in %s", mandatory=True) + out_file = File(desc='output filename', + argstr="-out %s", name_source="in_file", + name_template="%s_diffeoxfmd", keep_extension=True) 
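+ # out_file defaults to the in_file name with a '_diffeoxfmd' suffix + # (via name_source/name_template, keep_extension); set it only to + # override that generated name.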
+ transform = File(exists=True, argstr="-trans %s", + mandatory=True, desc='transform to apply') + df = traits.Str('FD', argstr="-df %s", usedefault=True) + interpolation = traits.Enum('LEI', 'EI', usedefault=True, + argstr="-interp %s", + desc='Log Euclidean/Euclidean Interpolation') + reorient = traits.Enum('PPD', 'FS', argstr='-reorient %s', + usedefault=True, desc='Reorientation strategy: ' + 'preservation of principal direction or finite ' + 'strain') + target = File(exists=True, argstr="-target %s", xor=['voxel_size'], + desc='output volume specification read from the target ' + 'volume if specified') + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz voxel size (superseded by target)', + argstr="-vsize %g %g %g", xor=['target']) + flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), + argstr="-flip %d %d %d") + resampling_type = traits.Enum('backward', 'forward', + desc='use backward or forward resampling', + argstr="-type %s") + + +class DiffeoSymTensor3DVolOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class DiffeoSymTensor3DVol(CommandLineDtitk): + """ + Applies diffeomorphic transform to a tensor volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.DiffeoSymTensor3DVol() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.transform = 'im_warp.df.nii' + >>> node.cmdline + 'deformationSymTensor3DVolume -df FD -in im1.nii -interp LEI -out + im1_diffeoxfmd.nii -reorient PPD -trans im_warp.df.nii' + >>> node.run() # doctest: +SKIP + """ + + input_spec = DiffeoSymTensor3DVolInputSpec + output_spec = DiffeoSymTensor3DVolOutputSpec + _cmd = 'deformationSymTensor3DVolume' + + def _format_arg(self, name, spec, value): + if name == 'resampling_type': + value = {'forward': 0, 'backward': 1}[value] + return super(DiffeoSymTensor3DVol, self)._format_arg(name, spec, value) + + +class DiffeoScalarVolInputSpec(CommandLineInputSpec): + in_file = File(desc='moving scalar volume', exists=True, + argstr="-in %s", mandatory=True) + out_file = File(desc='output filename', + argstr="-out %s", name_source="in_file", + name_template="%s_diffeoxfmd", keep_extension=True) + transform = File(exists=True, argstr="-trans %s", + mandatory=True, desc='transform to apply') + target = File(exists=True, argstr="-target %s", xor=['voxel_size'], + desc='output volume specification read from the target ' + 'volume if specified') + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz voxel size (superseded by target)', + argstr="-vsize %g %g %g", xor=['target']) + flip = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), + argstr="-flip %d %d %d") + resampling_type = traits.Enum('backward', 'forward', + desc='use backward or forward resampling', + argstr="-type %s") + interpolation = traits.Enum('trilinear', 'NN', + desc='trilinear, or nearest neighbor', + argstr="-interp %s", + usedefault=True) + + +class DiffeoScalarVolOutputSpec(TraitedSpec): + out_file = File(desc='moved volume', exists=True) + + +class DiffeoScalarVol(CommandLineDtitk): + """ + Applies diffeomorphic transform to a scalar volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.DiffeoScalarVol() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.transform = 'im_warp.df.nii' + >>> node.cmdline + 'deformationScalarVolume -in im1.nii -interp 0 -out im1_diffeoxfmd.nii + -trans im_warp.df.nii' + >>> node.run() # doctest: +SKIP + """ + + input_spec = 
DiffeoScalarVolInputSpec + output_spec = DiffeoScalarVolOutputSpec + _cmd = 'deformationScalarVolume' + + def _format_arg(self, name, spec, value): + if name == 'resampling_type': + value = {'forward': 0, 'backward': 1}[value] + elif name == 'interpolation': + value = {'trilinear': 0, 'NN': 1}[value] + return super(DiffeoScalarVol, self)._format_arg(name, spec, value) + + +class RigidTask(DTITKRenameMixin, Rigid): + pass + + +class AffineTask(DTITKRenameMixin, Affine): + pass + + +class DiffeoTask(DTITKRenameMixin, Diffeo): + pass + + +class ComposeXfmTask(DTITKRenameMixin, ComposeXfm): + pass + + +class affScalarVolTask(DTITKRenameMixin, AffScalarVol): + pass + + +class affSymTensor3DVolTask(DTITKRenameMixin, AffSymTensor3DVol): + pass + + +class diffeoScalarVolTask(DTITKRenameMixin, DiffeoScalarVol): + pass + + +class diffeoSymTensor3DVolTask(DTITKRenameMixin, DiffeoSymTensor3DVol): + pass diff --git a/nipype/interfaces/dtitk/tests/__init__.py b/nipype/interfaces/dtitk/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py new file mode 100644 index 0000000000..5f9262d788 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_AffScalarVol.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import AffScalarVol + + +def test_AffScalarVol_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformation=dict( + argstr='-deformation %g %g %g %g %g %g', + xor=['transform'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr='-euler %g %g %g', + xor=['transform'], + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_affxfmd', + ), + target=dict( + argstr='-target %s', + xor=['transform'], + ), + transform=dict( + argstr='-trans %s', + xor=['target', 'translation', 'euler', 'deformation'], + ), + translation=dict( + argstr='-translation %g %g %g', + xor=['transform'], + ), + ) + inputs = AffScalarVol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AffScalarVol_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AffScalarVol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py new file mode 100644 index 0000000000..6d7abc852a --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_AffSymTensor3DVol.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import AffSymTensor3DVol + + +def test_AffSymTensor3DVol_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformation=dict( + argstr='-deformation %g %g %g %g %g %g', + xor=['transform'], + ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr='-euler %g %g %g', + xor=['transform'], + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_affxfmd', + ), + reorient=dict( + argstr='-reorient %s', + usedefault=True, + ), + target=dict( + argstr='-target %s', + xor=['transform'], + ), + transform=dict( + argstr='-trans %s', + xor=['target', 'translation', 'euler', 'deformation'], + ), + translation=dict( + argstr='-translation %g %g %g', + xor=['transform'], + ), + ) + inputs = AffSymTensor3DVol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AffSymTensor3DVol_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AffSymTensor3DVol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_Affine.py b/nipype/interfaces/dtitk/tests/test_auto_Affine.py new file mode 100644 index 0000000000..78d2e6f011 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_Affine.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Affine + + +def test_Affine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr='%s', + copyfile=True, + position=5, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=1, + ), + sampling_xyz=dict( + argstr='%g %g %g', + mandatory=True, + position=3, + usedefault=True, + ), + similarity_metric=dict( + argstr='%s', + mandatory=True, + position=2, + usedefault=True, + ), + ) + inputs = Affine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Affine_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = Affine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py new file mode 100644 index 0000000000..5f3b43153a --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_AffineTask.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import AffineTask + + +def test_AffineTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr='%s', + copyfile=True, + position=5, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=1, + 
), + sampling_xyz=dict( + argstr='%g %g %g', + mandatory=True, + position=3, + usedefault=True, + ), + similarity_metric=dict( + argstr='%s', + mandatory=True, + position=2, + usedefault=True, + ), + ) + inputs = AffineTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AffineTask_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = AffineTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py new file mode 100644 index 0000000000..cbd4efccb2 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThresh.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import BinThresh + + +def test_BinThresh_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + inside_value=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr='%g', + mandatory=True, + position=2, + usedefault=True, + ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source='in_file', + name_template='%s_thrbin', + position=1, + ), + outside_value=dict( + argstr='%g', + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr='%g', + mandatory=True, + position=3, + usedefault=True, + ), + ) + inputs = BinThresh.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinThresh_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BinThresh.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py new file mode 100644 index 0000000000..dcd396abe2 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_BinThreshTask.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import BinThreshTask + + +def test_BinThreshTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + inside_value=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + lower_bound=dict( + argstr='%g', + mandatory=True, + position=2, + usedefault=True, + ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source='in_file', + name_template='%s_thrbin', + position=1, + ), + outside_value=dict( + argstr='%g', + mandatory=True, + position=5, + usedefault=True, + ), + upper_bound=dict( + argstr='%g', + mandatory=True, + position=3, + usedefault=True, + ), + ) + inputs = BinThreshTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_BinThreshTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BinThreshTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py new file mode 100644 index 0000000000..b43fcc5e97 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_CommandLineDtitk.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import CommandLineDtitk + + +def test_CommandLineDtitk_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = CommandLineDtitk.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py new file mode 100644 index 0000000000..09fcdc186e --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfm.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import ComposeXfm + + +def test_ComposeXfm_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr='-aff %s', + mandatory=True, + ), + in_df=dict( + argstr='-df %s', + mandatory=True, + ), + out_file=dict( + argstr='-out %s', + genfile=True, + ), + ) + inputs = ComposeXfm.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComposeXfm_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ComposeXfm.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py new file mode 100644 index 0000000000..99c6f6d340 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_ComposeXfmTask.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import ComposeXfmTask + + +def test_ComposeXfmTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aff=dict( + argstr='-aff %s', + mandatory=True, + ), + in_df=dict( + argstr='-df %s', + mandatory=True, + ), + out_file=dict( + argstr='-out %s', + genfile=True, + ), + ) + inputs = ComposeXfmTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComposeXfmTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ComposeXfmTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py new file mode 100644 index 
0000000000..ad532bd631 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_Diffeo.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Diffeo + + +def test_Diffeo_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr='%d', + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr='%s', + position=2, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + position=1, + ), + n_iters=dict( + argstr='%d', + mandatory=True, + position=4, + usedefault=True, + ), + ) + inputs = Diffeo.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Diffeo_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = Diffeo.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py new file mode 100644 index 0000000000..7d1305d384 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoScalarVol.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import DiffeoScalarVol + + +def test_DiffeoScalarVol_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict(argstr='-flip %d %d %d', ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_diffeoxfmd', + ), + resampling_type=dict(argstr='-type %s', ), + target=dict( + argstr='-target %s', + xor=['voxel_size'], + ), + transform=dict( + argstr='-trans %s', + mandatory=True, + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target'], + ), + ) + inputs = DiffeoScalarVol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffeoScalarVol_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DiffeoScalarVol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py new file mode 100644 index 0000000000..b934c56d2b --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoSymTensor3DVol.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import DiffeoSymTensor3DVol + + +def test_DiffeoSymTensor3DVol_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + df=dict( + argstr='-df %s', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict(argstr='-flip %d %d %d', ), + in_file=dict( + 
argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_diffeoxfmd', + ), + reorient=dict( + argstr='-reorient %s', + usedefault=True, + ), + resampling_type=dict(argstr='-type %s', ), + target=dict( + argstr='-target %s', + xor=['voxel_size'], + ), + transform=dict( + argstr='-trans %s', + mandatory=True, + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target'], + ), + ) + inputs = DiffeoSymTensor3DVol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffeoSymTensor3DVol_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DiffeoSymTensor3DVol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py new file mode 100644 index 0000000000..5aea665d4c --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_DiffeoTask.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import DiffeoTask + + +def test_DiffeoTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=5, + usedefault=True, + ), + legacy=dict( + argstr='%d', + mandatory=True, + position=3, + usedefault=True, + ), + mask_file=dict( + argstr='%s', + position=2, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + position=1, + ), + n_iters=dict( + argstr='%d', + mandatory=True, + position=4, + usedefault=True, + ), + ) + inputs = DiffeoTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffeoTask_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = DiffeoTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_Rigid.py b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py new file mode 100644 index 0000000000..ecb7c2d33b --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_Rigid.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Rigid + + +def test_Rigid_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr='%s', + copyfile=True, + position=5, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=1, + ), + sampling_xyz=dict( + argstr='%g %g %g', + mandatory=True, + position=3, + usedefault=True, + ), + similarity_metric=dict( + argstr='%s', + mandatory=True, + position=2, + usedefault=True, + ), + ) + inputs = 
Rigid.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Rigid_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = Rigid.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py new file mode 100644 index 0000000000..c627fda741 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_RigidTask.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import RigidTask + + +def test_RigidTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=0, + ), + ftol=dict( + argstr='%g', + mandatory=True, + position=4, + usedefault=True, + ), + initialize_xfm=dict( + argstr='%s', + copyfile=True, + position=5, + ), + moving_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=1, + ), + sampling_xyz=dict( + argstr='%g %g %g', + mandatory=True, + position=3, + usedefault=True, + ), + similarity_metric=dict( + argstr='%s', + mandatory=True, + position=2, + usedefault=True, + ), + ) + inputs = RigidTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RigidTask_outputs(): + output_map = dict( + out_file=dict(), + out_file_xfm=dict(), + ) + outputs = RigidTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py new file mode 100644 index 0000000000..3574906455 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSp.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SVAdjustVoxSp + + +def test_SVAdjustVoxSp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_avs', + ), + target_file=dict( + argstr='-target %s', + xor=['voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = SVAdjustVoxSp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVAdjustVoxSp_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SVAdjustVoxSp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py 
b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py new file mode 100644 index 0000000000..40a7592c19 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_SVAdjustVoxSpTask.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SVAdjustVoxSpTask + + +def test_SVAdjustVoxSpTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_avs', + ), + target_file=dict( + argstr='-target %s', + xor=['voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = SVAdjustVoxSpTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVAdjustVoxSpTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SVAdjustVoxSpTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResample.py b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py new file mode 100644 index 0000000000..91ca638f22 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResample.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SVResample + + +def test_SVResample_inputs(): + input_map = dict( + align=dict(argstr='-align %s', ), + args=dict(argstr='%s', ), + array_size=dict( + argstr='-size %d %d %d', + xor=['target_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_resampled', + ), + target_file=dict( + argstr='-target %s', + xor=['array_size', 'voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = SVResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVResample_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SVResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py new file mode 100644 index 0000000000..8a7574bfd8 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_SVResampleTask.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SVResampleTask + + +def test_SVResampleTask_inputs(): + input_map = dict( + align=dict(argstr='-align %s', ), + args=dict(argstr='%s', ), + array_size=dict( + argstr='-size %d %d %d', + xor=['target_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_resampled', + ), + target_file=dict( + argstr='-target %s', + xor=['array_size', 'voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = SVResampleTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SVResampleTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SVResampleTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py new file mode 100644 index 0000000000..bda9128369 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustOriginTask.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVAdjustOriginTask + + +def test_TVAdjustOriginTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_avs', + ), + target_file=dict( + argstr='-target %s', + xor=['voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = TVAdjustOriginTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVAdjustOriginTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVAdjustOriginTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py new file mode 100644 index 0000000000..b8ce9039d5 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSp.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVAdjustVoxSp + + +def test_TVAdjustVoxSp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_avs', + ), + target_file=dict( + argstr='-target %s', + xor=['voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = TVAdjustVoxSp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVAdjustVoxSp_outputs(): + output_map = 
dict(out_file=dict(), ) + outputs = TVAdjustVoxSp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py new file mode 100644 index 0000000000..e9ef8137dc --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVAdjustVoxSpTask.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVAdjustVoxSpTask + + +def test_TVAdjustVoxSpTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_avs', + ), + target_file=dict( + argstr='-target %s', + xor=['voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = TVAdjustVoxSpTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVAdjustVoxSpTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVAdjustVoxSpTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResample.py b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py new file mode 100644 index 0000000000..aefafc6430 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResample.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVResample + + +def test_TVResample_inputs(): + input_map = dict( + align=dict(argstr='-align %s', ), + args=dict(argstr='%s', ), + array_size=dict( + argstr='-size %d %d %d', + xor=['target_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict(argstr='-interp %s', ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_resampled', + ), + target_file=dict( + argstr='-target %s', + xor=['array_size', 'voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = TVResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVResample_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py new file mode 100644 index 0000000000..b3c70bb729 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVResampleTask.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO 
NOT EDIT +from __future__ import unicode_literals +from ..utils import TVResampleTask + + +def test_TVResampleTask_inputs(): + input_map = dict( + align=dict(argstr='-align %s', ), + args=dict(argstr='%s', ), + array_size=dict( + argstr='-size %d %d %d', + xor=['target_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict(argstr='-interp %s', ), + origin=dict( + argstr='-origin %g %g %g', + xor=['target_file'], + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_resampled', + ), + target_file=dict( + argstr='-target %s', + xor=['array_size', 'voxel_size', 'origin'], + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target_file'], + ), + ) + inputs = TVResampleTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVResampleTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVResampleTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtool.py b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py new file mode 100644 index 0000000000..2267228631 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtool.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVtool + + +def test_TVtool_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + in_flag=dict(argstr='-%s', ), + out_file=dict( + argstr='-out %s', + genfile=True, + ), + ) + inputs = TVtool.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVtool_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVtool.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py new file mode 100644 index 0000000000..252d7c9d0a --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_TVtoolTask.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TVtoolTask + + +def test_TVtoolTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + in_flag=dict(argstr='-%s', ), + out_file=dict( + argstr='-out %s', + genfile=True, + ), + ) + inputs = TVtoolTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TVtoolTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TVtoolTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py new file mode 100644 index 0000000000..72330737b6 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_affScalarVolTask.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import affScalarVolTask + + +def test_affScalarVolTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformation=dict( + argstr='-deformation %g %g %g %g %g %g', + xor=['transform'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr='-euler %g %g %g', + xor=['transform'], + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_affxfmd', + ), + target=dict( + argstr='-target %s', + xor=['transform'], + ), + transform=dict( + argstr='-trans %s', + xor=['target', 'translation', 'euler', 'deformation'], + ), + translation=dict( + argstr='-translation %g %g %g', + xor=['transform'], + ), + ) + inputs = affScalarVolTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_affScalarVolTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = affScalarVolTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py new file mode 100644 index 0000000000..da8fae25cc --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_affSymTensor3DVolTask.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import affSymTensor3DVolTask + + +def test_affSymTensor3DVolTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformation=dict( + argstr='-deformation %g %g %g %g %g %g', + xor=['transform'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + euler=dict( + argstr='-euler %g %g %g', + xor=['transform'], + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_affxfmd', + ), + reorient=dict( + argstr='-reorient %s', + usedefault=True, + ), + target=dict( + argstr='-target %s', + xor=['transform'], + ), + transform=dict( + argstr='-trans %s', + xor=['target', 'translation', 'euler', 'deformation'], + ), + translation=dict( + argstr='-translation %g %g %g', + xor=['transform'], + ), + ) + inputs = affSymTensor3DVolTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_affSymTensor3DVolTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = affSymTensor3DVolTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py new file mode 100644 index 0000000000..10965b7077 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoScalarVolTask.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import diffeoScalarVolTask + + +def test_diffeoScalarVolTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict(argstr='-flip %d %d %d', ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_diffeoxfmd', + ), + resampling_type=dict(argstr='-type %s', ), + target=dict( + argstr='-target %s', + xor=['voxel_size'], + ), + transform=dict( + argstr='-trans %s', + mandatory=True, + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target'], + ), + ) + inputs = diffeoScalarVolTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_diffeoScalarVolTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = diffeoScalarVolTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py new file mode 100644 index 0000000000..52112735b1 --- /dev/null +++ b/nipype/interfaces/dtitk/tests/test_auto_diffeoSymTensor3DVolTask.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import diffeoSymTensor3DVolTask + + +def test_diffeoSymTensor3DVolTask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + df=dict( + argstr='-df %s', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip=dict(argstr='-flip %d %d %d', ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + interpolation=dict( + argstr='-interp %s', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + keep_extension=True, + name_source='in_file', + name_template='%s_diffeoxfmd', + ), + reorient=dict( + argstr='-reorient %s', + usedefault=True, + ), + resampling_type=dict(argstr='-type %s', ), + target=dict( + argstr='-target %s', + xor=['voxel_size'], + ), + transform=dict( + argstr='-trans %s', + mandatory=True, + ), + voxel_size=dict( + argstr='-vsize %g %g %g', + xor=['target'], + ), + ) + inputs = diffeoSymTensor3DVolTask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_diffeoSymTensor3DVolTask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = diffeoSymTensor3DVolTask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/dtitk/utils.py b/nipype/interfaces/dtitk/utils.py new file mode 100644 index 0000000000..3ed6e61395 --- /dev/null +++ b/nipype/interfaces/dtitk/utils.py @@ -0,0 +1,335 @@ 
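A note on the `test_auto_*` modules above: each is emitted by `tools/checkspecs.py` and follows a single template — record the expected trait metadata in a dict, instantiate the interface's spec, and assert that every recorded key matches the live trait. A minimal sketch of that template follows, using a hypothetical `Toy` interface (the nipype base classes are the real ones imported throughout this patch; `Toy` and its traits exist only for illustration):

# Sketch of the checkspecs test pattern with a made-up Toy interface;
# the real test_auto_* files are generated, never hand-written.
from nipype.interfaces.base import (CommandLine, CommandLineInputSpec,
                                    TraitedSpec, File, traits)


class ToyInputSpec(CommandLineInputSpec):
    in_file = File(argstr='-in %s', mandatory=True)
    scale = traits.Float(1.0, argstr='-scale %g', usedefault=True)


class ToyOutputSpec(TraitedSpec):
    out_file = File()


class Toy(CommandLine):
    _cmd = 'toy'
    input_spec = ToyInputSpec
    output_spec = ToyOutputSpec


def test_Toy_inputs():
    # expected metadata, as checkspecs would have recorded it
    input_map = dict(
        in_file=dict(argstr='-in %s', mandatory=True),
        scale=dict(argstr='-scale %g', usedefault=True),
    )
    inputs = Toy.input_spec()
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value

The generated files enumerate every trait, including the inherited `args`/`environ` pair, so any change to an interface spec surfaces as a test failure — hence the DO NOT EDIT banner. The `utils.py` implementation these dtitk tests exercise follows.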
+# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""DTITK utility interfaces + +DTI-TK developed by Gary Hui Zhang, gary.zhang@ucl.ac.uk +For additional help, visit http://dti-tk.sf.net + +The high-dimensional tensor-based DTI registration algorithm + +Zhang, H., Avants, B.B, Yushkevich, P.A., Woo, J.H., Wang, S., McCluskey, L.H., +Elman, L.B., Melhem, E.R., Gee, J.C., High-dimensional spatial normalization of +diffusion tensor images improves the detection of white matter differences in +amyotrophic lateral sclerosis, IEEE Transactions on Medical Imaging, +26(11):1585-1597, November 2007. PMID: 18041273. + +The original piecewise-affine tensor-based DTI registration algorithm at the +core of DTI-TK + +Zhang, H., Yushkevich, P.A., Alexander, D.C., Gee, J.C., Deformable +registration of diffusion tensor MR images with explicit orientation +optimization, Medical Image Analysis, 10(5):764-785, October 2006. PMID: +16899392. + +""" + +from ..base import TraitedSpec, CommandLineInputSpec, File, traits, isdefined +from ...utils.filemanip import fname_presuffix +from .base import CommandLineDtitk, DTITKRenameMixin +import os + +__docformat__ = 'restructuredtext' + + +class TVAdjustVoxSpInputSpec(CommandLineInputSpec): + in_file = File(desc="tensor volume to modify", exists=True, + mandatory=True, argstr="-in %s") + out_file = File(desc='output path', + argstr="-out %s", name_source='in_file', + name_template='%s_avs', keep_extension=True) + target_file = File(desc='target volume to match', + argstr="-target %s", + xor=['voxel_size', 'origin']) + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz voxel size (superseded by target)', + argstr="-vsize %g %g %g", xor=['target_file']) + origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz origin (superseded by target)', + argstr='-origin %g %g %g', + xor=['target_file']) + + +class TVAdjustVoxSpOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class TVAdjustVoxSp(CommandLineDtitk): + """ + Adjusts the voxel space of a tensor volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.TVAdjustVoxSp() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.target_file = 'im2.nii' + >>> node.cmdline + 'TVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = TVAdjustVoxSpInputSpec + output_spec = TVAdjustVoxSpOutputSpec + _cmd = 'TVAdjustVoxelspace' + + +class SVAdjustVoxSpInputSpec(CommandLineInputSpec): + in_file = File(desc="scalar volume to modify", exists=True, + mandatory=True, argstr="-in %s") + out_file = File(desc='output path', argstr="-out %s", + name_source="in_file", name_template='%s_avs', + keep_extension=True) + target_file = File(desc='target volume to match', + argstr="-target %s", xor=['voxel_size', 'origin']) + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz voxel size (superseded by target)', + argstr="-vsize %g %g %g", xor=['target_file']) + origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz origin (superseded by target)', + argstr='-origin %g %g %g', + xor=['target_file']) + + +class SVAdjustVoxSpOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class SVAdjustVoxSp(CommandLineDtitk): + """ + Adjusts the voxel space of a scalar volume + + Example + ------- + + >>> from nipype.interfaces 
import dtitk + >>> node = dtitk.SVAdjustVoxSp() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.target_file = 'im2.nii' + >>> node.cmdline + 'SVAdjustVoxelspace -in im1.nii -out im1_avs.nii -target im2.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = SVAdjustVoxSpInputSpec + output_spec = SVAdjustVoxSpOutputSpec + _cmd = 'SVAdjustVoxelspace' + + +class TVResampleInputSpec(CommandLineInputSpec): + in_file = File(desc="tensor volume to resample", exists=True, + mandatory=True, argstr="-in %s") + out_file = File(desc='output path', + name_source="in_file", name_template="%s_resampled", + keep_extension=True, argstr="-out %s") + target_file = File(desc='specs read from the target volume', + argstr="-target %s", + xor=['array_size', 'voxel_size', 'origin']) + align = traits.Enum('center', 'origin', argstr="-align %s", + desc='how to align output volume to input volume') + interpolation = traits.Enum('LEI', 'EI', argstr="-interp %s", + desc='Log Euclidean Euclidean Interpolation') + array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), + desc='resampled array size', xor=['target_file'], + argstr="-size %d %d %d") + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='resampled voxel size', xor=['target_file'], + argstr="-vsize %g %g %g") + origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz origin', xor=['target_file'], + argstr='-origin %g %g %g') + + +class TVResampleOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class TVResample(CommandLineDtitk): + """ + Resamples a tensor volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.TVResample() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.target_file = 'im2.nii' + >>> node.cmdline + 'TVResample -in im1.nii -out im1_resampled.nii -target im2.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = TVResampleInputSpec + output_spec = TVResampleOutputSpec + _cmd = 'TVResample' + + +class SVResampleInputSpec(CommandLineInputSpec): + in_file = File(desc="image to resample", exists=True, + mandatory=True, argstr="-in %s") + out_file = File(desc='output path', + name_source="in_file", name_template="%s_resampled", + keep_extension=True, argstr="-out %s") + target_file = File(desc='specs read from the target volume', + argstr="-target %s", + xor=['array_size', 'voxel_size', 'origin']) + align = traits.Enum('center', 'origin', argstr="-align %s", + desc='how to align output volume to input volume') + array_size = traits.Tuple((traits.Int(), traits.Int(), traits.Int()), + desc='resampled array size', xor=['target_file'], + argstr="-size %d %d %d") + voxel_size = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='resampled voxel size', xor=['target_file'], + argstr="-vsize %g %g %g") + origin = traits.Tuple((traits.Float(), traits.Float(), traits.Float()), + desc='xyz origin', xor=['target_file'], + argstr='-origin %g %g %g') + + +class SVResampleOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class SVResample(CommandLineDtitk): + """ + Resamples a scalar volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.SVResample() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.target_file = 'im2.nii' + >>> node.cmdline + 'SVResample -in im1.nii -out im1_resampled.nii -target im2.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = SVResampleInputSpec + output_spec = SVResampleOutputSpec + _cmd = 'SVResample' + + +class 
TVtoolInputSpec(CommandLineInputSpec): + in_file = File(desc="scalar volume to resample", exists=True, + argstr="-in %s", mandatory=True) + '''NOTE: there are a lot more options here; not implementing all of them''' + in_flag = traits.Enum('fa', 'tr', 'ad', 'rd', 'pd', 'rgb', + argstr="-%s", desc='') + out_file = File(argstr="-out %s", genfile=True) + + +class TVtoolOutputSpec(TraitedSpec): + out_file = File() + + +class TVtool(CommandLineDtitk): + """ + Calculates a tensor metric volume from a tensor volume + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.TVtool() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.in_flag = 'fa' + >>> node.cmdline + 'TVtool -in im1.nii -fa -out im1_fa.nii' + >>> node.run() # doctest: +SKIP + """ + input_spec = TVtoolInputSpec + output_spec = TVtoolOutputSpec + _cmd = 'TVtool' + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + out_file = self._gen_filename('out_file') + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name != 'out_file': + return + return fname_presuffix(os.path.basename(self.inputs.in_file), + suffix='_' + self.inputs.in_flag) + + +'''Note: SVTool not implemented at this time''' + + +class BinThreshInputSpec(CommandLineInputSpec): + in_file = File(desc='Image to threshold/binarize', exists=True, + position=0, argstr="%s", mandatory=True) + out_file = File(desc='output path', position=1, argstr="%s", + keep_extension=True, name_source='in_file', + name_template='%s_thrbin') + lower_bound = traits.Float(0.01, usedefault=True, + position=2, argstr="%g", mandatory=True, + desc='lower bound of binarization range') + upper_bound = traits.Float(100, usedefault=True, + position=3, argstr="%g", mandatory=True, + desc='upper bound of binarization range') + inside_value = traits.Float(1, position=4, argstr="%g", usedefault=True, + mandatory=True, desc='value for voxels in ' + 'binarization range') + outside_value = traits.Float(0, position=5, argstr="%g", usedefault=True, + mandatory=True, desc='value for voxels' + 'outside of binarization range') + + +class BinThreshOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class BinThresh(CommandLineDtitk): + """ + Binarizes an image + + Example + ------- + + >>> from nipype.interfaces import dtitk + >>> node = dtitk.BinThresh() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.lower_bound = 0 + >>> node.inputs.upper_bound = 100 + >>> node.inputs.inside_value = 1 + >>> node.inputs.outside_value = 0 + >>> node.cmdline + 'BinaryThresholdImageFilter im1.nii im1_thrbin.nii 0 100 1 0' + >>> node.run() # doctest: +SKIP + """ + + input_spec = BinThreshInputSpec + output_spec = BinThreshOutputSpec + _cmd = 'BinaryThresholdImageFilter' + + +class BinThreshTask(DTITKRenameMixin, BinThresh): + pass + + +class SVAdjustVoxSpTask(DTITKRenameMixin, SVAdjustVoxSp): + pass + + +class SVResampleTask(DTITKRenameMixin, SVResample): + pass + + +class TVAdjustOriginTask(DTITKRenameMixin, TVAdjustVoxSp): + pass + + +class TVAdjustVoxSpTask(DTITKRenameMixin, TVAdjustVoxSp): + pass + + +class TVResampleTask(DTITKRenameMixin, TVResample): + pass + + +class TVtoolTask(DTITKRenameMixin, TVtool): + pass diff --git a/nipype/interfaces/dynamic_slicer.py b/nipype/interfaces/dynamic_slicer.py new file mode 100644 index 0000000000..5d3a3c1899 --- /dev/null +++ b/nipype/interfaces/dynamic_slicer.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# emacs: 
-*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import warnings +import xml.dom.minidom + +from .base import (CommandLine, CommandLineInputSpec, DynamicTraitedSpec, + traits, Undefined, File, isdefined) + + +class SlicerCommandLineInputSpec(DynamicTraitedSpec, CommandLineInputSpec): + module = traits.Str( + desc="name of the Slicer command line module you want to use") + + +class SlicerCommandLine(CommandLine): + """Experimental Slicer wrapper. Work in progress. + + """ + _cmd = "Slicer3" + input_spec = SlicerCommandLineInputSpec + output_spec = DynamicTraitedSpec + + def _grab_xml(self, module): + cmd = CommandLine( + command="Slicer3", + resource_monitor=False, + args="--launch %s --xml" % module) + ret = cmd.run() + if ret.runtime.returncode == 0: + return xml.dom.minidom.parseString(ret.runtime.stdout) + else: + raise Exception(cmd.cmdline + " failed:\n%s" % ret.runtime.stderr) + + def _outputs(self): + base = super(SlicerCommandLine, self)._outputs() + undefined_output_traits = {} + for key in [ + node.getElementsByTagName('name')[0].firstChild.nodeValue + for node in self._outputs_nodes + ]: + base.add_trait(key, File(exists=True)) + undefined_output_traits[key] = Undefined + + base.trait_set(trait_change_notify=False, **undefined_output_traits) + return base + + def __init__(self, module, **inputs): + warnings.warn('slicer is Not fully implemented', RuntimeWarning) + super(SlicerCommandLine, self).__init__( + command="Slicer3 --launch %s " % module, name=module, **inputs) + dom = self._grab_xml(module) + self._outputs_filenames = {} + + self._outputs_nodes = [] + + undefined_traits = {} + + for paramGroup in dom.getElementsByTagName("parameters"): + for param in paramGroup.childNodes: + if param.nodeName in [ + 'label', 'description', '#text', '#comment' + ]: + continue + traitsParams = {} + + name = param.getElementsByTagName('name')[ + 0].firstChild.nodeValue + + longFlagNode = param.getElementsByTagName('longflag') + if longFlagNode: + traitsParams[ + "argstr"] = "--" + longFlagNode[0].firstChild.nodeValue + " " + else: + traitsParams["argstr"] = "--" + name + " " + + argsDict = { + 'file': '%s', + 'integer': "%d", + 'double': "%f", + 'float': "%f", + 'image': "%s", + 'transform': "%s", + 'boolean': '', + 'string-enumeration': '%s', + 'string': "%s" + } + + if param.nodeName.endswith('-vector'): + traitsParams["argstr"] += argsDict[param.nodeName[:-7]] + else: + traitsParams["argstr"] += argsDict[param.nodeName] + + index = param.getElementsByTagName('index') + if index: + traitsParams["position"] = index[0].firstChild.nodeValue + + desc = param.getElementsByTagName('description') + if index: + traitsParams["desc"] = desc[0].firstChild.nodeValue + + name = param.getElementsByTagName('name')[ + 0].firstChild.nodeValue + + typesDict = { + 'integer': traits.Int, + 'double': traits.Float, + 'float': traits.Float, + 'image': File, + 'transform': File, + 'boolean': traits.Bool, + 'string': traits.Str, + 'file': File + } + + if param.nodeName == 'string-enumeration': + type = traits.Enum + values = [ + el.firstChild.nodeValue + for el in param.getElementsByTagName('element') + ] + elif param.nodeName.endswith('-vector'): + type = traits.List + values = [typesDict[param.nodeName[:-7]]] + traitsParams["sep"] = ',' + else: + values = [] + type = typesDict[param.nodeName] + + if param.nodeName in [ + 'file', 'directory', 'image', 
'transform' + ] and param.getElementsByTagName( + 'channel')[0].firstChild.nodeValue == 'output': + self.inputs.add_trait(name, + traits.Either( + traits.Bool, File, + **traitsParams)) + undefined_traits[name] = Undefined + + # traitsParams["exists"] = True + self._outputs_filenames[ + name] = self._gen_filename_from_param(param) + # undefined_output_traits[name] = Undefined + # self._outputs().add_trait(name, File(*values, **traitsParams)) + self._outputs_nodes.append(param) + else: + if param.nodeName in [ + 'file', 'directory', 'image', 'transform' + ]: + traitsParams["exists"] = True + self.inputs.add_trait(name, type(*values, **traitsParams)) + undefined_traits[name] = Undefined + + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + for name in list(undefined_traits.keys()): + _ = getattr(self.inputs, name) + # self._outputs().trait_set(trait_change_notify=False, **undefined_output_traits) + + def _gen_filename(self, name): + if name in self._outputs_filenames: + return os.path.join(os.getcwd(), self._outputs_filenames[name]) + return None + + def _gen_filename_from_param(self, param): + base = param.getElementsByTagName('name')[0].firstChild.nodeValue + fileExtensions = param.getAttribute("fileExtensions") + if fileExtensions: + ext = fileExtensions + else: + ext = { + 'image': '.nii', + 'transform': '.txt', + 'file': '' + }[param.nodeName] + return base + ext + + def _list_outputs(self): + outputs = self.output_spec().get() + for output_node in self._outputs_nodes: + name = output_node.getElementsByTagName('name')[ + 0].firstChild.nodeValue + outputs[name] = getattr(self.inputs, name) + if isdefined(outputs[name]) and isinstance(outputs[name], bool): + if outputs[name]: + outputs[name] = self._gen_filename(name) + else: + outputs[name] = Undefined + return outputs + + def _format_arg(self, name, spec, value): + if name in [ + output_node.getElementsByTagName('name')[0] + .firstChild.nodeValue for output_node in self._outputs_nodes + ]: + if isinstance(value, bool): + fname = self._gen_filename(name) + else: + fname = value + return spec.argstr % fname + return super(SlicerCommandLine, self)._format_arg(name, spec, value) + + +# test = SlicerCommandLine(module="BRAINSFit") +# test.inputs.fixedVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/10_co_COR_3D_IR_PREP.nii" +# test.inputs.movingVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/2_line_bisection.nii" +# test.inputs.outputTransform = True +# test.inputs.transformType = ["Affine"] +# print test.cmdline +# print test.inputs +# print test._outputs() +# ret = test.run() + +# test = SlicerCommandLine(name="BRAINSResample") +# test.inputs.referenceVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/10_co_COR_3D_IR_PREP.nii" +# test.inputs.inputVolume = "/home/filo/workspace/fmri_tumour/data/pilot1/2_line_bisection.nii" +# test.inputs.outputVolume = True +# test.inputs.warpTransform = "/home/filo/workspace/nipype/nipype/interfaces/outputTransform.mat" +# print test.cmdline +# ret = test.run() +# print ret.runtime.stderr +# print ret.runtime.returncode diff --git a/nipype/interfaces/elastix/__init__.py b/nipype/interfaces/elastix/__init__.py new file mode 100644 index 0000000000..9dcdb88c18 --- /dev/null +++ b/nipype/interfaces/elastix/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Top-level namespace for elastix.""" + +from __future__ import absolute_import + 
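Stepping back to the `SlicerCommandLine` wrapper above: it derives its entire input spec at runtime by shelling out to `Slicer3 --launch <module> --xml`, walking the returned DOM, and adding one trait per parameter node, with the node name selecting both the trait type and the `argstr` format. A minimal sketch of that discovery step in isolation, fed a hand-written XML snippet instead of a live Slicer install (element names follow the execution-model schema as used in the code above; the snippet itself is an assumption for illustration):

# Sketch of the XML-to-argstr mapping in SlicerCommandLine.__init__,
# using a canned document rather than `Slicer3 --launch <module> --xml`.
import xml.dom.minidom

SAMPLE = """<executable>
  <parameters>
    <image><name>fixedVolume</name><longflag>fixedVolume</longflag></image>
    <double><name>tolerance</name><longflag>tol</longflag></double>
  </parameters>
</executable>"""

argsDict = {'image': '%s', 'double': '%f'}  # subset of the real table

dom = xml.dom.minidom.parseString(SAMPLE)
for param in dom.getElementsByTagName('parameters')[0].childNodes:
    if param.nodeName in ('#text', '#comment'):
        continue  # skip whitespace/comment nodes between elements
    name = param.getElementsByTagName('name')[0].firstChild.nodeValue
    flag = param.getElementsByTagName('longflag')[0].firstChild.nodeValue
    argstr = '--%s %s' % (flag, argsDict[param.nodeName])
    print(name, '->', argstr)  # fixedVolume -> --fixedVolume %s, etc.

The real wrapper additionally routes parameters whose `<channel>` is `output` into a dynamically built output spec, which is why its `output_spec` is a `DynamicTraitedSpec` rather than a fixed class.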
+from .registration import Registration, ApplyWarp, AnalyzeWarp, PointsWarp +from .utils import EditTransform diff --git a/nipype/interfaces/elastix/base.py b/nipype/interfaces/elastix/base.py new file mode 100644 index 0000000000..b47e1fec17 --- /dev/null +++ b/nipype/interfaces/elastix/base.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The :py:mod:`nipype.interfaces.elastix` provides the interface to +the elastix registration software. + +.. note:: http://elastix.isi.uu.nl/ + + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from ... import logging +from ..base import CommandLineInputSpec, Directory, traits +iflogger = logging.getLogger('nipype.interface') + + +class ElastixBaseInputSpec(CommandLineInputSpec): + output_path = Directory( + './', + exists=True, + mandatory=True, + usedefault=True, + argstr='-out %s', + desc='output directory') + num_threads = traits.Int( + 1, usedefault=True, + argstr='-threads %01d', + nohash=True, + desc='set the maximum number of threads of elastix') diff --git a/nipype/interfaces/elastix/registration.py b/nipype/interfaces/elastix/registration.py new file mode 100644 index 0000000000..7f2565d58c --- /dev/null +++ b/nipype/interfaces/elastix/registration.py @@ -0,0 +1,271 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Interfaces to perform image registrations and to apply the resulting +displacement maps to images and points. + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os.path as op +import re + +from ... 
import logging +from .base import ElastixBaseInputSpec +from ..base import CommandLine, TraitedSpec, File, traits, InputMultiPath + +iflogger = logging.getLogger('nipype.interface') + + +class RegistrationInputSpec(ElastixBaseInputSpec): + fixed_image = File( + exists=True, mandatory=True, argstr='-f %s', desc='fixed image') + moving_image = File( + exists=True, mandatory=True, argstr='-m %s', desc='moving image') + parameters = InputMultiPath( + File(exists=True), + mandatory=True, + argstr='-p %s...', + desc='parameter file, elastix handles 1 or more -p') + fixed_mask = File( + exists=True, argstr='-fMask %s', desc='mask for fixed image') + moving_mask = File( + exists=True, argstr='-mMask %s', desc='mask for moving image') + initial_transform = File( + exists=True, + argstr='-t0 %s', + desc='parameter file for initial transform') + + +class RegistrationOutputSpec(TraitedSpec): + transform = InputMultiPath(File(exists=True), desc='output transform') + warped_file = File(desc='input moving image warped to fixed image') + warped_files = InputMultiPath( + File(exists=False), + desc=('input moving image warped to fixed image at each level')) + warped_files_flags = traits.List( + traits.Bool(False), + desc='flag indicating if warped image was generated') + + +class Registration(CommandLine): + """ + Elastix nonlinear registration interface + + Example + ------- + + >>> from nipype.interfaces.elastix import Registration + >>> reg = Registration() + >>> reg.inputs.fixed_image = 'fixed1.nii' + >>> reg.inputs.moving_image = 'moving1.nii' + >>> reg.inputs.parameters = ['elastix.txt'] + >>> reg.cmdline + 'elastix -f fixed1.nii -m moving1.nii -threads 1 -out ./ -p elastix.txt' + + + """ + + _cmd = 'elastix' + input_spec = RegistrationInputSpec + output_spec = RegistrationOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + + out_dir = op.abspath(self.inputs.output_path) + + regex = re.compile(r'^\((\w+)\s(.+)\)$') + + outputs['transform'] = [] + outputs['warped_files'] = [] + outputs['warped_files_flags'] = [] + + for i, params in enumerate(self.inputs.parameters): + config = {} + + with open(params, 'r') as f: + for line in f.readlines(): + line = line.strip() + if not line.startswith('//') and line: + m = regex.search(line) + if m: + value = self._cast(m.group(2).strip()) + config[m.group(1).strip()] = value + + outputs['transform'].append( + op.join(out_dir, 'TransformParameters.%01d.txt' % i)) + + warped_file = None + if config['WriteResultImage']: + warped_file = op.join(out_dir, 'result.%01d.%s' % + (i, config['ResultImageFormat'])) + + outputs['warped_files'].append(warped_file) + outputs['warped_files_flags'].append(config['WriteResultImage']) + + if outputs['warped_files_flags'][-1]: + outputs['warped_file'] = outputs['warped_files'][-1] + + return outputs + + def _cast(self, val): + if val.startswith('"') and val.endswith('"'): + if val == '"true"': + return True + elif val == '"false"': + return False + else: + return val[1:-1] + + try: + return int(val) + except ValueError: + try: + return float(val) + except ValueError: + return val + + +class ApplyWarpInputSpec(ElastixBaseInputSpec): + transform_file = File( + exists=True, + mandatory=True, + argstr='-tp %s', + desc='transform-parameter file, only 1') + + moving_image = File( + exists=True, + argstr='-in %s', + mandatory=True, + desc='input image to deform') + + +class ApplyWarpOutputSpec(TraitedSpec): + warped_file = File(desc='input moving image warped to fixed image') + + +class ApplyWarp(CommandLine): + """ + 
Use ``transformix`` to apply a transform on an input image. + The transform is specified in the transform-parameter file. + + Example + ------- + + >>> from nipype.interfaces.elastix import ApplyWarp + >>> reg = ApplyWarp() + >>> reg.inputs.moving_image = 'moving1.nii' + >>> reg.inputs.transform_file = 'TransformParameters.0.txt' + >>> reg.cmdline + 'transformix -in moving1.nii -threads 1 -out ./ -tp TransformParameters.0.txt' + + + """ + + _cmd = 'transformix' + input_spec = ApplyWarpInputSpec + output_spec = ApplyWarpOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + out_dir = op.abspath(self.inputs.output_path) + outputs['warped_file'] = op.join(out_dir, 'result.nii.gz') + return outputs + + +class AnalyzeWarpInputSpec(ElastixBaseInputSpec): + transform_file = File( + exists=True, + mandatory=True, + argstr='-tp %s', + desc='transform-parameter file, only 1') + + +class AnalyzeWarpOutputSpec(TraitedSpec): + disp_field = File(desc='displacements field') + jacdet_map = File(desc='det(Jacobian) map') + jacmat_map = File(desc='Jacobian matrix map') + + +class AnalyzeWarp(CommandLine): + """ + Use transformix to get details from the input transform (generate + the corresponding deformation field, generate the determinant of the + Jacobian map or the Jacobian map itself) + + Example + ------- + + >>> from nipype.interfaces.elastix import AnalyzeWarp + >>> reg = AnalyzeWarp() + >>> reg.inputs.transform_file = 'TransformParameters.0.txt' + >>> reg.cmdline + 'transformix -def all -jac all -jacmat all -threads 1 -out ./ -tp TransformParameters.0.txt' + + + """ + + _cmd = 'transformix -def all -jac all -jacmat all' + input_spec = AnalyzeWarpInputSpec + output_spec = AnalyzeWarpOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + out_dir = op.abspath(self.inputs.output_path) + outputs['disp_field'] = op.join(out_dir, 'deformationField.nii.gz') + outputs['jacdet_map'] = op.join(out_dir, 'spatialJacobian.nii.gz') + outputs['jacmat_map'] = op.join(out_dir, 'fullSpatialJacobian.nii.gz') + return outputs + + +class PointsWarpInputSpec(ElastixBaseInputSpec): + points_file = File( + exists=True, + argstr='-def %s', + mandatory=True, + desc='input points (accepts .vtk triangular meshes).') + transform_file = File( + exists=True, + mandatory=True, + argstr='-tp %s', + desc='transform-parameter file, only 1') + + +class PointsWarpOutputSpec(TraitedSpec): + warped_file = File(desc='input points displaced in fixed image domain') + + +class PointsWarp(CommandLine): + """Use ``transformix`` to apply a transform on an input point set. + The transform is specified in the transform-parameter file. 
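+    The displaced points are written by ``transformix`` to
+    ``outputpoints.<ext>`` in ``output_path``, where ``<ext>`` is taken from
+    ``points_file`` (this is what ``_list_outputs`` below assumes).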
+ + Example + ------- + + >>> from nipype.interfaces.elastix import PointsWarp + >>> reg = PointsWarp() + >>> reg.inputs.points_file = 'surf1.vtk' + >>> reg.inputs.transform_file = 'TransformParameters.0.txt' + >>> reg.cmdline + 'transformix -threads 1 -out ./ -def surf1.vtk -tp TransformParameters.0.txt' + + + """ + + _cmd = 'transformix' + input_spec = PointsWarpInputSpec + output_spec = PointsWarpOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + out_dir = op.abspath(self.inputs.output_path) + + fname, ext = op.splitext(op.basename(self.inputs.points_file)) + + outputs['warped_file'] = op.join(out_dir, 'outputpoints%s' % ext) + return outputs diff --git a/nipype/interfaces/elastix/tests/__init__.py b/nipype/interfaces/elastix/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/elastix/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py new file mode 100644 index 0000000000..cb486b8487 --- /dev/null +++ b/nipype/interfaces/elastix/tests/test_auto_AnalyzeWarp.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import AnalyzeWarp + + +def test_AnalyzeWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + argstr='-threads %01d', + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr='-out %s', + mandatory=True, + usedefault=True, + ), + transform_file=dict( + argstr='-tp %s', + mandatory=True, + ), + ) + inputs = AnalyzeWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AnalyzeWarp_outputs(): + output_map = dict( + disp_field=dict(), + jacdet_map=dict(), + jacmat_map=dict(), + ) + outputs = AnalyzeWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py new file mode 100644 index 0000000000..16f2b54079 --- /dev/null +++ b/nipype/interfaces/elastix/tests/test_auto_ApplyWarp.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import ApplyWarp + + +def test_ApplyWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + moving_image=dict( + argstr='-in %s', + mandatory=True, + ), + num_threads=dict( + argstr='-threads %01d', + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr='-out %s', + mandatory=True, + usedefault=True, + ), + transform_file=dict( + argstr='-tp %s', + mandatory=True, + ), + ) + inputs = ApplyWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyWarp_outputs(): + output_map = dict(warped_file=dict(), ) + outputs = ApplyWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/elastix/tests/test_auto_EditTransform.py b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py new file mode 100644 index 0000000000..ef227da7f8 --- /dev/null +++ b/nipype/interfaces/elastix/tests/test_auto_EditTransform.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import EditTransform + + +def test_EditTransform_inputs(): + input_map = dict( + interpolation=dict( + argstr='FinalBSplineInterpolationOrder', + usedefault=True, + ), + output_file=dict(), + output_format=dict(argstr='ResultImageFormat', ), + output_type=dict(argstr='ResultImagePixelType', ), + reference_image=dict(), + transform_file=dict(mandatory=True, ), + ) + inputs = EditTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EditTransform_outputs(): + output_map = dict(output_file=dict(), ) + outputs = EditTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py new file mode 100644 index 0000000000..c93a0526c1 --- /dev/null +++ b/nipype/interfaces/elastix/tests/test_auto_PointsWarp.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import PointsWarp + + +def test_PointsWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict( + argstr='-threads %01d', + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr='-out %s', + mandatory=True, + usedefault=True, + ), + points_file=dict( + argstr='-def %s', + mandatory=True, + ), + transform_file=dict( + argstr='-tp %s', + mandatory=True, + ), + ) + inputs = PointsWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PointsWarp_outputs(): + output_map = dict(warped_file=dict(), ) + outputs = PointsWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/elastix/tests/test_auto_Registration.py b/nipype/interfaces/elastix/tests/test_auto_Registration.py new file mode 100644 index 0000000000..4e774ab036 --- /dev/null +++ b/nipype/interfaces/elastix/tests/test_auto_Registration.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Registration + + +def test_Registration_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_image=dict( + argstr='-f %s', + mandatory=True, + ), + fixed_mask=dict(argstr='-fMask %s', ), + initial_transform=dict(argstr='-t0 %s', ), + moving_image=dict( + argstr='-m %s', + mandatory=True, + ), + moving_mask=dict(argstr='-mMask %s', ), + num_threads=dict( + argstr='-threads %01d', + nohash=True, + usedefault=True, + ), + output_path=dict( + argstr='-out %s', + mandatory=True, + usedefault=True, + ), + parameters=dict( + argstr='-p %s...', + mandatory=True, + ), + ) + 
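+    # each metadata entry declared above must match the corresponding trait
+    # metadata on the interface (these tests are regenerated by checkspecs)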
+    inputs = Registration.input_spec()
+
+    for key, metadata in list(input_map.items()):
+        for metakey, value in list(metadata.items()):
+            assert getattr(inputs.traits()[key], metakey) == value
+def test_Registration_outputs():
+    output_map = dict(
+        transform=dict(),
+        warped_file=dict(),
+        warped_files=dict(),
+        warped_files_flags=dict(),
+    )
+    outputs = Registration.output_spec()
+
+    for key, metadata in list(output_map.items()):
+        for metakey, value in list(metadata.items()):
+            assert getattr(outputs.traits()[key], metakey) == value
diff --git a/nipype/interfaces/elastix/utils.py b/nipype/interfaces/elastix/utils.py
new file mode 100644
index 0000000000..505115b05a
--- /dev/null
+++ b/nipype/interfaces/elastix/utils.py
@@ -0,0 +1,173 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Generic interfaces to manipulate registration parameters files, including
+transform files (to configure warpings)
+
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import open
+
+import os.path as op
+
+from ... import logging
+from ..base import (BaseInterface, BaseInterfaceInputSpec, isdefined,
+                    TraitedSpec, File, traits)
+iflogger = logging.getLogger('nipype.interface')
+
+
+class EditTransformInputSpec(BaseInterfaceInputSpec):
+    transform_file = File(
+        exists=True, mandatory=True, desc='transform-parameter file, only 1')
+    reference_image = File(
+        exists=True,
+        desc=('set a new reference image to change the '
+              'target coordinate system.'))
+    interpolation = traits.Enum(
+        'cubic',
+        'linear',
+        'nearest',
+        usedefault=True,
+        argstr='FinalBSplineInterpolationOrder',
+        desc='set a new interpolator for transformation')
+
+    output_type = traits.Enum(
+        'float',
+        'unsigned char',
+        'unsigned short',
+        'short',
+        'unsigned long',
+        'long',
+        'double',
+        argstr='ResultImagePixelType',
+        desc='set a new output pixel type for resampled images')
+    output_format = traits.Enum(
+        'nii.gz',
+        'nii',
+        'mhd',
+        'hdr',
+        'vtk',
+        argstr='ResultImageFormat',
+        desc='set a new image format for resampled images')
+    output_file = File(desc='the filename for the resulting transform file')
+
+
+class EditTransformOutputSpec(TraitedSpec):
+    output_file = File(exists=True, desc='output transform file')
+
+
+class EditTransform(BaseInterface):
+    """
+    Manipulates an existing transform file generated with elastix
+
+    Example
+    -------
+
+    >>> from nipype.interfaces.elastix import EditTransform
+    >>> tfm = EditTransform()
+    >>> tfm.inputs.transform_file = 'TransformParameters.0.txt' # doctest: +SKIP
+    >>> tfm.inputs.reference_image = 'fixed1.nii' # doctest: +SKIP
+    >>> tfm.inputs.output_type = 'unsigned char'
+    >>> tfm.run() # doctest: +SKIP
+
+    """
+
+    input_spec = EditTransformInputSpec
+    output_spec = EditTransformOutputSpec
+    _out_file = ''
+    # each transform-parameter entry looks like (Key "value"); the named
+    # group 'entry' captures the key and opening quote, group 3 the closing
+    _pattern = '\((?P<entry>%s\s\"?)([-\.\s\w]+)(\"?\))'
+
+    _interp = {'nearest': 0, 'linear': 1, 'cubic': 3}
+
+    def _run_interface(self, runtime):
+        import re
+        import nibabel as nb
+        import numpy as np
+
+        contents = ''
+
+        with open(self.inputs.transform_file, 'r') as f:
+            contents = f.read()
+
+        if isdefined(self.inputs.output_type):
+            p = re.compile((self._pattern %
+                            'ResultImagePixelType').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % self.inputs.output_type
+            contents = p.sub(rep, contents)
+
+        if isdefined(self.inputs.output_format):
+            p = re.compile(
+                (self._pattern % 'ResultImageFormat').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % self.inputs.output_format
+            contents = p.sub(rep, contents)
+
+        if isdefined(self.inputs.interpolation):
+            p = re.compile(
+                (self._pattern %
+                 'FinalBSplineInterpolationOrder').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % self._interp[self.inputs.interpolation]
+            contents = p.sub(rep, contents)
+
+        if isdefined(self.inputs.reference_image):
+            im = nb.load(self.inputs.reference_image)
+
+            if len(im.header.get_zooms()) == 4:
+                im = nb.func.four_to_three(im)[0]
+
+            size = ' '.join(["%01d" % s for s in im.shape])
+            p = re.compile((self._pattern % 'Size').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % size
+            contents = p.sub(rep, contents)
+
+            index = ' '.join(["0" for s in im.shape])
+            p = re.compile((self._pattern % 'Index').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % index
+            contents = p.sub(rep, contents)
+
+            spacing = ' '.join(["%0.4f" % f for f in im.header.get_zooms()])
+            p = re.compile((self._pattern % 'Spacing').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % spacing
+            contents = p.sub(rep, contents)
+
+            itkmat = np.eye(4)
+            itkmat[0, 0] = -1
+            itkmat[1, 1] = -1
+
+            affine = np.dot(itkmat, im.affine)
+            dirs = ' '.join(
+                ['%0.4f' % f for f in affine[0:3, 0:3].reshape(-1)])
+            orig = ' '.join(['%0.4f' % f for f in affine[0:3, 3].reshape(-1)])
+
+            # p = re.compile((self._pattern % 'Direction').decode('string-escape'))
+            # rep = '(\g<entry>%s\g<3>' % dirs
+            # contents = p.sub(rep, contents)
+
+            p = re.compile((self._pattern % 'Origin').decode('string-escape'))
+            rep = '(\g<entry>%s\g<3>' % orig
+            contents = p.sub(rep, contents)
+
+        with open(self._get_outfile(), 'w') as of:
+            of.write(contents)
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['output_file'] = getattr(self, '_out_file')
+        return outputs
+
+    def _get_outfile(self):
+        val = getattr(self, '_out_file')
+        if val is not None and val != '':
+            return val
+
+        if isdefined(self.inputs.output_file):
+            setattr(self, '_out_file', self.inputs.output_file)
+            return self.inputs.output_file
+
+        out_file = op.abspath(op.basename(self.inputs.transform_file))
+        setattr(self, '_out_file', out_file)
+        return out_file
diff --git a/nipype/interfaces/freesurfer/__init__.py b/nipype/interfaces/freesurfer/__init__.py
new file mode 100644
index 0000000000..44c939706e
--- /dev/null
+++ b/nipype/interfaces/freesurfer/__init__.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Top-level namespace for freesurfer."""
+
+from .base import Info, FSCommand, no_freesurfer
+from .preprocess import (
+    ParseDICOMDir, UnpackSDICOMDir, MRIConvert, Resample, ReconAll, BBRegister,
+    ApplyVolTransform, Smooth, DICOMConvert, RobustRegister, FitMSParams,
+    SynthesizeFLASH, MNIBiasCorrection, WatershedSkullStrip, Normalize,
+    CANormalize, CARegister, CALabel, MRIsCALabel, SegmentCC, SegmentWM,
+    EditWMwithAseg, ConcatenateLTA)
+from .model import (MRISPreproc, MRISPreprocReconAll, GLMFit, OneSampleTTest,
+                    Binarize, Concatenate, SegStats, SegStatsReconAll,
+                    Label2Vol, MS_LDA, Label2Label, Label2Annot,
+                    SphericalAverage)
+from .utils import (
+    SampleToSurface, SurfaceSmooth, SurfaceTransform, Surface2VolTransform,
+    SurfaceSnapshots, ApplyMask, MRIsConvert, MRITessellate, MRIPretess,
+    MRIMarchingCubes, SmoothTessellation, MakeAverageSubject,
+    ExtractMainComponent, Tkregister2, AddXFormToHeader,
+    CheckTalairachAlignment, TalairachAVI,
TalairachQC, RemoveNeck, MRIFill, + MRIsInflate, Sphere, FixTopology, EulerNumber, RemoveIntersection, + MakeSurfaces, Curvature, CurvatureStats, Jacobian, MRIsCalc, VolumeMask, + ParcellationStats, Contrast, RelabelHypointensities, Aparc2Aseg, Apas2Aseg, + MRIsExpand, MRIsCombine) +from .longitudinal import (RobustTemplate, FuseSegmentations) +from .registration import (MPRtoMNI305, RegisterAVItoTalairach, EMRegister, + Register, Paint, MRICoreg) diff --git a/nipype/interfaces/freesurfer/base.py b/nipype/interfaces/freesurfer/base.py new file mode 100644 index 0000000000..cda527a5ea --- /dev/null +++ b/nipype/interfaces/freesurfer/base.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The freesurfer module provides basic functions for interfacing with +freesurfer tools. + +Currently these tools are supported: + + * Dicom2Nifti: using mri_convert + * Resample: using mri_convert + +Examples +-------- +See the docstrings for the individual classes for 'working' examples. + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open, object, str + +import os + +from ... import LooseVersion +from ...utils.filemanip import fname_presuffix +from ..base import (CommandLine, Directory, CommandLineInputSpec, isdefined, + traits, TraitedSpec, File, PackageInfo) + +__docformat__ = 'restructuredtext' + + +class Info(PackageInfo): + """ Freesurfer subject directory and version information. + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import Info + >>> Info.version() # doctest: +SKIP + >>> Info.subjectsdir() # doctest: +SKIP + + """ + if os.getenv('FREESURFER_HOME'): + version_file = os.path.join( + os.getenv('FREESURFER_HOME'), 'build-stamp.txt') + + @staticmethod + def parse_version(raw_info): + return raw_info.splitlines()[0] + + @classmethod + def looseversion(cls): + """ Return a comparable version object + + If no version found, use LooseVersion('0.0.0') + """ + ver = cls.version() + if ver is None: + return LooseVersion('0.0.0') + + vinfo = ver.rstrip().split('-') + try: + int(vinfo[-1], 16) + except ValueError: + githash = '' + else: + githash = '.' + vinfo[-1] + + # As of FreeSurfer v6.0.0, the final component is a githash + if githash: + if vinfo[3] == 'dev': + # This will need updating when v6.0.1 comes out + vstr = '6.0.0-dev' + githash + elif vinfo[5][0] == 'v': + vstr = vinfo[5][1:] + else: + raise RuntimeError('Unknown version string: ' + ver) + # Retain pre-6.0.0 heuristics + elif 'dev' in ver: + vstr = vinfo[-1] + '-dev' + else: + vstr = ver.rstrip().split('-v')[-1] + + return LooseVersion(vstr) + + @classmethod + def subjectsdir(cls): + """Check the global SUBJECTS_DIR + + Parameters + ---------- + + subjects_dir : string + The system defined subjects directory + + Returns + ------- + + subject_dir : string + Represents the current environment setting of SUBJECTS_DIR + + """ + if cls.version(): + return os.environ['SUBJECTS_DIR'] + return None + + +class FSTraitedSpec(CommandLineInputSpec): + subjects_dir = Directory(exists=True, desc='subjects directory') + + +class FSCommand(CommandLine): + """General support for FreeSurfer commands. + + Every FS command accepts 'subjects_dir' input. 
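+
+    A minimal sketch of the resulting environment handling (the command and
+    path here are illustrative only):
+
+    >>> from nipype.interfaces.freesurfer.base import FSCommand
+    >>> cmd = FSCommand(command='mri_convert', subjects_dir='/tmp') # doctest: +SKIP
+    >>> cmd.inputs.environ['SUBJECTS_DIR'] # doctest: +SKIP
+    '/tmp'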
+ """ + + input_spec = FSTraitedSpec + + _subjects_dir = None + + def __init__(self, **inputs): + super(FSCommand, self).__init__(**inputs) + self.inputs.on_trait_change(self._subjects_dir_update, 'subjects_dir') + if not self._subjects_dir: + self._subjects_dir = Info.subjectsdir() + if not isdefined(self.inputs.subjects_dir) and self._subjects_dir: + self.inputs.subjects_dir = self._subjects_dir + self._subjects_dir_update() + + def _subjects_dir_update(self): + if self.inputs.subjects_dir: + self.inputs.environ.update({ + 'SUBJECTS_DIR': self.inputs.subjects_dir + }) + + @classmethod + def set_default_subjects_dir(cls, subjects_dir): + cls._subjects_dir = subjects_dir + + def run(self, **inputs): + if 'subjects_dir' in inputs: + self.inputs.subjects_dir = inputs['subjects_dir'] + self._subjects_dir_update() + return super(FSCommand, self).run(**inputs) + + def _gen_fname(self, + basename, + fname=None, + cwd=None, + suffix='_fs', + use_ext=True): + '''Define a generic mapping for a single outfile + + The filename is potentially autogenerated by suffixing inputs.infile + + Parameters + ---------- + basename : string (required) + filename to base the new filename on + fname : string + if not None, just use this fname + cwd : string + prefix paths with cwd, otherwise os.getcwd() + suffix : string + default suffix + ''' + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + fname = fname_presuffix( + basename, suffix=suffix, use_ext=use_ext, newpath=cwd) + return fname + + @property + def version(self): + ver = Info.looseversion() + if ver > LooseVersion("0.0.0"): + return ver.vstring + + +class FSSurfaceCommand(FSCommand): + """Support for FreeSurfer surface-related functions. + For some functions, if the output file is not specified starting with 'lh.' + or 'rh.', FreeSurfer prepends the prefix from the input file to the output + filename. Output out_file must be adjusted to accommodate this. By + including the full path in the filename, we can also avoid this behavior. + """ + + def _get_filecopy_info(self): + self._normalize_filenames() + return super(FSSurfaceCommand, self)._get_filecopy_info() + + def _normalize_filenames(self): + """Filename normalization routine to perform only when run in Node + context + """ + pass + + @staticmethod + def _associated_file(in_file, out_name): + """Based on MRIsBuildFileName in freesurfer/utils/mrisurf.c + + If no path information is provided for out_name, use path and + hemisphere (if also unspecified) from in_file to determine the path + of the associated file. + Use in_file prefix to indicate hemisphere for out_name, rather than + inspecting the surface data structure. 
+ """ + path, base = os.path.split(out_name) + if path == '': + path, in_file = os.path.split(in_file) + hemis = ('lh.', 'rh.') + if in_file[:3] in hemis and base[:3] not in hemis: + base = in_file[:3] + base + return os.path.join(path, base) + + +class FSScriptCommand(FSCommand): + """ Support for Freesurfer script commands with log terminal_output + """ + _terminal_output = 'file' + _always_run = False + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['log_file'] = os.path.abspath('output.nipype') + return outputs + + +class FSScriptOutputSpec(TraitedSpec): + log_file = File( + 'output.nipype', usedefault=True, exists=True, desc="The output log") + + +class FSTraitedSpecOpenMP(FSTraitedSpec): + num_threads = traits.Int(desc='allows for specifying more threads') + + +class FSCommandOpenMP(FSCommand): + """Support for FS commands that utilize OpenMP + + Sets the environment variable 'OMP_NUM_THREADS' to the number + of threads specified by the input num_threads. + """ + + input_spec = FSTraitedSpecOpenMP + + _num_threads = None + + def __init__(self, **inputs): + super(FSCommandOpenMP, self).__init__(**inputs) + self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + if not self._num_threads: + self._num_threads = os.environ.get('OMP_NUM_THREADS', None) + if not self._num_threads: + self._num_threads = os.environ.get('NSLOTS', None) + if not isdefined(self.inputs.num_threads) and self._num_threads: + self.inputs.num_threads = int(self._num_threads) + self._num_threads_update() + + def _num_threads_update(self): + if self.inputs.num_threads: + self.inputs.environ.update({ + 'OMP_NUM_THREADS': + str(self.inputs.num_threads) + }) + + def run(self, **inputs): + if 'num_threads' in inputs: + self.inputs.num_threads = inputs['num_threads'] + self._num_threads_update() + return super(FSCommandOpenMP, self).run(**inputs) + + +def no_freesurfer(): + """Checks if FreeSurfer is NOT installed + used with skipif to skip tests that will + fail if FreeSurfer is not installed""" + + if Info.version() is None: + return True + else: + return False diff --git a/nipype/interfaces/freesurfer/longitudinal.py b/nipype/interfaces/freesurfer/longitudinal.py new file mode 100644 index 0000000000..1d982a7a44 --- /dev/null +++ b/nipype/interfaces/freesurfer/longitudinal.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provides interfaces to various longitudinal commands provided by freesurfer +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ... 
import logging +from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, + isdefined) +from .base import (FSCommand, FSTraitedSpec, FSCommandOpenMP, + FSTraitedSpecOpenMP) + +__docformat__ = 'restructuredtext' +iflogger = logging.getLogger('nipype.interface') + + +class RobustTemplateInputSpec(FSTraitedSpecOpenMP): + # required + in_files = InputMultiPath( + File(exists=True), + mandatory=True, + argstr='--mov %s', + desc='input movable volumes to be aligned to common mean/median ' + 'template') + out_file = File( + 'mri_robust_template_out.mgz', + mandatory=True, + usedefault=True, + argstr='--template %s', + desc='output template volume (final mean/median image)') + auto_detect_sensitivity = traits.Bool( + argstr='--satit', + xor=['outlier_sensitivity'], + mandatory=True, + desc='auto-detect good sensitivity (recommended for head or full ' + 'brain scans)') + outlier_sensitivity = traits.Float( + argstr='--sat %.4f', + xor=['auto_detect_sensitivity'], + mandatory=True, + desc='set outlier sensitivity manually (e.g. "--sat 4.685" ). Higher ' + 'values mean less sensitivity.') + # optional + transform_outputs = traits.Either( + InputMultiPath(File(exists=False)), + traits.Bool, + argstr='--lta %s', + desc='output xforms to template (for each input)') + intensity_scaling = traits.Bool( + default_value=False, + argstr='--iscale', + desc='allow also intensity scaling (default off)') + scaled_intensity_outputs = traits.Either( + InputMultiPath(File(exists=False)), + traits.Bool, + argstr='--iscaleout %s', + desc='final intensity scales (will activate --iscale)') + subsample_threshold = traits.Int( + argstr='--subsample %d', + desc='subsample if dim > # on all axes (default no subs.)') + average_metric = traits.Enum( + 'median', + 'mean', + argstr='--average %d', + desc='construct template from: 0 Mean, 1 Median (default)') + initial_timepoint = traits.Int( + argstr='--inittp %d', + desc='use TP# for spacial init (default random), 0: no init') + fixed_timepoint = traits.Bool( + default_value=False, + argstr='--fixtp', + desc='map everthing to init TP# (init TP is not resampled)') + no_iteration = traits.Bool( + default_value=False, + argstr='--noit', + desc='do not iterate, just create first template') + initial_transforms = InputMultiPath( + File(exists=True), + argstr='--ixforms %s', + desc='use initial transforms (lta) on source') + in_intensity_scales = InputMultiPath( + File(exists=True), + argstr='--iscalein %s', + desc='use initial intensity scales') + + +class RobustTemplateOutputSpec(TraitedSpec): + out_file = File( + exists=True, desc='output template volume (final mean/median image)') + transform_outputs = OutputMultiPath( + File(exists=True), desc="output xform files from moving to template") + scaled_intensity_outputs = OutputMultiPath( + File(exists=True), desc="output final intensity scales") + + +class RobustTemplate(FSCommandOpenMP): + """ construct an unbiased robust template for longitudinal volumes + + Examples + -------- + >>> from nipype.interfaces.freesurfer import RobustTemplate + >>> template = RobustTemplate() + >>> template.inputs.in_files = ['structural.nii', 'functional.nii'] + >>> template.inputs.auto_detect_sensitivity = True + >>> template.inputs.average_metric = 'mean' + >>> template.inputs.initial_timepoint = 1 + >>> template.inputs.fixed_timepoint = True + >>> template.inputs.no_iteration = True + >>> template.inputs.subsample_threshold = 200 + >>> template.cmdline #doctest: + 'mri_robust_template --satit --average 0 --fixtp --mov 
structural.nii functional.nii --inittp 1 --noit --template mri_robust_template_out.mgz --subsample 200' + >>> template.inputs.out_file = 'T1.nii' + >>> template.cmdline #doctest: + 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --subsample 200' + + >>> template.inputs.transform_outputs = ['structural.lta', + ... 'functional.lta'] + >>> template.inputs.scaled_intensity_outputs = ['structural-iscale.txt', + ... 'functional-iscale.txt'] + >>> template.cmdline #doctest: +ELLIPSIS + 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../structural-iscale.txt .../functional-iscale.txt --subsample 200 --lta .../structural.lta .../functional.lta' + + >>> template.inputs.transform_outputs = True + >>> template.inputs.scaled_intensity_outputs = True + >>> template.cmdline #doctest: +ELLIPSIS + 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../is1.txt .../is2.txt --subsample 200 --lta .../tp1.lta .../tp2.lta' + + >>> template.run() #doctest: +SKIP + + References + ---------- + [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_robust_template] + + """ + + _cmd = 'mri_robust_template' + input_spec = RobustTemplateInputSpec + output_spec = RobustTemplateOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'average_metric': + # return enumeration value + return spec.argstr % {"mean": 0, "median": 1}[value] + if name in ('transform_outputs', 'scaled_intensity_outputs'): + value = self._list_outputs()[name] + return super(RobustTemplate, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + n_files = len(self.inputs.in_files) + fmt = '{}{:02d}.{}' if n_files > 9 else '{}{:d}.{}' + if isdefined(self.inputs.transform_outputs): + fnames = self.inputs.transform_outputs + if fnames is True: + fnames = [ + fmt.format('tp', i + 1, 'lta') for i in range(n_files) + ] + outputs['transform_outputs'] = [os.path.abspath(x) for x in fnames] + if isdefined(self.inputs.scaled_intensity_outputs): + fnames = self.inputs.scaled_intensity_outputs + if fnames is True: + fnames = [ + fmt.format('is', i + 1, 'txt') for i in range(n_files) + ] + outputs['scaled_intensity_outputs'] = [ + os.path.abspath(x) for x in fnames + ] + return outputs + + +class FuseSegmentationsInputSpec(FSTraitedSpec): + # required + subject_id = traits.String( + argstr='%s', position=-3, desc="subject_id being processed") + timepoints = InputMultiPath( + traits.String(), + mandatory=True, + argstr='%s', + position=-2, + desc='subject_ids or timepoints to be processed') + out_file = File( + exists=False, + mandatory=True, + position=-1, + desc="output fused segmentation file") + in_segmentations = InputMultiPath( + File(exists=True), + argstr="-a %s", + mandatory=True, + desc="name of aseg file to use (default: aseg.mgz) \ + must include the aseg files for all the given timepoints") + in_segmentations_noCC = InputMultiPath( + File(exists=True), + argstr="-c %s", + mandatory=True, + desc="name of aseg file w/o CC labels (default: aseg.auto_noCCseg.mgz) \ + must include the corresponding file for all the given timepoints") + in_norms = InputMultiPath( + File(exists=True), + argstr="-n %s", + mandatory=True, + desc="-n - name of norm file to use (default: norm.mgs) \ + must include the 
corresponding norm file for all given timepoints \ + as well as for the current subject") + + +class FuseSegmentationsOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="output fused segmentation file") + + +class FuseSegmentations(FSCommand): + """ fuse segmentations together from multiple timepoints + + Examples + -------- + >>> from nipype.interfaces.freesurfer import FuseSegmentations + >>> fuse = FuseSegmentations() + >>> fuse.inputs.subject_id = 'tp.long.A.template' + >>> fuse.inputs.timepoints = ['tp1', 'tp2'] + >>> fuse.inputs.out_file = 'aseg.fused.mgz' + >>> fuse.inputs.in_segmentations = ['aseg.mgz', 'aseg.mgz'] + >>> fuse.inputs.in_segmentations_noCC = ['aseg.mgz', 'aseg.mgz'] + >>> fuse.inputs.in_norms = ['norm.mgz', 'norm.mgz', 'norm.mgz'] + >>> fuse.cmdline + 'mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c aseg.mgz tp.long.A.template tp1 tp2' + """ + + _cmd = 'mri_fuse_segmentations' + input_spec = FuseSegmentationsInputSpec + output_spec = FuseSegmentationsOutputSpec + + def _format_arg(self, name, spec, value): + if name in ('in_segmentations', 'in_segmentations_noCC', 'in_norms'): + # return enumeration value + return spec.argstr % os.path.basename(value[0]) + return super(FuseSegmentations, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs diff --git a/nipype/interfaces/freesurfer/model.py b/nipype/interfaces/freesurfer/model.py new file mode 100644 index 0000000000..58d168e2d7 --- /dev/null +++ b/nipype/interfaces/freesurfer/model.py @@ -0,0 +1,1646 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The freesurfer module provides basic functions for interfacing with + freesurfer tools. 
+""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from ...utils.filemanip import fname_presuffix, split_filename +from ..base import (TraitedSpec, File, traits, InputMultiPath, OutputMultiPath, + Directory, isdefined) +from .base import FSCommand, FSTraitedSpec +from .utils import copy2subjdir + +__docformat__ = 'restructuredtext' + + +class MRISPreprocInputSpec(FSTraitedSpec): + out_file = File(argstr='--out %s', genfile=True, desc='output filename') + target = traits.Str( + argstr='--target %s', mandatory=True, desc='target subject name') + hemi = traits.Enum( + 'lh', + 'rh', + argstr='--hemi %s', + mandatory=True, + desc='hemisphere for source and target') + surf_measure = traits.Str( + argstr='--meas %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + desc='Use subject/surf/hemi.surf_measure as input') + surf_area = traits.Str( + argstr='--area %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + desc= + 'Extract vertex area from subject/surf/hemi.surfname to use as input.') + subjects = traits.List( + argstr='--s %s...', + xor=('subjects', 'fsgd_file', 'subject_file'), + desc='subjects from who measures are calculated') + fsgd_file = File( + exists=True, + argstr='--fsgd %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + desc='specify subjects using fsgd file') + subject_file = File( + exists=True, + argstr='--f %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + desc='file specifying subjects separated by white space') + surf_measure_file = InputMultiPath( + File(exists=True), + argstr='--is %s...', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + desc='file alternative to surfmeas, still requires list of subjects') + source_format = traits.Str(argstr='--srcfmt %s', desc='source format') + surf_dir = traits.Str( + argstr='--surfdir %s', desc='alternative directory (instead of surf)') + vol_measure_file = InputMultiPath( + traits.Tuple(File(exists=True), File(exists=True)), + argstr='--iv %s %s...', + desc='list of volume measure and reg file tuples') + proj_frac = traits.Float( + argstr='--projfrac %s', desc='projection fraction for vol2surf') + fwhm = traits.Float( + argstr='--fwhm %f', + xor=['num_iters'], + desc='smooth by fwhm mm on the target surface') + num_iters = traits.Int( + argstr='--niters %d', + xor=['fwhm'], + desc='niters : smooth by niters on the target surface') + fwhm_source = traits.Float( + argstr='--fwhm-src %f', + xor=['num_iters_source'], + desc='smooth by fwhm mm on the source surface') + num_iters_source = traits.Int( + argstr='--niterssrc %d', + xor=['fwhm_source'], + desc='niters : smooth by niters on the source surface') + smooth_cortex_only = traits.Bool( + argstr='--smooth-cortex-only', + desc='only smooth cortex (ie, exclude medial wall)') + + +class MRISPreprocOutputSpec(TraitedSpec): + out_file = File(desc='preprocessed output file') + + +class MRISPreproc(FSCommand): + """Use FreeSurfer mris_preproc to prepare a group of contrasts for + a second level analysis + + Examples + -------- + + >>> preproc = MRISPreproc() + >>> preproc.inputs.target = 'fsaverage' + >>> preproc.inputs.hemi = 'lh' + >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \ + ('cont1a.nii', 'register.dat')] + >>> preproc.inputs.out_file = 'concatenated_file.mgz' + >>> preproc.cmdline + 'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' + + """ + + _cmd = 'mris_preproc' + input_spec = 
MRISPreprocInputSpec + output_spec = MRISPreprocOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outfile = self.inputs.out_file + outputs['out_file'] = outfile + if not isdefined(outfile): + outputs['out_file'] = os.path.join( + os.getcwd(), 'concat_%s_%s.mgz' % (self.inputs.hemi, + self.inputs.target)) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + +class MRISPreprocReconAllInputSpec(MRISPreprocInputSpec): + surf_measure_file = File( + exists=True, + argstr='--meas %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + desc='file necessary for surfmeas') + surfreg_files = InputMultiPath( + File(exists=True), + argstr="--surfreg %s", + requires=['lh_surfreg_target', 'rh_surfreg_target'], + desc="lh and rh input surface registration files") + lh_surfreg_target = File( + desc="Implicit target surface registration file", + requires=['surfreg_files']) + rh_surfreg_target = File( + desc="Implicit target surface registration file", + requires=['surfreg_files']) + subject_id = traits.String( + 'subject_id', + argstr='--s %s', + usedefault=True, + xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'), + desc='subject from whom measures are calculated') + copy_inputs = traits.Bool( + desc="If running as a node, set this to True " + + "this will copy some implicit inputs to the " + "node directory.") + + +class MRISPreprocReconAll(MRISPreproc): + """Extends MRISPreproc to allow it to be used in a recon-all workflow + + Examples + ======== + >>> preproc = MRISPreprocReconAll() + >>> preproc.inputs.target = 'fsaverage' + >>> preproc.inputs.hemi = 'lh' + >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), \ + ('cont1a.nii', 'register.dat')] + >>> preproc.inputs.out_file = 'concatenated_file.mgz' + >>> preproc.cmdline + 'mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' + """ + + input_spec = MRISPreprocReconAllInputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + if isdefined(self.inputs.surf_dir): + folder = self.inputs.surf_dir + else: + folder = 'surf' + if isdefined(self.inputs.surfreg_files): + for surfreg in self.inputs.surfreg_files: + basename = os.path.basename(surfreg) + copy2subjdir(self, surfreg, folder, basename) + if basename.startswith('lh.'): + copy2subjdir( + self, + self.inputs.lh_surfreg_target, + folder, + basename, + subject_id=self.inputs.target) + else: + copy2subjdir( + self, + self.inputs.rh_surfreg_target, + folder, + basename, + subject_id=self.inputs.target) + + if isdefined(self.inputs.surf_measure_file): + copy2subjdir(self, self.inputs.surf_measure_file, folder) + + return super(MRISPreprocReconAll, self).run(**inputs) + + def _format_arg(self, name, spec, value): + # mris_preproc looks for these files in the surf dir + if name == 'surfreg_files': + basename = os.path.basename(value[0]) + return spec.argstr % basename.lstrip('rh.').lstrip('lh.') + if name == "surf_measure_file": + basename = os.path.basename(value) + return spec.argstr % basename.lstrip('rh.').lstrip('lh.') + return super(MRISPreprocReconAll, self)._format_arg(name, spec, value) + + +class GLMFitInputSpec(FSTraitedSpec): + glm_dir = traits.Str( + argstr='--glmdir %s', desc='save outputs to dir', genfile=True) + in_file = File( + 
desc='input 4D file', argstr='--y %s', mandatory=True, copyfile=False) + _design_xor = ('fsgd', 'design', 'one_sample') + fsgd = traits.Tuple( + File(exists=True), + traits.Enum('doss', 'dods'), + argstr='--fsgd %s %s', + xor=_design_xor, + desc='freesurfer descriptor file') + design = File( + exists=True, + argstr='--X %s', + xor=_design_xor, + desc='design matrix file') + contrast = InputMultiPath( + File(exists=True), argstr='--C %s...', desc='contrast file') + + one_sample = traits.Bool( + argstr='--osgm', + xor=('one_sample', 'fsgd', 'design', 'contrast'), + desc='construct X and C as a one-sample group mean') + no_contrast_ok = traits.Bool( + argstr='--no-contrasts-ok', + desc='do not fail if no contrasts specified') + per_voxel_reg = InputMultiPath( + File(exists=True), argstr='--pvr %s...', desc='per-voxel regressors') + self_reg = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--selfreg %d %d %d', + desc='self-regressor from index col row slice') + weighted_ls = File( + exists=True, + argstr='--wls %s', + xor=('weight_file', 'weight_inv', 'weight_sqrt'), + desc='weighted least squares') + fixed_fx_var = File( + exists=True, argstr='--yffxvar %s', desc='for fixed effects analysis') + fixed_fx_dof = traits.Int( + argstr='--ffxdof %d', + xor=['fixed_fx_dof_file'], + desc='dof for fixed effects analysis') + fixed_fx_dof_file = File( + argstr='--ffxdofdat %d', + xor=['fixed_fx_dof'], + desc='text file with dof for fixed effects analysis') + weight_file = File( + exists=True, + xor=['weighted_ls'], + desc='weight for each input at each voxel') + weight_inv = traits.Bool( + argstr='--w-inv', desc='invert weights', xor=['weighted_ls']) + weight_sqrt = traits.Bool( + argstr='--w-sqrt', desc='sqrt of weights', xor=['weighted_ls']) + fwhm = traits.Range( + low=0.0, argstr='--fwhm %f', desc='smooth input by fwhm') + var_fwhm = traits.Range( + low=0.0, argstr='--var-fwhm %f', desc='smooth variance by fwhm') + no_mask_smooth = traits.Bool( + argstr='--no-mask-smooth', desc='do not mask when smoothing') + no_est_fwhm = traits.Bool( + argstr='--no-est-fwhm', desc='turn off FWHM output estimation') + mask_file = File(exists=True, argstr='--mask %s', desc='binary mask') + label_file = File( + exists=True, + argstr='--label %s', + xor=['cortex'], + desc='use label as mask, surfaces only') + cortex = traits.Bool( + argstr='--cortex', + xor=['label_file'], + desc='use subjects ?h.cortex.label as label') + invert_mask = traits.Bool(argstr='--mask-inv', desc='invert mask') + prune = traits.Bool( + argstr='--prune', + desc= + 'remove voxels that do not have a non-zero value at each frame (def)') + no_prune = traits.Bool( + argstr='--no-prune', xor=['prunethresh'], desc='do not prune') + prune_thresh = traits.Float( + argstr='--prune_thr %f', + xor=['noprune'], + desc='prune threshold. Default is FLT_MIN') + compute_log_y = traits.Bool( + argstr='--logy', desc='compute natural log of y prior to analysis') + save_estimate = traits.Bool( + argstr='--yhat-save', desc='save signal estimate (yhat)') + save_residual = traits.Bool( + argstr='--eres-save', desc='save residual error (eres)') + save_res_corr_mtx = traits.Bool( + argstr='--eres-scm', + desc='save residual error spatial correlation matrix (eres.scm). 
Big!') + surf = traits.Bool( + argstr="--surf %s %s %s", + requires=["subject_id", "hemi"], + desc="analysis is on a surface mesh") + subject_id = traits.Str(desc="subject id for surface geometry") + hemi = traits.Enum("lh", "rh", desc="surface hemisphere") + surf_geo = traits.Str( + "white", + usedefault=True, + desc="surface geometry name (e.g. white, pial)") + simulation = traits.Tuple( + traits.Enum('perm', 'mc-full', 'mc-z'), + traits.Int(min=1), + traits.Float, + traits.Str, + argstr='--sim %s %d %f %s', + desc='nulltype nsim thresh csdbasename') + sim_sign = traits.Enum( + 'abs', 'pos', 'neg', argstr='--sim-sign %s', desc='abs, pos, or neg') + uniform = traits.Tuple( + traits.Float, + traits.Float, + argstr='--uniform %f %f', + desc='use uniform distribution instead of gaussian') + pca = traits.Bool( + argstr='--pca', desc='perform pca/svd analysis on residual') + calc_AR1 = traits.Bool( + argstr='--tar1', desc='compute and save temporal AR1 of residual') + save_cond = traits.Bool( + argstr='--save-cond', + desc='flag to save design matrix condition at each voxel') + vox_dump = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--voxdump %d %d %d', + desc='dump voxel GLM and exit') + seed = traits.Int(argstr='--seed %d', desc='used for synthesizing noise') + synth = traits.Bool(argstr='--synth', desc='replace input with gaussian') + resynth_test = traits.Int( + argstr='--resynthtest %d', desc='test GLM by resynthsis') + profile = traits.Int(argstr='--profile %d', desc='niters : test speed') + force_perm = traits.Bool( + argstr='--perm-force', + desc='force perumtation test, even when design matrix is not orthog') + diag = traits.Int(argstr='--diag %d', desc='Gdiag_no : set diagnositc level') + diag_cluster = traits.Bool( + argstr='--diag-cluster', + desc='save sig volume and exit from first sim loop') + debug = traits.Bool(argstr='--debug', desc='turn on debugging') + check_opts = traits.Bool( + argstr='--checkopts', + desc="don't run anything, just check options and exit") + allow_repeated_subjects = traits.Bool( + argstr='--allowsubjrep', + desc= + 'allow subject names to repeat in the fsgd file (must appear before --fsgd' + ) + allow_ill_cond = traits.Bool( + argstr='--illcond', desc='allow ill-conditioned design matrices') + sim_done_file = File( + argstr='--sim-done %s', desc='create file when simulation finished') + + +class GLMFitOutputSpec(TraitedSpec): + + glm_dir = Directory(exists=True, desc="output directory") + beta_file = File(exists=True, desc="map of regression coefficients") + error_file = File(desc="map of residual error") + error_var_file = File(desc="map of residual error variance") + error_stddev_file = File(desc="map of residual error standard deviation") + estimate_file = File(desc="map of the estimated Y values") + mask_file = File(desc="map of the mask used in the analysis") + fwhm_file = File(desc="text file with estimated smoothness") + dof_file = File( + desc="text file with effective degrees-of-freedom for the analysis") + gamma_file = OutputMultiPath( + desc="map of contrast of regression coefficients") + gamma_var_file = OutputMultiPath( + desc="map of regression contrast variance") + sig_file = OutputMultiPath(desc="map of F-test significance (in -log10p)") + ftest_file = OutputMultiPath(desc="map of test statistic values") + spatial_eigenvectors = File( + desc="map of spatial eigenvectors from residual PCA") + frame_eigenvectors = File( + desc="matrix of frame eigenvectors from residual PCA") + singular_values = File(desc="matrix 
singular values from residual PCA") + svd_stats_file = File(desc="text file summarizing the residual PCA") + + +class GLMFit(FSCommand): + """Use FreeSurfer's mri_glmfit to specify and estimate a general linear model. + + Examples + -------- + + >>> glmfit = GLMFit() + >>> glmfit.inputs.in_file = 'functional.nii' + >>> glmfit.inputs.one_sample = True + >>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd() + True + + """ + + _cmd = 'mri_glmfit' + input_spec = GLMFitInputSpec + output_spec = GLMFitOutputSpec + + def _format_arg(self, name, spec, value): + if name == "surf": + _si = self.inputs + return spec.argstr % (_si.subject_id, _si.hemi, _si.surf_geo) + return super(GLMFit, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + # Get the top-level output directory + if not isdefined(self.inputs.glm_dir): + glmdir = os.getcwd() + else: + glmdir = os.path.abspath(self.inputs.glm_dir) + outputs["glm_dir"] = glmdir + + # Assign the output files that always get created + outputs["beta_file"] = os.path.join(glmdir, "beta.mgh") + outputs["error_var_file"] = os.path.join(glmdir, "rvar.mgh") + outputs["error_stddev_file"] = os.path.join(glmdir, "rstd.mgh") + outputs["mask_file"] = os.path.join(glmdir, "mask.mgh") + outputs["fwhm_file"] = os.path.join(glmdir, "fwhm.dat") + outputs["dof_file"] = os.path.join(glmdir, "dof.dat") + # Assign the conditional outputs + if isdefined(self.inputs.save_residual) and self.inputs.save_residual: + outputs["error_file"] = os.path.join(glmdir, "eres.mgh") + if isdefined(self.inputs.save_estimate) and self.inputs.save_estimate: + outputs["estimate_file"] = os.path.join(glmdir, "yhat.mgh") + + # Get the contrast directory name(s) + if isdefined(self.inputs.contrast): + contrasts = [] + for c in self.inputs.contrast: + if split_filename(c)[2] in [".mat", ".dat", ".mtx", ".con"]: + contrasts.append(split_filename(c)[1]) + else: + contrasts.append(os.path.split(c)[1]) + elif isdefined(self.inputs.one_sample) and self.inputs.one_sample: + contrasts = ["osgm"] + + # Add in the contrast images + outputs["sig_file"] = [ + os.path.join(glmdir, c, "sig.mgh") for c in contrasts + ] + outputs["ftest_file"] = [ + os.path.join(glmdir, c, "F.mgh") for c in contrasts + ] + outputs["gamma_file"] = [ + os.path.join(glmdir, c, "gamma.mgh") for c in contrasts + ] + outputs["gamma_var_file"] = [ + os.path.join(glmdir, c, "gammavar.mgh") for c in contrasts + ] + + # Add in the PCA results, if relevant + if isdefined(self.inputs.pca) and self.inputs.pca: + pcadir = os.path.join(glmdir, "pca-eres") + outputs["spatial_eigenvectors"] = os.path.join(pcadir, "v.mgh") + outputs["frame_eigenvectors"] = os.path.join(pcadir, "u.mtx") + outputs["singluar_values"] = os.path.join(pcadir, "sdiag.mat") + outputs["svd_stats_file"] = os.path.join(pcadir, "stats.dat") + + return outputs + + def _gen_filename(self, name): + if name == 'glm_dir': + return os.getcwd() + return None + + +class OneSampleTTest(GLMFit): + def __init__(self, **kwargs): + super(OneSampleTTest, self).__init__(**kwargs) + self.inputs.one_sample = True + + +class BinarizeInputSpec(FSTraitedSpec): + in_file = File( + exists=True, + argstr='--i %s', + mandatory=True, + copyfile=False, + desc='input volume') + min = traits.Float( + argstr='--min %f', xor=['wm_ven_csf'], desc='min thresh') + max = traits.Float( + argstr='--max %f', xor=['wm_ven_csf'], desc='max thresh') + rmin = traits.Float( + argstr='--rmin %f', desc='compute min based on 
rmin*globalmean')
+    rmax = traits.Float(
+        argstr='--rmax %f', desc='compute max based on rmax*globalmean')
+    match = traits.List(
+        traits.Int, argstr='--match %d...', desc='match instead of threshold')
+    wm = traits.Bool(
+        argstr='--wm',
+        desc='set match vals to 2 and 41 (aseg for cerebral WM)')
+    ventricles = traits.Bool(
+        argstr='--ventricles',
+        desc='set match vals to those for aseg ventricles+choroid (not 4th)')
+    wm_ven_csf = traits.Bool(
+        argstr='--wm+vcsf',
+        xor=['min', 'max'],
+        desc='WM and ventricular CSF, including choroid (not 4th)')
+    binary_file = File(
+        argstr='--o %s', genfile=True, desc='binary output volume')
+    out_type = traits.Enum(
+        'nii', 'nii.gz', 'mgz', argstr='', desc='output file type')
+    count_file = traits.Either(
+        traits.Bool,
+        File,
+        argstr='--count %s',
+        desc='save number of hits in ascii file (hits, ntotvox, pct)')
+    bin_val = traits.Int(
+        argstr='--binval %d',
+        desc='set vox within thresh to val (default is 1)')
+    bin_val_not = traits.Int(
+        argstr='--binvalnot %d',
+        desc='set vox outside range to val (default is 0)')
+    invert = traits.Bool(argstr='--inv', desc='set binval=0, binvalnot=1')
+    frame_no = traits.Int(
+        argstr='--frame %d', desc='use 0-based frame of input (default is 0)')
+    merge_file = File(
+        exists=True, argstr='--merge %s', desc='merge with mergevol')
+    mask_file = File(
+        exists=True, argstr='--mask %s', desc='must be within mask')
+    mask_thresh = traits.Float(
+        argstr='--mask-thresh %f', desc='set thresh for mask')
+    abs = traits.Bool(
+        argstr='--abs', desc='take abs of invol first (ie, make unsigned)')
+    bin_col_num = traits.Bool(
+        argstr='--bincol',
+        desc='set binarized voxel value to its column number')
+    zero_edges = traits.Bool(
+        argstr='--zero-edges', desc='zero the edge voxels')
+    zero_slice_edge = traits.Bool(
+        argstr='--zero-slice-edges', desc='zero the edge slice voxels')
+    dilate = traits.Int(
+        argstr='--dilate %d', desc='niters: dilate binarization in 3D')
+    erode = traits.Int(
+        argstr='--erode %d',
+        desc='nerode: erode binarization in 3D (after any dilation)')
+    erode2d = traits.Int(
+        argstr='--erode2d %d',
+        desc='nerode2d: erode binarization in 2D (after any 3D erosion)')
+
+
+class BinarizeOutputSpec(TraitedSpec):
+    binary_file = File(exists=True, desc='binarized output volume')
+    count_file = File(desc='ascii file containing number of hits')
+
+
+class Binarize(FSCommand):
+    """Use FreeSurfer mri_binarize to threshold an input volume
+
+    Examples
+    --------
+
+    >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii')
+    >>> binvol.cmdline
+    'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000'
+
+    """
+
+    _cmd = 'mri_binarize'
+    input_spec = BinarizeInputSpec
+    output_spec = BinarizeOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outfile = self.inputs.binary_file
+        if not isdefined(outfile):
+            if isdefined(self.inputs.out_type):
+                outfile = fname_presuffix(
+                    self.inputs.in_file,
+                    newpath=os.getcwd(),
+                    suffix='.'.join(('_thresh', self.inputs.out_type)),
+                    use_ext=False)
+            else:
+                outfile = fname_presuffix(
+                    self.inputs.in_file, newpath=os.getcwd(), suffix='_thresh')
+        outputs['binary_file'] = os.path.abspath(outfile)
+        value = self.inputs.count_file
+        if isdefined(value):
+            if isinstance(value, bool):
+                if value:
+                    outputs['count_file'] = fname_presuffix(
+                        self.inputs.in_file,
+                        suffix='_count.txt',
+                        newpath=os.getcwd(),
+                        use_ext=False)
+            else:
+                outputs['count_file'] = value
+        return outputs
+
+    def _format_arg(self, name, spec, value):
+        if name == 'count_file':
+            if isinstance(value, bool):
+                fname = self._list_outputs()[name]
+            else:
+                fname = value
+            return spec.argstr % fname
+        if name == 'out_type':
+            return ''
+        return super(Binarize, self)._format_arg(name, spec, value)
+
+    def _gen_filename(self, name):
+        if name == 'binary_file':
+            return self._list_outputs()[name]
+        return None
+
+
+class ConcatenateInputSpec(FSTraitedSpec):
+    in_files = InputMultiPath(
+        File(exists=True),
+        desc='Individual volumes to be concatenated',
+        argstr='--i %s...',
+        mandatory=True)
+    concatenated_file = File(
+        desc='Output volume', argstr='--o %s', genfile=True)
+    sign = traits.Enum(
+        'abs',
+        'pos',
+        'neg',
+        argstr='--%s',
+        desc='Take only pos or neg voxels from input, or take abs')
+    stats = traits.Enum(
+        'sum',
+        'var',
+        'std',
+        'max',
+        'min',
+        'mean',
+        argstr='--%s',
+        desc='Compute the sum, var, std, max, min or mean of the input volumes'
+    )
+    paired_stats = traits.Enum(
+        'sum',
+        'avg',
+        'diff',
+        'diff-norm',
+        'diff-norm1',
+        'diff-norm2',
+        argstr='--paired-%s',
+        desc='Compute paired sum, avg, or diff')
+    gmean = traits.Int(
+        argstr='--gmean %d',
+        desc='create matrix to average Ng groups, Nper=Ntot/Ng')
+    mean_div_n = traits.Bool(
+        argstr='--mean-div-n', desc='compute mean/nframes (good for var)')
+    multiply_by = traits.Float(
+        argstr='--mul %f', desc='Multiply input volume by some amount')
+    add_val = traits.Float(
+        argstr='--add %f', desc='Add some amount to the input volume')
+    multiply_matrix_file = File(
+        exists=True,
+        argstr='--mtx %s',
+        desc='Multiply input by an ascii matrix in file')
+    combine = traits.Bool(
+        argstr='--combine',
+        desc='Combine non-zero values into single frame volume')
+    keep_dtype = traits.Bool(
+        argstr='--keep-datatype',
+        desc='Keep voxelwise precision type (default is float)')
+    max_bonfcor = traits.Bool(
+        argstr='--max-bonfcor',
+        desc='Compute max and bonferroni correct (assumes -log10(ps))')
+    max_index = traits.Bool(
+        argstr='--max-index',
+        desc='Compute the index of max voxel in concatenated volumes')
+    mask_file = File(
+        exists=True, argstr='--mask %s', desc='Mask input with a volume')
+    vote = traits.Bool(
+        argstr='--vote',
+        desc='Most frequent value at each voxel and fraction of occurrences')
+    sort = traits.Bool(
+        argstr='--sort', desc='Sort each voxel by ascending frame value')
+
+
+class ConcatenateOutputSpec(TraitedSpec):
+    concatenated_file = File(
+        exists=True, desc='Path/name of the output volume')
+
+
+class Concatenate(FSCommand):
+    """Use Freesurfer mri_concat to combine several input volumes
+    into one output volume. Can concatenate by frames, or compute
+    a variety of statistics on the input volumes.
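+
+    For instance (an illustrative sketch, not one of the shipped examples),
+    setting ``stats='mean'`` reduces the inputs to a single voxelwise-mean
+    volume:
+
+    >>> mean_conc = Concatenate(in_files=['cont1.nii', 'cont2.nii'],
+    ...                         stats='mean',
+    ...                         concatenated_file='mean.nii')  # doctest: +SKIP
+    >>> mean_conc.cmdline  # doctest: +SKIP
+    'mri_concat --o mean.nii --i cont1.nii --i cont2.nii --mean'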
+ + Examples + -------- + + Combine two input volumes into one volume with two frames + + >>> concat = Concatenate() + >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii'] + >>> concat.inputs.concatenated_file = 'bar.nii' + >>> concat.cmdline + 'mri_concat --o bar.nii --i cont1.nii --i cont2.nii' + + """ + + _cmd = 'mri_concat' + input_spec = ConcatenateInputSpec + output_spec = ConcatenateOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + fname = self.inputs.concatenated_file + if not isdefined(fname): + fname = 'concat_output.nii.gz' + outputs['concatenated_file'] = os.path.join(os.getcwd(), fname) + return outputs + + def _gen_filename(self, name): + if name == 'concatenated_file': + return self._list_outputs()[name] + return None + + +class SegStatsInputSpec(FSTraitedSpec): + _xor_inputs = ('segmentation_file', 'annot', 'surf_label') + segmentation_file = File( + exists=True, + argstr='--seg %s', + xor=_xor_inputs, + mandatory=True, + desc='segmentation volume path') + annot = traits.Tuple( + traits.Str, + traits.Enum('lh', 'rh'), + traits.Str, + argstr='--annot %s %s %s', + xor=_xor_inputs, + mandatory=True, + desc='subject hemi parc : use surface parcellation') + surf_label = traits.Tuple( + traits.Str, + traits.Enum('lh', 'rh'), + traits.Str, + argstr='--slabel %s %s %s', + xor=_xor_inputs, + mandatory=True, + desc='subject hemi label : use surface label') + summary_file = File( + argstr='--sum %s', + genfile=True, + position=-1, + desc='Segmentation stats summary table file') + partial_volume_file = File( + exists=True, argstr='--pv %s', desc='Compensate for partial voluming') + in_file = File( + exists=True, + argstr='--i %s', + desc='Use the segmentation to report stats on this volume') + frame = traits.Int( + argstr='--frame %d', desc='Report stats on nth frame of input volume') + multiply = traits.Float(argstr='--mul %f', desc='multiply input by val') + calc_snr = traits.Bool( + argstr='--snr', desc='save mean/std as extra column in output table') + calc_power = traits.Enum( + 'sqr', + 'sqrt', + argstr='--%s', + desc='Compute either the sqr or the sqrt of the input') + _ctab_inputs = ('color_table_file', 'default_color_table', + 'gca_color_table') + color_table_file = File( + exists=True, + argstr='--ctab %s', + xor=_ctab_inputs, + desc='color table file with seg id names') + default_color_table = traits.Bool( + argstr='--ctab-default', + xor=_ctab_inputs, + desc='use $FREESURFER_HOME/FreeSurferColorLUT.txt') + gca_color_table = File( + exists=True, + argstr='--ctab-gca %s', + xor=_ctab_inputs, + desc='get color table from GCA (CMA)') + segment_id = traits.List( + argstr='--id %s...', desc='Manually specify segmentation ids') + exclude_id = traits.Int( + argstr='--excludeid %d', desc='Exclude seg id from report') + exclude_ctx_gm_wm = traits.Bool( + argstr='--excl-ctxgmwm', desc='exclude cortical gray and white matter') + wm_vol_from_surf = traits.Bool( + argstr='--surf-wm-vol', desc='Compute wm volume from surf') + cortex_vol_from_surf = traits.Bool( + argstr='--surf-ctx-vol', desc='Compute cortex volume from surf') + non_empty_only = traits.Bool( + argstr='--nonempty', desc='Only report nonempty segmentations') + empty = traits.Bool( + argstr="--empty", + desc="Report on segmentations listed in the color table") + mask_file = File( + exists=True, argstr='--mask %s', desc='Mask volume (same size as seg') + mask_thresh = traits.Float( + argstr='--maskthresh %f', + desc='binarize mask with this threshold <0.5>') + mask_sign = traits.Enum( + 
'abs',
+        'pos',
+        'neg',
+        argstr='--masksign %s',
+        desc='Sign for mask threshold: pos, neg, or abs')
+    mask_frame = traits.Int(
+        argstr='--maskframe %d',
+        requires=['mask_file'],
+        desc='Mask with this (0 based) frame of the mask volume')
+    mask_invert = traits.Bool(
+        argstr='--maskinvert', desc='Invert binarized mask volume')
+    mask_erode = traits.Int(
+        argstr='--maskerode %d', desc='Erode mask by some amount')
+    brain_vol = traits.Enum(
+        'brain-vol-from-seg',
+        'brainmask',
+        argstr='--%s',
+        desc=
+        'Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg``'
+    )
+    brainmask_file = File(
+        argstr="--brainmask %s",
+        exists=True,
+        desc=
+        "Load brain mask and compute the volume of the brain as the non-zero voxels in this volume"
+    )
+    etiv = traits.Bool(
+        argstr='--etiv', desc='Compute ICV from talairach transform')
+    etiv_only = traits.Enum(
+        'etiv',
+        'old-etiv',
+        argstr='--%s-only',
+        desc='Compute etiv and exit. Use ``etiv`` or ``old-etiv``')
+    avgwf_txt_file = traits.Either(
+        traits.Bool,
+        File,
+        argstr='--avgwf %s',
+        desc='Save average waveform into file (bool or filename)')
+    avgwf_file = traits.Either(
+        traits.Bool,
+        File,
+        argstr='--avgwfvol %s',
+        desc='Save as binary volume (bool or filename)')
+    sf_avg_file = traits.Either(
+        traits.Bool,
+        File,
+        argstr='--sfavg %s',
+        desc='Save mean across space and time')
+    vox = traits.List(
+        traits.Int,
+        argstr='--vox %s',
+        desc='Replace seg with all 0s except at C R S (three int inputs)')
+    supratent = traits.Bool(
+        argstr="--supratent", desc="Undocumented input flag")
+    subcort_gm = traits.Bool(
+        argstr="--subcortgray",
+        desc="Compute volume of subcortical gray matter")
+    total_gray = traits.Bool(
+        argstr="--totalgray", desc="Compute volume of total gray matter")
+    euler = traits.Bool(
+        argstr="--euler",
+        desc=
+        "Write out number of defect holes in orig.nofix based on the euler number"
+    )
+    in_intensity = File(
+        argstr="--in %s --in-intensity-name %s",
+        desc="Undocumented input norm.mgz file")
+    intensity_units = traits.Enum(
+        'MR',
+        argstr="--in-intensity-units %s",
+        requires=["in_intensity"],
+        desc="Intensity units")
+
+
+class SegStatsOutputSpec(TraitedSpec):
+    summary_file = File(
+        exists=True, desc='Segmentation summary statistics table')
+    avgwf_txt_file = File(
+        desc='Text file with functional statistics averaged over segs')
+    avgwf_file = File(
+        desc='Volume with functional statistics averaged over segs')
+    sf_avg_file = File(
+        desc='Text file with func statistics averaged over segs and frames')
+
+
+class SegStats(FSCommand):
+    """Use FreeSurfer mri_segstats for ROI analysis
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.freesurfer as fs
+    >>> ss = fs.SegStats()
+    >>> ss.inputs.annot = ('PWS04', 'lh', 'aparc')
+    >>> ss.inputs.in_file = 'functional.nii'
+    >>> ss.inputs.subjects_dir = '.'
+    >>> ss.inputs.avgwf_txt_file = 'avgwf.txt'
+    >>> ss.inputs.summary_file = 'summary.stats'
+    >>> ss.cmdline
+    'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats'
+
+    """
+
+    _cmd = 'mri_segstats'
+    input_spec = SegStatsInputSpec
+    output_spec = SegStatsOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if isdefined(self.inputs.summary_file):
+            outputs['summary_file'] = os.path.abspath(self.inputs.summary_file)
+        else:
+            outputs['summary_file'] = os.path.join(os.getcwd(),
+                                                   'summary.stats')
+        suffices = dict(
+            avgwf_txt_file='_avgwf.txt',
+            avgwf_file='_avgwf.nii.gz',
+            sf_avg_file='sfavg.txt')
+        if isdefined(self.inputs.segmentation_file):
+            _, src = os.path.split(self.inputs.segmentation_file)
+        if isdefined(self.inputs.annot):
+            src = '_'.join(self.inputs.annot)
+        if isdefined(self.inputs.surf_label):
+            src = '_'.join(self.inputs.surf_label)
+        for name, suffix in list(suffices.items()):
+            value = getattr(self.inputs, name)
+            if isdefined(value):
+                if isinstance(value, bool):
+                    outputs[name] = fname_presuffix(
+                        src, suffix=suffix, newpath=os.getcwd(), use_ext=False)
+                else:
+                    outputs[name] = os.path.abspath(value)
+        return outputs
+
+    def _format_arg(self, name, spec, value):
+        if name in ('summary_file', 'avgwf_txt_file'):
+            if not isinstance(value, bool):
+                if not os.path.isabs(value):
+                    value = os.path.join('.', value)
+        if name in ['avgwf_txt_file', 'avgwf_file', 'sf_avg_file']:
+            if isinstance(value, bool):
+                fname = self._list_outputs()[name]
+            else:
+                fname = value
+            return spec.argstr % fname
+        elif name == 'in_intensity':
+            intensity_name = os.path.basename(
+                self.inputs.in_intensity).replace('.mgz', '')
+            return spec.argstr % (value, intensity_name)
+        return super(SegStats, self)._format_arg(name, spec, value)
+
+    def _gen_filename(self, name):
+        if name == 'summary_file':
+            return self._list_outputs()[name]
+        return None
+
+
+class SegStatsReconAllInputSpec(SegStatsInputSpec):
+    # recon-all input requirements
+    subject_id = traits.String(
+        'subject_id',
+        usedefault=True,
+        argstr="--subject %s",
+        mandatory=True,
+        desc="Subject id being processed")
+    # implicit
+    ribbon = traits.File(
+        mandatory=True, exists=True, desc="Input file mri/ribbon.mgz")
+    presurf_seg = File(exists=True, desc="Input segmentation volume")
+    transform = File(mandatory=True, exists=True, desc="Input transform file")
+    lh_orig_nofix = File(
+        mandatory=True, exists=True, desc="Input lh.orig.nofix")
+    rh_orig_nofix = File(
+        mandatory=True, exists=True, desc="Input rh.orig.nofix")
+    lh_white = File(
+        mandatory=True,
+        exists=True,
+        desc="Input file must be <subject_id>/surf/lh.white")
+    rh_white = File(
+        mandatory=True,
+        exists=True,
+        desc="Input file must be <subject_id>/surf/rh.white")
+    lh_pial = File(
+        mandatory=True,
+        exists=True,
+        desc="Input file must be <subject_id>/surf/lh.pial")
+    rh_pial = File(
+        mandatory=True,
+        exists=True,
+        desc="Input file must be <subject_id>/surf/rh.pial")
+    aseg = File(exists=True, desc="Mandatory implicit input in 5.3")
+    copy_inputs = traits.Bool(desc="If running as a node, set this to True; "
+                              "this will copy the implicit input files "
+                              "to the node directory.")
+
+
+class SegStatsReconAll(SegStats):
+    """
+    This class inherits SegStats and modifies it for use in a recon-all
+    workflow. This implementation mandates implicit inputs that plain
+    SegStats does not require. To keep SegStats backwards compatible,
+    this separate class was created.
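+
+    When run outside an existing recon-all directory tree, setting
+    ``copy_inputs=True`` stages the implicit surfaces and volumes into a
+    fresh ``subjects_dir`` under the current working directory before
+    ``mri_segstats`` is invoked (see ``run()`` below).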
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import SegStatsReconAll + >>> segstatsreconall = SegStatsReconAll() + >>> segstatsreconall.inputs.annot = ('PWS04', 'lh', 'aparc') + >>> segstatsreconall.inputs.avgwf_txt_file = 'avgwf.txt' + >>> segstatsreconall.inputs.summary_file = 'summary.stats' + >>> segstatsreconall.inputs.subject_id = '10335' + >>> segstatsreconall.inputs.ribbon = 'wm.mgz' + >>> segstatsreconall.inputs.transform = 'trans.mat' + >>> segstatsreconall.inputs.presurf_seg = 'wm.mgz' + >>> segstatsreconall.inputs.lh_orig_nofix = 'lh.pial' + >>> segstatsreconall.inputs.rh_orig_nofix = 'lh.pial' + >>> segstatsreconall.inputs.lh_pial = 'lh.pial' + >>> segstatsreconall.inputs.rh_pial = 'lh.pial' + >>> segstatsreconall.inputs.lh_white = 'lh.pial' + >>> segstatsreconall.inputs.rh_white = 'lh.pial' + >>> segstatsreconall.inputs.empty = True + >>> segstatsreconall.inputs.brain_vol = 'brain-vol-from-seg' + >>> segstatsreconall.inputs.exclude_ctx_gm_wm = True + >>> segstatsreconall.inputs.supratent = True + >>> segstatsreconall.inputs.subcort_gm = True + >>> segstatsreconall.inputs.etiv = True + >>> segstatsreconall.inputs.wm_vol_from_surf = True + >>> segstatsreconall.inputs.cortex_vol_from_surf = True + >>> segstatsreconall.inputs.total_gray = True + >>> segstatsreconall.inputs.euler = True + >>> segstatsreconall.inputs.exclude_id = 0 + >>> segstatsreconall.cmdline + 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats' + """ + input_spec = SegStatsReconAllInputSpec + output_spec = SegStatsOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'brainmask_file': + return spec.argstr % os.path.basename(value) + return super(SegStatsReconAll, self)._format_arg(name, spec, value) + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_orig_nofix, 'surf', + 'lh.orig.nofix') + copy2subjdir(self, self.inputs.rh_orig_nofix, 'surf', + 'rh.orig.nofix') + copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') + copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') + copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') + copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') + copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') + copy2subjdir(self, self.inputs.presurf_seg, 'mri', + 'aseg.presurf.mgz') + copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz') + copy2subjdir(self, self.inputs.transform, + os.path.join('mri', 'transforms'), 'talairach.xfm') + copy2subjdir(self, self.inputs.in_intensity, 'mri') + copy2subjdir(self, self.inputs.brainmask_file, 'mri') + return super(SegStatsReconAll, self).run(**inputs) + + +class Label2VolInputSpec(FSTraitedSpec): + label_file = InputMultiPath( + File(exists=True), + argstr='--label %s...', + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + copyfile=False, + mandatory=True, + desc='list of label files') + annot_file = File( + exists=True, + argstr='--annot %s', + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + requires=('subject_id', 'hemi'), + mandatory=True, + copyfile=False, + desc='surface annotation file') + seg_file = File( + exists=True, + argstr='--seg %s', + xor=('label_file', 'annot_file', 'seg_file', 
'aparc_aseg'), + mandatory=True, + copyfile=False, + desc='segmentation file') + aparc_aseg = traits.Bool( + argstr='--aparc+aseg', + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + mandatory=True, + desc='use aparc+aseg.mgz in subjectdir as seg') + template_file = File( + exists=True, + argstr='--temp %s', + mandatory=True, + desc='output template volume') + reg_file = File( + exists=True, + argstr='--reg %s', + xor=('reg_file', 'reg_header', 'identity'), + desc='tkregister style matrix VolXYZ = R*LabelXYZ') + reg_header = File( + exists=True, + argstr='--regheader %s', + xor=('reg_file', 'reg_header', 'identity'), + desc='label template volume') + identity = traits.Bool( + argstr='--identity', + xor=('reg_file', 'reg_header', 'identity'), + desc='set R=I') + invert_mtx = traits.Bool( + argstr='--invertmtx', desc='Invert the registration matrix') + fill_thresh = traits.Range( + 0., 1., argstr='--fillthresh %g', desc='thresh : between 0 and 1') + label_voxel_volume = traits.Float( + argstr='--labvoxvol %f', desc='volume of each label point (def 1mm3)') + proj = traits.Tuple( + traits.Enum('abs', 'frac'), + traits.Float, + traits.Float, + traits.Float, + argstr='--proj %s %f %f %f', + requires=('subject_id', 'hemi'), + desc='project along surface normal') + subject_id = traits.Str(argstr='--subject %s', desc='subject id') + hemi = traits.Enum( + 'lh', 'rh', argstr='--hemi %s', desc='hemisphere to use lh or rh') + surface = traits.Str( + argstr='--surf %s', desc='use surface instead of white') + vol_label_file = File(argstr='--o %s', genfile=True, desc='output volume') + label_hit_file = File( + argstr='--hits %s', desc='file with each frame is nhits for a label') + map_label_stat = File( + argstr='--label-stat %s', + desc='map the label stats field into the vol') + native_vox2ras = traits.Bool( + argstr='--native-vox2ras', + desc='use native vox2ras xform instead of tkregister-style') + + +class Label2VolOutputSpec(TraitedSpec): + vol_label_file = File(exists=True, desc='output volume') + + +class Label2Vol(FSCommand): + """Make a binary volume from a Freesurfer label + + Examples + -------- + + >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii') + >>> binvol.cmdline + 'mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' + + """ + + _cmd = 'mri_label2vol' + input_spec = Label2VolInputSpec + output_spec = Label2VolOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outfile = self.inputs.vol_label_file + if not isdefined(outfile): + for key in ['label_file', 'annot_file', 'seg_file']: + if isdefined(getattr(self.inputs, key)): + path = getattr(self.inputs, key) + if isinstance(path, list): + path = path[0] + _, src = os.path.split(path) + if isdefined(self.inputs.aparc_aseg): + src = 'aparc+aseg.mgz' + outfile = fname_presuffix( + src, suffix='_vol.nii.gz', newpath=os.getcwd(), use_ext=False) + outputs['vol_label_file'] = outfile + return outputs + + def _gen_filename(self, name): + if name == 'vol_label_file': + return self._list_outputs()[name] + return None + + +class MS_LDAInputSpec(FSTraitedSpec): + lda_labels = traits.List( + traits.Int(), + argstr='-lda %s', + mandatory=True, + minlen=2, + maxlen=2, + sep=' ', + desc='pair of class labels to optimize') + weight_file = traits.File( + argstr='-weight %s', + mandatory=True, + desc='filename for the LDA weights (input or output)') + 
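# NOTE: the interface treats weight_file as an existing input when
+    # use_weights is set (see _verify_weights_file_exists below) and as an
+    # output to be generated by mri_ms_LDA otherwise (see _list_outputs).
+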
vol_synth_file = traits.File(
+        exists=False,
+        argstr='-synth %s',
+        mandatory=True,
+        desc=('filename for the synthesized output '
+              'volume'))
+    label_file = traits.File(
+        exists=True, argstr='-label %s', desc='filename of the label volume')
+    mask_file = traits.File(
+        exists=True,
+        argstr='-mask %s',
+        desc='filename of the brain mask volume')
+    shift = traits.Int(
+        argstr='-shift %d',
+        desc='shift all values equal to the given value to zero')
+    conform = traits.Bool(
+        argstr='-conform',
+        desc=('Conform the input volumes (brain mask '
+              'typically already conformed)'))
+    use_weights = traits.Bool(
+        argstr='-W',
+        desc=('Use the weights from a previously '
+              'generated weight file'))
+    images = InputMultiPath(
+        File(exists=True),
+        argstr='%s',
+        mandatory=True,
+        copyfile=False,
+        desc='list of input FLASH images',
+        position=-1)
+
+
+class MS_LDAOutputSpec(TraitedSpec):
+    weight_file = File(exists=True, desc='LDA weights file')
+    vol_synth_file = File(exists=True, desc='synthesized output volume')
+
+
+class MS_LDA(FSCommand):
+    """Perform LDA reduction on the intensity space of an arbitrary # of FLASH images
+
+    Examples
+    --------
+
+    >>> grey_label = 2
+    >>> white_label = 3
+    >>> zero_value = 1
+    >>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], \
                                label_file='label.mgz', weight_file='weights.txt', \
                                shift=zero_value, vol_synth_file='synth_out.mgz', \
                                conform=True, use_weights=True, \
                                images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz'])
+    >>> optimalWeights.cmdline
+    'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz'
+    """
+
+    _cmd = 'mri_ms_LDA'
+    input_spec = MS_LDAInputSpec
+    output_spec = MS_LDAOutputSpec
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        # vol_synth_file is mandatory, so it is always defined
+        outputs['vol_synth_file'] = os.path.abspath(
+            self.inputs.vol_synth_file)
+        if not isdefined(
+                self.inputs.use_weights) or self.inputs.use_weights is False:
+            outputs['weight_file'] = os.path.abspath(self.inputs.weight_file)
+        return outputs
+
+    def _verify_weights_file_exists(self):
+        if not os.path.exists(os.path.abspath(self.inputs.weight_file)):
+            raise traits.TraitError(
+                "MS_LDA: use_weights must accompany an existing weights file")
+
+    def _format_arg(self, name, spec, value):
+        if name == 'use_weights':
+            if self.inputs.use_weights is True:
+                self._verify_weights_file_exists()
+            else:
+                return ''
+        # TODO: Fix bug when boolean values are set explicitly to false
+        return super(MS_LDA, self)._format_arg(name, spec, value)
+
+    def _gen_filename(self, name):
+        pass
+
+
+class Label2LabelInputSpec(FSTraitedSpec):
+    hemisphere = traits.Enum(
+        'lh',
+        'rh',
+        argstr="--hemi %s",
+        mandatory=True,
+        desc="Input hemisphere")
+    subject_id = traits.String(
+        'subject_id',
+        usedefault=True,
+        argstr="--trgsubject %s",
+        mandatory=True,
+        desc="Target subject")
+    sphere_reg = File(
+        mandatory=True,
+        exists=True,
+        desc="Implicit input <hemisphere>.sphere.reg")
+    white = File(
+        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
+    source_sphere_reg = File(
+        mandatory=True,
+        exists=True,
+        desc="Implicit input <hemisphere>.sphere.reg")
+    source_white = File(
+        mandatory=True, exists=True, desc="Implicit input <hemisphere>.white")
+    source_label = File(
+        argstr="--srclabel %s",
+        mandatory=True,
+        exists=True,
+        desc="Source label")
+    source_subject = traits.String(
+        argstr="--srcsubject %s", mandatory=True, desc="Source subject name")
+    #
optional + out_file = File( + argstr="--trglabel %s", + name_source=['source_label'], + name_template='%s_converted', + hash_files=False, + keep_extension=True, + desc="Target label") + registration_method = traits.Enum( + 'surface', + 'volume', + usedefault=True, + argstr="--regmethod %s", + desc="Registration method") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + "directory.") + + +class Label2LabelOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Output label') + + +class Label2Label(FSCommand): + """ + Converts a label in one subject's space to a label + in another subject's space using either talairach or spherical + as an intermediate registration space. + + If a source mask is used, then the input label must have been + created from a surface (ie, the vertex numbers are valid). The + format can be anything supported by mri_convert or curv or paint. + Vertices in the source label that do not meet threshold in the + mask will be removed from the label. + + Examples + -------- + >>> from nipype.interfaces.freesurfer import Label2Label + >>> l2l = Label2Label() + >>> l2l.inputs.hemisphere = 'lh' + >>> l2l.inputs.subject_id = '10335' + >>> l2l.inputs.sphere_reg = 'lh.pial' + >>> l2l.inputs.white = 'lh.pial' + >>> l2l.inputs.source_subject = 'fsaverage' + >>> l2l.inputs.source_label = 'lh-pial.stl' + >>> l2l.inputs.source_white = 'lh.pial' + >>> l2l.inputs.source_sphere_reg = 'lh.pial' + >>> l2l.cmdline + 'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335' + """ + + _cmd = 'mri_label2label' + input_spec = Label2LabelInputSpec + output_spec = Label2LabelOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label', + self.inputs.out_file) + return outputs + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + hemi = self.inputs.hemisphere + copy2subjdir(self, self.inputs.sphere_reg, 'surf', + '{0}.sphere.reg'.format(hemi)) + copy2subjdir(self, self.inputs.white, 'surf', + '{0}.white'.format(hemi)) + copy2subjdir( + self, + self.inputs.source_sphere_reg, + 'surf', + '{0}.sphere.reg'.format(hemi), + subject_id=self.inputs.source_subject) + copy2subjdir( + self, + self.inputs.source_white, + 'surf', + '{0}.white'.format(hemi), + subject_id=self.inputs.source_subject) + + # label dir must exist in order for output file to be written + label_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if not os.path.isdir(label_dir): + os.makedirs(label_dir) + + return super(Label2Label, self).run(**inputs) + + +class Label2AnnotInputSpec(FSTraitedSpec): + # required + hemisphere = traits.Enum( + 'lh', + 'rh', + argstr="--hemi %s", + mandatory=True, + desc="Input hemisphere") + subject_id = traits.String( + 'subject_id', + usedefault=True, + argstr="--s %s", + mandatory=True, + desc="Subject name/ID") + in_labels = traits.List( + argstr="--l %s...", mandatory=True, desc="List of input label files") + out_annot = traits.String( + argstr="--a %s", + mandatory=True, + desc="Name of the annotation to create") + orig = File(exists=True, mandatory=True, desc="implicit {hemisphere}.orig") + # optional + keep_max = traits.Bool( + argstr="--maxstatwinner", 
desc="Keep label with highest 'stat' value") + verbose_off = traits.Bool( + argstr="--noverbose", + desc="Turn off overlap and stat override messages") + color_table = File( + argstr="--ctab %s", + exists=True, + desc= + "File that defines the structure names, their indices, and their color" + ) + copy_inputs = traits.Bool( + desc="copy implicit inputs and create a temp subjects_dir") + + +class Label2AnnotOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Output annotation file') + + +class Label2Annot(FSCommand): + """ + Converts a set of surface labels to an annotation file + + Examples + -------- + >>> from nipype.interfaces.freesurfer import Label2Annot + >>> l2a = Label2Annot() + >>> l2a.inputs.hemisphere = 'lh' + >>> l2a.inputs.subject_id = '10335' + >>> l2a.inputs.in_labels = ['lh.aparc.label'] + >>> l2a.inputs.orig = 'lh.pial' + >>> l2a.inputs.out_annot = 'test' + >>> l2a.cmdline + 'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335' + """ + + _cmd = 'mris_label2annot' + input_spec = Label2AnnotInputSpec + output_spec = Label2AnnotOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir( + self, + self.inputs.orig, + folder='surf', + basename='{0}.orig'.format(self.inputs.hemisphere)) + # label dir must exist in order for output file to be written + label_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if not os.path.isdir(label_dir): + os.makedirs(label_dir) + return super(Label2Annot, self).run(**inputs) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.join( + str(self.inputs.subjects_dir), str(self.inputs.subject_id), + 'label', + str(self.inputs.hemisphere) + '.' + str(self.inputs.out_annot) + + '.annot') + return outputs + + +class SphericalAverageInputSpec(FSTraitedSpec): + out_file = File( + argstr="%s", + genfile=True, + exists=False, + position=-1, + desc="Output filename") + in_average = traits.Directory( + argstr="%s", + exists=True, + genfile=True, + position=-2, + desc="Average subject") + in_surf = File( + argstr="%s", + mandatory=True, + exists=True, + position=-3, + desc="Input surface file") + hemisphere = traits.Enum( + 'lh', + 'rh', + argstr="%s", + mandatory=True, + position=-4, + desc="Input hemisphere") + fname = traits.String( + argstr="%s", + mandatory=True, + position=-5, + desc="""Filename from the average subject directory. + Example: to use rh.entorhinal.label as the input label + filename, set fname to 'rh.entorhinal' and which to + 'label'. The program will then search for + '{in_average}/label/rh.entorhinal.label' + """) + which = traits.Enum( + 'coords', + 'label', + 'vals', + 'curv', + 'area', + argstr="%s", + mandatory=True, + position=-6, + desc="No documentation") + subject_id = traits.String( + argstr="-o %s", mandatory=True, desc="Output subject id") + # optional + erode = traits.Int(argstr="-erode %d", desc="Undocumented") + in_orig = File( + argstr="-orig %s", exists=True, desc="Original surface filename") + threshold = traits.Float(argstr="-t %.1f", desc="Undocumented") + + +class SphericalAverageOutputSpec(TraitedSpec): + out_file = File(exists=False, desc='Output label') + + +class SphericalAverage(FSCommand): + """ + This program will add a template into an average surface. 
+ + Examples + -------- + >>> from nipype.interfaces.freesurfer import SphericalAverage + >>> sphericalavg = SphericalAverage() + >>> sphericalavg.inputs.out_file = 'test.out' + >>> sphericalavg.inputs.in_average = '.' + >>> sphericalavg.inputs.in_surf = 'lh.pial' + >>> sphericalavg.inputs.hemisphere = 'lh' + >>> sphericalavg.inputs.fname = 'lh.entorhinal' + >>> sphericalavg.inputs.which = 'label' + >>> sphericalavg.inputs.subject_id = '10335' + >>> sphericalavg.inputs.erode = 2 + >>> sphericalavg.inputs.threshold = 5 + >>> sphericalavg.cmdline + 'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out' + """ + + _cmd = 'mris_spherical_average' + input_spec = SphericalAverageInputSpec + output_spec = SphericalAverageOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'in_orig' or name == 'in_surf': + surf = os.path.basename(value) + for item in ['lh.', 'rh.']: + surf = surf.replace(item, '') + return spec.argstr % surf + return super(SphericalAverage, self)._format_arg(name, spec, value) + + def _gen_filename(self, name): + if name == 'in_average': + avg_subject = str(self.inputs.hemisphere) + '.EC_average' + avg_directory = os.path.join(self.inputs.subjects_dir, avg_subject) + if not os.path.isdir(avg_directory): + fs_home = os.path.abspath(os.environ.get('FREESURFER_HOME')) + return avg_subject + elif name == 'out_file': + return self._list_outputs()[name] + else: + return None + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.out_file): + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + else: + out_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if isdefined(self.inputs.in_average): + basename = os.path.basename(self.inputs.in_average) + basename = basename.replace('_', '_exvivo_') + '.label' + else: + basename = str( + self.inputs.hemisphere) + '.EC_exvivo_average.label' + outputs['out_file'] = os.path.join(out_dir, basename) + return outputs diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py new file mode 100644 index 0000000000..f99696e02a --- /dev/null +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -0,0 +1,3043 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provides interfaces to various commands provided by FreeSurfer +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +import os +import os.path as op +from glob import glob +import shutil + +import numpy as np +from nibabel import load + +from ... 
import logging, LooseVersion +from ...utils.filemanip import fname_presuffix, check_depends +from ..io import FreeSurferSource +from ..base import (TraitedSpec, File, traits, Directory, InputMultiPath, + OutputMultiPath, CommandLine, CommandLineInputSpec, + isdefined) +from .base import (FSCommand, FSTraitedSpec, FSTraitedSpecOpenMP, + FSCommandOpenMP, Info) +from .utils import copy2subjdir + +__docformat__ = 'restructuredtext' +iflogger = logging.getLogger('nipype.interface') + +# Keeping this to avoid breaking external programs that depend on it, but +# this should not be used internally +FSVersion = Info.looseversion().vstring + + +class ParseDICOMDirInputSpec(FSTraitedSpec): + dicom_dir = Directory( + exists=True, + argstr='--d %s', + mandatory=True, + desc='path to siemens dicom directory') + dicom_info_file = File( + 'dicominfo.txt', + argstr='--o %s', + usedefault=True, + desc='file to which results are written') + sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers') + summarize = traits.Bool( + argstr='--summarize', desc='only print out info for run leaders') + + +class ParseDICOMDirOutputSpec(TraitedSpec): + dicom_info_file = File( + exists=True, desc='text file containing dicom information') + + +class ParseDICOMDir(FSCommand): + """Uses mri_parse_sdcmdir to get information from dicom directories + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import ParseDICOMDir + >>> dcminfo = ParseDICOMDir() + >>> dcminfo.inputs.dicom_dir = '.' + >>> dcminfo.inputs.sortbyrun = True + >>> dcminfo.inputs.summarize = True + >>> dcminfo.cmdline + 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' + + """ + + _cmd = 'mri_parse_sdcmdir' + input_spec = ParseDICOMDirInputSpec + output_spec = ParseDICOMDirOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.dicom_info_file): + outputs['dicom_info_file'] = os.path.join( + os.getcwd(), self.inputs.dicom_info_file) + return outputs + + +class UnpackSDICOMDirInputSpec(FSTraitedSpec): + source_dir = Directory( + exists=True, + argstr='-src %s', + mandatory=True, + desc='directory with the DICOM files') + output_dir = Directory( + argstr='-targ %s', + desc='top directory into which the files will be unpacked') + run_info = traits.Tuple( + traits.Int, + traits.Str, + traits.Str, + traits.Str, + mandatory=True, + argstr='-run %d %s %s %s', + xor=('run_info', 'config', 'seq_config'), + desc='runno subdir format name : spec unpacking rules on cmdline') + config = File( + exists=True, + argstr='-cfg %s', + mandatory=True, + xor=('run_info', 'config', 'seq_config'), + desc='specify unpacking rules in file') + seq_config = File( + exists=True, + argstr='-seqcfg %s', + mandatory=True, + xor=('run_info', 'config', 'seq_config'), + desc='specify unpacking rules based on sequence') + dir_structure = traits.Enum( + 'fsfast', + 'generic', + argstr='-%s', + desc='unpack to specified directory structures') + no_info_dump = traits.Bool( + argstr='-noinfodump', desc='do not create infodump file') + scan_only = File( + exists=True, + argstr='-scanonly %s', + desc='only scan the directory and put result in file') + log_file = File( + exists=True, argstr='-log %s', desc='explicilty set log file') + spm_zeropad = traits.Int( + argstr='-nspmzeropad %d', + desc='set frame number zero padding width for SPM') + no_unpack_err = traits.Bool( + argstr='-no-unpackerr', desc='do not try to unpack runs with errors') + + +class UnpackSDICOMDir(FSCommand): + """Use unpacksdcmdir to 
convert dicom files + + Call unpacksdcmdir -help from the command line to see more information on + using this command. + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import UnpackSDICOMDir + >>> unpack = UnpackSDICOMDir() + >>> unpack.inputs.source_dir = '.' + >>> unpack.inputs.output_dir = '.' + >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct') + >>> unpack.inputs.dir_structure = 'generic' + >>> unpack.cmdline + 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' + """ + _cmd = 'unpacksdcmdir' + input_spec = UnpackSDICOMDirInputSpec + + +class MRIConvertInputSpec(FSTraitedSpec): + read_only = traits.Bool(argstr='--read_only', desc='read the input volume') + no_write = traits.Bool(argstr='--no_write', desc='do not write output') + in_info = traits.Bool(argstr='--in_info', desc='display input info') + out_info = traits.Bool(argstr='--out_info', desc='display output info') + in_stats = traits.Bool(argstr='--in_stats', desc='display input stats') + out_stats = traits.Bool(argstr='--out_stats', desc='display output stats') + in_matrix = traits.Bool(argstr='--in_matrix', desc='display input matrix') + out_matrix = traits.Bool( + argstr='--out_matrix', desc='display output matrix') + in_i_size = traits.Int(argstr='--in_i_size %d', desc='input i size') + in_j_size = traits.Int(argstr='--in_j_size %d', desc='input j size') + in_k_size = traits.Int(argstr='--in_k_size %d', desc='input k size') + force_ras = traits.Bool( + argstr='--force_ras_good', + desc='use default when orientation info absent') + in_i_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--in_i_direction %f %f %f', + desc=' ') + in_j_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--in_j_direction %f %f %f', + desc=' ') + in_k_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--in_k_direction %f %f %f', + desc=' ') + _orientations = [ + 'LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', + 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', + 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', + 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', + 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR' + ] + # _orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])] + in_orientation = traits.Enum( + _orientations, + argstr='--in_orientation %s', + desc='specify the input orientation') + in_center = traits.List( + traits.Float, + maxlen=3, + argstr='--in_center %s', + desc=' ') + sphinx = traits.Bool( + argstr='--sphinx', desc='change orientation info to sphinx') + out_i_count = traits.Int( + argstr='--out_i_count %d', desc='some count ?? in i direction') + out_j_count = traits.Int( + argstr='--out_j_count %d', desc='some count ?? in j direction') + out_k_count = traits.Int( + argstr='--out_k_count %d', desc='some count ?? 
in k direction') + vox_size = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='-voxsize %f %f %f', + desc= + ' specify the size (mm) - useful for upsampling or downsampling' + ) + out_i_size = traits.Int(argstr='--out_i_size %d', desc='output i size') + out_j_size = traits.Int(argstr='--out_j_size %d', desc='output j size') + out_k_size = traits.Int(argstr='--out_k_size %d', desc='output k size') + out_i_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--out_i_direction %f %f %f', + desc=' ') + out_j_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--out_j_direction %f %f %f', + desc=' ') + out_k_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--out_k_direction %f %f %f', + desc=' ') + out_orientation = traits.Enum( + _orientations, + argstr='--out_orientation %s', + desc='specify the output orientation') + out_center = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--out_center %f %f %f', + desc=' ') + out_datatype = traits.Enum( + 'uchar', + 'short', + 'int', + 'float', + argstr='--out_data_type %s', + desc='output data type ') + resample_type = traits.Enum( + 'interpolate', + 'weighted', + 'nearest', + 'sinc', + 'cubic', + argstr='--resample_type %s', + desc= + ' (default is interpolate)') + no_scale = traits.Bool( + argstr='--no_scale 1', desc='dont rescale values for COR') + no_change = traits.Bool( + argstr='--nochange', + desc="don't change type of input to that of template") + tr = traits.Int(argstr='-tr %d', desc='TR in msec') + te = traits.Int(argstr='-te %d', desc='TE in msec') + ti = traits.Int(argstr='-ti %d', desc='TI in msec (note upper case flag)') + autoalign_matrix = File( + exists=True, + argstr='--autoalign %s', + desc='text file with autoalign matrix') + unwarp_gradient = traits.Bool( + argstr='--unwarp_gradient_nonlinearity', + desc='unwarp gradient nonlinearity') + apply_transform = File( + exists=True, argstr='--apply_transform %s', desc='apply xfm file') + apply_inv_transform = File( + exists=True, + argstr='--apply_inverse_transform %s', + desc='apply inverse transformation xfm file') + devolve_transform = traits.Str(argstr='--devolvexfm %s', desc='subject id') + crop_center = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--crop %d %d %d', + desc=' crop to 256 around center (x, y, z)') + crop_size = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--cropsize %d %d %d', + desc=' crop to size ') + cut_ends = traits.Int( + argstr='--cutends %d', desc='remove ncut slices from the ends') + slice_crop = traits.Tuple( + traits.Int, + traits.Int, + argstr='--slice-crop %d %d', + desc='s_start s_end : keep slices s_start to s_end') + slice_reverse = traits.Bool( + argstr='--slice-reverse', + desc='reverse order of slices, update vox2ras') + slice_bias = traits.Float( + argstr='--slice-bias %f', desc='apply half-cosine bias field') + fwhm = traits.Float( + argstr='--fwhm %f', desc='smooth input volume by fwhm mm') + _filetypes = [ + 'cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', + 'brik', 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', + 'nii', 'niigz' + ] + _infiletypes = [ + 'ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom' + ] + in_type = traits.Enum( + _filetypes + _infiletypes, + argstr='--in_type %s', + desc='input file type') + out_type = traits.Enum( + _filetypes, argstr='--out_type %s', desc='output file type') + ascii = traits.Bool( + 
argstr='--ascii', desc='save output as ascii col>row>slice>frame') + reorder = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--reorder %d %d %d', + desc='olddim1 olddim2 olddim3') + invert_contrast = traits.Float( + argstr='--invert_contrast %f', + desc='threshold for inversting contrast') + in_file = File( + exists=True, + mandatory=True, + position=-2, + argstr='--input_volume %s', + desc='File to read/convert') + out_file = File( + argstr='--output_volume %s', + position=-1, + genfile=True, + desc='output filename or True to generate one') + conform = traits.Bool( + argstr='--conform', + desc= + 'conform to 1mm voxel size in coronal slice direction with 256^3 or more' + ) + conform_min = traits.Bool( + argstr='--conform_min', desc='conform to smallest size') + conform_size = traits.Float( + argstr='--conform_size %s', desc='conform to size_in_mm') + cw256 = traits.Bool( + argstr='--cw256', desc='confrom to dimensions of 256^3') + parse_only = traits.Bool(argstr='--parse_only', desc='parse input only') + subject_name = traits.Str( + argstr='--subject_name %s', desc='subject name ???') + reslice_like = File( + exists=True, + argstr='--reslice_like %s', + desc='reslice output to match file') + template_type = traits.Enum( + _filetypes + _infiletypes, + argstr='--template_type %s', + desc='template file type') + split = traits.Bool( + argstr='--split', + desc='split output frames into separate output files.') + frame = traits.Int( + argstr='--frame %d', desc='keep only 0-based frame number') + midframe = traits.Bool( + argstr='--mid-frame', desc='keep only the middle frame') + skip_n = traits.Int(argstr='--nskip %d', desc='skip the first n frames') + drop_n = traits.Int(argstr='--ndrop %d', desc='drop the last n frames') + frame_subsample = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + argstr='--fsubsample %d %d %d', + desc='start delta end : frame subsampling (end = -1 for end)') + in_scale = traits.Float( + argstr='--scale %f', desc='input intensity scale factor') + out_scale = traits.Float( + argstr='--out-scale %d', desc='output intensity scale factor') + in_like = File(exists=True, argstr='--in_like %s', desc='input looks like') + fill_parcellation = traits.Bool( + argstr='--fill_parcellation', desc='fill parcellation') + smooth_parcellation = traits.Bool( + argstr='--smooth_parcellation', desc='smooth parcellation') + zero_outlines = traits.Bool(argstr='--zero_outlines', desc='zero outlines') + color_file = File(exists=True, argstr='--color_file %s', desc='color file') + no_translate = traits.Bool(argstr='--no_translate', desc='???') + status_file = File( + argstr='--status %s', desc='status file for DICOM conversion') + sdcm_list = File( + exists=True, + argstr='--sdcmlist %s', + desc='list of DICOM files for conversion') + template_info = traits.Bool( + argstr='--template_info', desc='dump info about template') + crop_gdf = traits.Bool(argstr='--crop_gdf', desc='apply GDF cropping') + zero_ge_z_offset = traits.Bool( + argstr='--zero_ge_z_offset', desc='zero ge z offset ???') + + +class MRIConvertOutputSpec(TraitedSpec): + out_file = OutputMultiPath(File(exists=True), desc='converted output file') + + +class MRIConvert(FSCommand): + """use fs mri_convert to manipulate files + + .. 
note:: + Adds niigz as an output type option + + Examples + -------- + + >>> mc = MRIConvert() + >>> mc.inputs.in_file = 'structural.nii' + >>> mc.inputs.out_file = 'outfile.mgz' + >>> mc.inputs.out_type = 'mgz' + >>> mc.cmdline + 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' + + """ + _cmd = 'mri_convert' + input_spec = MRIConvertInputSpec + output_spec = MRIConvertOutputSpec + + filemap = dict( + cor='cor', + mgh='mgh', + mgz='mgz', + minc='mnc', + afni='brik', + brik='brik', + bshort='bshort', + spm='img', + analyze='img', + analyze4d='img', + bfloat='bfloat', + nifti1='img', + nii='nii', + niigz='nii.gz') + + def _format_arg(self, name, spec, value): + if name in ['in_type', 'out_type', 'template_type']: + if value == 'niigz': + return spec.argstr % 'nii' + return super(MRIConvert, self)._format_arg(name, spec, value) + + def _get_outfilename(self): + outfile = self.inputs.out_file + if not isdefined(outfile): + if isdefined(self.inputs.out_type): + suffix = '_out.' + self.filemap[self.inputs.out_type] + else: + suffix = '_out.nii.gz' + outfile = fname_presuffix( + self.inputs.in_file, + newpath=os.getcwd(), + suffix=suffix, + use_ext=False) + return os.path.abspath(outfile) + + def _list_outputs(self): + outputs = self.output_spec().get() + outfile = self._get_outfilename() + if isdefined(self.inputs.split) and self.inputs.split: + size = load(self.inputs.in_file).shape + if len(size) == 3: + tp = 1 + else: + tp = size[-1] + if outfile.endswith('.mgz'): + stem = outfile.split('.mgz')[0] + ext = '.mgz' + elif outfile.endswith('.nii.gz'): + stem = outfile.split('.nii.gz')[0] + ext = '.nii.gz' + else: + stem = '.'.join(outfile.split('.')[:-1]) + ext = '.' + outfile.split('.')[-1] + outfile = [] + for idx in range(0, tp): + outfile.append(stem + '%04d' % idx + ext) + if isdefined(self.inputs.out_type): + if self.inputs.out_type in ['spm', 'analyze']: + # generate all outputs + size = load(self.inputs.in_file).shape + if len(size) == 3: + tp = 1 + else: + tp = size[-1] + # have to take care of all the frame manipulations + raise Exception( + 'Not taking frame manipulations into account- please warn the developers' + ) + outfiles = [] + outfile = self._get_outfilename() + for i in range(tp): + outfiles.append( + fname_presuffix(outfile, suffix='%03d' % (i + 1))) + outfile = outfiles + outputs['out_file'] = outfile + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._get_outfilename() + return None + + +class DICOMConvertInputSpec(FSTraitedSpec): + dicom_dir = Directory( + exists=True, + mandatory=True, + desc='dicom directory from which to convert dicom files') + base_output_dir = Directory( + mandatory=True, + desc='directory in which subject directories are created') + subject_dir_template = traits.Str( + 'S.%04d', usedefault=True, desc='template for subject directory name') + subject_id = traits.Any(desc='subject identifier to insert into template') + file_mapping = traits.List( + traits.Tuple(traits.Str, traits.Str), + desc='defines the output fields of interface') + out_type = traits.Enum( + 'niigz', + MRIConvertInputSpec._filetypes, + usedefault=True, + desc='defines the type of output file produced') + dicom_info = File( + exists=True, + desc='File containing summary information from mri_parse_sdcmdir') + seq_list = traits.List( + traits.Str, + requires=['dicom_info'], + desc='list of pulse sequence names to be converted.') + ignore_single_slice = traits.Bool( + requires=['dicom_info'], + desc='ignore 
volumes containing a single slice') + + +class DICOMConvert(FSCommand): + """use fs mri_convert to convert dicom files + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import DICOMConvert + >>> cvt = DICOMConvert() + >>> cvt.inputs.dicom_dir = 'dicomdir' + >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')] + + """ + _cmd = 'mri_convert' + input_spec = DICOMConvertInputSpec + + def _get_dicomfiles(self): + """validate fsl bet options + if set to None ignore + """ + return glob( + os.path.abspath(os.path.join(self.inputs.dicom_dir, '*-1.dcm'))) + + def _get_outdir(self): + """returns output directory""" + subjid = self.inputs.subject_id + if not isdefined(subjid): + path, fname = os.path.split(self._get_dicomfiles()[0]) + subjid = int(fname.split('-')[0]) + if isdefined(self.inputs.subject_dir_template): + subjid = self.inputs.subject_dir_template % subjid + basedir = self.inputs.base_output_dir + if not isdefined(basedir): + basedir = os.path.abspath('.') + outdir = os.path.abspath(os.path.join(basedir, subjid)) + return outdir + + def _get_runs(self): + """Returns list of dicom series that should be converted. + + Requires a dicom info summary file generated by ``DicomDirInfo`` + + """ + seq = np.genfromtxt(self.inputs.dicom_info, dtype=object) + runs = [] + for s in seq: + if self.inputs.seq_list: + if self.inputs.ignore_single_slice: + if (int(s[8]) > 1) and any( + [s[12].startswith(sn) for sn in self.inputs.seq_list]): + runs.append(int(s[2])) + else: + if any( + [s[12].startswith(sn) for sn in self.inputs.seq_list]): + runs.append(int(s[2])) + else: + runs.append(int(s[2])) + return runs + + def _get_filelist(self, outdir): + """Returns list of files to be converted""" + filemap = {} + for f in self._get_dicomfiles(): + head, fname = os.path.split(f) + fname, ext = os.path.splitext(fname) + fileparts = fname.split('-') + runno = int(fileparts[1]) + out_type = MRIConvert.filemap[self.inputs.out_type] + outfile = os.path.join(outdir, '.'.join( + ('%s-%02d' % (fileparts[0], runno), out_type))) + filemap[runno] = (f, outfile) + if self.inputs.dicom_info: + files = [filemap[r] for r in self._get_runs()] + else: + files = [filemap[r] for r in list(filemap.keys())] + return files + + @property + def cmdline(self): + """ `command` plus any arguments (args) + validates arguments and generates command line""" + self._check_mandatory_inputs() + outdir = self._get_outdir() + cmd = [] + if not os.path.exists(outdir): + cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir + cmd.extend([cmdstr]) + infofile = os.path.join(outdir, 'shortinfo.txt') + if not os.path.exists(infofile): + cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir, + infofile) + cmd.extend([cmdstr]) + files = self._get_filelist(outdir) + for infile, outfile in files: + if not os.path.exists(outfile): + single_cmd = '%s%s %s %s' % (self._cmd_prefix, self.cmd, + infile, os.path.join(outdir, + outfile)) + cmd.extend([single_cmd]) + return '; '.join(cmd) + + +class ResampleInputSpec(FSTraitedSpec): + in_file = File( + exists=True, + argstr='-i %s', + mandatory=True, + desc='file to resample', + position=-2) + resampled_file = File( + argstr='-o %s', desc='output filename', genfile=True, position=-1) + voxel_size = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='-vs %.2f %.2f %.2f', + desc='triplet of output voxel sizes', + mandatory=True) + + +class ResampleOutputSpec(TraitedSpec): + resampled_file = File(exists=True, desc='output 
filename') + + +class Resample(FSCommand): + """Use FreeSurfer mri_convert to up or down-sample image files + + Examples + -------- + + >>> from nipype.interfaces import freesurfer + >>> resampler = freesurfer.Resample() + >>> resampler.inputs.in_file = 'structural.nii' + >>> resampler.inputs.resampled_file = 'resampled.nii' + >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1) + >>> resampler.cmdline + 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii' + + """ + + _cmd = 'mri_convert' + input_spec = ResampleInputSpec + output_spec = ResampleOutputSpec + + def _get_outfilename(self): + if isdefined(self.inputs.resampled_file): + outfile = self.inputs.resampled_file + else: + outfile = fname_presuffix( + self.inputs.in_file, newpath=os.getcwd(), suffix='_resample') + return outfile + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['resampled_file'] = self._get_outfilename() + return outputs + + def _gen_filename(self, name): + if name == 'resampled_file': + return self._get_outfilename() + return None + + +class ReconAllInputSpec(CommandLineInputSpec): + subject_id = traits.Str( + "recon_all", argstr='-subjid %s', desc='subject name', usedefault=True) + directive = traits.Enum( + 'all', + 'autorecon1', + # autorecon2 variants + 'autorecon2', + 'autorecon2-volonly', + 'autorecon2-perhemi', + 'autorecon2-inflate1', + 'autorecon2-cp', + 'autorecon2-wm', + # autorecon3 variants + 'autorecon3', + 'autorecon3-T2pial', + # Mix of autorecon2 and autorecon3 steps + 'autorecon-pial', + 'autorecon-hemi', + # Not "multi-stage flags" + 'localGI', + 'qcache', + argstr='-%s', + desc='process directive', + usedefault=True, + position=0) + hemi = traits.Enum( + 'lh', 'rh', desc='hemisphere to process', argstr="-hemi %s") + T1_files = InputMultiPath( + File(exists=True), + argstr='-i %s...', + desc='name of T1 file to process') + T2_file = File( + exists=True, + argstr="-T2 %s", + min_ver='5.3.0', + desc='Convert T2 image to orig directory') + FLAIR_file = File( + exists=True, + argstr="-FLAIR %s", + min_ver='5.3.0', + desc='Convert FLAIR image to orig directory') + use_T2 = traits.Bool( + argstr="-T2pial", + min_ver='5.3.0', + xor=['use_FLAIR'], + desc='Use T2 image to refine the pial surface') + use_FLAIR = traits.Bool( + argstr="-FLAIRpial", + min_ver='5.3.0', + xor=['use_T2'], + desc='Use FLAIR image to refine the pial surface') + openmp = traits.Int( + argstr="-openmp %d", desc="Number of processors to use in parallel") + parallel = traits.Bool( + argstr="-parallel", desc="Enable parallel execution") + hires = traits.Bool( + argstr="-hires", + min_ver='6.0.0', + desc="Conform to minimum voxel size (for voxels < 1mm)") + mprage = traits.Bool( + argstr='-mprage', + desc=('Assume scan parameters are MGH MP-RAGE ' + 'protocol, which produces darker gray matter')) + big_ventricles = traits.Bool( + argstr='-bigventricles', + desc=('For use in subjects with enlarged ' + 'ventricles')) + brainstem = traits.Bool( + argstr='-brainstem-structures', desc='Segment brainstem structures') + hippocampal_subfields_T1 = traits.Bool( + argstr='-hippocampal-subfields-T1', + min_ver='6.0.0', + desc='segment hippocampal subfields using input T1 scan') + hippocampal_subfields_T2 = traits.Tuple( + File(exists=True), + traits.Str(), + argstr='-hippocampal-subfields-T2 %s %s', + min_ver='6.0.0', + desc=('segment hippocampal subfields using T2 scan, identified by ' + 'ID (may be combined with hippocampal_subfields_T1)')) + expert = File( + exists=True, + argstr='-expert %s', + desc="Set 
parameters using expert file") + xopts = traits.Enum( + "use", + "clean", + "overwrite", + argstr='-xopts-%s', + desc="Use, delete or overwrite existing expert options file") + subjects_dir = Directory( + exists=True, + argstr='-sd %s', + hash_files=False, + desc='path to subjects directory', + genfile=True) + flags = InputMultiPath( + traits.Str, argstr='%s', desc='additional parameters') + + # Expert options + talairach = traits.Str( + desc="Flags to pass to talairach commands", xor=['expert']) + mri_normalize = traits.Str( + desc="Flags to pass to mri_normalize commands", xor=['expert']) + mri_watershed = traits.Str( + desc="Flags to pass to mri_watershed commands", xor=['expert']) + mri_em_register = traits.Str( + desc="Flags to pass to mri_em_register commands", xor=['expert']) + mri_ca_normalize = traits.Str( + desc="Flags to pass to mri_ca_normalize commands", xor=['expert']) + mri_ca_register = traits.Str( + desc="Flags to pass to mri_ca_register commands", xor=['expert']) + mri_remove_neck = traits.Str( + desc="Flags to pass to mri_remove_neck commands", xor=['expert']) + mri_ca_label = traits.Str( + desc="Flags to pass to mri_ca_label commands", xor=['expert']) + mri_segstats = traits.Str( + desc="Flags to pass to mri_segstats commands", xor=['expert']) + mri_mask = traits.Str( + desc="Flags to pass to mri_mask commands", xor=['expert']) + mri_segment = traits.Str( + desc="Flags to pass to mri_segment commands", xor=['expert']) + mri_edit_wm_with_aseg = traits.Str( + desc="Flags to pass to mri_edit_wm_with_aseg commands", xor=['expert']) + mri_pretess = traits.Str( + desc="Flags to pass to mri_pretess commands", xor=['expert']) + mri_fill = traits.Str( + desc="Flags to pass to mri_fill commands", xor=['expert']) + mri_tessellate = traits.Str( + desc="Flags to pass to mri_tessellate commands", xor=['expert']) + mris_smooth = traits.Str( + desc="Flags to pass to mri_smooth commands", xor=['expert']) + mris_inflate = traits.Str( + desc="Flags to pass to mri_inflate commands", xor=['expert']) + mris_sphere = traits.Str( + desc="Flags to pass to mris_sphere commands", xor=['expert']) + mris_fix_topology = traits.Str( + desc="Flags to pass to mris_fix_topology commands", xor=['expert']) + mris_make_surfaces = traits.Str( + desc="Flags to pass to mris_make_surfaces commands", xor=['expert']) + mris_surf2vol = traits.Str( + desc="Flags to pass to mris_surf2vol commands", xor=['expert']) + mris_register = traits.Str( + desc="Flags to pass to mris_register commands", xor=['expert']) + mrisp_paint = traits.Str( + desc="Flags to pass to mrisp_paint commands", xor=['expert']) + mris_ca_label = traits.Str( + desc="Flags to pass to mris_ca_label commands", xor=['expert']) + mris_anatomical_stats = traits.Str( + desc="Flags to pass to mris_anatomical_stats commands", xor=['expert']) + mri_aparc2aseg = traits.Str( + desc="Flags to pass to mri_aparc2aseg commands", xor=['expert']) + + +class ReconAllOutputSpec(FreeSurferSource.output_spec): + subjects_dir = Directory( + exists=True, desc='Freesurfer subjects directory.') + subject_id = traits.Str(desc='Subject name for whom to retrieve data') + + +class ReconAll(CommandLine): + """Uses recon-all to generate surfaces and parcellations of structural data + from anatomical images of a subject. + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import ReconAll + >>> reconall = ReconAll() + >>> reconall.inputs.subject_id = 'foo' + >>> reconall.inputs.directive = 'all' + >>> reconall.inputs.subjects_dir = '.' 
+ >>> reconall.inputs.T1_files = 'structural.nii' + >>> reconall.cmdline + 'recon-all -all -i structural.nii -subjid foo -sd .' + >>> reconall.inputs.flags = "-qcache" + >>> reconall.cmdline + 'recon-all -all -i structural.nii -qcache -subjid foo -sd .' + >>> reconall.inputs.flags = ["-cw256", "-qcache"] + >>> reconall.cmdline + 'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .' + + Hemisphere may be specified regardless of directive: + + >>> reconall.inputs.flags = [] + >>> reconall.inputs.hemi = 'lh' + >>> reconall.cmdline + 'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .' + + ``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere + to operate upon: + + >>> reconall.inputs.directive = 'autorecon-hemi' + >>> reconall.cmdline + 'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .' + + Hippocampal subfields can accept T1 and T2 images: + + >>> reconall_subfields = ReconAll() + >>> reconall_subfields.inputs.subject_id = 'foo' + >>> reconall_subfields.inputs.directive = 'all' + >>> reconall_subfields.inputs.subjects_dir = '.' + >>> reconall_subfields.inputs.T1_files = 'structural.nii' + >>> reconall_subfields.inputs.hippocampal_subfields_T1 = True + >>> reconall_subfields.cmdline + 'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .' + >>> reconall_subfields.inputs.hippocampal_subfields_T2 = ( + ... 'structural.nii', 'test') + >>> reconall_subfields.cmdline + 'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .' + >>> reconall_subfields.inputs.hippocampal_subfields_T1 = False + >>> reconall_subfields.cmdline + 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .' + """ + + _cmd = 'recon-all' + _additional_metadata = ['loc', 'altkey'] + input_spec = ReconAllInputSpec + output_spec = ReconAllOutputSpec + _can_resume = True + force_run = False + + # Steps are based off of the recon-all tables [0,1] describing, inputs, + # commands, and outputs of each step of the recon-all process, + # controlled by flags. + # + # Each step is a 3-tuple containing (flag, [outputs], [inputs]) + # A step is considered complete if all of its outputs exist and are newer + # than the inputs. An empty input list indicates input mtimes will not + # be checked. This may need updating, if users are working with manually + # edited files. 
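+    #
+    # As an illustration, the completeness test is roughly equivalent to the
+    # hypothetical sketch below (the actual check is performed by the
+    # check_depends helper used in cmdline):
+    #
+    #     def step_is_complete(outputs, inputs):
+    #         if not all(os.path.exists(f) for f in outputs):
+    #             return False
+    #         if not inputs:  # empty input list: skip the mtime comparison
+    #             return True
+    #         newest_input = max(os.path.getmtime(f) for f in inputs)
+    #         return all(os.path.getmtime(f) >= newest_input
+    #                    for f in outputs)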
+ # + # [0] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV5.3 + # [1] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0 + _autorecon1_steps = [ + ('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz'], []), + ( + 'talairach', + [ + 'mri/orig_nu.mgz', + 'mri/transforms/talairach.auto.xfm', + 'mri/transforms/talairach.xfm', + # 'mri/transforms/talairach_avi.log', + ], + []), + ('nuintensitycor', ['mri/nu.mgz'], []), + ('normalization', ['mri/T1.mgz'], []), + ('skullstrip', [ + 'mri/transforms/talairach_with_skull.lta', + 'mri/brainmask.auto.mgz', 'mri/brainmask.mgz' + ], []), + ] + if Info.looseversion() < LooseVersion("6.0.0"): + _autorecon2_volonly_steps = [ + ('gcareg', ['mri/transforms/talairach.lta'], []), + ('canorm', ['mri/norm.mgz'], []), + ('careg', ['mri/transforms/talairach.m3z'], []), + ('careginv', [ + 'mri/transforms/talairach.m3z.inv.x.mgz', + 'mri/transforms/talairach.m3z.inv.y.mgz', + 'mri/transforms/talairach.m3z.inv.z.mgz', + ], []), + ('rmneck', ['mri/nu_noneck.mgz'], []), + ('skull-lta', ['mri/transforms/talairach_with_skull_2.lta'], []), + ('calabel', [ + 'mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', + 'mri/aseg.mgz' + ], []), + ('normalization2', ['mri/brain.mgz'], []), + ('maskbfs', ['mri/brain.finalsurfs.mgz'], []), + ('segmentation', + ['mri/wm.seg.mgz', 'mri/wm.asegedit.mgz', 'mri/wm.mgz'], []), + ( + 'fill', + [ + 'mri/filled.mgz', + # 'scripts/ponscc.cut.log', + ], + []), + ] + _autorecon2_lh_steps = [ + ('tessellate', ['surf/lh.orig.nofix'], []), + ('smooth1', ['surf/lh.smoothwm.nofix'], []), + ('inflate1', ['surf/lh.inflated.nofix'], []), + ('qsphere', ['surf/lh.qsphere.nofix'], []), + ('fix', ['surf/lh.orig'], []), + ('white', [ + 'surf/lh.white', 'surf/lh.curv', 'surf/lh.area', + 'label/lh.cortex.label' + ], []), + ('smooth2', ['surf/lh.smoothwm'], []), + ('inflate2', [ + 'surf/lh.inflated', 'surf/lh.sulc', 'surf/lh.inflated.H', + 'surf/lh.inflated.K' + ], []), + # Undocumented in ReconAllTableStableV5.3 + ('curvstats', ['stats/lh.curv.stats'], []), + ] + _autorecon3_lh_steps = [ + ('sphere', ['surf/lh.sphere'], []), + ('surfreg', ['surf/lh.sphere.reg'], []), + ('jacobian_white', ['surf/lh.jacobian_white'], []), + ('avgcurv', ['surf/lh.avg_curv'], []), + ('cortparc', ['label/lh.aparc.annot'], []), + ('pial', [ + 'surf/lh.pial', 'surf/lh.curv.pial', 'surf/lh.area.pial', + 'surf/lh.thickness' + ], []), + # Misnamed outputs in ReconAllTableStableV5.3: ?h.w-c.pct.mgz + ('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []), + ('parcstats', ['stats/lh.aparc.stats'], []), + ('cortparc2', ['label/lh.aparc.a2009s.annot'], []), + ('parcstats2', ['stats/lh.aparc.a2009s.stats'], []), + # Undocumented in ReconAllTableStableV5.3 + ('cortparc3', ['label/lh.aparc.DKTatlas40.annot'], []), + # Undocumented in ReconAllTableStableV5.3 + ('parcstats3', ['stats/lh.aparc.a2009s.stats'], []), + ('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label'], []), + ] + _autorecon3_added_steps = [ + ('cortribbon', + ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz', 'mri/ribbon.mgz'], []), + ('segstats', ['stats/aseg.stats'], []), + ('aparc2aseg', ['mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz'], + []), + ('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []), + ('balabels', ['label/BA.ctab', 'label/BA.thresh.ctab'], []), + ] + else: + _autorecon2_volonly_steps = [ + ('gcareg', ['mri/transforms/talairach.lta'], []), + ('canorm', ['mri/norm.mgz'], []), + ('careg', ['mri/transforms/talairach.m3z'], []), + ('calabel', [ + 'mri/aseg.auto_noCCseg.mgz', 'mri/aseg.auto.mgz', + 
'mri/aseg.mgz' + ], []), + ('normalization2', ['mri/brain.mgz'], []), + ('maskbfs', ['mri/brain.finalsurfs.mgz'], []), + ('segmentation', + ['mri/wm.seg.mgz', 'mri/wm.asegedit.mgz', 'mri/wm.mgz'], []), + ( + 'fill', + [ + 'mri/filled.mgz', + # 'scripts/ponscc.cut.log', + ], + []), + ] + _autorecon2_lh_steps = [ + ('tessellate', ['surf/lh.orig.nofix'], []), + ('smooth1', ['surf/lh.smoothwm.nofix'], []), + ('inflate1', ['surf/lh.inflated.nofix'], []), + ('qsphere', ['surf/lh.qsphere.nofix'], []), + ('fix', ['surf/lh.orig'], []), + ('white', [ + 'surf/lh.white.preaparc', 'surf/lh.curv', 'surf/lh.area', + 'label/lh.cortex.label' + ], []), + ('smooth2', ['surf/lh.smoothwm'], []), + ('inflate2', ['surf/lh.inflated', 'surf/lh.sulc'], []), + ('curvHK', [ + 'surf/lh.white.H', 'surf/lh.white.K', 'surf/lh.inflated.H', + 'surf/lh.inflated.K' + ], []), + ('curvstats', ['stats/lh.curv.stats'], []), + ] + _autorecon3_lh_steps = [ + ('sphere', ['surf/lh.sphere'], []), + ('surfreg', ['surf/lh.sphere.reg'], []), + ('jacobian_white', ['surf/lh.jacobian_white'], []), + ('avgcurv', ['surf/lh.avg_curv'], []), + ('cortparc', ['label/lh.aparc.annot'], []), + ('pial', [ + 'surf/lh.pial', 'surf/lh.curv.pial', 'surf/lh.area.pial', + 'surf/lh.thickness', 'surf/lh.white' + ], []), + ('parcstats', ['stats/lh.aparc.stats'], []), + ('cortparc2', ['label/lh.aparc.a2009s.annot'], []), + ('parcstats2', ['stats/lh.aparc.a2009s.stats'], []), + ('cortparc3', ['label/lh.aparc.DKTatlas.annot'], []), + ('parcstats3', ['stats/lh.aparc.DKTatlas.stats'], []), + ('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []), + ] + _autorecon3_added_steps = [ + ('cortribbon', + ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz', 'mri/ribbon.mgz'], []), + ('hyporelabel', ['mri/aseg.presurf.hypos.mgz'], []), + ('aparc2aseg', [ + 'mri/aparc+aseg.mgz', 'mri/aparc.a2009s+aseg.mgz', + 'mri/aparc.DKTatlas+aseg.mgz' + ], []), + ('apas2aseg', ['mri/aseg.mgz'], ['mri/aparc+aseg.mgz']), + ('segstats', ['stats/aseg.stats'], []), + ('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []), + # Note that this is a very incomplete list; however the ctab + # files are last to be touched, so this should be reasonable + ('balabels', [ + 'label/BA_exvivo.ctab', 'label/BA_exvivo.thresh.ctab', + 'label/lh.entorhinal_exvivo.label', + 'label/rh.entorhinal_exvivo.label' + ], []), + ] + + # Fill out autorecon2 steps + _autorecon2_rh_steps = [(step, [out.replace('lh', 'rh') + for out in outs], ins) + for step, outs, ins in _autorecon2_lh_steps] + _autorecon2_perhemi_steps = [(step, [ + of for out in outs for of in (out, out.replace('lh', 'rh')) + ], ins) for step, outs, ins in _autorecon2_lh_steps] + _autorecon2_steps = _autorecon2_volonly_steps + _autorecon2_perhemi_steps + + # Fill out autorecon3 steps + _autorecon3_rh_steps = [(step, [out.replace('lh', 'rh') + for out in outs], ins) + for step, outs, ins in _autorecon3_lh_steps] + _autorecon3_perhemi_steps = [(step, [ + of for out in outs for of in (out, out.replace('lh', 'rh')) + ], ins) for step, outs, ins in _autorecon3_lh_steps] + _autorecon3_steps = _autorecon3_perhemi_steps + _autorecon3_added_steps + + # Fill out autorecon-hemi lh/rh steps + _autorecon_lh_steps = (_autorecon2_lh_steps + _autorecon3_lh_steps) + _autorecon_rh_steps = (_autorecon2_rh_steps + _autorecon3_rh_steps) + + _steps = _autorecon1_steps + _autorecon2_steps + _autorecon3_steps + + _binaries = [ + 'talairach', 'mri_normalize', 'mri_watershed', 'mri_em_register', + 'mri_ca_normalize', 'mri_ca_register', 'mri_remove_neck', + 'mri_ca_label', 'mri_segstats', 
'mri_mask', 'mri_segment', + 'mri_edit_wm_with_aseg', 'mri_pretess', 'mri_fill', 'mri_tessellate', + 'mris_smooth', 'mris_inflate', 'mris_sphere', 'mris_fix_topology', + 'mris_make_surfaces', 'mris_surf2vol', 'mris_register', 'mrisp_paint', + 'mris_ca_label', 'mris_anatomical_stats', 'mri_aparc2aseg' + ] + + def _gen_subjects_dir(self): + return os.getcwd() + + def _gen_filename(self, name): + if name == 'subjects_dir': + return self._gen_subjects_dir() + return None + + def _list_outputs(self): + """ + See io.FreeSurferSource.outputs for the list of outputs returned + """ + if isdefined(self.inputs.subjects_dir): + subjects_dir = self.inputs.subjects_dir + else: + subjects_dir = self._gen_subjects_dir() + + if isdefined(self.inputs.hemi): + hemi = self.inputs.hemi + else: + hemi = 'both' + + outputs = self._outputs().get() + + outputs.update( + FreeSurferSource( + subject_id=self.inputs.subject_id, + subjects_dir=subjects_dir, + hemi=hemi)._list_outputs()) + outputs['subject_id'] = self.inputs.subject_id + outputs['subjects_dir'] = subjects_dir + return outputs + + def _is_resuming(self): + subjects_dir = self.inputs.subjects_dir + if not isdefined(subjects_dir): + subjects_dir = self._gen_subjects_dir() + if os.path.isdir( + os.path.join(subjects_dir, self.inputs.subject_id, 'mri')): + return True + return False + + def _format_arg(self, name, trait_spec, value): + if name == 'T1_files': + if self._is_resuming(): + return None + if name == 'hippocampal_subfields_T1' and \ + isdefined(self.inputs.hippocampal_subfields_T2): + return None + if all((name == 'hippocampal_subfields_T2', + isdefined(self.inputs.hippocampal_subfields_T1) + and self.inputs.hippocampal_subfields_T1)): + argstr = trait_spec.argstr.replace('T2', 'T1T2') + return argstr % value + if name == 'directive' and value == 'autorecon-hemi': + if not isdefined(self.inputs.hemi): + raise ValueError("Directive 'autorecon-hemi' requires hemi " + "input to be set") + value += ' ' + self.inputs.hemi + if all((name == 'hemi', isdefined(self.inputs.directive) + and self.inputs.directive == 'autorecon-hemi')): + return None + return super(ReconAll, self)._format_arg(name, trait_spec, value) + + @property + def cmdline(self): + cmd = super(ReconAll, self).cmdline + + # Adds '-expert' flag if expert flags are passed + # Mutually exclusive with 'expert' input parameter + cmd += self._prep_expert_file() + + if not self._is_resuming(): + return cmd + subjects_dir = self.inputs.subjects_dir + if not isdefined(subjects_dir): + subjects_dir = self._gen_subjects_dir() + + # Check only relevant steps + directive = self.inputs.directive + if not isdefined(directive): + steps = [] + elif directive == 'autorecon1': + steps = self._autorecon1_steps + elif directive == 'autorecon2-volonly': + steps = self._autorecon2_volonly_steps + elif directive == 'autorecon2-perhemi': + steps = self._autorecon2_perhemi_steps + elif directive.startswith('autorecon2'): + if isdefined(self.inputs.hemi): + if self.inputs.hemi == 'lh': + steps = (self._autorecon2_volonly_steps + + self._autorecon2_lh_steps) + else: + steps = (self._autorecon2_volonly_steps + + self._autorecon2_rh_steps) + else: + steps = self._autorecon2_steps + elif directive == 'autorecon-hemi': + if self.inputs.hemi == 'lh': + steps = self._autorecon_lh_steps + else: + steps = self._autorecon_rh_steps + elif directive == 'autorecon3': + steps = self._autorecon3_steps + else: + steps = self._steps + + no_run = True + flags = [] + for step, outfiles, infiles in steps: + flag = '-{}'.format(step) + 
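# recon-all pairs each step flag with a -no<step> switch: an explicit + # -no<step> from the user is honored as-is, while an explicit -<step> + # forces the step to run regardless of output timestamps. +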
noflag = '-no{}'.format(step) + if noflag in cmd: + continue + elif flag in cmd: + no_run = False + continue + + subj_dir = os.path.join(subjects_dir, self.inputs.subject_id) + if check_depends([os.path.join(subj_dir, f) for f in outfiles], + [os.path.join(subj_dir, f) for f in infiles]): + flags.append(noflag) + else: + no_run = False + + if no_run and not self.force_run: + iflogger.info('recon-all complete : Not running') + return "echo recon-all: nothing to do" + + cmd += ' ' + ' '.join(flags) + iflogger.info('resume recon-all : %s', cmd) + return cmd + + def _prep_expert_file(self): + if isdefined(self.inputs.expert): + return '' + + lines = [] + for binary in self._binaries: + args = getattr(self.inputs, binary) + if isdefined(args): + lines.append('{} {}\n'.format(binary, args)) + + if lines == []: + return '' + + contents = ''.join(lines) + if not isdefined(self.inputs.xopts) and \ + self._get_expert_file() == contents: + return ' -xopts-use' + + expert_fname = os.path.abspath('expert.opts') + with open(expert_fname, 'w') as fobj: + fobj.write(contents) + return ' -expert {}'.format(expert_fname) + + def _get_expert_file(self): + # Read pre-existing options file, if it exists + if isdefined(self.inputs.subjects_dir): + subjects_dir = self.inputs.subjects_dir + else: + subjects_dir = self._gen_subjects_dir() + + xopts_file = os.path.join(subjects_dir, self.inputs.subject_id, + 'scripts', 'expert-options') + if not os.path.exists(xopts_file): + return '' + with open(xopts_file, 'r') as fobj: + return fobj.read() + + +class BBRegisterInputSpec(FSTraitedSpec): + subject_id = traits.Str( + argstr='--s %s', desc='freesurfer subject id', mandatory=True) + source_file = File( + argstr='--mov %s', + desc='source file to be registered', + mandatory=True, + copyfile=False) + init = traits.Enum( + 'spm', + 'fsl', + 'header', + argstr='--init-%s', + mandatory=True, + xor=['init_reg_file'], + desc='initialize registration spm, fsl, header') + init_reg_file = File( + exists=True, + argstr='--init-reg %s', + desc='existing registration file', + xor=['init'], + mandatory=True) + contrast_type = traits.Enum( + 't1', + 't2', + 'bold', + 'dti', + argstr='--%s', + desc='contrast type of image', + mandatory=True) + intermediate_file = File( + exists=True, + argstr="--int %s", + desc="Intermediate image, e.g. 
in case of partial FOV") + reg_frame = traits.Int( + argstr="--frame %d", + xor=["reg_middle_frame"], + desc="0-based frame index for 4D source file") + reg_middle_frame = traits.Bool( + argstr="--mid-frame", + xor=["reg_frame"], + desc="Register middle frame of 4D source file") + out_reg_file = File( + argstr='--reg %s', desc='output registration file', genfile=True) + spm_nifti = traits.Bool( + argstr="--spm-nii", + desc="force use of nifti rather than analyze with SPM") + epi_mask = traits.Bool( + argstr="--epi-mask", desc="mask out B0 regions in stages 1 and 2") + dof = traits.Enum( + 6, 9, 12, argstr='--%d', desc='number of transform degrees of freedom') + fsldof = traits.Int( + argstr='--fsl-dof %d', + desc='degrees of freedom for initial registration (FSL)') + out_fsl_file = traits.Either( + traits.Bool, + File, + argstr="--fslmat %s", + desc="write the transformation matrix in FSL FLIRT format") + out_lta_file = traits.Either( + traits.Bool, + File, + argstr="--lta %s", + min_ver='5.2.0', + desc="write the transformation matrix in LTA format") + registered_file = traits.Either( + traits.Bool, + File, + argstr='--o %s', + desc='output warped sourcefile either True or filename') + init_cost_file = traits.Either( + traits.Bool, + File, + argstr='--initcost %s', + desc='output initial registration cost file') + + +class BBRegisterInputSpec6(BBRegisterInputSpec): + init = traits.Enum( + 'coreg', + 'rr', + 'spm', + 'fsl', + 'header', + 'best', + argstr='--init-%s', + xor=['init_reg_file'], + desc='initialize registration with mri_coreg, spm, fsl, or header') + init_reg_file = File( + exists=True, + argstr='--init-reg %s', + desc='existing registration file', + xor=['init']) + + +class BBRegisterOutputSpec(TraitedSpec): + out_reg_file = File(exists=True, desc='Output registration file') + out_fsl_file = File( + exists=True, desc='Output FLIRT-style registration file') + out_lta_file = File(exists=True, desc='Output LTA-style registration file') + min_cost_file = File( + exists=True, desc='Output registration minimum cost file') + init_cost_file = File( + exists=True, desc='Output initial registration cost file') + registered_file = File( + exists=True, desc='Registered and resampled source file') + + +class BBRegister(FSCommand): + """Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical. + + This program performs within-subject, cross-modal registration using a + boundary-based cost function. It is required that you have an anatomical + scan of the subject that has already been recon-all-ed using freesurfer. 
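+
+    ``init`` and ``init_reg_file`` are mutually exclusive ways to initialize
+    the registration; with FreeSurfer 6.0 and newer, the input spec also
+    accepts ``coreg``, ``rr`` and ``best`` as ``init`` values.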
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.freesurfer import BBRegister
+    >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2')
+    >>> bbreg.cmdline
+    'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me'
+
+    """
+
+    _cmd = 'bbregister'
+    if LooseVersion('0.0.0') < Info.looseversion() < LooseVersion("6.0.0"):
+        input_spec = BBRegisterInputSpec
+    else:
+        input_spec = BBRegisterInputSpec6
+    output_spec = BBRegisterOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        _in = self.inputs
+
+        if isdefined(_in.out_reg_file):
+            outputs['out_reg_file'] = op.abspath(_in.out_reg_file)
+        elif _in.source_file:
+            suffix = '_bbreg_%s.dat' % _in.subject_id
+            outputs['out_reg_file'] = fname_presuffix(
+                _in.source_file, suffix=suffix, use_ext=False)
+
+        if isdefined(_in.registered_file):
+            if isinstance(_in.registered_file, bool):
+                outputs['registered_file'] = fname_presuffix(
+                    _in.source_file, suffix='_bbreg')
+            else:
+                outputs['registered_file'] = op.abspath(_in.registered_file)
+
+        if isdefined(_in.out_lta_file):
+            if isinstance(_in.out_lta_file, bool):
+                suffix = '_bbreg_%s.lta' % _in.subject_id
+                out_lta_file = fname_presuffix(
+                    _in.source_file, suffix=suffix, use_ext=False)
+                outputs['out_lta_file'] = out_lta_file
+            else:
+                outputs['out_lta_file'] = op.abspath(_in.out_lta_file)
+
+        if isdefined(_in.out_fsl_file):
+            if isinstance(_in.out_fsl_file, bool):
+                suffix = '_bbreg_%s.mat' % _in.subject_id
+                out_fsl_file = fname_presuffix(
+                    _in.source_file, suffix=suffix, use_ext=False)
+                outputs['out_fsl_file'] = out_fsl_file
+            else:
+                outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file)
+
+        if isdefined(_in.init_cost_file):
+            if isinstance(_in.init_cost_file, bool):
+                outputs['init_cost_file'] = (outputs['out_reg_file'] +
+                                             '.initcost')
+            else:
+                outputs['init_cost_file'] = op.abspath(_in.init_cost_file)
+
+        outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost'
+        return outputs
+
+    def _format_arg(self, name, spec, value):
+        if name in ('registered_file', 'out_fsl_file', 'out_lta_file',
+                    'init_cost_file') and isinstance(value, bool):
+            value = self._list_outputs()[name]
+        return super(BBRegister, self)._format_arg(name, spec, value)
+
+    def _gen_filename(self, name):
+        if name == 'out_reg_file':
+            return self._list_outputs()[name]
+        return None
+
+
+class ApplyVolTransformInputSpec(FSTraitedSpec):
+    source_file = File(
+        exists=True,
+        argstr='--mov %s',
+        copyfile=False,
+        mandatory=True,
+        desc='Input volume you wish to transform')
+    transformed_file = File(
+        desc='Output volume', argstr='--o %s', genfile=True)
+    _targ_xor = ('target_file', 'tal', 'fs_target')
+    target_file = File(
+        exists=True,
+        argstr='--targ %s',
+        xor=_targ_xor,
+        desc='Output template volume',
+        mandatory=True)
+    tal = traits.Bool(
+        argstr='--tal',
+        xor=_targ_xor,
+        mandatory=True,
+        desc='map to a sub FOV of MNI305 (with --reg only)')
+    tal_resolution = traits.Float(
+        argstr="--talres %.10f", desc="Resolution to sample when using tal")
+    fs_target = traits.Bool(
+        argstr='--fstarg',
+        xor=_targ_xor,
+        mandatory=True,
+        requires=['reg_file'],
+        desc='use orig.mgz from subject in regfile as target')
+    _reg_xor = ('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file',
+                'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject')
+    reg_file = File(
+        exists=True,
+        xor=_reg_xor,
+        argstr='--reg %s',
+        mandatory=True,
+        desc='tkRAS-to-tkRAS matrix (tkregister2 format)')
+    lta_file = File(
+        exists=True,
+        xor=_reg_xor,
+        argstr='--lta %s',
+        mandatory=True,
+        desc='Linear Transform Array file')
+    lta_inv_file = File(
+        exists=True,
+        xor=_reg_xor,
+        argstr='--lta-inv %s',
+        mandatory=True,
+        desc='LTA, invert')
+    fsl_reg_file = File(
+        exists=True,
+        xor=_reg_xor,
+        argstr='--fsl %s',
+        mandatory=True,
+        desc='fslRAS-to-fslRAS matrix (FSL format)')
+    xfm_reg_file = File(
+        exists=True,
+        xor=_reg_xor,
+        argstr='--xfm %s',
+        mandatory=True,
+        desc='ScannerRAS-to-ScannerRAS matrix (MNI format)')
+    reg_header = traits.Bool(
+        xor=_reg_xor,
+        argstr='--regheader',
+        mandatory=True,
+        desc='ScannerRAS-to-ScannerRAS matrix = identity')
+    mni_152_reg = traits.Bool(
+        xor=_reg_xor,
+        argstr='--regheader',
+        mandatory=True,
+        desc='target MNI152 space')
+    subject = traits.Str(
+        xor=_reg_xor,
+        argstr='--s %s',
+        mandatory=True,
+        desc='set matrix = identity and use subject for any templates')
+    inverse = traits.Bool(desc='sample from target to source', argstr='--inv')
+    interp = traits.Enum(
+        'trilin',
+        'nearest',
+        'cubic',
+        argstr='--interp %s',
+        desc='Interpolation method ( or nearest)')
+    no_resample = traits.Bool(
+        desc='Do not resample; just change vox2ras matrix',
+        argstr='--no-resample')
+    m3z_file = File(
+        argstr="--m3z %s",
+        desc=('This is the morph to be applied to the volume. '
+              'Unless the morph is in mri/transforms (eg.: for '
+              'talairach.m3z computed by reconall), you will need '
+              'to specify the full path to this morph and use the '
+              '--noDefM3zPath flag.'))
+    no_ded_m3z_path = traits.Bool(
+        argstr="--noDefM3zPath",
+        requires=['m3z_file'],
+        desc=('To be used with the m3z flag. '
+              'Instructs the code not to look for the '
+              'm3z morph in the default location '
+              '(SUBJECTS_DIR/subj/mri/transforms), '
+              'but instead just use the path '
+              'indicated in --m3z.'))
+
+    invert_morph = traits.Bool(
+        argstr="--inv-morph",
+        requires=['m3z_file'],
+        desc=('Compute and use the inverse of the '
+              'non-linear morph to resample the input '
+              'volume. To be used by --m3z.'))
+
+
+class ApplyVolTransformOutputSpec(TraitedSpec):
+    transformed_file = File(
+        exists=True, desc='Path to output file if used normally')
+
+
+class ApplyVolTransform(FSCommand):
+    """Use FreeSurfer mri_vol2vol to apply a transform.
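+
+    Exactly one target specifier (``target_file``, ``tal`` or ``fs_target``)
+    and exactly one registration specifier (``reg_file``, ``lta_file``,
+    ``lta_inv_file``, ``fsl_reg_file``, ``xfm_reg_file``, ``reg_header``,
+    ``mni_152_reg`` or ``subject``) must be set; each group is declared
+    mandatory and mutually exclusive in the input spec.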
+ + Examples + -------- + + >>> from nipype.interfaces.freesurfer import ApplyVolTransform + >>> applyreg = ApplyVolTransform() + >>> applyreg.inputs.source_file = 'structural.nii' + >>> applyreg.inputs.reg_file = 'register.dat' + >>> applyreg.inputs.transformed_file = 'struct_warped.nii' + >>> applyreg.inputs.fs_target = True + >>> applyreg.cmdline + 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' + + """ + + _cmd = 'mri_vol2vol' + input_spec = ApplyVolTransformInputSpec + output_spec = ApplyVolTransformOutputSpec + + def _get_outfile(self): + outfile = self.inputs.transformed_file + if not isdefined(outfile): + if self.inputs.inverse is True: + if self.inputs.fs_target is True: + src = 'orig.mgz' + else: + src = self.inputs.target_file + else: + src = self.inputs.source_file + outfile = fname_presuffix( + src, newpath=os.getcwd(), suffix='_warped') + return outfile + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['transformed_file'] = os.path.abspath(self._get_outfile()) + return outputs + + def _gen_filename(self, name): + if name == 'transformed_file': + return self._get_outfile() + return None + + +class SmoothInputSpec(FSTraitedSpec): + in_file = File( + exists=True, desc='source volume', argstr='--i %s', mandatory=True) + reg_file = File( + desc='registers volume to surface anatomical ', + argstr='--reg %s', + mandatory=True, + exists=True) + smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True) + proj_frac_avg = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + xor=['proj_frac'], + desc='average a long normal min max delta', + argstr='--projfrac-avg %.2f %.2f %.2f') + proj_frac = traits.Float( + desc='project frac of thickness a long surface normal', + xor=['proj_frac_avg'], + argstr='--projfrac %s') + surface_fwhm = traits.Range( + low=0.0, + requires=['reg_file'], + mandatory=True, + xor=['num_iters'], + desc='surface FWHM in mm', + argstr='--fwhm %f') + num_iters = traits.Range( + low=1, + xor=['surface_fwhm'], + mandatory=True, + argstr='--niters %d', + desc='number of iterations instead of fwhm') + vol_fwhm = traits.Range( + low=0.0, + argstr='--vol-fwhm %f', + desc='volume smoothing outside of surface') + + +class SmoothOutputSpec(TraitedSpec): + smoothed_file = File(exists=True, desc='smoothed input volume') + + +class Smooth(FSCommand): + """Use FreeSurfer mris_volsmooth to smooth a volume + + This function smoothes cortical regions on a surface and non-cortical + regions in volume. + + .. note:: + Cortical voxels are mapped to the surface (3D->2D) and then the + smoothed values from the surface are put back into the volume to fill + the cortical ribbon. If data is smoothed with this algorithm, one has to + be careful about how further processing is interpreted. 
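+
+    A registration to the surface anatomy (``reg_file``) is required, and
+    exactly one of ``surface_fwhm`` (kernel width in mm) or ``num_iters``
+    (number of smoothing iterations) must be given.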
+ + Examples + -------- + + >>> from nipype.interfaces.freesurfer import Smooth + >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6) + >>> smoothvol.cmdline + 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000' + + """ + + _cmd = 'mris_volsmooth' + input_spec = SmoothInputSpec + output_spec = SmoothOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outfile = self.inputs.smoothed_file + if not isdefined(outfile): + outfile = self._gen_fname(self.inputs.in_file, suffix='_smooth') + outputs['smoothed_file'] = outfile + return outputs + + def _gen_filename(self, name): + if name == 'smoothed_file': + return self._list_outputs()[name] + return None + + +class RobustRegisterInputSpec(FSTraitedSpec): + + source_file = File( + exists=True, + mandatory=True, + argstr='--mov %s', + desc='volume to be registered') + target_file = File( + exists=True, + mandatory=True, + argstr='--dst %s', + desc='target volume for the registration') + out_reg_file = traits.Either( + True, + File, + default=True, + usedefault=True, + argstr='--lta %s', + desc='registration file; either True or filename') + registered_file = traits.Either( + traits.Bool, + File, + argstr='--warp %s', + desc='registered image; either True or filename') + weights_file = traits.Either( + traits.Bool, + File, + argstr='--weights %s', + desc='weights image to write; either True or filename') + est_int_scale = traits.Bool( + argstr='--iscale', + desc='estimate intensity scale (recommended for unnormalized images)') + trans_only = traits.Bool( + argstr='--transonly', desc='find 3 parameter translation only') + in_xfm_file = File( + exists=True, + argstr='--transform', + desc='use initial transform on source') + half_source = traits.Either( + traits.Bool, + File, + argstr='--halfmov %s', + desc="write source volume mapped to halfway space") + half_targ = traits.Either( + traits.Bool, + File, + argstr="--halfdst %s", + desc="write target volume mapped to halfway space") + half_weights = traits.Either( + traits.Bool, + File, + argstr="--halfweights %s", + desc="write weights volume mapped to halfway space") + half_source_xfm = traits.Either( + traits.Bool, + File, + argstr="--halfmovlta %s", + desc="write transform from source to halfway space") + half_targ_xfm = traits.Either( + traits.Bool, + File, + argstr="--halfdstlta %s", + desc="write transform from target to halfway space") + auto_sens = traits.Bool( + argstr='--satit', + xor=['outlier_sens'], + mandatory=True, + desc='auto-detect good sensitivity') + outlier_sens = traits.Float( + argstr='--sat %.4f', + xor=['auto_sens'], + mandatory=True, + desc='set outlier sensitivity explicitly') + least_squares = traits.Bool( + argstr='--leastsquares', + desc='use least squares instead of robust estimator') + no_init = traits.Bool(argstr='--noinit', desc='skip transform init') + init_orient = traits.Bool( + argstr='--initorient', + desc='use moments for initial orient (recommended for stripped brains)' + ) + max_iterations = traits.Int( + argstr='--maxit %d', desc='maximum # of times on each resolution') + high_iterations = traits.Int( + argstr='--highit %d', desc='max # of times on highest resolution') + iteration_thresh = traits.Float( + argstr='--epsit %.3f', desc='stop iterations when below threshold') + subsample_thresh = traits.Int( + argstr='--subsample %d', + desc='subsample if dimension is above threshold size') + 
outlier_limit = traits.Float( + argstr='--wlimit %.3f', desc='set maximal outlier limit in satit') + write_vo2vox = traits.Bool( + argstr='--vox2vox', desc='output vox2vox matrix (default is RAS2RAS)') + no_multi = traits.Bool( + argstr='--nomulti', desc='work on highest resolution') + mask_source = File( + exists=True, + argstr='--maskmov %s', + desc='image to mask source volume with') + mask_target = File( + exists=True, + argstr='--maskdst %s', + desc='image to mask target volume with') + force_double = traits.Bool( + argstr='--doubleprec', desc='use double-precision intensities') + force_float = traits.Bool( + argstr='--floattype', desc='use float intensities') + + +class RobustRegisterOutputSpec(TraitedSpec): + + out_reg_file = File(exists=True, desc="output registration file") + registered_file = File( + exists=True, desc="output image with registration applied") + weights_file = File(exists=True, desc="image of weights used") + half_source = File( + exists=True, desc="source image mapped to halfway space") + half_targ = File(exists=True, desc="target image mapped to halfway space") + half_weights = File( + exists=True, desc="weights image mapped to halfway space") + half_source_xfm = File( + exists=True, + desc="transform file to map source image to halfway space") + half_targ_xfm = File( + exists=True, + desc="transform file to map target image to halfway space") + + +class RobustRegister(FSCommand): + """Perform intramodal linear registration (translation and rotation) using + robust statistics. + + Examples + -------- + >>> from nipype.interfaces.freesurfer import RobustRegister + >>> reg = RobustRegister() + >>> reg.inputs.source_file = 'structural.nii' + >>> reg.inputs.target_file = 'T1.nii' + >>> reg.inputs.auto_sens = True + >>> reg.inputs.init_orient = True + >>> reg.cmdline # doctest: +ELLIPSIS + 'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii' + + References + ---------- + Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse + Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96. 
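+
+    Each optional output is requested by setting its input trait either to
+    ``True`` (a filename is then derived from the source or target image) or
+    to an explicit filename, e.g.
+    ``reg.inputs.registered_file = 'structural_robustreg.nii'``.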
+ + """ + + _cmd = 'mri_robust_register' + input_spec = RobustRegisterInputSpec + output_spec = RobustRegisterOutputSpec + + def _format_arg(self, name, spec, value): + options = ("out_reg_file", "registered_file", "weights_file", + "half_source", "half_targ", "half_weights", + "half_source_xfm", "half_targ_xfm") + if name in options and isinstance(value, bool): + value = self._list_outputs()[name] + return super(RobustRegister, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + cwd = os.getcwd() + prefices = dict( + src=self.inputs.source_file, trg=self.inputs.target_file) + suffices = dict( + out_reg_file=("src", "_robustreg.lta", False), + registered_file=("src", "_robustreg", True), + weights_file=("src", "_robustweights", True), + half_source=("src", "_halfway", True), + half_targ=("trg", "_halfway", True), + half_weights=("src", "_halfweights", True), + half_source_xfm=("src", "_robustxfm.lta", False), + half_targ_xfm=("trg", "_robustxfm.lta", False)) + for name, sufftup in list(suffices.items()): + value = getattr(self.inputs, name) + if value: + if value is True: + outputs[name] = fname_presuffix( + prefices[sufftup[0]], + suffix=sufftup[1], + newpath=cwd, + use_ext=sufftup[2]) + else: + outputs[name] = os.path.abspath(value) + return outputs + + +class FitMSParamsInputSpec(FSTraitedSpec): + + in_files = traits.List( + File(exists=True), + argstr="%s", + position=-2, + mandatory=True, + desc="list of FLASH images (must be in mgh format)") + tr_list = traits.List( + traits.Int, desc="list of TRs of the input files (in msec)") + te_list = traits.List( + traits.Float, desc="list of TEs of the input files (in msec)") + flip_list = traits.List( + traits.Int, desc="list of flip angles of the input files") + xfm_list = traits.List( + File(exists=True), + desc="list of transform files to apply to each FLASH image") + out_dir = Directory( + argstr="%s", + position=-1, + genfile=True, + desc="directory to store output in") + + +class FitMSParamsOutputSpec(TraitedSpec): + + t1_image = File( + exists=True, desc="image of estimated T1 relaxation values") + pd_image = File( + exists=True, desc="image of estimated proton density values") + t2star_image = File(exists=True, desc="image of estimated T2* values") + + +class FitMSParams(FSCommand): + """Estimate tissue paramaters from a set of FLASH images. 
+ + Examples + -------- + >>> from nipype.interfaces.freesurfer import FitMSParams + >>> msfit = FitMSParams() + >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz'] + >>> msfit.inputs.out_dir = 'flash_parameters' + >>> msfit.cmdline + 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' + + """ + _cmd = "mri_ms_fitparms" + input_spec = FitMSParamsInputSpec + output_spec = FitMSParamsOutputSpec + + def _format_arg(self, name, spec, value): + if name == "in_files": + cmd = "" + for i, file in enumerate(value): + if isdefined(self.inputs.tr_list): + cmd = " ".join((cmd, "-tr %.1f" % self.inputs.tr_list[i])) + if isdefined(self.inputs.te_list): + cmd = " ".join((cmd, "-te %.3f" % self.inputs.te_list[i])) + if isdefined(self.inputs.flip_list): + cmd = " ".join((cmd, + "-fa %.1f" % self.inputs.flip_list[i])) + if isdefined(self.inputs.xfm_list): + cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i])) + cmd = " ".join((cmd, file)) + return cmd + return super(FitMSParams, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + if not isdefined(self.inputs.out_dir): + out_dir = self._gen_filename("out_dir") + else: + out_dir = self.inputs.out_dir + outputs["t1_image"] = os.path.join(out_dir, "T1.mgz") + outputs["pd_image"] = os.path.join(out_dir, "PD.mgz") + outputs["t2star_image"] = os.path.join(out_dir, "T2star.mgz") + return outputs + + def _gen_filename(self, name): + if name == "out_dir": + return os.getcwd() + return None + + +class SynthesizeFLASHInputSpec(FSTraitedSpec): + + fixed_weighting = traits.Bool( + position=1, + argstr="-w", + desc="use a fixed weighting to generate optimal gray/white contrast") + tr = traits.Float( + mandatory=True, + position=2, + argstr="%.2f", + desc="repetition time (in msec)") + flip_angle = traits.Float( + mandatory=True, + position=3, + argstr="%.2f", + desc="flip angle (in degrees)") + te = traits.Float( + mandatory=True, position=4, argstr="%.3f", desc="echo time (in msec)") + t1_image = File( + exists=True, + mandatory=True, + position=5, + argstr="%s", + desc="image of T1 values") + pd_image = File( + exists=True, + mandatory=True, + position=6, + argstr="%s", + desc="image of proton density values") + out_file = File(genfile=True, argstr="%s", desc="image to write") + + +class SynthesizeFLASHOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="synthesized FLASH acquisition") + + +class SynthesizeFLASH(FSCommand): + """Synthesize a FLASH acquisition from T1 and proton density maps. 
+ + Examples + -------- + >>> from nipype.interfaces.freesurfer import SynthesizeFLASH + >>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30) + >>> syn.inputs.t1_image = 'T1.mgz' + >>> syn.inputs.pd_image = 'PD.mgz' + >>> syn.inputs.out_file = 'flash_30syn.mgz' + >>> syn.cmdline + 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' + + """ + _cmd = "mri_synthesize" + input_spec = SynthesizeFLASHInputSpec + output_spec = SynthesizeFLASHOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_file): + outputs["out_file"] = self.inputs.out_file + else: + outputs["out_file"] = self._gen_fname( + "synth-flash_%02d.mgz" % self.inputs.flip_angle, suffix="") + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()["out_file"] + return None + + +class MNIBiasCorrectionInputSpec(FSTraitedSpec): + # mandatory + in_file = File( + exists=True, + mandatory=True, + argstr="--i %s", + desc="input volume. Input can be any format accepted by mri_convert.") + # optional + out_file = File( + argstr="--o %s", + name_source=['in_file'], + name_template='%s_output', + hash_files=False, + keep_extension=True, + desc="output volume. Output can be any format accepted by mri_convert. " + + "If the output format is COR, then the directory must exist.") + iterations = traits.Int( + 4, usedefault=True, + argstr="--n %d", + desc= + "Number of iterations to run nu_correct. Default is 4. This is the number of times " + + + "that nu_correct is repeated (ie, using the output from the previous run as the input for " + + + "the next). This is different than the -iterations option to nu_correct." + ) + protocol_iterations = traits.Int( + argstr="--proto-iters %d", + desc= + "Passes Np as argument of the -iterations flag of nu_correct. This is different " + + + "than the --n flag above. Default is not to pass nu_correct the -iterations flag." + ) + distance = traits.Int(argstr="--distance %d", desc="N3 -distance option") + no_rescale = traits.Bool( + argstr="--no-rescale", + desc="do not rescale so that global mean of output == input global mean" + ) + mask = File( + exists=True, + argstr="--mask %s", + desc= + "brainmask volume. Input can be any format accepted by mri_convert.") + transform = File( + exists=True, + argstr="--uchar %s", + desc="tal.xfm. Use mri_make_uchar instead of conforming") + stop = traits.Float( + argstr="--stop %f", + desc= + "Convergence threshold below which iteration stops (suggest 0.01 to 0.0001)" + ) + shrink = traits.Int( + argstr="--shrink %d", + desc="Shrink parameter for finer sampling (default is 4)") + + +class MNIBiasCorrectionOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="output volume") + + +class MNIBiasCorrection(FSCommand): + """ Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI) + used for correcting intensity non-uniformity (ie, bias fields). You must have the + MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3] + for more info. + + mri_nu_correct.mni uses float internally instead of uchar. It also rescales the output so + that the global mean is the same as that of the input. 
These two changes are linked and + can be turned off with --no-float + + Examples + -------- + >>> from nipype.interfaces.freesurfer import MNIBiasCorrection + >>> correct = MNIBiasCorrection() + >>> correct.inputs.in_file = "norm.mgz" + >>> correct.inputs.iterations = 6 + >>> correct.inputs.protocol_iterations = 1000 + >>> correct.inputs.distance = 50 + >>> correct.cmdline + 'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000' + + References: + ---------- + [http://freesurfer.net/fswiki/mri_nu_correct.mni] + [http://www.bic.mni.mcgill.ca/software/N3] + [https://github.com/BIC-MNI/N3] + + """ + _cmd = "mri_nu_correct.mni" + input_spec = MNIBiasCorrectionInputSpec + output_spec = MNIBiasCorrectionOutputSpec + + +class WatershedSkullStripInputSpec(FSTraitedSpec): + # required + in_file = File( + argstr="%s", + exists=True, + mandatory=True, + position=-2, + desc="input volume") + out_file = File( + 'brainmask.auto.mgz', + argstr="%s", + exists=False, + mandatory=True, + position=-1, + usedefault=True, + desc="output volume") + # optional + t1 = traits.Bool( + argstr="-T1", desc="specify T1 input volume (T1 grey value = 110)") + brain_atlas = File( + argstr="-brain_atlas %s", exists=True, position=-4, desc="") + transform = File( + argstr="%s", exists=False, position=-3, desc="undocumented") + + +class WatershedSkullStripOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="skull stripped brain volume") + + +class WatershedSkullStrip(FSCommand): + """ This program strips skull and other outer non-brain tissue and + produces the brain volume from T1 volume or the scanned volume. + + The "watershed" segmentation algorithm was used to dertermine the + intensity values for white matter, grey matter, and CSF. + A force field was then used to fit a spherical surface to the brain. + The shape of the surface fit was then evaluated against a previously + derived template. 
+ + The default parameters are: -w 0.82 -b 0.32 -h 10 -seedpt -ta -wta + + (Segonne 2004) + + Examples + ======== + >>> from nipype.interfaces.freesurfer import WatershedSkullStrip + >>> skullstrip = WatershedSkullStrip() + >>> skullstrip.inputs.in_file = "T1.mgz" + >>> skullstrip.inputs.t1 = True + >>> skullstrip.inputs.transform = "transforms/talairach_with_skull.lta" + >>> skullstrip.inputs.out_file = "brainmask.auto.mgz" + >>> skullstrip.cmdline + 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz' + """ + _cmd = 'mri_watershed' + input_spec = WatershedSkullStripInputSpec + output_spec = WatershedSkullStripOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class NormalizeInputSpec(FSTraitedSpec): + # required + in_file = File( + argstr='%s', + exists=True, + mandatory=True, + position=-2, + desc="The input file for Normalize") + out_file = File( + argstr='%s', + position=-1, + name_source=['in_file'], + name_template='%s_norm', + hash_files=False, + keep_extension=True, + desc="The output file for Normalize") + # optional + gradient = traits.Int( + argstr="-g %d", + desc="use max intensity/mm gradient g (default=1)") + mask = File( + argstr="-mask %s", + exists=True, + desc="The input mask file for Normalize") + segmentation = File( + argstr="-aseg %s", + exists=True, + desc="The input segmentation for Normalize") + transform = File( + exists=True, desc="Tranform file from the header of the input file") + + +class NormalizeOutputSpec(TraitedSpec): + out_file = traits.File(exists=False, desc="The output file for Normalize") + + +class Normalize(FSCommand): + """ + Normalize the white-matter, optionally based on control points. The + input volume is converted into a new volume where white matter image + values all range around 110. 
+ + Examples + ======== + >>> from nipype.interfaces import freesurfer + >>> normalize = freesurfer.Normalize() + >>> normalize.inputs.in_file = "T1.mgz" + >>> normalize.inputs.gradient = 1 + >>> normalize.cmdline + 'mri_normalize -g 1 T1.mgz T1_norm.mgz' + """ + _cmd = "mri_normalize" + input_spec = NormalizeInputSpec + output_spec = NormalizeOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class CANormalizeInputSpec(FSTraitedSpec): + in_file = File( + argstr='%s', + exists=True, + mandatory=True, + position=-4, + desc="The input file for CANormalize") + out_file = File( + argstr='%s', + position=-1, + name_source=['in_file'], + name_template='%s_norm', + hash_files=False, + keep_extension=True, + desc="The output file for CANormalize") + atlas = File( + argstr='%s', + exists=True, + mandatory=True, + position=-3, + desc="The atlas file in gca format") + transform = File( + argstr='%s', + exists=True, + mandatory=True, + position=-2, + desc="The tranform file in lta format") + # optional + mask = File( + argstr='-mask %s', exists=True, desc="Specifies volume to use as mask") + control_points = File( + argstr='-c %s', desc="File name for the output control points") + long_file = File( + argstr='-long %s', + desc='undocumented flag used in longitudinal processing') + + +class CANormalizeOutputSpec(TraitedSpec): + out_file = traits.File(exists=False, desc="The output file for Normalize") + control_points = File( + exists=False, desc="The output control points for Normalize") + + +class CANormalize(FSCommand): + """This program creates a normalized volume using the brain volume and an + input gca file. + + For complete details, see the `FS Documentation `_ + + Examples + ======== + + >>> from nipype.interfaces import freesurfer + >>> ca_normalize = freesurfer.CANormalize() + >>> ca_normalize.inputs.in_file = "T1.mgz" + >>> ca_normalize.inputs.atlas = "atlas.nii.gz" # in practice use .gca atlases + >>> ca_normalize.inputs.transform = "trans.mat" # in practice use .lta transforms + >>> ca_normalize.cmdline + 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz' + """ + _cmd = "mri_ca_normalize" + input_spec = CANormalizeInputSpec + output_spec = CANormalizeOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs['control_points'] = os.path.abspath(self.inputs.control_points) + return outputs + + +class CARegisterInputSpec(FSTraitedSpecOpenMP): + # required + in_file = File( + argstr='%s', + exists=True, + mandatory=True, + position=-3, + desc="The input volume for CARegister") + out_file = File( + argstr='%s', + position=-1, + genfile=True, + desc="The output volume for CARegister") + template = File( + argstr='%s', + exists=True, + position=-2, + desc="The template file in gca format") + # optional + mask = File( + argstr='-mask %s', exists=True, desc="Specifies volume to use as mask") + invert_and_save = traits.Bool( + argstr='-invert-and-save', + position=-4, + desc= + "Invert and save the .m3z multi-dimensional talaraich transform to x, y, and z .mgz files" + ) + no_big_ventricles = traits.Bool( + argstr='-nobigventricles', desc="No big ventricles") + transform = File( + argstr='-T %s', exists=True, desc="Specifies transform in lta format") + align = traits.String( + argstr='-align-%s', desc="Specifies when to perform alignment") + levels = traits.Int( + argstr='-levels %d', + desc= 
+ "defines how many surrounding voxels will be used in interpolations, default is 6" + ) + A = traits.Int( + argstr='-A %d', + desc='undocumented flag used in longitudinal processing') + l_files = InputMultiPath( + File(exists=False), + argstr='-l %s', + desc='undocumented flag used in longitudinal processing') + + +class CARegisterOutputSpec(TraitedSpec): + out_file = traits.File(exists=False, desc="The output file for CARegister") + + +class CARegister(FSCommandOpenMP): + """Generates a multi-dimensional talairach transform from a gca file and talairach.lta file + + For complete details, see the `FS Documentation `_ + + Examples + ======== + >>> from nipype.interfaces import freesurfer + >>> ca_register = freesurfer.CARegister() + >>> ca_register.inputs.in_file = "norm.mgz" + >>> ca_register.inputs.out_file = "talairach.m3z" + >>> ca_register.cmdline + 'mri_ca_register norm.mgz talairach.m3z' + """ + _cmd = "mri_ca_register" + input_spec = CARegisterInputSpec + output_spec = CARegisterOutputSpec + + def _format_arg(self, name, spec, value): + if name == "l_files" and len(value) == 1: + value.append('identity.nofile') + return super(CARegister, self)._format_arg(name, spec, value) + + def _gen_fname(self, name): + if name == 'out_file': + return os.path.abspath('talairach.m3z') + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class CALabelInputSpec(FSTraitedSpecOpenMP): + # required + in_file = File( + argstr="%s", + position=-4, + mandatory=True, + exists=True, + desc="Input volume for CALabel") + out_file = File( + argstr="%s", + position=-1, + mandatory=True, + exists=False, + desc="Output file for CALabel") + transform = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Input transform for CALabel") + template = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + desc="Input template for CALabel") + # optional + in_vol = File(argstr="-r %s", exists=True, desc="set input volume") + intensities = File( + argstr="-r %s", + exists=True, + desc="input label intensities file(used in longitudinal processing)") + no_big_ventricles = traits.Bool( + argstr="-nobigventricles", desc="No big ventricles") + align = traits.Bool(argstr="-align", desc="Align CALabel") + prior = traits.Float(argstr="-prior %.1f", desc="Prior for CALabel") + relabel_unlikely = traits.Tuple( + traits.Int, + traits.Float, + argstr="-relabel_unlikely %d %.1f", + desc=("Reclassify voxels at least some std" + " devs from the mean using some size" + " Gaussian window")) + label = traits.File( + argstr="-l %s", + exists=True, + desc= + "Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" + ) + aseg = traits.File( + argstr="-aseg %s", + exists=True, + desc= + "Undocumented flag. 
Autorecon3 uses ../mri/aseg.presurf.mgz as input file" + ) + + +class CALabelOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output volume from CALabel") + + +class CALabel(FSCommandOpenMP): + """ + For complete details, see the `FS Documentation `_ + + Examples + ======== + + >>> from nipype.interfaces import freesurfer + >>> ca_label = freesurfer.CALabel() + >>> ca_label.inputs.in_file = "norm.mgz" + >>> ca_label.inputs.out_file = "out.mgz" + >>> ca_label.inputs.transform = "trans.mat" + >>> ca_label.inputs.template = "Template_6.nii" # in practice use .gcs extension + >>> ca_label.cmdline + 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz' + """ + _cmd = "mri_ca_label" + input_spec = CALabelInputSpec + output_spec = CALabelOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class MRIsCALabelInputSpec(FSTraitedSpecOpenMP): + # required + subject_id = traits.String( + 'subject_id', + argstr="%s", + position=-5, + usedefault=True, + mandatory=True, + desc="Subject name or ID") + hemisphere = traits.Enum( + 'lh', + 'rh', + argstr="%s", + position=-4, + mandatory=True, + desc="Hemisphere ('lh' or 'rh')") + canonsurf = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Input canonical surface file") + classifier = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + desc="Classifier array input file") + smoothwm = File( + mandatory=True, + exists=True, + desc="implicit input {hemisphere}.smoothwm") + curv = File( + mandatory=True, exists=True, desc="implicit input {hemisphere}.curv") + sulc = File( + mandatory=True, exists=True, desc="implicit input {hemisphere}.sulc") + out_file = File( + argstr="%s", + position=-1, + exists=False, + name_source=['hemisphere'], + keep_extension=True, + hash_files=False, + name_template="%s.aparc.annot", + desc="Annotated surface output file") + # optional + label = traits.File( + argstr="-l %s", + exists=True, + desc= + "Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file" + ) + aseg = traits.File( + argstr="-aseg %s", + exists=True, + desc= + "Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file" + ) + seed = traits.Int(argstr="-seed %d", desc="") + copy_inputs = traits.Bool(desc="Copies implicit inputs to node directory " + + "and creates a temp subjects_directory. " + + "Use this when running as a node") + + +class MRIsCALabelOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output volume from MRIsCALabel") + + +class MRIsCALabel(FSCommandOpenMP): + """ + For a single subject, produces an annotation file, in which each + cortical surface vertex is assigned a neuroanatomical label.This + automatic procedure employs data from a previously-prepared atlas + file. An atlas file is created from a training set, capturing region + data manually drawn by neuroanatomists combined with statistics on + variability correlated to geometric information derived from the + cortical model (sulcus and curvature). Besides the atlases provided + with FreeSurfer, new ones can be prepared using mris_ca_train). 
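+
+    To run as a workflow node, set ``copy_inputs = True``; the canonical
+    surface and the implicit ``smoothwm``, ``curv`` and ``sulc`` inputs are
+    then copied into a temporary subjects directory rooted in the node's
+    working directory.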
+ + Examples + ======== + + >>> from nipype.interfaces import freesurfer + >>> ca_label = freesurfer.MRIsCALabel() + >>> ca_label.inputs.subject_id = "test" + >>> ca_label.inputs.hemisphere = "lh" + >>> ca_label.inputs.canonsurf = "lh.pial" + >>> ca_label.inputs.curv = "lh.pial" + >>> ca_label.inputs.sulc = "lh.pial" + >>> ca_label.inputs.classifier = "im1.nii" # in pracice, use .gcs extension + >>> ca_label.inputs.smoothwm = "lh.pial" + >>> ca_label.cmdline + 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot' + """ + _cmd = "mris_ca_label" + input_spec = MRIsCALabelInputSpec + output_spec = MRIsCALabelOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.canonsurf, folder='surf') + copy2subjdir( + self, + self.inputs.smoothwm, + folder='surf', + basename='{0}.smoothwm'.format(self.inputs.hemisphere)) + copy2subjdir( + self, + self.inputs.curv, + folder='surf', + basename='{0}.curv'.format(self.inputs.hemisphere)) + copy2subjdir( + self, + self.inputs.sulc, + folder='surf', + basename='{0}.sulc'.format(self.inputs.hemisphere)) + + # The label directory must exist in order for an output to be written + label_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if not os.path.isdir(label_dir): + os.makedirs(label_dir) + + return super(MRIsCALabel, self).run(**inputs) + + def _list_outputs(self): + outputs = self.output_spec().get() + out_basename = os.path.basename(self.inputs.out_file) + outputs['out_file'] = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label', + out_basename) + return outputs + + +class SegmentCCInputSpec(FSTraitedSpec): + in_file = File( + argstr="-aseg %s", + mandatory=True, + exists=True, + desc="Input aseg file to read from subjects directory") + in_norm = File( + mandatory=True, + exists=True, + desc="Required undocumented input {subject}/mri/norm.mgz") + out_file = File( + argstr="-o %s", + exists=False, + name_source=['in_file'], + name_template='%s.auto.mgz', + hash_files=False, + keep_extension=False, + desc="Filename to write aseg including CC") + out_rotation = File( + argstr="-lta %s", + mandatory=True, + exists=False, + desc="Global filepath for writing rotation lta") + subject_id = traits.String( + 'subject_id', + argstr="%s", + mandatory=True, + position=-1, + usedefault=True, + desc="Subject name") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + "directory.") + + +class SegmentCCOutputSpec(TraitedSpec): + out_file = File( + exists=False, desc="Output segmentation uncluding corpus collosum") + out_rotation = File(exists=False, desc="Output lta rotation file") + + +class SegmentCC(FSCommand): + """ + This program segments the corpus callosum into five separate labels in + the subcortical segmentation volume 'aseg.mgz'. The divisions of the + cc are equally spaced in terms of distance along the primary + eigendirection (pretty much the long axis) of the cc. The lateral + extent can be changed with the -T parameter, where + is the distance off the midline (so -T 1 would result in + the who CC being 3mm thick). The default is 2 so it's 5mm thick. The + aseg.stats values should be volume. 
+ + Examples + ======== + >>> from nipype.interfaces import freesurfer + >>> SegmentCC_node = freesurfer.SegmentCC() + >>> SegmentCC_node.inputs.in_file = "aseg.mgz" + >>> SegmentCC_node.inputs.in_norm = "norm.mgz" + >>> SegmentCC_node.inputs.out_rotation = "cc.lta" + >>> SegmentCC_node.inputs.subject_id = "test" + >>> SegmentCC_node.cmdline + 'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test' + """ + + _cmd = "mri_cc" + input_spec = SegmentCCInputSpec + output_spec = SegmentCCOutputSpec + + # mri_cc does not take absolute paths and will look for the + # input files in //mri/ + # So, if the files are not there, they will be copied to that + # location + def _format_arg(self, name, spec, value): + if name in ["in_file", "in_norm", "out_file"]: + # mri_cc can't use abspaths just the basename + basename = os.path.basename(value) + return spec.argstr % basename + return super(SegmentCC, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + outputs['out_rotation'] = os.path.abspath(self.inputs.out_rotation) + return outputs + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + for originalfile in [self.inputs.in_file, self.inputs.in_norm]: + copy2subjdir(self, originalfile, folder='mri') + return super(SegmentCC, self).run(**inputs) + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + # it is necessary to find the output files and move + # them to the correct loacation + predicted_outputs = self._list_outputs() + for name in ['out_file', 'out_rotation']: + out_file = predicted_outputs[name] + if not os.path.isfile(out_file): + out_base = os.path.basename(out_file) + if isdefined(self.inputs.subjects_dir): + subj_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id) + else: + subj_dir = os.path.join(os.getcwd(), + self.inputs.subject_id) + if name == 'out_file': + out_tmp = os.path.join(subj_dir, 'mri', out_base) + elif name == 'out_rotation': + out_tmp = os.path.join(subj_dir, 'mri', 'transforms', + out_base) + else: + out_tmp = None + # move the file to correct location + if out_tmp and os.path.isfile(out_tmp): + if not os.path.isdir(os.path.dirname(out_tmp)): + os.makedirs(os.path.dirname(out_tmp)) + shutil.move(out_tmp, out_file) + return super(SegmentCC, self).aggregate_outputs( + runtime, needed_outputs) + + +class SegmentWMInputSpec(FSTraitedSpec): + in_file = File( + argstr="%s", + exists=True, + mandatory=True, + position=-2, + desc="Input file for SegmentWM") + out_file = File( + argstr="%s", + exists=False, + mandatory=True, + position=-1, + desc="File to be written as output for SegmentWM") + + +class SegmentWMOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output white matter segmentation") + + +class SegmentWM(FSCommand): + """ + This program segments white matter from the input volume. The input + volume should be normalized such that white matter voxels are + ~110-valued, and the volume is conformed to 256^3. 
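+
+    A typical input is the intensity-normalized ``norm.mgz`` produced
+    earlier in the recon-all stream. A minimal node-based sketch (node
+    name hypothetical, assuming the standard nipype Node machinery):
+
+    >>> import nipype.pipeline.engine as pe  # doctest: +SKIP
+    >>> from nipype.interfaces import freesurfer  # doctest: +SKIP
+    >>> segwm = pe.Node(freesurfer.SegmentWM(out_file='wm.seg.mgz'),
+    ...                 name='segment_wm')  # doctest: +SKIP
+    >>> segwm.inputs.in_file = 'norm.mgz'  # doctest: +SKIP
+    >>> res = segwm.run()  # doctest: +SKIP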
+ + + Examples + ======== + >>> from nipype.interfaces import freesurfer + >>> SegmentWM_node = freesurfer.SegmentWM() + >>> SegmentWM_node.inputs.in_file = "norm.mgz" + >>> SegmentWM_node.inputs.out_file = "wm.seg.mgz" + >>> SegmentWM_node.cmdline + 'mri_segment norm.mgz wm.seg.mgz' + """ + + _cmd = "mri_segment" + input_spec = SegmentWMInputSpec + output_spec = SegmentWMOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class EditWMwithAsegInputSpec(FSTraitedSpec): + in_file = File( + argstr="%s", + position=-4, + mandatory=True, + exists=True, + desc="Input white matter segmentation file") + brain_file = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Input brain/T1 file") + seg_file = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + desc="Input presurf segmentation file") + out_file = File( + argstr="%s", + position=-1, + mandatory=True, + exists=False, + desc="File to be written as output") + # optional + keep_in = traits.Bool( + argstr="-keep-in", desc="Keep edits as found in input volume") + + +class EditWMwithAsegOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output edited WM file") + + +class EditWMwithAseg(FSCommand): + """ + Edits a wm file using a segmentation + + Examples + ======== + >>> from nipype.interfaces.freesurfer import EditWMwithAseg + >>> editwm = EditWMwithAseg() + >>> editwm.inputs.in_file = "T1.mgz" + >>> editwm.inputs.brain_file = "norm.mgz" + >>> editwm.inputs.seg_file = "aseg.mgz" + >>> editwm.inputs.out_file = "wm.asegedit.mgz" + >>> editwm.inputs.keep_in = True + >>> editwm.cmdline + 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz' + """ + _cmd = 'mri_edit_wm_with_aseg' + input_spec = EditWMwithAsegInputSpec + output_spec = EditWMwithAsegOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class ConcatenateLTAInputSpec(FSTraitedSpec): + # required + in_lta1 = File( + exists=True, + mandatory=True, + argstr='%s', + position=-3, + desc='maps some src1 to dst1') + in_lta2 = traits.Either( + File(exists=True), + 'identity.nofile', + argstr='%s', + position=-2, + mandatory=True, + desc='maps dst1(src2) to dst2') + out_file = File( + position=-1, + argstr='%s', + hash_files=False, + name_source=['in_lta1'], + name_template='%s_concat', + keep_extension=True, + desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1') + + # Inversion and transform type + invert_1 = traits.Bool( + argstr='-invert1', desc='invert in_lta1 before applying it') + invert_2 = traits.Bool( + argstr='-invert2', desc='invert in_lta2 before applying it') + invert_out = traits.Bool(argstr='-invertout', desc='invert output LTA') + out_type = traits.Enum( + 'VOX2VOX', 'RAS2RAS', argstr='-out_type %d', desc='set final LTA type') + + # Talairach options + tal_source_file = traits.File( + exists=True, + argstr='-tal %s', + position=-5, + requires=['tal_template_file'], + desc='if in_lta2 is talairach.xfm, specify source for talairach') + tal_template_file = traits.File( + exists=True, + argstr='%s', + position=-4, + requires=['tal_source_file'], + desc='if in_lta2 is talairach.xfm, specify template for talairach') + + subject = traits.Str( + argstr='-subject %s', desc='set subject in output LTA') + # Note rmsdiff would be xor out_file, and would be most easily dealt with + # in a new interface. 
-CJM 2017.10.05 + + +class ConcatenateLTAOutputSpec(TraitedSpec): + out_file = File( + exists=False, desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1') + + +class ConcatenateLTA(FSCommand): + """ Concatenates two consecutive LTA transformations into one overall + transformation + + Out = LTA2*LTA1 + + Examples + -------- + >>> from nipype.interfaces.freesurfer import ConcatenateLTA + >>> conc_lta = ConcatenateLTA() + >>> conc_lta.inputs.in_lta1 = 'lta1.lta' + >>> conc_lta.inputs.in_lta2 = 'lta2.lta' + >>> conc_lta.cmdline + 'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta' + + You can use 'identity.nofile' as the filename for in_lta2, e.g.: + + >>> conc_lta.inputs.in_lta2 = 'identity.nofile' + >>> conc_lta.inputs.invert_1 = True + >>> conc_lta.inputs.out_file = 'inv1.lta' + >>> conc_lta.cmdline + 'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta' + + To create a RAS2RAS transform: + + >>> conc_lta.inputs.out_type = 'RAS2RAS' + >>> conc_lta.cmdline + 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta' + """ + + _cmd = 'mri_concatenate_lta' + input_spec = ConcatenateLTAInputSpec + output_spec = ConcatenateLTAOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'out_type': + value = {'VOX2VOX': 0, 'RAS2RAS': 1}[value] + return super(ConcatenateLTA, self)._format_arg(name, spec, value) diff --git a/nipype/interfaces/freesurfer/registration.py b/nipype/interfaces/freesurfer/registration.py new file mode 100644 index 0000000000..99ee7d0179 --- /dev/null +++ b/nipype/interfaces/freesurfer/registration.py @@ -0,0 +1,607 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Provides interfaces to various longitudinal commands provided by freesurfer +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import os.path + +from ... import logging +from ...utils.filemanip import split_filename, copyfile + +from .base import (FSCommand, FSTraitedSpec, FSScriptCommand, + FSScriptOutputSpec, FSCommandOpenMP, FSTraitedSpecOpenMP) +from ..base import (isdefined, TraitedSpec, File, traits, Directory) + +__docformat__ = 'restructuredtext' +iflogger = logging.getLogger('nipype.interface') + + +class MPRtoMNI305InputSpec(FSTraitedSpec): + # environment variables, required + # usedefault=True is hack for on_trait_change in __init__ + reference_dir = Directory( + "", exists=True, mandatory=True, usedefault=True, desc="TODO") + target = traits.String( + "", mandatory=True, usedefault=True, desc="input atlas file") + # required + in_file = File( + argstr='%s', + usedefault=True, + desc="the input file prefix for MPRtoMNI305") + + +class MPRtoMNI305OutputSpec(FSScriptOutputSpec): + out_file = File( + exists=False, + desc="The output file '_to__t4_vox2vox.txt'") + + +class MPRtoMNI305(FSScriptCommand): + """ + For complete details, see FreeSurfer documentation + + Examples + ======== + + >>> from nipype.interfaces.freesurfer import MPRtoMNI305, Info + >>> mprtomni305 = MPRtoMNI305() + >>> mprtomni305.inputs.target = 'structural.nii' + >>> mprtomni305.inputs.reference_dir = '.' 
# doctest: +SKIP + >>> mprtomni305.cmdline # doctest: +SKIP + 'mpr2mni305 output' + >>> mprtomni305.inputs.out_file = 'struct_out' # doctest: +SKIP + >>> mprtomni305.cmdline # doctest: +SKIP + 'mpr2mni305 struct_out' # doctest: +SKIP + >>> mprtomni305.inputs.environ['REFDIR'] == os.path.join(Info.home(), 'average') # doctest: +SKIP + True + >>> mprtomni305.inputs.environ['MPR2MNI305_TARGET'] # doctest: +SKIP + 'structural' + >>> mprtomni305.run() # doctest: +SKIP + + """ + _cmd = "mpr2mni305" + input_spec = MPRtoMNI305InputSpec + output_spec = MPRtoMNI305OutputSpec + + def __init__(self, **inputs): + super(MPRtoMNI305, self).__init__(**inputs) + self.inputs.on_trait_change(self._environ_update, 'target') + self.inputs.on_trait_change(self._environ_update, 'reference_dir') + + def _format_arg(self, opt, spec, val): + if opt in ['target', 'reference_dir']: + return "" + elif opt == 'in_file': + _, retval, ext = split_filename(val) + # Need to copy file to working cache directory! + copyfile( + val, + os.path.abspath(retval + ext), + copy=True, + hashmethod='content') + return retval + return super(MPRtoMNI305, self)._format_arg(opt, spec, val) + + def _environ_update(self): + # refdir = os.path.join(Info.home(), val) + refdir = self.inputs.reference_dir + target = self.inputs.target + self.inputs.environ['MPR2MNI305_TARGET'] = target + self.inputs.environ["REFDIR"] = refdir + + def _get_fname(self, fname): + return split_filename(fname)[1] + + def _list_outputs(self): + outputs = super(MPRtoMNI305, self)._list_outputs() + fullname = "_".join([ + self._get_fname(self.inputs.in_file), "to", self.inputs.target, + "t4", "vox2vox.txt" + ]) + outputs['out_file'] = os.path.abspath(fullname) + return outputs + + +class RegisterAVItoTalairachInputSpec(FSTraitedSpec): + in_file = File( + argstr='%s', + exists=True, + mandatory=True, + position=0, + desc="The input file") + target = File( + argstr='%s', + exists=True, + mandatory=True, + position=1, + desc="The target file") + vox2vox = File( + argstr='%s', + exists=True, + mandatory=True, + position=2, + desc="The vox2vox file") + out_file = File( + 'talairach.auto.xfm', + usedefault=True, + argstr='%s', + position=3, + desc="The transform output") + + +class RegisterAVItoTalairachOutputSpec(FSScriptOutputSpec): + out_file = traits.File( + exists=False, desc="The output file for RegisterAVItoTalairach") + + +class RegisterAVItoTalairach(FSScriptCommand): + """ + converts the vox2vox from talairach_avi to a talairach.xfm file + + This is a script that converts the vox2vox from talairach_avi to a + talairach.xfm file. 
It is meant to replace the following cmd line:
+
+    tkregister2_cmdl \
+        --mov $InVol \
+        --targ $FREESURFER_HOME/average/mni305.cor.mgz \
+        --xfmout ${XFM} \
+        --vox2vox talsrcimg_to_${target}_t4_vox2vox.txt \
+        --noedit \
+        --reg talsrcimg.reg.tmp.dat
+    set targ = $FREESURFER_HOME/average/mni305.cor.mgz
+    set subject = mgh-02407836-v2
+    set InVol = $SUBJECTS_DIR/$subject/mri/orig.mgz
+    set vox2vox = $SUBJECTS_DIR/$subject/mri/transforms/talsrcimg_to_711-2C_as_mni_average_305_t4_vox2vox.txt
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces.freesurfer import RegisterAVItoTalairach
+    >>> register = RegisterAVItoTalairach()
+    >>> register.inputs.in_file = 'structural.mgz' # doctest: +SKIP
+    >>> register.inputs.target = 'mni305.cor.mgz' # doctest: +SKIP
+    >>> register.inputs.vox2vox = 'talsrcimg_to_structural_t4_vox2vox.txt' # doctest: +SKIP
+    >>> register.cmdline # doctest: +SKIP
+    'avi2talxfm structural.mgz mni305.cor.mgz talsrcimg_to_structural_t4_vox2vox.txt talairach.auto.xfm'
+
+    >>> register.run() # doctest: +SKIP
+    """
+    _cmd = "avi2talxfm"
+    input_spec = RegisterAVItoTalairachInputSpec
+    output_spec = RegisterAVItoTalairachOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+
+class EMRegisterInputSpec(FSTraitedSpecOpenMP):
+    # required
+    in_file = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-3,
+        desc="in brain volume")
+    template = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-2,
+        desc="template gca")
+    out_file = File(
+        argstr="%s",
+        exists=False,
+        name_source=['in_file'],
+        name_template="%s_transform.lta",
+        hash_files=False,
+        keep_extension=False,
+        position=-1,
+        desc="output transform")
+    # optional
+    skull = traits.Bool(
+        argstr="-skull", desc="align to atlas containing skull (uns=5)")
+    mask = File(argstr="-mask %s", exists=True, desc="use volume as a mask")
+    nbrspacing = traits.Int(
+        argstr="-uns %d",
+        desc=
+        "align to atlas containing skull, setting unknown_nbr_spacing = nbrspacing"
+    )
+    transform = File(
+        argstr="-t %s", exists=True, desc="Previously computed transform")
+
+
+class EMRegisterOutputSpec(TraitedSpec):
+    out_file = File(exists=False, desc="output transform")
+
+
+class EMRegister(FSCommandOpenMP):
+    """ This program creates a transform in lta format
+
+    Examples
+    ========
+    >>> from nipype.interfaces.freesurfer import EMRegister
+    >>> register = EMRegister()
+    >>> register.inputs.in_file = 'norm.mgz'
+    >>> register.inputs.template = 'aseg.mgz'
+    >>> register.inputs.out_file = 'norm_transform.lta'
+    >>> register.inputs.skull = True
+    >>> register.inputs.nbrspacing = 9
+    >>> register.cmdline
+    'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta'
+    """
+    _cmd = 'mri_em_register'
+    input_spec = EMRegisterInputSpec
+    output_spec = EMRegisterOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+
+class RegisterInputSpec(FSTraitedSpec):
+    # required
+    in_surf = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-3,
+        copyfile=True,
+        desc="Surface to register, often {hemi}.sphere")
+    target = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-2,
+        desc="The data to register to. 
In normal recon-all usage, " + + "this is a template file for average surface.") + in_sulc = File( + exists=True, + mandatory=True, + copyfile=True, + desc= + "Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc " + ) + out_file = File( + argstr="%s", + exists=False, + position=-1, + genfile=True, + desc="Output surface file to capture registration") + # optional + curv = traits.Bool( + argstr="-curv", + requires=['in_smoothwm'], + desc="Use smoothwm curvature for final alignment") + in_smoothwm = File( + exists=True, + copyfile=True, + desc= + "Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm ") + + +class RegisterOutputSpec(TraitedSpec): + out_file = File( + exists=False, desc="Output surface file to capture registration") + + +class Register(FSCommand): + """ This program registers a surface to an average surface template. + + Examples + ======== + >>> from nipype.interfaces.freesurfer import Register + >>> register = Register() + >>> register.inputs.in_surf = 'lh.pial' + >>> register.inputs.in_smoothwm = 'lh.pial' + >>> register.inputs.in_sulc = 'lh.pial' + >>> register.inputs.target = 'aseg.mgz' + >>> register.inputs.out_file = 'lh.pial.reg' + >>> register.inputs.curv = True + >>> register.cmdline + 'mris_register -curv lh.pial aseg.mgz lh.pial.reg' + """ + + _cmd = 'mris_register' + input_spec = RegisterInputSpec + output_spec = RegisterOutputSpec + + def _format_arg(self, opt, spec, val): + if opt == 'curv': + return spec.argstr + return super(Register, self)._format_arg(opt, spec, val) + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_file): + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + else: + outputs['out_file'] = os.path.abspath(self.inputs.in_surf) + '.reg' + return outputs + + +class PaintInputSpec(FSTraitedSpec): + # required + in_surf = File( + argstr="%s", + exists=True, + mandatory=True, + position=-2, + desc="Surface file with grid (vertices) onto which the " + + "template data is to be sampled or 'painted'") + template = File( + argstr="%s", + exists=True, + mandatory=True, + position=-3, + desc="Template file") + # optional + template_param = traits.Int(desc="Frame number of the input template") + averages = traits.Int(argstr="-a %d", desc="Average curvature patterns") + out_file = File( + argstr="%s", + exists=False, + position=-1, + name_template="%s.avg_curv", + hash_files=False, + name_source=['in_surf'], + keep_extension=False, + desc="File containing a surface-worth of per-vertex values, " + + "saved in 'curvature' format.") + + +class PaintOutputSpec(TraitedSpec): + out_file = File( + exists=False, + desc= + "File containing a surface-worth of per-vertex values, saved in 'curvature' format." + ) + + +class Paint(FSCommand): + """ + This program is useful for extracting one of the arrays ("a variable") + from a surface-registration template file. The output is a file + containing a surface-worth of per-vertex values, saved in "curvature" + format. Because the template data is sampled to a particular surface + mesh, this conjures the idea of "painting to a surface". 
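+
+    The optional ``template_param`` input selects a frame of the template;
+    ``_format_arg`` below appends it to the template argument as
+    ``<template>#<frame>``. A sketch (the frame value is hypothetical):
+
+    >>> from nipype.interfaces.freesurfer import Paint  # doctest: +SKIP
+    >>> p = Paint(in_surf='lh.pial', template='aseg.mgz')  # doctest: +SKIP
+    >>> p.inputs.template_param = 1  # doctest: +SKIP
+    >>> p.cmdline  # doctest: +SKIP
+    'mrisp_paint aseg.mgz#1 lh.pial lh.avg_curv'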
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import Paint + >>> paint = Paint() + >>> paint.inputs.in_surf = 'lh.pial' + >>> paint.inputs.template = 'aseg.mgz' + >>> paint.inputs.averages = 5 + >>> paint.inputs.out_file = 'lh.avg_curv' + >>> paint.cmdline + 'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv' + """ + + _cmd = 'mrisp_paint' + input_spec = PaintInputSpec + output_spec = PaintOutputSpec + + def _format_arg(self, opt, spec, val): + if opt == 'template': + if isdefined(self.inputs.template_param): + return spec.argstr % ( + val + '#' + str(self.inputs.template_param)) + return super(Paint, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class MRICoregInputSpec(FSTraitedSpec): + source_file = File( + argstr='--mov %s', + desc='source file to be registered', + mandatory=True, + copyfile=False) + reference_file = File( + argstr='--ref %s', + desc='reference (target) file', + mandatory=True, + copyfile=False, + xor=['subject_id']) + out_lta_file = traits.Either( + True, + File, + argstr='--lta %s', + default=True, + usedefault=True, + desc='output registration file (LTA format)') + out_reg_file = traits.Either( + True, + File, + argstr='--regdat %s', + desc='output registration file (REG format)') + out_params_file = traits.Either( + True, File, argstr='--params %s', desc='output parameters file') + + subjects_dir = Directory( + exists=True, argstr='--sd %s', desc='FreeSurfer SUBJECTS_DIR') + subject_id = traits.Str( + argstr='--s %s', + position=1, + mandatory=True, + xor=['reference_file'], + requires=['subjects_dir'], + desc='freesurfer subject ID (implies ``reference_mask == ' + 'aparc+aseg.mgz`` unless otherwise specified)') + dof = traits.Enum( + 6, + 9, + 12, + argstr='--dof %d', + desc='number of transform degrees of freedom') + reference_mask = traits.Either( + False, + traits.Str, + argstr='--ref-mask %s', + position=2, + desc='mask reference volume with given mask, or None if ``False``') + source_mask = traits.Str( + argstr='--mov-mask', desc='mask source file with given mask') + num_threads = traits.Int( + argstr='--threads %d', desc='number of OpenMP threads') + no_coord_dithering = traits.Bool( + argstr='--no-coord-dither', desc='turn off coordinate dithering') + no_intensity_dithering = traits.Bool( + argstr='--no-intensity-dither', desc='turn off intensity dithering') + sep = traits.List( + argstr='--sep %s...', + minlen=1, + maxlen=2, + desc='set spatial scales, in voxels (default [2, 4])') + initial_translation = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--trans %g %g %g', + desc='initial translation in mm (implies no_cras0)') + initial_rotation = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--rot %g %g %g', + desc='initial rotation in degrees') + initial_scale = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--scale %g %g %g', + desc='initial scale') + initial_shear = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--shear %g %g %g', + desc='initial shear (Hxy, Hxz, Hyz)') + no_cras0 = traits.Bool( + argstr='--no-cras0', + desc='do not set translation parameters to align ' + 'centers of source and reference files') + max_iters = traits.Range( + low=1, argstr='--nitersmax %d', desc='maximum iterations (default: 4)') + ftol = traits.Float( + argstr='--ftol %e', desc='floating-point tolerance (default=1e-7)') + 
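+    # Hedged note: --linmintol is left undescribed in this spec; by analogy
+    # with ftol above it appears to set the tolerance of the optimiser's
+    # inner (line) minimisation.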
linmintol = traits.Float(argstr='--linmintol %e') + saturation_threshold = traits.Range( + low=0.0, + high=100.0, + argstr='--sat %g', + desc='saturation threshold (default=9.999)') + conform_reference = traits.Bool( + argstr='--conf-ref', desc='conform reference without rescaling') + no_brute_force = traits.Bool( + argstr='--no-bf', desc='do not brute force search') + brute_force_limit = traits.Float( + argstr='--bf-lim %g', + xor=['no_brute_force'], + desc='constrain brute force search to +/- lim') + brute_force_samples = traits.Int( + argstr='--bf-nsamp %d', + xor=['no_brute_force'], + desc='number of samples in brute force search') + no_smooth = traits.Bool( + argstr='--no-smooth', + desc='do not apply smoothing to either reference or source file') + ref_fwhm = traits.Float( + argstr='--ref-fwhm', desc='apply smoothing to reference file') + source_oob = traits.Bool( + argstr='--mov-oob', + desc='count source voxels that are out-of-bounds as 0') + # Skipping mat2par + + +class MRICoregOutputSpec(TraitedSpec): + out_reg_file = File(exists=True, desc='output registration file') + out_lta_file = File(exists=True, desc='output LTA-style registration file') + out_params_file = File(exists=True, desc='output parameters file') + + +class MRICoreg(FSCommand): + """ This program registers one volume to another + + mri_coreg is a C reimplementation of spm_coreg in FreeSurfer + + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRICoreg + >>> coreg = MRICoreg() + >>> coreg.inputs.source_file = 'moving1.nii' + >>> coreg.inputs.reference_file = 'fixed1.nii' + >>> coreg.inputs.subjects_dir = '.' + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --lta .../registration.lta --ref fixed1.nii --mov moving1.nii --sd .' + + If passing a subject ID, the reference mask may be disabled: + + >>> coreg = MRICoreg() + >>> coreg.inputs.source_file = 'moving1.nii' + >>> coreg.inputs.subjects_dir = '.' + >>> coreg.inputs.subject_id = 'fsaverage' + >>> coreg.inputs.reference_mask = False + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --mov moving1.nii --sd .' + + Spatial scales may be specified as a list of one or two separations: + + >>> coreg.inputs.sep = [4] + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --mov moving1.nii --sd .' + + >>> coreg.inputs.sep = [4, 5] + >>> coreg.cmdline # doctest: +ELLIPSIS + 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --sep 5 --mov moving1.nii --sd .' 
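+
+    As with ``out_lta_file``, setting ``out_reg_file`` or ``out_params_file``
+    to ``True`` picks a default filename; per ``_list_outputs`` below these
+    resolve to ``registration.dat`` and ``registration.par``. A sketch:
+
+    >>> coreg.inputs.out_params_file = True  # doctest: +SKIP
+    >>> coreg.run()  # doctest: +SKIP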
+ """ + + _cmd = 'mri_coreg' + input_spec = MRICoregInputSpec + output_spec = MRICoregOutputSpec + + def _format_arg(self, opt, spec, val): + if opt in ('out_reg_file', 'out_lta_file', + 'out_params_file') and val is True: + val = self._list_outputs()[opt] + elif opt == 'reference_mask' and val is False: + return '--no-ref-mask' + return super(MRICoreg, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self.output_spec().get() + + out_lta_file = self.inputs.out_lta_file + if isdefined(out_lta_file): + if out_lta_file is True: + out_lta_file = 'registration.lta' + outputs['out_lta_file'] = os.path.abspath(out_lta_file) + + out_reg_file = self.inputs.out_reg_file + if isdefined(out_reg_file): + if out_reg_file is True: + out_reg_file = 'registration.dat' + outputs['out_reg_file'] = os.path.abspath(out_reg_file) + + out_params_file = self.inputs.out_params_file + if isdefined(out_params_file): + if out_params_file is True: + out_params_file = 'registration.par' + outputs['out_params_file'] = os.path.abspath(out_params_file) + + return outputs diff --git a/nipype/interfaces/freesurfer/tests/__init__.py b/nipype/interfaces/freesurfer/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/freesurfer/tests/test_BBRegister.py b/nipype/interfaces/freesurfer/tests/test_BBRegister.py new file mode 100644 index 0000000000..3e80ad4e9c --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_BBRegister.py @@ -0,0 +1,139 @@ +from __future__ import unicode_literals +from ..preprocess import BBRegister, BBRegisterInputSpec6 + + +def test_BBRegister_inputs(): + input_map_5_3 = dict( + args=dict(argstr='%s', ), + contrast_type=dict( + argstr='--%s', + mandatory=True, + ), + dof=dict(argstr='--%d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi_mask=dict(argstr='--epi-mask', ), + fsldof=dict(argstr='--fsl-dof %d', ), + init=dict( + argstr='--init-%s', + mandatory=True, + xor=['init_reg_file'], + ), + init_cost_file=dict(argstr='--initcost %s', ), + init_reg_file=dict( + argstr='--init-reg %s', + mandatory=True, + xor=['init'], + ), + intermediate_file=dict(argstr='--int %s', ), + out_fsl_file=dict(argstr='--fslmat %s', ), + out_lta_file=dict( + argstr='--lta %s', + min_ver='5.2.0', + ), + out_reg_file=dict( + argstr='--reg %s', + genfile=True, + ), + reg_frame=dict( + argstr='--frame %d', + xor=['reg_middle_frame'], + ), + reg_middle_frame=dict( + argstr='--mid-frame', + xor=['reg_frame'], + ), + registered_file=dict(argstr='--o %s', ), + source_file=dict( + argstr='--mov %s', + copyfile=False, + mandatory=True, + ), + spm_nifti=dict(argstr='--spm-nii', ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + input_map_6_0 = dict( + args=dict(argstr='%s', ), + contrast_type=dict( + argstr='--%s', + mandatory=True, + ), + dof=dict(argstr='--%d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi_mask=dict(argstr='--epi-mask', ), + fsldof=dict(argstr='--fsl-dof %d', ), + init=dict( + argstr='--init-%s', + xor=['init_reg_file'], + ), + init_reg_file=dict( + argstr='--init-reg %s', + xor=['init'], + ), + init_cost_file=dict(argstr='--initcost %s', ), + intermediate_file=dict(argstr='--int %s', ), + out_fsl_file=dict(argstr='--fslmat %s', ), + out_lta_file=dict( + argstr='--lta %s', + min_ver='5.2.0', + ), + out_reg_file=dict( + argstr='--reg %s', + genfile=True, + ), + 
reg_frame=dict( + argstr='--frame %d', + xor=['reg_middle_frame'], + ), + reg_middle_frame=dict( + argstr='--mid-frame', + xor=['reg_frame'], + ), + registered_file=dict(argstr='--o %s', ), + source_file=dict( + argstr='--mov %s', + copyfile=False, + mandatory=True, + ), + spm_nifti=dict(argstr='--spm-nii', ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + + instance = BBRegister() + if isinstance(instance.inputs, BBRegisterInputSpec6): + input_map = input_map_6_0 + else: + input_map = input_map_5_3 + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(instance.inputs.traits()[key], metakey) == value + + +def test_BBRegister_outputs(): + output_map = dict( + init_cost_file=dict(), + min_cost_file=dict(), + out_fsl_file=dict(), + out_lta_file=dict(), + out_reg_file=dict(), + registered_file=dict(), + ) + outputs = BBRegister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py new file mode 100644 index 0000000000..704dfeaaf3 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_FSSurfaceCommand.py @@ -0,0 +1,43 @@ +# Modified 2017.04.21 by Chris Markiewicz +from __future__ import unicode_literals +import pytest + +from ..base import FSSurfaceCommand +from ... import freesurfer as fs +from ...io import FreeSurferSource + + +def test_FSSurfaceCommand_inputs(): + input_map = dict( + args=dict(argstr='%s'), + environ=dict(nohash=True, usedefault=True), + subjects_dir=dict(), + ) + inputs = FSSurfaceCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_associated_file(tmpdir): + fssrc = FreeSurferSource( + subjects_dir=fs.Info.subjectsdir(), subject_id='fsaverage', hemi='lh') + fssrc.base_dir = tmpdir.strpath + fssrc.resource_monitor = False + + fsavginfo = fssrc.run().outputs.get() + + # Pairs of white/pial files in the same directories + for white, pial in [('lh.white', 'lh.pial'), ('./lh.white', './lh.pial'), + (fsavginfo['white'], fsavginfo['pial'])]: + + # Unspecified paths, possibly with missing hemisphere information, + # are equivalent to using the same directory and hemisphere + for name in ('pial', 'lh.pial', pial): + assert FSSurfaceCommand._associated_file(white, name) == pial + + # With path information, no changes are made + for name in ('./pial', './lh.pial', fsavginfo['pial']): + assert FSSurfaceCommand._associated_file(white, name) == name diff --git a/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py new file mode 100644 index 0000000000..ed32693df5 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_AddXFormToHeader.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AddXFormToHeader + + +def test_AddXFormToHeader_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_name=dict(argstr='-c', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + 
out_file=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + subjects_dir=dict(), + transform=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + verbose=dict(argstr='-v', ), + ) + inputs = AddXFormToHeader.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AddXFormToHeader_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AddXFormToHeader.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py new file mode 100644 index 0000000000..fc5bc39b6f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Aparc2Aseg.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Aparc2Aseg + + +def test_Aparc2Aseg_inputs(): + input_map = dict( + a2009s=dict(argstr='--a2009s', ), + args=dict(argstr='%s', ), + aseg=dict(argstr='--aseg %s', ), + copy_inputs=dict(), + ctxseg=dict(argstr='--ctxseg %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filled=dict(), + hypo_wm=dict(argstr='--hypo-as-wm', ), + label_wm=dict(argstr='--labelwm', ), + lh_annotation=dict(mandatory=True, ), + lh_pial=dict(mandatory=True, ), + lh_ribbon=dict(mandatory=True, ), + lh_white=dict(mandatory=True, ), + out_file=dict( + argstr='--o %s', + mandatory=True, + ), + rh_annotation=dict(mandatory=True, ), + rh_pial=dict(mandatory=True, ), + rh_ribbon=dict(mandatory=True, ), + rh_white=dict(mandatory=True, ), + ribbon=dict(mandatory=True, ), + rip_unknown=dict(argstr='--rip-unknown', ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + volmask=dict(argstr='--volmask', ), + ) + inputs = Aparc2Aseg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Aparc2Aseg_outputs(): + output_map = dict(out_file=dict(argstr='%s', ), ) + outputs = Aparc2Aseg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py new file mode 100644 index 0000000000..7044fdde2f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Apas2Aseg.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Apas2Aseg + + +def test_Apas2Aseg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--i %s', + mandatory=True, + ), + out_file=dict( + argstr='--o %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + inputs = Apas2Aseg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Apas2Aseg_outputs(): + output_map = dict(out_file=dict(argstr='%s', ), ) + outputs = Apas2Aseg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py new file mode 100644 index 0000000000..69f56d7dde --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ApplyMask + + +def test_ApplyMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + invert_xfm=dict(argstr='-invert', ), + keep_mask_deletion_edits=dict(argstr='-keep_mask_deletion_edits', ), + mask_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + mask_thresh=dict(argstr='-T %.4f', ), + out_file=dict( + argstr='%s', + hash_files=True, + keep_extension=True, + name_source=['in_file'], + name_template='%s_masked', + position=-1, + ), + subjects_dir=dict(), + transfer=dict(argstr='-transfer %d', ), + use_abs=dict(argstr='-abs', ), + xfm_file=dict(argstr='-xform %s', ), + xfm_source=dict(argstr='-lta_src %s', ), + xfm_target=dict(argstr='-lta_dst %s', ), + ) + inputs = ApplyMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyMask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ApplyMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py new file mode 100644 index 0000000000..c9e8f85904 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py @@ -0,0 +1,112 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ApplyVolTransform + + +def test_ApplyVolTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fs_target=dict( + argstr='--fstarg', + mandatory=True, + requires=['reg_file'], + xor=('target_file', 'tal', 'fs_target'), + ), + fsl_reg_file=dict( + argstr='--fsl %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + interp=dict(argstr='--interp %s', ), + inverse=dict(argstr='--inv', ), + invert_morph=dict( + argstr='--inv-morph', + requires=['m3z_file'], + ), + lta_file=dict( + argstr='--lta %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + lta_inv_file=dict( + argstr='--lta-inv %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + m3z_file=dict(argstr='--m3z %s', ), + mni_152_reg=dict( + argstr='--regheader', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + no_ded_m3z_path=dict( + argstr='--noDefM3zPath', + requires=['m3z_file'], + ), + no_resample=dict(argstr='--no-resample', ), + reg_file=dict( + argstr='--reg 
%s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + reg_header=dict( + argstr='--regheader', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + source_file=dict( + argstr='--mov %s', + copyfile=False, + mandatory=True, + ), + subject=dict( + argstr='--s %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + subjects_dir=dict(), + tal=dict( + argstr='--tal', + mandatory=True, + xor=('target_file', 'tal', 'fs_target'), + ), + tal_resolution=dict(argstr='--talres %.10f', ), + target_file=dict( + argstr='--targ %s', + mandatory=True, + xor=('target_file', 'tal', 'fs_target'), + ), + transformed_file=dict( + argstr='--o %s', + genfile=True, + ), + xfm_reg_file=dict( + argstr='--xfm %s', + mandatory=True, + xor=('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', + 'xfm_reg_file', 'reg_header', 'mni_152_reg', 'subject'), + ), + ) + inputs = ApplyVolTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyVolTransform_outputs(): + output_map = dict(transformed_file=dict(), ) + outputs = ApplyVolTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py new file mode 100644 index 0000000000..93db55cd30 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Binarize.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Binarize + + +def test_Binarize_inputs(): + input_map = dict( + abs=dict(argstr='--abs', ), + args=dict(argstr='%s', ), + bin_col_num=dict(argstr='--bincol', ), + bin_val=dict(argstr='--binval %d', ), + bin_val_not=dict(argstr='--binvalnot %d', ), + binary_file=dict( + argstr='--o %s', + genfile=True, + ), + count_file=dict(argstr='--count %s', ), + dilate=dict(argstr='--dilate %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + erode=dict(argstr='--erode %d', ), + erode2d=dict(argstr='--erode2d %d', ), + frame_no=dict(argstr='--frame %s', ), + in_file=dict( + argstr='--i %s', + copyfile=False, + mandatory=True, + ), + invert=dict(argstr='--inv', ), + mask_file=dict(argstr='--mask maskvol', ), + mask_thresh=dict(argstr='--mask-thresh %f', ), + match=dict(argstr='--match %d...', ), + max=dict( + argstr='--max %f', + xor=['wm_ven_csf'], + ), + merge_file=dict(argstr='--merge %s', ), + min=dict( + argstr='--min %f', + xor=['wm_ven_csf'], + ), + out_type=dict(argstr='', ), + rmax=dict(argstr='--rmax %f', ), + rmin=dict(argstr='--rmin %f', ), + subjects_dir=dict(), + ventricles=dict(argstr='--ventricles', ), + wm=dict(argstr='--wm', ), + wm_ven_csf=dict( + argstr='--wm+vcsf', + xor=['min', 'max'], + ), + zero_edges=dict(argstr='--zero-edges', ), + zero_slice_edge=dict(argstr='--zero-slice-edges', ), + ) + inputs = Binarize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_Binarize_outputs(): + output_map = dict( + binary_file=dict(), + count_file=dict(), + ) + outputs = Binarize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py new file mode 100644 index 0000000000..1cf35fcedb --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_CALabel.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import CALabel + + +def test_CALabel_inputs(): + input_map = dict( + align=dict(argstr='-align', ), + args=dict(argstr='%s', ), + aseg=dict(argstr='-aseg %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + in_vol=dict(argstr='-r %s', ), + intensities=dict(argstr='-r %s', ), + label=dict(argstr='-l %s', ), + no_big_ventricles=dict(argstr='-nobigventricles', ), + num_threads=dict(), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + prior=dict(argstr='-prior %.1f', ), + relabel_unlikely=dict(argstr='-relabel_unlikely %d %.1f', ), + subjects_dir=dict(), + template=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + transform=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + ) + inputs = CALabel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CALabel_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CALabel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py new file mode 100644 index 0000000000..0e8ec025ec --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_CANormalize.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import CANormalize + + +def test_CANormalize_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atlas=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + control_points=dict(argstr='-c %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + long_file=dict(argstr='-long %s', ), + mask=dict(argstr='-mask %s', ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_norm', + position=-1, + ), + subjects_dir=dict(), + transform=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + ) + inputs = CANormalize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CANormalize_outputs(): + output_map = dict( + control_points=dict(), + out_file=dict(), + ) + outputs = CANormalize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py 
b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py new file mode 100644 index 0000000000..e3cbf52f10 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_CARegister.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import CARegister + + +def test_CARegister_inputs(): + input_map = dict( + A=dict(argstr='-A %d', ), + align=dict(argstr='-align-%s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + invert_and_save=dict( + argstr='-invert-and-save', + position=-4, + ), + l_files=dict(argstr='-l %s', ), + levels=dict(argstr='-levels %d', ), + mask=dict(argstr='-mask %s', ), + no_big_ventricles=dict(argstr='-nobigventricles', ), + num_threads=dict(), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + subjects_dir=dict(), + template=dict( + argstr='%s', + position=-2, + ), + transform=dict(argstr='-T %s', ), + ) + inputs = CARegister.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CARegister_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CARegister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py new file mode 100644 index 0000000000..a7d3ad3bb0 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_CheckTalairachAlignment.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CheckTalairachAlignment + + +def test_CheckTalairachAlignment_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-xfm %s', + mandatory=True, + position=-1, + xor=['subject'], + ), + subject=dict( + argstr='-subj %s', + mandatory=True, + position=-1, + xor=['in_file'], + ), + subjects_dir=dict(), + threshold=dict( + argstr='-T %.3f', + usedefault=True, + ), + ) + inputs = CheckTalairachAlignment.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CheckTalairachAlignment_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CheckTalairachAlignment.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py new file mode 100644 index 0000000000..e4e8efb718 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Concatenate.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Concatenate + + +def test_Concatenate_inputs(): + input_map = dict( + add_val=dict(argstr='--add %f', ), + args=dict(argstr='%s', ), + combine=dict(argstr='--combine', ), + concatenated_file=dict( + argstr='--o %s', + genfile=True, + ), + environ=dict( + nohash=True, + 
usedefault=True, + ), + gmean=dict(argstr='--gmean %d', ), + in_files=dict( + argstr='--i %s...', + mandatory=True, + ), + keep_dtype=dict(argstr='--keep-datatype', ), + mask_file=dict(argstr='--mask %s', ), + max_bonfcor=dict(argstr='--max-bonfcor', ), + max_index=dict(argstr='--max-index', ), + mean_div_n=dict(argstr='--mean-div-n', ), + multiply_by=dict(argstr='--mul %f', ), + multiply_matrix_file=dict(argstr='--mtx %s', ), + paired_stats=dict(argstr='--paired-%s', ), + sign=dict(argstr='--%s', ), + sort=dict(argstr='--sort', ), + stats=dict(argstr='--%s', ), + subjects_dir=dict(), + vote=dict(argstr='--vote', ), + ) + inputs = Concatenate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Concatenate_outputs(): + output_map = dict(concatenated_file=dict(), ) + outputs = Concatenate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py new file mode 100644 index 0000000000..40a465b249 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ConcatenateLTA.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ConcatenateLTA + + +def test_ConcatenateLTA_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_lta1=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + in_lta2=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + invert_1=dict(argstr='-invert1', ), + invert_2=dict(argstr='-invert2', ), + invert_out=dict(argstr='-invertout', ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_lta1'], + name_template='%s_concat', + position=-1, + ), + out_type=dict(argstr='-out_type %d', ), + subject=dict(argstr='-subject %s', ), + subjects_dir=dict(), + tal_source_file=dict( + argstr='-tal %s', + position=-5, + requires=['tal_template_file'], + ), + tal_template_file=dict( + argstr='%s', + position=-4, + requires=['tal_source_file'], + ), + ) + inputs = ConcatenateLTA.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConcatenateLTA_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ConcatenateLTA.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py new file mode 100644 index 0000000000..7999001813 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Contrast.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Contrast + + +def test_Contrast_inputs(): + input_map = dict( + annotation=dict(mandatory=True, ), + args=dict(argstr='%s', ), + copy_inputs=dict(), + cortex=dict(mandatory=True, ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='--%s-only', + mandatory=True, + ), + 
orig=dict(mandatory=True, ), + rawavg=dict(mandatory=True, ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + thickness=dict(mandatory=True, ), + white=dict(mandatory=True, ), + ) + inputs = Contrast.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Contrast_outputs(): + output_map = dict( + out_contrast=dict(), + out_log=dict(), + out_stats=dict(), + ) + outputs = Contrast.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py new file mode 100644 index 0000000000..f29b76df29 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Curvature.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Curvature + + +def test_Curvature_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + averages=dict(argstr='-a %d', ), + copy_input=dict(), + distances=dict(argstr='-distances %d %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + n=dict(argstr='-n', ), + save=dict(argstr='-w', ), + subjects_dir=dict(), + threshold=dict(argstr='-thresh %.3f', ), + ) + inputs = Curvature.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Curvature_outputs(): + output_map = dict( + out_gauss=dict(), + out_mean=dict(), + ) + outputs = Curvature.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py new file mode 100644 index 0000000000..19d6a4772e --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_CurvatureStats.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CurvatureStats + + +def test_CurvatureStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_inputs=dict(), + curvfile1=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + curvfile2=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + min_max=dict(argstr='-m', ), + out_file=dict( + argstr='-o %s', + hash_files=False, + name_source=['hemisphere'], + name_template='%s.curv.stats', + ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-4, + usedefault=True, + ), + subjects_dir=dict(), + surface=dict(argstr='-F %s', ), + values=dict(argstr='-G', ), + write=dict(argstr='--writeCurvatureFiles', ), + ) + inputs = CurvatureStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CurvatureStats_outputs(): + output_map = dict(out_file=dict(), ) + outputs = 
CurvatureStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py new file mode 100644 index 0000000000..efd53fb773 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_DICOMConvert.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DICOMConvert + + +def test_DICOMConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base_output_dir=dict(mandatory=True, ), + dicom_dir=dict(mandatory=True, ), + dicom_info=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + file_mapping=dict(), + ignore_single_slice=dict(requires=['dicom_info'], ), + out_type=dict(usedefault=True, ), + seq_list=dict(requires=['dicom_info'], ), + subject_dir_template=dict(usedefault=True, ), + subject_id=dict(), + subjects_dir=dict(), + ) + inputs = DICOMConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py new file mode 100644 index 0000000000..e681a03871 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_EMRegister.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import EMRegister + + +def test_EMRegister_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + mask=dict(argstr='-mask %s', ), + nbrspacing=dict(argstr='-uns %d', ), + num_threads=dict(), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s_transform.lta', + position=-1, + ), + skull=dict(argstr='-skull', ), + subjects_dir=dict(), + template=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + transform=dict(argstr='-t %s', ), + ) + inputs = EMRegister.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EMRegister_outputs(): + output_map = dict(out_file=dict(), ) + outputs = EMRegister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py new file mode 100644 index 0000000000..f971f4fab9 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_EditWMwithAseg.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import EditWMwithAseg + + +def test_EditWMwithAseg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brain_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + keep_in=dict(argstr='-keep-in', ), + out_file=dict( + 
argstr='%s', + mandatory=True, + position=-1, + ), + seg_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + subjects_dir=dict(), + ) + inputs = EditWMwithAseg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EditWMwithAseg_outputs(): + output_map = dict(out_file=dict(), ) + outputs = EditWMwithAseg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py new file mode 100644 index 0000000000..996d079b48 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_EulerNumber.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import EulerNumber + + +def test_EulerNumber_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + subjects_dir=dict(), + ) + inputs = EulerNumber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EulerNumber_outputs(): + output_map = dict(out_file=dict(), ) + outputs = EulerNumber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py new file mode 100644 index 0000000000..f3f4896a75 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ExtractMainComponent + + +def test_ExtractMainComponent_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='%s', + name_source='in_file', + name_template='%s.maincmp', + position=2, + ), + ) + inputs = ExtractMainComponent.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ExtractMainComponent_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ExtractMainComponent.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py new file mode 100644 index 0000000000..0546d99006 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommand.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import FSCommand + + +def test_FSCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + subjects_dir=dict(), + ) + inputs = 
FSCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py new file mode 100644 index 0000000000..5b8bca1484 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSCommandOpenMP.py @@ -0,0 +1,20 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import FSCommandOpenMP + + +def test_FSCommandOpenMP_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + num_threads=dict(), + subjects_dir=dict(), + ) + inputs = FSCommandOpenMP.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py new file mode 100644 index 0000000000..e791bb5976 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FSScriptCommand.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import FSScriptCommand + + +def test_FSScriptCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + subjects_dir=dict(), + ) + inputs = FSScriptCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py new file mode 100644 index 0000000000..9e6b1cbc8b --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FitMSParams.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FitMSParams + + +def test_FitMSParams_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_list=dict(), + in_files=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_dir=dict( + argstr='%s', + genfile=True, + position=-1, + ), + subjects_dir=dict(), + te_list=dict(), + tr_list=dict(), + xfm_list=dict(), + ) + inputs = FitMSParams.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitMSParams_outputs(): + output_map = dict( + pd_image=dict(), + t1_image=dict(), + t2star_image=dict(), + ) + outputs = FitMSParams.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py new file mode 100644 index 0000000000..8b61823b42 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FixTopology.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import FixTopology + + +def test_FixTopology_inputs(): + input_map = dict( + 
args=dict(argstr='%s', ), + copy_inputs=dict(mandatory=True, ), + environ=dict( + nohash=True, + usedefault=True, + ), + ga=dict(argstr='-ga', ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + in_brain=dict(mandatory=True, ), + in_inflated=dict(mandatory=True, ), + in_orig=dict(mandatory=True, ), + in_wm=dict(mandatory=True, ), + mgz=dict(argstr='-mgz', ), + seed=dict(argstr='-seed %d', ), + sphere=dict(argstr='-sphere %s', ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-2, + usedefault=True, + ), + subjects_dir=dict(), + ) + inputs = FixTopology.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FixTopology_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FixTopology.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py new file mode 100644 index 0000000000..8608444fc8 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_FuseSegmentations.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..longitudinal import FuseSegmentations + + +def test_FuseSegmentations_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_norms=dict( + argstr='-n %s', + mandatory=True, + ), + in_segmentations=dict( + argstr='-a %s', + mandatory=True, + ), + in_segmentations_noCC=dict( + argstr='-c %s', + mandatory=True, + ), + out_file=dict( + mandatory=True, + position=-1, + ), + subject_id=dict( + argstr='%s', + position=-3, + ), + subjects_dir=dict(), + timepoints=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + ) + inputs = FuseSegmentations.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FuseSegmentations_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FuseSegmentations.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py new file mode 100644 index 0000000000..828bbfab03 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_GLMFit.py @@ -0,0 +1,144 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import GLMFit + + +def test_GLMFit_inputs(): + input_map = dict( + allow_ill_cond=dict(argstr='--illcond', ), + allow_repeated_subjects=dict(argstr='--allowsubjrep', ), + args=dict(argstr='%s', ), + calc_AR1=dict(argstr='--tar1', ), + check_opts=dict(argstr='--checkopts', ), + compute_log_y=dict(argstr='--logy', ), + contrast=dict(argstr='--C %s...', ), + cortex=dict( + argstr='--cortex', + xor=['label_file'], + ), + debug=dict(argstr='--debug', ), + design=dict( + argstr='--X %s', + xor=('fsgd', 'design', 'one_sample'), + ), + diag=dict(argstr='--diag %d', ), + diag_cluster=dict(argstr='--diag-cluster', ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
fixed_fx_dof=dict( + argstr='--ffxdof %d', + xor=['fixed_fx_dof_file'], + ), + fixed_fx_dof_file=dict( + argstr='--ffxdofdat %d', + xor=['fixed_fx_dof'], + ), + fixed_fx_var=dict(argstr='--yffxvar %s', ), + force_perm=dict(argstr='--perm-force', ), + fsgd=dict( + argstr='--fsgd %s %s', + xor=('fsgd', 'design', 'one_sample'), + ), + fwhm=dict(argstr='--fwhm %f', ), + glm_dir=dict( + argstr='--glmdir %s', + genfile=True, + ), + hemi=dict(), + in_file=dict( + argstr='--y %s', + copyfile=False, + mandatory=True, + ), + invert_mask=dict(argstr='--mask-inv', ), + label_file=dict( + argstr='--label %s', + xor=['cortex'], + ), + mask_file=dict(argstr='--mask %s', ), + no_contrast_ok=dict(argstr='--no-contrasts-ok', ), + no_est_fwhm=dict(argstr='--no-est-fwhm', ), + no_mask_smooth=dict(argstr='--no-mask-smooth', ), + no_prune=dict( + argstr='--no-prune', + xor=['prunethresh'], + ), + one_sample=dict( + argstr='--osgm', + xor=('one_sample', 'fsgd', 'design', 'contrast'), + ), + pca=dict(argstr='--pca', ), + per_voxel_reg=dict(argstr='--pvr %s...', ), + profile=dict(argstr='--profile %d', ), + prune=dict(argstr='--prune', ), + prune_thresh=dict( + argstr='--prune_thr %f', + xor=['noprune'], + ), + resynth_test=dict(argstr='--resynthtest %d', ), + save_cond=dict(argstr='--save-cond', ), + save_estimate=dict(argstr='--yhat-save', ), + save_res_corr_mtx=dict(argstr='--eres-scm', ), + save_residual=dict(argstr='--eres-save', ), + seed=dict(argstr='--seed %d', ), + self_reg=dict(argstr='--selfreg %d %d %d', ), + sim_done_file=dict(argstr='--sim-done %s', ), + sim_sign=dict(argstr='--sim-sign %s', ), + simulation=dict(argstr='--sim %s %d %f %s', ), + subject_id=dict(), + subjects_dir=dict(), + surf=dict( + argstr='--surf %s %s %s', + requires=['subject_id', 'hemi'], + ), + surf_geo=dict(usedefault=True, ), + synth=dict(argstr='--synth', ), + uniform=dict(argstr='--uniform %f %f', ), + var_fwhm=dict(argstr='--var-fwhm %f', ), + vox_dump=dict(argstr='--voxdump %d %d %d', ), + weight_file=dict(xor=['weighted_ls'], ), + weight_inv=dict( + argstr='--w-inv', + xor=['weighted_ls'], + ), + weight_sqrt=dict( + argstr='--w-sqrt', + xor=['weighted_ls'], + ), + weighted_ls=dict( + argstr='--wls %s', + xor=('weight_file', 'weight_inv', 'weight_sqrt'), + ), + ) + inputs = GLMFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GLMFit_outputs(): + output_map = dict( + beta_file=dict(), + dof_file=dict(), + error_file=dict(), + error_stddev_file=dict(), + error_var_file=dict(), + estimate_file=dict(), + frame_eigenvectors=dict(), + ftest_file=dict(), + fwhm_file=dict(), + gamma_file=dict(), + gamma_var_file=dict(), + glm_dir=dict(), + mask_file=dict(), + sig_file=dict(), + singular_values=dict(), + spatial_eigenvectors=dict(), + svd_stats_file=dict(), + ) + outputs = GLMFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py new file mode 100644 index 0000000000..2fa225e87e --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ImageInfo + + +def test_ImageInfo_inputs(): + input_map = dict( + 
args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + position=1, + ), + subjects_dir=dict(), + ) + inputs = ImageInfo.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageInfo_outputs(): + output_map = dict( + TE=dict(), + TI=dict(), + TR=dict(), + data_type=dict(), + dimensions=dict(), + file_format=dict(), + info=dict(), + orientation=dict(), + out_file=dict(), + ph_enc_dir=dict(), + vox_sizes=dict(), + ) + outputs = ImageInfo.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py new file mode 100644 index 0000000000..49f5e6b48f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Jacobian.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Jacobian + + +def test_Jacobian_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_mappedsurf=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_origsurf=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['in_origsurf'], + name_template='%s.jacobian', + position=-1, + ), + subjects_dir=dict(), + ) + inputs = Jacobian.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Jacobian_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Jacobian.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py new file mode 100644 index 0000000000..bcbf971d01 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_LTAConvert.py @@ -0,0 +1,80 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import LTAConvert + + +def test_LTAConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_fsl=dict( + argstr='--infsl %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + in_itk=dict( + argstr='--initk %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + in_lta=dict( + argstr='--inlta %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + in_mni=dict( + argstr='--inmni %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + in_niftyreg=dict( + argstr='--inniftyreg %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + in_reg=dict( + argstr='--inreg %s', + mandatory=True, + xor=('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', + 'in_itk'), + ), + invert=dict(argstr='--invert', ), + ltavox2vox=dict( + 
argstr='--ltavox2vox', + requires=['out_lta'], + ), + out_fsl=dict(argstr='--outfsl %s', ), + out_itk=dict(argstr='--outitk %s', ), + out_lta=dict(argstr='--outlta %s', ), + out_mni=dict(argstr='--outmni %s', ), + out_reg=dict(argstr='--outreg %s', ), + source_file=dict(argstr='--src %s', ), + target_conform=dict(argstr='--trgconform', ), + target_file=dict(argstr='--trg %s', ), + ) + inputs = LTAConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LTAConvert_outputs(): + output_map = dict( + out_fsl=dict(), + out_itk=dict(), + out_lta=dict(), + out_mni=dict(), + out_reg=dict(), + ) + outputs = LTAConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py new file mode 100644 index 0000000000..2d7761006b --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Annot.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Label2Annot + + +def test_Label2Annot_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + color_table=dict(argstr='--ctab %s', ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='--hemi %s', + mandatory=True, + ), + in_labels=dict( + argstr='--l %s...', + mandatory=True, + ), + keep_max=dict(argstr='--maxstatwinner', ), + orig=dict(mandatory=True, ), + out_annot=dict( + argstr='--a %s', + mandatory=True, + ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + verbose_off=dict(argstr='--noverbose', ), + ) + inputs = Label2Annot.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Label2Annot_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Label2Annot.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py new file mode 100644 index 0000000000..7511cd0dbb --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Label.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Label2Label + + +def test_Label2Label_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='--hemi %s', + mandatory=True, + ), + out_file=dict( + argstr='--trglabel %s', + hash_files=False, + keep_extension=True, + name_source=['source_label'], + name_template='%s_converted', + ), + registration_method=dict( + argstr='--regmethod %s', + usedefault=True, + ), + source_label=dict( + argstr='--srclabel %s', + mandatory=True, + ), + source_sphere_reg=dict(mandatory=True, ), + source_subject=dict( + argstr='--srcsubject %s', + mandatory=True, + ), + source_white=dict(mandatory=True, ), + sphere_reg=dict(mandatory=True, ), + 
subject_id=dict( + argstr='--trgsubject %s', + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + white=dict(mandatory=True, ), + ) + inputs = Label2Label.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Label2Label_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Label2Label.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py new file mode 100644 index 0000000000..fb2726635f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Label2Vol.py @@ -0,0 +1,83 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Label2Vol + + +def test_Label2Vol_inputs(): + input_map = dict( + annot_file=dict( + argstr='--annot %s', + copyfile=False, + mandatory=True, + requires=('subject_id', 'hemi'), + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + ), + aparc_aseg=dict( + argstr='--aparc+aseg', + mandatory=True, + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_thresh=dict(argstr='--fillthresh %g', ), + hemi=dict(argstr='--hemi %s', ), + identity=dict( + argstr='--identity', + xor=('reg_file', 'reg_header', 'identity'), + ), + invert_mtx=dict(argstr='--invertmtx', ), + label_file=dict( + argstr='--label %s...', + copyfile=False, + mandatory=True, + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + ), + label_hit_file=dict(argstr='--hits %s', ), + label_voxel_volume=dict(argstr='--labvoxvol %f', ), + map_label_stat=dict(argstr='--label-stat %s', ), + native_vox2ras=dict(argstr='--native-vox2ras', ), + proj=dict( + argstr='--proj %s %f %f %f', + requires=('subject_id', 'hemi'), + ), + reg_file=dict( + argstr='--reg %s', + xor=('reg_file', 'reg_header', 'identity'), + ), + reg_header=dict( + argstr='--regheader %s', + xor=('reg_file', 'reg_header', 'identity'), + ), + seg_file=dict( + argstr='--seg %s', + copyfile=False, + mandatory=True, + xor=('label_file', 'annot_file', 'seg_file', 'aparc_aseg'), + ), + subject_id=dict(argstr='--subject %s', ), + subjects_dir=dict(), + surface=dict(argstr='--surf %s', ), + template_file=dict( + argstr='--temp %s', + mandatory=True, + ), + vol_label_file=dict( + argstr='--o %s', + genfile=True, + ), + ) + inputs = Label2Vol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Label2Vol_outputs(): + output_map = dict(vol_label_file=dict(), ) + outputs = Label2Vol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py new file mode 100644 index 0000000000..7b6ae4a945 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MNIBiasCorrection.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import 
MNIBiasCorrection + + +def test_MNIBiasCorrection_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + distance=dict(argstr='--distance %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--i %s', + mandatory=True, + ), + iterations=dict( + argstr='--n %d', + usedefault=True, + ), + mask=dict(argstr='--mask %s', ), + no_rescale=dict(argstr='--no-rescale', ), + out_file=dict( + argstr='--o %s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_output', + ), + protocol_iterations=dict(argstr='--proto-iters %d', ), + shrink=dict(argstr='--shrink %d', ), + stop=dict(argstr='--stop %f', ), + subjects_dir=dict(), + transform=dict(argstr='--uchar %s', ), + ) + inputs = MNIBiasCorrection.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MNIBiasCorrection_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MNIBiasCorrection.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py new file mode 100644 index 0000000000..b4b7436120 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MPRtoMNI305.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MPRtoMNI305 + + +def test_MPRtoMNI305_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + usedefault=True, + ), + reference_dir=dict( + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + target=dict( + mandatory=True, + usedefault=True, + ), + ) + inputs = MPRtoMNI305.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MPRtoMNI305_outputs(): + output_map = dict( + log_file=dict(usedefault=True, ), + out_file=dict(), + ) + outputs = MPRtoMNI305.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py new file mode 100644 index 0000000000..b44cf7a308 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIConvert.py @@ -0,0 +1,117 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRIConvert + + +def test_MRIConvert_inputs(): + input_map = dict( + apply_inv_transform=dict(argstr='--apply_inverse_transform %s', ), + apply_transform=dict(argstr='--apply_transform %s', ), + args=dict(argstr='%s', ), + ascii=dict(argstr='--ascii', ), + autoalign_matrix=dict(argstr='--autoalign %s', ), + color_file=dict(argstr='--color_file %s', ), + conform=dict(argstr='--conform', ), + conform_min=dict(argstr='--conform_min', ), + conform_size=dict(argstr='--conform_size %s', ), + crop_center=dict(argstr='--crop %d %d %d', ), + crop_gdf=dict(argstr='--crop_gdf', ), + crop_size=dict(argstr='--cropsize %d %d %d', ), + cut_ends=dict(argstr='--cutends %d', ), + 
cw256=dict(argstr='--cw256', ), + devolve_transform=dict(argstr='--devolvexfm %s', ), + drop_n=dict(argstr='--ndrop %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_parcellation=dict(argstr='--fill_parcellation', ), + force_ras=dict(argstr='--force_ras_good', ), + frame=dict(argstr='--frame %d', ), + frame_subsample=dict(argstr='--fsubsample %d %d %d', ), + fwhm=dict(argstr='--fwhm %f', ), + in_center=dict(argstr='--in_center %s', ), + in_file=dict( + argstr='--input_volume %s', + mandatory=True, + position=-2, + ), + in_i_dir=dict(argstr='--in_i_direction %f %f %f', ), + in_i_size=dict(argstr='--in_i_size %d', ), + in_info=dict(argstr='--in_info', ), + in_j_dir=dict(argstr='--in_j_direction %f %f %f', ), + in_j_size=dict(argstr='--in_j_size %d', ), + in_k_dir=dict(argstr='--in_k_direction %f %f %f', ), + in_k_size=dict(argstr='--in_k_size %d', ), + in_like=dict(argstr='--in_like %s', ), + in_matrix=dict(argstr='--in_matrix', ), + in_orientation=dict(argstr='--in_orientation %s', ), + in_scale=dict(argstr='--scale %f', ), + in_stats=dict(argstr='--in_stats', ), + in_type=dict(argstr='--in_type %s', ), + invert_contrast=dict(argstr='--invert_contrast %f', ), + midframe=dict(argstr='--mid-frame', ), + no_change=dict(argstr='--nochange', ), + no_scale=dict(argstr='--no_scale 1', ), + no_translate=dict(argstr='--no_translate', ), + no_write=dict(argstr='--no_write', ), + out_center=dict(argstr='--out_center %f %f %f', ), + out_datatype=dict(argstr='--out_data_type %s', ), + out_file=dict( + argstr='--output_volume %s', + genfile=True, + position=-1, + ), + out_i_count=dict(argstr='--out_i_count %d', ), + out_i_dir=dict(argstr='--out_i_direction %f %f %f', ), + out_i_size=dict(argstr='--out_i_size %d', ), + out_info=dict(argstr='--out_info', ), + out_j_count=dict(argstr='--out_j_count %d', ), + out_j_dir=dict(argstr='--out_j_direction %f %f %f', ), + out_j_size=dict(argstr='--out_j_size %d', ), + out_k_count=dict(argstr='--out_k_count %d', ), + out_k_dir=dict(argstr='--out_k_direction %f %f %f', ), + out_k_size=dict(argstr='--out_k_size %d', ), + out_matrix=dict(argstr='--out_matrix', ), + out_orientation=dict(argstr='--out_orientation %s', ), + out_scale=dict(argstr='--out-scale %d', ), + out_stats=dict(argstr='--out_stats', ), + out_type=dict(argstr='--out_type %s', ), + parse_only=dict(argstr='--parse_only', ), + read_only=dict(argstr='--read_only', ), + reorder=dict(argstr='--reorder %d %d %d', ), + resample_type=dict(argstr='--resample_type %s', ), + reslice_like=dict(argstr='--reslice_like %s', ), + sdcm_list=dict(argstr='--sdcmlist %s', ), + skip_n=dict(argstr='--nskip %d', ), + slice_bias=dict(argstr='--slice-bias %f', ), + slice_crop=dict(argstr='--slice-crop %d %d', ), + slice_reverse=dict(argstr='--slice-reverse', ), + smooth_parcellation=dict(argstr='--smooth_parcellation', ), + sphinx=dict(argstr='--sphinx', ), + split=dict(argstr='--split', ), + status_file=dict(argstr='--status %s', ), + subject_name=dict(argstr='--subject_name %s', ), + subjects_dir=dict(), + te=dict(argstr='-te %d', ), + template_info=dict(argstr='--template_info', ), + template_type=dict(argstr='--template_type %s', ), + ti=dict(argstr='-ti %d', ), + tr=dict(argstr='-tr %d', ), + unwarp_gradient=dict(argstr='--unwarp_gradient_nonlinearity', ), + vox_size=dict(argstr='-voxsize %f %f %f', ), + zero_ge_z_offset=dict(argstr='--zero_ge_z_offset', ), + zero_outlines=dict(argstr='--zero_outlines', ), + ) + inputs = MRIConvert.input_spec() + + for key, metadata in list(input_map.items()): + for 
metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIConvert_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py new file mode 100644 index 0000000000..6c12cabdc2 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRICoreg.py @@ -0,0 +1,86 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MRICoreg + + +def test_MRICoreg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brute_force_limit=dict( + argstr='--bf-lim %g', + xor=['no_brute_force'], + ), + brute_force_samples=dict( + argstr='--bf-nsamp %d', + xor=['no_brute_force'], + ), + conform_reference=dict(argstr='--conf-ref', ), + dof=dict(argstr='--dof %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ftol=dict(argstr='--ftol %e', ), + initial_rotation=dict(argstr='--rot %g %g %g', ), + initial_scale=dict(argstr='--scale %g %g %g', ), + initial_shear=dict(argstr='--shear %g %g %g', ), + initial_translation=dict(argstr='--trans %g %g %g', ), + linmintol=dict(argstr='--linmintol %e', ), + max_iters=dict(argstr='--nitersmax %d', ), + no_brute_force=dict(argstr='--no-bf', ), + no_coord_dithering=dict(argstr='--no-coord-dither', ), + no_cras0=dict(argstr='--no-cras0', ), + no_intensity_dithering=dict(argstr='--no-intensity-dither', ), + no_smooth=dict(argstr='--no-smooth', ), + num_threads=dict(argstr='--threads %d', ), + out_lta_file=dict( + argstr='--lta %s', + usedefault=True, + ), + out_params_file=dict(argstr='--params %s', ), + out_reg_file=dict(argstr='--regdat %s', ), + ref_fwhm=dict(argstr='--ref-fwhm', ), + reference_file=dict( + argstr='--ref %s', + copyfile=False, + mandatory=True, + xor=['subject_id'], + ), + reference_mask=dict( + argstr='--ref-mask %s', + position=2, + ), + saturation_threshold=dict(argstr='--sat %g', ), + sep=dict(argstr='--sep %s...', ), + source_file=dict( + argstr='--mov %s', + copyfile=False, + mandatory=True, + ), + source_mask=dict(argstr='--mov-mask', ), + source_oob=dict(argstr='--mov-oob', ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + position=1, + requires=['subjects_dir'], + xor=['reference_file'], + ), + subjects_dir=dict(argstr='--sd %s', ), + ) + inputs = MRICoreg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRICoreg_outputs(): + output_map = dict( + out_lta_file=dict(), + out_params_file=dict(), + out_reg_file=dict(), + ) + outputs = MRICoreg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py new file mode 100644 index 0000000000..719986961d --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIFill.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIFill + + +def test_MRIFill_inputs(): + input_map = dict( + 
args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + log_file=dict(argstr='-a %s', ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + segmentation=dict(argstr='-segmentation %s', ), + subjects_dir=dict(), + transform=dict(argstr='-xform %s', ), + ) + inputs = MRIFill.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIFill_outputs(): + output_map = dict( + log_file=dict(), + out_file=dict(), + ) + outputs = MRIFill.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py new file mode 100644 index 0000000000..f43d217256 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIMarchingCubes.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIMarchingCubes + + +def test_MRIMarchingCubes_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + connectivity_value=dict( + argstr='%d', + position=-1, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + label_value=dict( + argstr='%d', + mandatory=True, + position=2, + ), + out_file=dict( + argstr='./%s', + genfile=True, + position=-2, + ), + subjects_dir=dict(), + ) + inputs = MRIMarchingCubes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIMarchingCubes_outputs(): + output_map = dict(surface=dict(), ) + outputs = MRIMarchingCubes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py new file mode 100644 index 0000000000..2e7c7d2ea5 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIPretess.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIPretess + + +def test_MRIPretess_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_filled=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + in_norm=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + keep=dict(argstr='-keep', ), + label=dict( + argstr='%s', + mandatory=True, + position=-3, + usedefault=True, + ), + nocorners=dict(argstr='-nocorners', ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source=['in_filled'], + name_template='%s_pretesswm', + position=-1, + ), + subjects_dir=dict(), + test=dict(argstr='-test', ), + ) + inputs = MRIPretess.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIPretess_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIPretess.output_spec() + + for key, 
metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py new file mode 100644 index 0000000000..c92b4fea15 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreproc.py @@ -0,0 +1,83 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MRISPreproc + + +def test_MRISPreproc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsgd_file=dict( + argstr='--fsgd %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + fwhm=dict( + argstr='--fwhm %f', + xor=['num_iters'], + ), + fwhm_source=dict( + argstr='--fwhm-src %f', + xor=['num_iters_source'], + ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + num_iters=dict( + argstr='--niters %d', + xor=['fwhm'], + ), + num_iters_source=dict( + argstr='--niterssrc %d', + xor=['fwhm_source'], + ), + out_file=dict( + argstr='--out %s', + genfile=True, + ), + proj_frac=dict(argstr='--projfrac %s', ), + smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), + source_format=dict(argstr='--srcfmt %s', ), + subject_file=dict( + argstr='--f %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + subjects=dict( + argstr='--s %s...', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + subjects_dir=dict(), + surf_area=dict( + argstr='--area %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + surf_dir=dict(argstr='--surfdir %s', ), + surf_measure=dict( + argstr='--meas %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + surf_measure_file=dict( + argstr='--is %s...', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + target=dict( + argstr='--target %s', + mandatory=True, + ), + vol_measure_file=dict(argstr='--iv %s %s...', ), + ) + inputs = MRISPreproc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRISPreproc_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRISPreproc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py new file mode 100644 index 0000000000..e72d8adb9e --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRISPreprocReconAll.py @@ -0,0 +1,95 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MRISPreprocReconAll + + +def test_MRISPreprocReconAll_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + fsgd_file=dict( + argstr='--fsgd %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + fwhm=dict( + argstr='--fwhm %f', + xor=['num_iters'], + ), + fwhm_source=dict( + argstr='--fwhm-src %f', + xor=['num_iters_source'], + ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + lh_surfreg_target=dict(requires=['surfreg_files'], ), + num_iters=dict( + argstr='--niters %d', + xor=['fwhm'], + ), + num_iters_source=dict( + 
argstr='--niterssrc %d', + xor=['fwhm_source'], + ), + out_file=dict( + argstr='--out %s', + genfile=True, + ), + proj_frac=dict(argstr='--projfrac %s', ), + rh_surfreg_target=dict(requires=['surfreg_files'], ), + smooth_cortex_only=dict(argstr='--smooth-cortex-only', ), + source_format=dict(argstr='--srcfmt %s', ), + subject_file=dict( + argstr='--f %s', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + subject_id=dict( + argstr='--s %s', + usedefault=True, + xor=('subjects', 'fsgd_file', 'subject_file', 'subject_id'), + ), + subjects=dict( + argstr='--s %s...', + xor=('subjects', 'fsgd_file', 'subject_file'), + ), + subjects_dir=dict(), + surf_area=dict( + argstr='--area %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + surf_dir=dict(argstr='--surfdir %s', ), + surf_measure=dict( + argstr='--meas %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + surf_measure_file=dict( + argstr='--meas %s', + xor=('surf_measure', 'surf_measure_file', 'surf_area'), + ), + surfreg_files=dict( + argstr='--surfreg %s', + requires=['lh_surfreg_target', 'rh_surfreg_target'], + ), + target=dict( + argstr='--target %s', + mandatory=True, + ), + vol_measure_file=dict(argstr='--iv %s %s...', ), + ) + inputs = MRISPreprocReconAll.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRISPreprocReconAll_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRISPreprocReconAll.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py new file mode 100644 index 0000000000..a5899cfe70 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRITessellate.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRITessellate + + +def test_MRITessellate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + label_value=dict( + argstr='%d', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + subjects_dir=dict(), + tesselate_all_voxels=dict(argstr='-a', ), + use_real_RAS_coordinates=dict(argstr='-n', ), + ) + inputs = MRITessellate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRITessellate_outputs(): + output_map = dict(surface=dict(), ) + outputs = MRITessellate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py new file mode 100644 index 0000000000..441be3e0a4 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCALabel.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRIsCALabel + + +def test_MRIsCALabel_inputs(): + input_map = dict( + 
args=dict(argstr='%s', ), + aseg=dict(argstr='-aseg %s', ), + canonsurf=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + classifier=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + copy_inputs=dict(), + curv=dict(mandatory=True, ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + label=dict(argstr='-l %s', ), + num_threads=dict(), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['hemisphere'], + name_template='%s.aparc.annot', + position=-1, + ), + seed=dict(argstr='-seed %d', ), + smoothwm=dict(mandatory=True, ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-5, + usedefault=True, + ), + subjects_dir=dict(), + sulc=dict(mandatory=True, ), + ) + inputs = MRIsCALabel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsCALabel_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIsCALabel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py new file mode 100644 index 0000000000..cbd68c8457 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCalc.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIsCalc + + +def test_MRIsCalc_inputs(): + input_map = dict( + action=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file1=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + in_file2=dict( + argstr='%s', + position=-1, + xor=['in_float', 'in_int'], + ), + in_float=dict( + argstr='%f', + position=-1, + xor=['in_file2', 'in_int'], + ), + in_int=dict( + argstr='%d', + position=-1, + xor=['in_file2', 'in_float'], + ), + out_file=dict( + argstr='-o %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + inputs = MRIsCalc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsCalc_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIsCalc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py new file mode 100644 index 0000000000..657292c5c7 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsCombine.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIsCombine + + +def test_MRIsCombine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='--combinesurfs %s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='%s', + genfile=True, + mandatory=True, + position=-1, + ), + subjects_dir=dict(), + ) + inputs = MRIsCombine.input_spec() + + for key, metadata in 
list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsCombine_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIsCombine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py new file mode 100644 index 0000000000..21622b2449 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsConvert.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIsConvert + + +def test_MRIsConvert_inputs(): + input_map = dict( + annot_file=dict(argstr='--annot %s', ), + args=dict(argstr='%s', ), + dataarray_num=dict(argstr='--da_num %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + functional_file=dict(argstr='-f %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + label_file=dict(argstr='--label %s', ), + labelstats_outfile=dict(argstr='--labelstats %s', ), + normal=dict(argstr='-n', ), + origname=dict(argstr='-o %s', ), + out_datatype=dict( + mandatory=True, + xor=['out_file'], + ), + out_file=dict( + argstr='%s', + genfile=True, + mandatory=True, + position=-1, + xor=['out_datatype'], + ), + parcstats_file=dict(argstr='--parcstats %s', ), + patch=dict(argstr='-p', ), + rescale=dict(argstr='-r', ), + scalarcurv_file=dict(argstr='-c %s', ), + scale=dict(argstr='-s %.3f', ), + subjects_dir=dict(), + talairachxfm_subjid=dict(argstr='-t %s', ), + to_scanner=dict(argstr='--to-scanner', ), + to_tkr=dict(argstr='--to-tkr', ), + vertex=dict(argstr='-v', ), + xyz_ascii=dict(argstr='-a', ), + ) + inputs = MRIsConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsConvert_outputs(): + output_map = dict(converted=dict(), ) + outputs = MRIsConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py new file mode 100644 index 0000000000..8639c1e7b6 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsExpand.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIsExpand + + +def test_MRIsExpand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + distance=dict( + argstr='%g', + mandatory=True, + position=-2, + ), + dt=dict(argstr='-T %g', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-3, + ), + nsurfaces=dict(argstr='-N %d', ), + out_name=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + pial=dict( + argstr='-pial %s', + copyfile=False, + ), + smooth_averages=dict(argstr='-A %d', ), + sphere=dict( + copyfile=False, + usedefault=True, + ), + spring=dict(argstr='-S %g', ), + subjects_dir=dict(), + thickness=dict(argstr='-thickness', ), + thickness_name=dict( + argstr='-thickness_name %s', + copyfile=False, + ), + write_iterations=dict(argstr='-W %d', ), 
+ ) + inputs = MRIsExpand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsExpand_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRIsExpand.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py new file mode 100644 index 0000000000..73e48a2521 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MRIsInflate.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRIsInflate + + +def test_MRIsInflate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + no_save_sulc=dict( + argstr='-no-save-sulc', + xor=['out_sulc'], + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s.inflated', + position=-1, + ), + out_sulc=dict(xor=['no_save_sulc'], ), + subjects_dir=dict(), + ) + inputs = MRIsInflate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRIsInflate_outputs(): + output_map = dict( + out_file=dict(), + out_sulc=dict(), + ) + outputs = MRIsInflate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py new file mode 100644 index 0000000000..f3453818c5 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MS_LDA.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MS_LDA + + +def test_MS_LDA_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + conform=dict(argstr='-conform', ), + environ=dict( + nohash=True, + usedefault=True, + ), + images=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + label_file=dict(argstr='-label %s', ), + lda_labels=dict( + argstr='-lda %s', + mandatory=True, + sep=' ', + ), + mask_file=dict(argstr='-mask %s', ), + shift=dict(argstr='-shift %d', ), + subjects_dir=dict(), + use_weights=dict(argstr='-W', ), + vol_synth_file=dict( + argstr='-synth %s', + mandatory=True, + ), + weight_file=dict( + argstr='-weight %s', + mandatory=True, + ), + ) + inputs = MS_LDA.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MS_LDA_outputs(): + output_map = dict( + vol_synth_file=dict(), + weight_file=dict(), + ) + outputs = MS_LDA.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py new file mode 100644 index 
0000000000..5dfb555346 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeAverageSubject.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MakeAverageSubject + + +def test_MakeAverageSubject_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + out_name=dict( + argstr='--out %s', + usedefault=True, + ), + subjects_dir=dict(), + subjects_ids=dict( + argstr='--subjects %s', + mandatory=True, + sep=' ', + ), + ) + inputs = MakeAverageSubject.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MakeAverageSubject_outputs(): + output_map = dict(average_subject_name=dict(), ) + outputs = MakeAverageSubject.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py new file mode 100644 index 0000000000..7ff18eeba6 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_MakeSurfaces.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MakeSurfaces + + +def test_MakeSurfaces_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_mtl=dict(argstr='-fix_mtl', ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + in_T1=dict(argstr='-T1 %s', ), + in_aseg=dict(argstr='-aseg %s', ), + in_filled=dict(mandatory=True, ), + in_label=dict(xor=['noaparc'], ), + in_orig=dict( + argstr='-orig %s', + mandatory=True, + ), + in_white=dict(), + in_wm=dict(mandatory=True, ), + longitudinal=dict(argstr='-long', ), + maximum=dict(argstr='-max %.1f', ), + mgz=dict(argstr='-mgz', ), + no_white=dict(argstr='-nowhite', ), + noaparc=dict( + argstr='-noaparc', + xor=['in_label'], + ), + orig_pial=dict( + argstr='-orig_pial %s', + requires=['in_label'], + ), + orig_white=dict(argstr='-orig_white %s', ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-2, + usedefault=True, + ), + subjects_dir=dict(), + white=dict(argstr='-white %s', ), + white_only=dict(argstr='-whiteonly', ), + ) + inputs = MakeSurfaces.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MakeSurfaces_outputs(): + output_map = dict( + out_area=dict(), + out_cortex=dict(), + out_curv=dict(), + out_pial=dict(), + out_thickness=dict(), + out_white=dict(), + ) + outputs = MakeSurfaces.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py new file mode 100644 index 0000000000..1923b5ceed --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Normalize.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Normalize + + +def test_Normalize_inputs(): + input_map 
= dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradient=dict(argstr='-g %d', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + mask=dict(argstr='-mask %s', ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_norm', + position=-1, + ), + segmentation=dict(argstr='-aseg %s', ), + subjects_dir=dict(), + transform=dict(), + ) + inputs = Normalize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Normalize_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Normalize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py new file mode 100644 index 0000000000..1091bdbdc3 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_OneSampleTTest.py @@ -0,0 +1,144 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import OneSampleTTest + + +def test_OneSampleTTest_inputs(): + input_map = dict( + allow_ill_cond=dict(argstr='--illcond', ), + allow_repeated_subjects=dict(argstr='--allowsubjrep', ), + args=dict(argstr='%s', ), + calc_AR1=dict(argstr='--tar1', ), + check_opts=dict(argstr='--checkopts', ), + compute_log_y=dict(argstr='--logy', ), + contrast=dict(argstr='--C %s...', ), + cortex=dict( + argstr='--cortex', + xor=['label_file'], + ), + debug=dict(argstr='--debug', ), + design=dict( + argstr='--X %s', + xor=('fsgd', 'design', 'one_sample'), + ), + diag=dict(argstr='--diag %d', ), + diag_cluster=dict(argstr='--diag-cluster', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_fx_dof=dict( + argstr='--ffxdof %d', + xor=['fixed_fx_dof_file'], + ), + fixed_fx_dof_file=dict( + argstr='--ffxdofdat %d', + xor=['fixed_fx_dof'], + ), + fixed_fx_var=dict(argstr='--yffxvar %s', ), + force_perm=dict(argstr='--perm-force', ), + fsgd=dict( + argstr='--fsgd %s %s', + xor=('fsgd', 'design', 'one_sample'), + ), + fwhm=dict(argstr='--fwhm %f', ), + glm_dir=dict( + argstr='--glmdir %s', + genfile=True, + ), + hemi=dict(), + in_file=dict( + argstr='--y %s', + copyfile=False, + mandatory=True, + ), + invert_mask=dict(argstr='--mask-inv', ), + label_file=dict( + argstr='--label %s', + xor=['cortex'], + ), + mask_file=dict(argstr='--mask %s', ), + no_contrast_ok=dict(argstr='--no-contrasts-ok', ), + no_est_fwhm=dict(argstr='--no-est-fwhm', ), + no_mask_smooth=dict(argstr='--no-mask-smooth', ), + no_prune=dict( + argstr='--no-prune', + xor=['prunethresh'], + ), + one_sample=dict( + argstr='--osgm', + xor=('one_sample', 'fsgd', 'design', 'contrast'), + ), + pca=dict(argstr='--pca', ), + per_voxel_reg=dict(argstr='--pvr %s...', ), + profile=dict(argstr='--profile %d', ), + prune=dict(argstr='--prune', ), + prune_thresh=dict( + argstr='--prune_thr %f', + xor=['noprune'], + ), + resynth_test=dict(argstr='--resynthtest %d', ), + save_cond=dict(argstr='--save-cond', ), + save_estimate=dict(argstr='--yhat-save', ), + save_res_corr_mtx=dict(argstr='--eres-scm', ), + save_residual=dict(argstr='--eres-save', ), + seed=dict(argstr='--seed %d', ), + self_reg=dict(argstr='--selfreg %d %d %d', ), + 
sim_done_file=dict(argstr='--sim-done %s', ), + sim_sign=dict(argstr='--sim-sign %s', ), + simulation=dict(argstr='--sim %s %d %f %s', ), + subject_id=dict(), + subjects_dir=dict(), + surf=dict( + argstr='--surf %s %s %s', + requires=['subject_id', 'hemi'], + ), + surf_geo=dict(usedefault=True, ), + synth=dict(argstr='--synth', ), + uniform=dict(argstr='--uniform %f %f', ), + var_fwhm=dict(argstr='--var-fwhm %f', ), + vox_dump=dict(argstr='--voxdump %d %d %d', ), + weight_file=dict(xor=['weighted_ls'], ), + weight_inv=dict( + argstr='--w-inv', + xor=['weighted_ls'], + ), + weight_sqrt=dict( + argstr='--w-sqrt', + xor=['weighted_ls'], + ), + weighted_ls=dict( + argstr='--wls %s', + xor=('weight_file', 'weight_inv', 'weight_sqrt'), + ), + ) + inputs = OneSampleTTest.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OneSampleTTest_outputs(): + output_map = dict( + beta_file=dict(), + dof_file=dict(), + error_file=dict(), + error_stddev_file=dict(), + error_var_file=dict(), + estimate_file=dict(), + frame_eigenvectors=dict(), + ftest_file=dict(), + fwhm_file=dict(), + gamma_file=dict(), + gamma_var_file=dict(), + glm_dir=dict(), + mask_file=dict(), + sig_file=dict(), + singular_values=dict(), + spatial_eigenvectors=dict(), + svd_stats_file=dict(), + ) + outputs = OneSampleTTest.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Paint.py b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py new file mode 100644 index 0000000000..c452594e55 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Paint.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Paint + + +def test_Paint_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + averages=dict(argstr='-a %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_surf=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['in_surf'], + name_template='%s.avg_curv', + position=-1, + ), + subjects_dir=dict(), + template=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + template_param=dict(), + ) + inputs = Paint.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Paint_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Paint.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py new file mode 100644 index 0000000000..81ac276b2f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParcellationStats.py @@ -0,0 +1,81 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ParcellationStats + + +def test_ParcellationStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + aseg=dict(mandatory=True, ), + brainmask=dict(mandatory=True, ), + copy_inputs=dict(), 
+ cortex_label=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_annotation=dict( + argstr='-a %s', + xor=['in_label'], + ), + in_cortex=dict(argstr='-cortex %s', ), + in_label=dict( + argstr='-l %s', + xor=['in_annotatoin', 'out_color'], + ), + lh_pial=dict(mandatory=True, ), + lh_white=dict(mandatory=True, ), + mgz=dict(argstr='-mgz', ), + out_color=dict( + argstr='-c %s', + genfile=True, + xor=['in_label'], + ), + out_table=dict( + argstr='-f %s', + genfile=True, + requires=['tabular_output'], + ), + rh_pial=dict(mandatory=True, ), + rh_white=dict(mandatory=True, ), + ribbon=dict(mandatory=True, ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-3, + usedefault=True, + ), + subjects_dir=dict(), + surface=dict( + argstr='%s', + position=-1, + ), + tabular_output=dict(argstr='-b', ), + th3=dict( + argstr='-th3', + requires=['cortex_label'], + ), + thickness=dict(mandatory=True, ), + transform=dict(mandatory=True, ), + wm=dict(mandatory=True, ), + ) + inputs = ParcellationStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ParcellationStats_outputs(): + output_map = dict( + out_color=dict(), + out_table=dict(), + ) + outputs = ParcellationStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py new file mode 100644 index 0000000000..93254ddcb3 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ParseDICOMDir.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ParseDICOMDir + + +def test_ParseDICOMDir_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dicom_dir=dict( + argstr='--d %s', + mandatory=True, + ), + dicom_info_file=dict( + argstr='--o %s', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + sortbyrun=dict(argstr='--sortbyrun', ), + subjects_dir=dict(), + summarize=dict(argstr='--summarize', ), + ) + inputs = ParseDICOMDir.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ParseDICOMDir_outputs(): + output_map = dict(dicom_info_file=dict(), ) + outputs = ParseDICOMDir.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py new file mode 100644 index 0000000000..39f6a8c942 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_ReconAll.py @@ -0,0 +1,190 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ReconAll + + +def test_ReconAll_inputs(): + input_map = dict( + FLAIR_file=dict( + argstr='-FLAIR %s', + min_ver='5.3.0', + ), + T1_files=dict(argstr='-i %s...', ), + T2_file=dict( + argstr='-T2 %s', + min_ver='5.3.0', + ), + args=dict(argstr='%s', ), + big_ventricles=dict(argstr='-bigventricles', ), + 
brainstem=dict(argstr='-brainstem-structures', ), + directive=dict( + argstr='-%s', + position=0, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + expert=dict(argstr='-expert %s', ), + flags=dict(argstr='%s', ), + hemi=dict(argstr='-hemi %s', ), + hippocampal_subfields_T1=dict( + argstr='-hippocampal-subfields-T1', + min_ver='6.0.0', + ), + hippocampal_subfields_T2=dict( + argstr='-hippocampal-subfields-T2 %s %s', + min_ver='6.0.0', + ), + hires=dict( + argstr='-hires', + min_ver='6.0.0', + ), + mprage=dict(argstr='-mprage', ), + mri_aparc2aseg=dict(xor=['expert'], ), + mri_ca_label=dict(xor=['expert'], ), + mri_ca_normalize=dict(xor=['expert'], ), + mri_ca_register=dict(xor=['expert'], ), + mri_edit_wm_with_aseg=dict(xor=['expert'], ), + mri_em_register=dict(xor=['expert'], ), + mri_fill=dict(xor=['expert'], ), + mri_mask=dict(xor=['expert'], ), + mri_normalize=dict(xor=['expert'], ), + mri_pretess=dict(xor=['expert'], ), + mri_remove_neck=dict(xor=['expert'], ), + mri_segment=dict(xor=['expert'], ), + mri_segstats=dict(xor=['expert'], ), + mri_tessellate=dict(xor=['expert'], ), + mri_watershed=dict(xor=['expert'], ), + mris_anatomical_stats=dict(xor=['expert'], ), + mris_ca_label=dict(xor=['expert'], ), + mris_fix_topology=dict(xor=['expert'], ), + mris_inflate=dict(xor=['expert'], ), + mris_make_surfaces=dict(xor=['expert'], ), + mris_register=dict(xor=['expert'], ), + mris_smooth=dict(xor=['expert'], ), + mris_sphere=dict(xor=['expert'], ), + mris_surf2vol=dict(xor=['expert'], ), + mrisp_paint=dict(xor=['expert'], ), + openmp=dict(argstr='-openmp %d', ), + parallel=dict(argstr='-parallel', ), + subject_id=dict( + argstr='-subjid %s', + usedefault=True, + ), + subjects_dir=dict( + argstr='-sd %s', + genfile=True, + hash_files=False, + ), + talairach=dict(xor=['expert'], ), + use_FLAIR=dict( + argstr='-FLAIRpial', + min_ver='5.3.0', + xor=['use_T2'], + ), + use_T2=dict( + argstr='-T2pial', + min_ver='5.3.0', + xor=['use_FLAIR'], + ), + xopts=dict(argstr='-xopts-%s', ), + ) + inputs = ReconAll.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ReconAll_outputs(): + output_map = dict( + BA_stats=dict( + altkey='BA', + loc='stats', + ), + T1=dict(loc='mri', ), + annot=dict( + altkey='*annot', + loc='label', + ), + aparc_a2009s_stats=dict( + altkey='aparc.a2009s', + loc='stats', + ), + aparc_aseg=dict( + altkey='aparc*aseg', + loc='mri', + ), + aparc_stats=dict( + altkey='aparc', + loc='stats', + ), + area_pial=dict( + altkey='area.pial', + loc='surf', + ), + aseg=dict(loc='mri', ), + aseg_stats=dict( + altkey='aseg', + loc='stats', + ), + avg_curv=dict(loc='surf', ), + brain=dict(loc='mri', ), + brainmask=dict(loc='mri', ), + curv=dict(loc='surf', ), + curv_pial=dict( + altkey='curv.pial', + loc='surf', + ), + curv_stats=dict( + altkey='curv', + loc='stats', + ), + entorhinal_exvivo_stats=dict( + altkey='entorhinal_exvivo', + loc='stats', + ), + filled=dict(loc='mri', ), + graymid=dict( + altkey=['graymid', 'midthickness'], + loc='surf', + ), + inflated=dict(loc='surf', ), + jacobian_white=dict(loc='surf', ), + label=dict( + altkey='*label', + loc='label', + ), + norm=dict(loc='mri', ), + nu=dict(loc='mri', ), + orig=dict(loc='mri', ), + pial=dict(loc='surf', ), + rawavg=dict(loc='mri', ), + ribbon=dict( + altkey='*ribbon', + loc='mri', + ), + smoothwm=dict(loc='surf', ), + sphere=dict(loc='surf', ), + sphere_reg=dict( + 
altkey='sphere.reg', + loc='surf', + ), + subject_id=dict(), + subjects_dir=dict(), + sulc=dict(loc='surf', ), + thickness=dict(loc='surf', ), + volume=dict(loc='surf', ), + white=dict(loc='surf', ), + wm=dict(loc='mri', ), + wmparc=dict(loc='mri', ), + wmparc_stats=dict( + altkey='wmparc', + loc='stats', + ), + ) + outputs = ReconAll.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Register.py b/nipype/interfaces/freesurfer/tests/test_auto_Register.py new file mode 100644 index 0000000000..b4eff5133c --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Register.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import Register + + +def test_Register_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + curv=dict( + argstr='-curv', + requires=['in_smoothwm'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_smoothwm=dict(copyfile=True, ), + in_sulc=dict( + copyfile=True, + mandatory=True, + ), + in_surf=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-3, + ), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + subjects_dir=dict(), + target=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + ) + inputs = Register.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Register_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Register.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py new file mode 100644 index 0000000000..0bc88bf935 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RegisterAVItoTalairach.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import RegisterAVItoTalairach + + +def test_RegisterAVItoTalairach_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + out_file=dict( + argstr='%s', + position=3, + usedefault=True, + ), + subjects_dir=dict(), + target=dict( + argstr='%s', + mandatory=True, + position=1, + ), + vox2vox=dict( + argstr='%s', + mandatory=True, + position=2, + ), + ) + inputs = RegisterAVItoTalairach.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegisterAVItoTalairach_outputs(): + output_map = dict( + log_file=dict(usedefault=True, ), + out_file=dict(), + ) + outputs = RegisterAVItoTalairach.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py new file mode 100644 index 
0000000000..859aff0820 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RelabelHypointensities.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import RelabelHypointensities + + +def test_RelabelHypointensities_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + aseg=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + lh_white=dict( + copyfile=True, + mandatory=True, + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['aseg'], + name_template='%s.hypos.mgz', + position=-1, + ), + rh_white=dict( + copyfile=True, + mandatory=True, + ), + subjects_dir=dict(), + surf_directory=dict( + argstr='%s', + position=-2, + usedefault=True, + ), + ) + inputs = RelabelHypointensities.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RelabelHypointensities_outputs(): + output_map = dict(out_file=dict(argstr='%s', ), ) + outputs = RelabelHypointensities.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py new file mode 100644 index 0000000000..69e1d453a4 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveIntersection.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import RemoveIntersection + + +def test_RemoveIntersection_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s', + position=-1, + ), + subjects_dir=dict(), + ) + inputs = RemoveIntersection.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RemoveIntersection_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RemoveIntersection.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py new file mode 100644 index 0000000000..9e095ddba0 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RemoveNeck.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import RemoveNeck + + +def test_RemoveNeck_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_noneck', + position=-1, + ), + radius=dict(argstr='-radius %d', ), + subjects_dir=dict(), + template=dict( + argstr='%s', + 
mandatory=True, + position=-2, + ), + transform=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + ) + inputs = RemoveNeck.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RemoveNeck_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RemoveNeck.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Resample.py b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py new file mode 100644 index 0000000000..ecb939d89a --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Resample.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Resample + + +def test_Resample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=-2, + ), + resampled_file=dict( + argstr='-o %s', + genfile=True, + position=-1, + ), + subjects_dir=dict(), + voxel_size=dict( + argstr='-vs %.2f %.2f %.2f', + mandatory=True, + ), + ) + inputs = Resample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Resample_outputs(): + output_map = dict(resampled_file=dict(), ) + outputs = Resample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py new file mode 100644 index 0000000000..af9020c278 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustRegister.py @@ -0,0 +1,81 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import RobustRegister + + +def test_RobustRegister_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + auto_sens=dict( + argstr='--satit', + mandatory=True, + xor=['outlier_sens'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + est_int_scale=dict(argstr='--iscale', ), + force_double=dict(argstr='--doubleprec', ), + force_float=dict(argstr='--floattype', ), + half_source=dict(argstr='--halfmov %s', ), + half_source_xfm=dict(argstr='--halfmovlta %s', ), + half_targ=dict(argstr='--halfdst %s', ), + half_targ_xfm=dict(argstr='--halfdstlta %s', ), + half_weights=dict(argstr='--halfweights %s', ), + high_iterations=dict(argstr='--highit %d', ), + in_xfm_file=dict(argstr='--transform', ), + init_orient=dict(argstr='--initorient', ), + iteration_thresh=dict(argstr='--epsit %.3f', ), + least_squares=dict(argstr='--leastsquares', ), + mask_source=dict(argstr='--maskmov %s', ), + mask_target=dict(argstr='--maskdst %s', ), + max_iterations=dict(argstr='--maxit %d', ), + no_init=dict(argstr='--noinit', ), + no_multi=dict(argstr='--nomulti', ), + out_reg_file=dict( + argstr='--lta %s', + usedefault=True, + ), + outlier_limit=dict(argstr='--wlimit %.3f', ), + outlier_sens=dict( + argstr='--sat %.4f', + mandatory=True, + xor=['auto_sens'], + ), + registered_file=dict(argstr='--warp %s', ), + source_file=dict( 
+ argstr='--mov %s', + mandatory=True, + ), + subjects_dir=dict(), + subsample_thresh=dict(argstr='--subsample %d', ), + target_file=dict( + argstr='--dst %s', + mandatory=True, + ), + trans_only=dict(argstr='--transonly', ), + weights_file=dict(argstr='--weights %s', ), + write_vo2vox=dict(argstr='--vox2vox', ), + ) + inputs = RobustRegister.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RobustRegister_outputs(): + output_map = dict( + half_source=dict(), + half_source_xfm=dict(), + half_targ=dict(), + half_targ_xfm=dict(), + half_weights=dict(), + out_reg_file=dict(), + registered_file=dict(), + weights_file=dict(), + ) + outputs = RobustRegister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py new file mode 100644 index 0000000000..e845a764e4 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_RobustTemplate.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..longitudinal import RobustTemplate + + +def test_RobustTemplate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + auto_detect_sensitivity=dict( + argstr='--satit', + mandatory=True, + xor=['outlier_sensitivity'], + ), + average_metric=dict(argstr='--average %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_timepoint=dict(argstr='--fixtp', ), + in_files=dict( + argstr='--mov %s', + mandatory=True, + ), + in_intensity_scales=dict(argstr='--iscalein %s', ), + initial_timepoint=dict(argstr='--inittp %d', ), + initial_transforms=dict(argstr='--ixforms %s', ), + intensity_scaling=dict(argstr='--iscale', ), + no_iteration=dict(argstr='--noit', ), + num_threads=dict(), + out_file=dict( + argstr='--template %s', + mandatory=True, + usedefault=True, + ), + outlier_sensitivity=dict( + argstr='--sat %.4f', + mandatory=True, + xor=['auto_detect_sensitivity'], + ), + scaled_intensity_outputs=dict(argstr='--iscaleout %s', ), + subjects_dir=dict(), + subsample_threshold=dict(argstr='--subsample %d', ), + transform_outputs=dict(argstr='--lta %s', ), + ) + inputs = RobustTemplate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RobustTemplate_outputs(): + output_map = dict( + out_file=dict(), + scaled_intensity_outputs=dict(), + transform_outputs=dict(), + ) + outputs = RobustTemplate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py new file mode 100644 index 0000000000..0926eebba2 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SampleToSurface.py @@ -0,0 +1,116 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SampleToSurface + + +def test_SampleToSurface_inputs(): + input_map = dict( + apply_rot=dict(argstr='--rot %.3f %.3f %.3f', ), + apply_trans=dict(argstr='--trans %.3f 
%.3f %.3f', ), + args=dict(argstr='%s', ), + cortex_mask=dict( + argstr='--cortex', + xor=['mask_label'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_tk_reg=dict(argstr='--fixtkreg', ), + float2int_method=dict(argstr='--float2int %s', ), + frame=dict(argstr='--frame %d', ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + hits_file=dict(argstr='--srchit %s', ), + hits_type=dict(argstr='--srchit_type', ), + ico_order=dict( + argstr='--icoorder %d', + requires=['target_subject'], + ), + interp_method=dict(argstr='--interp %s', ), + mask_label=dict( + argstr='--mask %s', + xor=['cortex_mask'], + ), + mni152reg=dict( + argstr='--mni152reg', + mandatory=True, + xor=['reg_file', 'reg_header', 'mni152reg'], + ), + no_reshape=dict( + argstr='--noreshape', + xor=['reshape'], + ), + out_file=dict( + argstr='--o %s', + genfile=True, + ), + out_type=dict(argstr='--out_type %s', ), + override_reg_subj=dict( + argstr='--srcsubject %s', + requires=['subject_id'], + ), + projection_stem=dict( + mandatory=True, + xor=['sampling_method'], + ), + reference_file=dict(argstr='--ref %s', ), + reg_file=dict( + argstr='--reg %s', + mandatory=True, + xor=['reg_file', 'reg_header', 'mni152reg'], + ), + reg_header=dict( + argstr='--regheader %s', + mandatory=True, + requires=['subject_id'], + xor=['reg_file', 'reg_header', 'mni152reg'], + ), + reshape=dict( + argstr='--reshape', + xor=['no_reshape'], + ), + reshape_slices=dict(argstr='--rf %d', ), + sampling_method=dict( + argstr='%s', + mandatory=True, + requires=['sampling_range', 'sampling_units'], + xor=['projection_stem'], + ), + sampling_range=dict(), + sampling_units=dict(), + scale_input=dict(argstr='--scale %.3f', ), + smooth_surf=dict(argstr='--surf-fwhm %.3f', ), + smooth_vol=dict(argstr='--fwhm %.3f', ), + source_file=dict( + argstr='--mov %s', + mandatory=True, + ), + subject_id=dict(), + subjects_dir=dict(), + surf_reg=dict( + argstr='--surfreg %s', + requires=['target_subject'], + ), + surface=dict(argstr='--surf %s', ), + target_subject=dict(argstr='--trgsubject %s', ), + vox_file=dict(argstr='--nvox %s', ), + ) + inputs = SampleToSurface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SampleToSurface_outputs(): + output_map = dict( + hits_file=dict(), + out_file=dict(), + vox_file=dict(), + ) + outputs = SampleToSurface.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py new file mode 100644 index 0000000000..8feb61d9d8 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStats.py @@ -0,0 +1,99 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import SegStats + + +def test_SegStats_inputs(): + input_map = dict( + annot=dict( + argstr='--annot %s %s %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + args=dict(argstr='%s', ), + avgwf_file=dict(argstr='--avgwfvol %s', ), + avgwf_txt_file=dict(argstr='--avgwf %s', ), + brain_vol=dict(argstr='--%s', ), + brainmask_file=dict(argstr='--brainmask %s', ), + calc_power=dict(argstr='--%s', ), + calc_snr=dict(argstr='--snr', ), + color_table_file=dict( + argstr='--ctab %s', + xor=('color_table_file', 
'default_color_table', 'gca_color_table'), + ), + cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), + default_color_table=dict( + argstr='--ctab-default', + xor=('color_table_file', 'default_color_table', 'gca_color_table'), + ), + empty=dict(argstr='--empty', ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict(argstr='--etiv', ), + etiv_only=dict(), + euler=dict(argstr='--euler', ), + exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), + exclude_id=dict(argstr='--excludeid %d', ), + frame=dict(argstr='--frame %d', ), + gca_color_table=dict( + argstr='--ctab-gca %s', + xor=('color_table_file', 'default_color_table', 'gca_color_table'), + ), + in_file=dict(argstr='--i %s', ), + in_intensity=dict(argstr='--in %s --in-intensity-name %s', ), + intensity_units=dict( + argstr='--in-intensity-units %s', + requires=['in_intensity'], + ), + mask_erode=dict(argstr='--maskerode %d', ), + mask_file=dict(argstr='--mask %s', ), + mask_frame=dict(requires=['mask_file'], ), + mask_invert=dict(argstr='--maskinvert', ), + mask_sign=dict(), + mask_thresh=dict(argstr='--maskthresh %f', ), + multiply=dict(argstr='--mul %f', ), + non_empty_only=dict(argstr='--nonempty', ), + partial_volume_file=dict(argstr='--pv %s', ), + segment_id=dict(argstr='--id %s...', ), + segmentation_file=dict( + argstr='--seg %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + sf_avg_file=dict(argstr='--sfavg %s', ), + subcort_gm=dict(argstr='--subcortgray', ), + subjects_dir=dict(), + summary_file=dict( + argstr='--sum %s', + genfile=True, + position=-1, + ), + supratent=dict(argstr='--supratent', ), + surf_label=dict( + argstr='--slabel %s %s %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + total_gray=dict(argstr='--totalgray', ), + vox=dict(argstr='--vox %s', ), + wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), + ) + inputs = SegStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SegStats_outputs(): + output_map = dict( + avgwf_file=dict(), + avgwf_txt_file=dict(), + sf_avg_file=dict(), + summary_file=dict(), + ) + outputs = SegStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py new file mode 100644 index 0000000000..e65dc82e3b --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegStatsReconAll.py @@ -0,0 +1,115 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import SegStatsReconAll + + +def test_SegStatsReconAll_inputs(): + input_map = dict( + annot=dict( + argstr='--annot %s %s %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + args=dict(argstr='%s', ), + aseg=dict(), + avgwf_file=dict(argstr='--avgwfvol %s', ), + avgwf_txt_file=dict(argstr='--avgwf %s', ), + brain_vol=dict(argstr='--%s', ), + brainmask_file=dict(argstr='--brainmask %s', ), + calc_power=dict(argstr='--%s', ), + calc_snr=dict(argstr='--snr', ), + color_table_file=dict( + argstr='--ctab %s', + xor=('color_table_file', 'default_color_table', 'gca_color_table'), + ), + copy_inputs=dict(), + cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), + default_color_table=dict( + 
argstr='--ctab-default', + xor=('color_table_file', 'default_color_table', 'gca_color_table'), + ), + empty=dict(argstr='--empty', ), + environ=dict( + nohash=True, + usedefault=True, + ), + etiv=dict(argstr='--etiv', ), + etiv_only=dict(), + euler=dict(argstr='--euler', ), + exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), + exclude_id=dict(argstr='--excludeid %d', ), + frame=dict(argstr='--frame %d', ), + gca_color_table=dict( + argstr='--ctab-gca %s', + xor=('color_table_file', 'default_color_table', 'gca_color_table'), + ), + in_file=dict(argstr='--i %s', ), + in_intensity=dict(argstr='--in %s --in-intensity-name %s', ), + intensity_units=dict( + argstr='--in-intensity-units %s', + requires=['in_intensity'], + ), + lh_orig_nofix=dict(mandatory=True, ), + lh_pial=dict(mandatory=True, ), + lh_white=dict(mandatory=True, ), + mask_erode=dict(argstr='--maskerode %d', ), + mask_file=dict(argstr='--mask %s', ), + mask_frame=dict(requires=['mask_file'], ), + mask_invert=dict(argstr='--maskinvert', ), + mask_sign=dict(), + mask_thresh=dict(argstr='--maskthresh %f', ), + multiply=dict(argstr='--mul %f', ), + non_empty_only=dict(argstr='--nonempty', ), + partial_volume_file=dict(argstr='--pv %s', ), + presurf_seg=dict(), + rh_orig_nofix=dict(mandatory=True, ), + rh_pial=dict(mandatory=True, ), + rh_white=dict(mandatory=True, ), + ribbon=dict(mandatory=True, ), + segment_id=dict(argstr='--id %s...', ), + segmentation_file=dict( + argstr='--seg %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + sf_avg_file=dict(argstr='--sfavg %s', ), + subcort_gm=dict(argstr='--subcortgray', ), + subject_id=dict( + argstr='--subject %s', + mandatory=True, + usedefault=True, + ), + subjects_dir=dict(), + summary_file=dict( + argstr='--sum %s', + genfile=True, + position=-1, + ), + supratent=dict(argstr='--supratent', ), + surf_label=dict( + argstr='--slabel %s %s %s', + mandatory=True, + xor=('segmentation_file', 'annot', 'surf_label'), + ), + total_gray=dict(argstr='--totalgray', ), + transform=dict(mandatory=True, ), + vox=dict(argstr='--vox %s', ), + wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), + ) + inputs = SegStatsReconAll.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SegStatsReconAll_outputs(): + output_map = dict( + avgwf_file=dict(), + avgwf_txt_file=dict(), + sf_avg_file=dict(), + summary_file=dict(), + ) + outputs = SegStatsReconAll.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py new file mode 100644 index 0000000000..e6b5fb2679 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentCC.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SegmentCC + + +def test_SegmentCC_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-aseg %s', + mandatory=True, + ), + in_norm=dict(mandatory=True, ), + out_file=dict( + argstr='-o %s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s.auto.mgz', + ), + out_rotation=dict( + argstr='-lta %s', + 
mandatory=True, + ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + subjects_dir=dict(), + ) + inputs = SegmentCC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SegmentCC_outputs(): + output_map = dict( + out_file=dict(), + out_rotation=dict(), + ) + outputs = SegmentCC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py new file mode 100644 index 0000000000..aa742e8fea --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SegmentWM.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SegmentWM + + +def test_SegmentWM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + subjects_dir=dict(), + ) + inputs = SegmentWM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SegmentWM_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SegmentWM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py new file mode 100644 index 0000000000..fe4581dee0 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Smooth.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Smooth + + +def test_Smooth_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--i %s', + mandatory=True, + ), + num_iters=dict( + argstr='--niters %d', + mandatory=True, + xor=['surface_fwhm'], + ), + proj_frac=dict( + argstr='--projfrac %s', + xor=['proj_frac_avg'], + ), + proj_frac_avg=dict( + argstr='--projfrac-avg %.2f %.2f %.2f', + xor=['proj_frac'], + ), + reg_file=dict( + argstr='--reg %s', + mandatory=True, + ), + smoothed_file=dict( + argstr='--o %s', + genfile=True, + ), + subjects_dir=dict(), + surface_fwhm=dict( + argstr='--fwhm %f', + mandatory=True, + requires=['reg_file'], + xor=['num_iters'], + ), + vol_fwhm=dict(argstr='--vol-fwhm %f', ), + ) + inputs = Smooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Smooth_outputs(): + output_map = dict(smoothed_file=dict(), ) + outputs = Smooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py new file mode 100644 index 
0000000000..8ce4dce075 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SmoothTessellation.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SmoothTessellation + + +def test_SmoothTessellation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + curvature_averaging_iterations=dict(argstr='-a %d', ), + disable_estimates=dict(argstr='-nw', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussian_curvature_norm_steps=dict(argstr='%d ', ), + gaussian_curvature_smoothing_steps=dict(argstr='%d', ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + normalize_area=dict(argstr='-area', ), + out_area_file=dict(argstr='-b %s', ), + out_curvature_file=dict(argstr='-c %s', ), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + seed=dict(argstr='-seed %d', ), + smoothing_iterations=dict(argstr='-n %d', ), + snapshot_writing_iterations=dict(argstr='-w %d', ), + subjects_dir=dict(), + use_gaussian_curvature_smoothing=dict(argstr='-g', ), + use_momentum=dict(argstr='-m', ), + ) + inputs = SmoothTessellation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SmoothTessellation_outputs(): + output_map = dict(surface=dict(), ) + outputs = SmoothTessellation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py new file mode 100644 index 0000000000..461398e6a8 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Sphere.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Sphere + + +def test_Sphere_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + position=-2, + ), + in_smoothwm=dict(copyfile=True, ), + magic=dict(argstr='-q', ), + num_threads=dict(), + out_file=dict( + argstr='%s', + hash_files=False, + name_source=['in_file'], + name_template='%s.sphere', + position=-1, + ), + seed=dict(argstr='-seed %d', ), + subjects_dir=dict(), + ) + inputs = Sphere.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Sphere_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Sphere.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py new file mode 100644 index 0000000000..efdc032787 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SphericalAverage.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import SphericalAverage + + +def test_SphericalAverage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
erode=dict(argstr='-erode %d', ), + fname=dict( + argstr='%s', + mandatory=True, + position=-5, + ), + hemisphere=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + in_average=dict( + argstr='%s', + genfile=True, + position=-2, + ), + in_orig=dict(argstr='-orig %s', ), + in_surf=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + subject_id=dict( + argstr='-o %s', + mandatory=True, + ), + subjects_dir=dict(), + threshold=dict(argstr='-t %.1f', ), + which=dict( + argstr='%s', + mandatory=True, + position=-6, + ), + ) + inputs = SphericalAverage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SphericalAverage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SphericalAverage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py new file mode 100644 index 0000000000..ca3f96c42b --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Surface2VolTransform.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Surface2VolTransform + + +def test_Surface2VolTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + mkmask=dict( + argstr='--mkmask', + xor=['source_file'], + ), + projfrac=dict(argstr='--projfrac %s', ), + reg_file=dict( + argstr='--volreg %s', + mandatory=True, + xor=['subject_id'], + ), + source_file=dict( + argstr='--surfval %s', + copyfile=False, + mandatory=True, + xor=['mkmask'], + ), + subject_id=dict( + argstr='--identity %s', + xor=['reg_file'], + ), + subjects_dir=dict(argstr='--sd %s', ), + surf_name=dict(argstr='--surf %s', ), + template_file=dict(argstr='--template %s', ), + transformed_file=dict( + argstr='--outvol %s', + hash_files=False, + name_source=['source_file'], + name_template='%s_asVol.nii', + ), + vertexvol_file=dict( + argstr='--vtxvol %s', + hash_files=False, + name_source=['source_file'], + name_template='%s_asVol_vertex.nii', + ), + ) + inputs = Surface2VolTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Surface2VolTransform_outputs(): + output_map = dict( + transformed_file=dict(), + vertexvol_file=dict(), + ) + outputs = Surface2VolTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py new file mode 100644 index 0000000000..84bef6ed7a --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSmooth.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SurfaceSmooth + + +def test_SurfaceSmooth_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + 
cortex=dict( + argstr='--cortex', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr='--fwhm %.4f', + xor=['smooth_iters'], + ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + in_file=dict( + argstr='--sval %s', + mandatory=True, + ), + out_file=dict( + argstr='--tval %s', + genfile=True, + ), + reshape=dict(argstr='--reshape', ), + smooth_iters=dict( + argstr='--smooth %d', + xor=['fwhm'], + ), + subject_id=dict( + argstr='--s %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + inputs = SurfaceSmooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SurfaceSmooth_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SurfaceSmooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py new file mode 100644 index 0000000000..a413d410b1 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceSnapshots.py @@ -0,0 +1,102 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SurfaceSnapshots + + +def test_SurfaceSnapshots_inputs(): + input_map = dict( + annot_file=dict( + argstr='-annotation %s', + xor=['annot_name'], + ), + annot_name=dict( + argstr='-annotation %s', + xor=['annot_file'], + ), + args=dict(argstr='%s', ), + colortable=dict(argstr='-colortable %s', ), + demean_overlay=dict(argstr='-zm', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr='%s', + mandatory=True, + position=2, + ), + identity_reg=dict( + argstr='-overlay-reg-identity', + xor=['overlay_reg', 'identity_reg', 'mni152_reg'], + ), + invert_overlay=dict(argstr='-invphaseflag 1', ), + label_file=dict( + argstr='-label %s', + xor=['label_name'], + ), + label_name=dict( + argstr='-label %s', + xor=['label_file'], + ), + label_outline=dict(argstr='-label-outline', ), + label_under=dict(argstr='-labels-under', ), + mni152_reg=dict( + argstr='-mni152reg', + xor=['overlay_reg', 'identity_reg', 'mni152_reg'], + ), + orig_suffix=dict(argstr='-orig %s', ), + overlay=dict( + argstr='-overlay %s', + requires=['overlay_range'], + ), + overlay_range=dict(argstr='%s', ), + overlay_range_offset=dict(argstr='-foffset %.3f', ), + overlay_reg=dict( + argstr='-overlay-reg %s', + xor=['overlay_reg', 'identity_reg', 'mni152_reg'], + ), + patch_file=dict(argstr='-patch %s', ), + reverse_overlay=dict(argstr='-revphaseflag 1', ), + screenshot_stem=dict(), + show_color_scale=dict(argstr='-colscalebarflag 1', ), + show_color_text=dict(argstr='-colscaletext 1', ), + show_curv=dict( + argstr='-curv', + xor=['show_gray_curv'], + ), + show_gray_curv=dict( + argstr='-gray', + xor=['show_curv'], + ), + six_images=dict(), + sphere_suffix=dict(argstr='-sphere %s', ), + stem_template_args=dict(requires=['screenshot_stem'], ), + subject_id=dict( + argstr='%s', + mandatory=True, + position=1, + ), + subjects_dir=dict(), + surface=dict( + argstr='%s', + mandatory=True, + position=3, + ), + tcl_script=dict( + argstr='%s', + genfile=True, + ), + truncate_overlay=dict(argstr='-truncphaseflag 1', ), + ) + inputs = SurfaceSnapshots.input_spec() + + for key, metadata in list(input_map.items()): + for 
metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SurfaceSnapshots_outputs(): + output_map = dict(snapshots=dict(), ) + outputs = SurfaceSnapshots.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py new file mode 100644 index 0000000000..0546a275dc --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SurfaceTransform.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SurfaceTransform + + +def test_SurfaceTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hemi=dict( + argstr='--hemi %s', + mandatory=True, + ), + out_file=dict( + argstr='--tval %s', + genfile=True, + ), + reshape=dict(argstr='--reshape', ), + reshape_factor=dict(argstr='--reshape-factor', ), + source_annot_file=dict( + argstr='--sval-annot %s', + mandatory=True, + xor=['source_file'], + ), + source_file=dict( + argstr='--sval %s', + mandatory=True, + xor=['source_annot_file'], + ), + source_subject=dict( + argstr='--srcsubject %s', + mandatory=True, + ), + source_type=dict( + argstr='--sfmt %s', + requires=['source_file'], + ), + subjects_dir=dict(), + target_ico_order=dict(argstr='--trgicoorder %d', ), + target_subject=dict( + argstr='--trgsubject %s', + mandatory=True, + ), + target_type=dict(argstr='--tfmt %s', ), + ) + inputs = SurfaceTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SurfaceTransform_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SurfaceTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py new file mode 100644 index 0000000000..ea121d877e --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_SynthesizeFLASH.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SynthesizeFLASH + + +def test_SynthesizeFLASH_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixed_weighting=dict( + argstr='-w', + position=1, + ), + flip_angle=dict( + argstr='%.2f', + mandatory=True, + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + ), + pd_image=dict( + argstr='%s', + mandatory=True, + position=6, + ), + subjects_dir=dict(), + t1_image=dict( + argstr='%s', + mandatory=True, + position=5, + ), + te=dict( + argstr='%.3f', + mandatory=True, + position=4, + ), + tr=dict( + argstr='%.2f', + mandatory=True, + position=2, + ), + ) + inputs = SynthesizeFLASH.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SynthesizeFLASH_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SynthesizeFLASH.output_spec() + + for key, 
metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py new file mode 100644 index 0000000000..8ce925fcc7 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachAVI.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TalairachAVI + + +def test_TalairachAVI_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atlas=dict(argstr='--atlas %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--i %s', + mandatory=True, + ), + out_file=dict( + argstr='--xfm %s', + mandatory=True, + ), + subjects_dir=dict(), + ) + inputs = TalairachAVI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TalairachAVI_outputs(): + output_map = dict( + out_file=dict(), + out_log=dict(), + out_txt=dict(), + ) + outputs = TalairachAVI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py new file mode 100644 index 0000000000..d07e572365 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_TalairachQC.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TalairachQC + + +def test_TalairachQC_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + subjects_dir=dict(), + ) + inputs = TalairachQC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TalairachQC_outputs(): + output_map = dict(log_file=dict(usedefault=True, ), ) + outputs = TalairachQC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py new file mode 100644 index 0000000000..17f8e53a1f --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_Tkregister2.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Tkregister2 + + +def test_Tkregister2_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl_in_matrix=dict(argstr='--fsl %s', ), + fsl_out=dict(argstr='--fslregout %s', ), + fstal=dict( + argstr='--fstal', + xor=['target_image', 'moving_image', 'reg_file'], + ), + fstarg=dict( + argstr='--fstarg', + xor=['target_image'], + ), + invert_lta_in=dict(requires=['lta_in'], ), + invert_lta_out=dict( + argstr='--ltaout-inv', + requires=['lta_in'], + ), + lta_in=dict(argstr='--lta %s', ), + lta_out=dict(argstr='--ltaout %s', ), + moving_image=dict( + argstr='--mov %s', + mandatory=True, 
+ ), + movscale=dict(argstr='--movscale %f', ), + noedit=dict( + argstr='--noedit', + usedefault=True, + ), + reg_file=dict( + argstr='--reg %s', + mandatory=True, + usedefault=True, + ), + reg_header=dict(argstr='--regheader', ), + subject_id=dict(argstr='--s %s', ), + subjects_dir=dict(), + target_image=dict( + argstr='--targ %s', + xor=['fstarg'], + ), + xfm=dict(argstr='--xfm %s', ), + ) + inputs = Tkregister2.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tkregister2_outputs(): + output_map = dict( + fsl_file=dict(), + lta_file=dict(), + reg_file=dict(), + ) + outputs = Tkregister2.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py new file mode 100644 index 0000000000..9427e60940 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_UnpackSDICOMDir.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import UnpackSDICOMDir + + +def test_UnpackSDICOMDir_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + config=dict( + argstr='-cfg %s', + mandatory=True, + xor=('run_info', 'config', 'seq_config'), + ), + dir_structure=dict(argstr='-%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + log_file=dict(argstr='-log %s', ), + no_info_dump=dict(argstr='-noinfodump', ), + no_unpack_err=dict(argstr='-no-unpackerr', ), + output_dir=dict(argstr='-targ %s', ), + run_info=dict( + argstr='-run %d %s %s %s', + mandatory=True, + xor=('run_info', 'config', 'seq_config'), + ), + scan_only=dict(argstr='-scanonly %s', ), + seq_config=dict( + argstr='-seqcfg %s', + mandatory=True, + xor=('run_info', 'config', 'seq_config'), + ), + source_dir=dict( + argstr='-src %s', + mandatory=True, + ), + spm_zeropad=dict(argstr='-nspmzeropad %d', ), + subjects_dir=dict(), + ) + inputs = UnpackSDICOMDir.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py new file mode 100644 index 0000000000..3e898a81f7 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_VolumeMask.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import VolumeMask + + +def test_VolumeMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + aseg=dict(xor=['in_aseg'], ), + copy_inputs=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + in_aseg=dict( + argstr='--aseg_name %s', + xor=['aseg'], + ), + left_ribbonlabel=dict( + argstr='--label_left_ribbon %d', + mandatory=True, + ), + left_whitelabel=dict( + argstr='--label_left_white %d', + mandatory=True, + ), + lh_pial=dict(mandatory=True, ), + lh_white=dict(mandatory=True, ), + rh_pial=dict(mandatory=True, ), + rh_white=dict(mandatory=True, ), + right_ribbonlabel=dict( + argstr='--label_right_ribbon %d', + mandatory=True, + ), + right_whitelabel=dict( + argstr='--label_right_white %d', + mandatory=True, + ), + save_ribbon=dict(argstr='--save_ribbon', 
), + subject_id=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + subjects_dir=dict(), + ) + inputs = VolumeMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VolumeMask_outputs(): + output_map = dict( + lh_ribbon=dict(), + out_ribbon=dict(), + rh_ribbon=dict(), + ) + outputs = VolumeMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py new file mode 100644 index 0000000000..3586e7d234 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_auto_WatershedSkullStrip.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import WatershedSkullStrip + + +def test_WatershedSkullStrip_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brain_atlas=dict( + argstr='-brain_atlas %s', + position=-4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + subjects_dir=dict(), + t1=dict(argstr='-T1', ), + transform=dict( + argstr='%s', + position=-3, + ), + ) + inputs = WatershedSkullStrip.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WatershedSkullStrip_outputs(): + output_map = dict(out_file=dict(), ) + outputs = WatershedSkullStrip.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/freesurfer/tests/test_model.py b/nipype/interfaces/freesurfer/tests/test_model.py new file mode 100644 index 0000000000..cd8d129690 --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_model.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os +import numpy as np +import nibabel as nb + +import pytest + +from nipype.utils import NUMPY_MMAP +from nipype.interfaces.freesurfer import model, no_freesurfer +import nipype.pipeline.engine as pe + + +@pytest.mark.skipif(no_freesurfer(), reason="freesurfer is not installed") +def test_concatenate(tmpdir): + tmpdir.chdir() + + in1 = tmpdir.join('cont1.nii').strpath + in2 = tmpdir.join('cont2.nii').strpath + out = 'bar.nii' + + data1 = np.zeros((3, 3, 3, 1), dtype=np.float32) + data2 = np.ones((3, 3, 3, 5), dtype=np.float32) + out_data = np.concatenate((data1, data2), axis=3) + mean_data = np.mean(out_data, axis=3) + + nb.Nifti1Image(data1, affine=np.eye(4)).to_filename(in1) + nb.Nifti1Image(data2, affine=np.eye(4)).to_filename(in2) + + # Test default behavior + res = model.Concatenate(in_files=[in1, in2]).run() + assert res.outputs.concatenated_file == tmpdir.join( + 'concat_output.nii.gz').strpath + assert np.allclose(nb.load('concat_output.nii.gz').get_data(), out_data) + + # Test specified concatenated_file + res = model.Concatenate(in_files=[in1, in2], concatenated_file=out).run() + assert 
res.outputs.concatenated_file == tmpdir.join(out).strpath + assert np.allclose(nb.load(out, mmap=NUMPY_MMAP).get_data(), out_data) + + # Test in workflow + wf = pe.Workflow('test_concatenate', base_dir=tmpdir.strpath) + concat = pe.Node( + model.Concatenate(in_files=[in1, in2], concatenated_file=out), + name='concat') + wf.add_nodes([concat]) + wf.run() + assert np.allclose( + nb.load(tmpdir.join('test_concatenate', 'concat', + out).strpath).get_data(), out_data) + + # Test a simple statistic + res = model.Concatenate( + in_files=[in1, in2], concatenated_file=out, stats='mean').run() + assert np.allclose(nb.load(out, mmap=NUMPY_MMAP).get_data(), mean_data) diff --git a/nipype/interfaces/freesurfer/tests/test_preprocess.py b/nipype/interfaces/freesurfer/tests/test_preprocess.py new file mode 100644 index 0000000000..f9fc09515a --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_preprocess.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from builtins import str +import os + +import pytest +from nipype.testing.fixtures import create_files_in_directory + +from nipype.interfaces import freesurfer +from nipype.interfaces.freesurfer import Info +from nipype import LooseVersion + + +@pytest.mark.skipif( + freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_robustregister(create_files_in_directory): + filelist, outdir = create_files_in_directory + + reg = freesurfer.RobustRegister() + cwd = os.getcwd() + + # make sure command gets called + assert reg.cmd == 'mri_robust_register' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + reg.run() + + # .inputs based parameters setting + reg.inputs.source_file = filelist[0] + reg.inputs.target_file = filelist[1] + reg.inputs.auto_sens = True + assert reg.cmdline == ('mri_robust_register --satit --lta ' + '%s/%s_robustreg.lta --mov %s --dst %s' % + (cwd, filelist[0][:-4], filelist[0], filelist[1])) + + # constructor based parameter setting + reg2 = freesurfer.RobustRegister( + source_file=filelist[0], + target_file=filelist[1], + outlier_sens=3.0, + out_reg_file='foo.lta', + half_targ=True) + assert reg2.cmdline == ( + 'mri_robust_register --halfdst %s_halfway.nii --lta foo.lta ' + '--sat 3.0000 --mov %s --dst %s' % + (os.path.join(outdir, filelist[1][:-4]), filelist[0], filelist[1])) + + +@pytest.mark.skipif( + freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_fitmsparams(create_files_in_directory): + filelist, outdir = create_files_in_directory + + fit = freesurfer.FitMSParams() + + # make sure command gets called + assert fit.cmd == 'mri_ms_fitparms' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + fit.run() + + # .inputs based parameters setting + fit.inputs.in_files = filelist + fit.inputs.out_dir = outdir + assert fit.cmdline == 'mri_ms_fitparms %s %s %s' % (filelist[0], + filelist[1], outdir) + + # constructor based parameter setting + fit2 = freesurfer.FitMSParams( + in_files=filelist, + te_list=[1.5, 3.5], + flip_list=[20, 30], + out_dir=outdir) + assert fit2.cmdline == ( + 'mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s' % + (1.500, 20.0, filelist[0], 3.500, 30.0, filelist[1], outdir)) + + +@pytest.mark.skipif( + freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_synthesizeflash(create_files_in_directory): + filelist, outdir = create_files_in_directory + + syn = 
freesurfer.SynthesizeFLASH() + + # make sure command gets called + assert syn.cmd == 'mri_synthesize' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + syn.run() + + # .inputs based parameters setting + syn.inputs.t1_image = filelist[0] + syn.inputs.pd_image = filelist[1] + syn.inputs.flip_angle = 30 + syn.inputs.te = 4.5 + syn.inputs.tr = 20 + + assert syn.cmdline == ('mri_synthesize 20.00 30.00 4.500 %s %s %s' % + (filelist[0], filelist[1], + os.path.join(outdir, 'synth-flash_30.mgz'))) + + # constructor based parameters setting + syn2 = freesurfer.SynthesizeFLASH( + t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25) + assert syn2.cmdline == ('mri_synthesize 25.00 20.00 5.000 %s %s %s' % + (filelist[0], filelist[1], + os.path.join(outdir, 'synth-flash_20.mgz'))) + + +@pytest.mark.skipif( + freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_mandatory_outvol(create_files_in_directory): + filelist, outdir = create_files_in_directory + mni = freesurfer.MNIBiasCorrection() + + # make sure command gets called + assert mni.cmd == "mri_nu_correct.mni" + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + mni.cmdline + + # test with minimal args + mni.inputs.in_file = filelist[0] + base, ext = os.path.splitext(os.path.basename(filelist[0])) + if ext == '.gz': + base, ext2 = os.path.splitext(base) + ext = ext2 + ext + + assert mni.cmdline == ('mri_nu_correct.mni --i %s --n 4 --o %s_output%s' % + (filelist[0], base, ext)) + + # test with custom outfile + mni.inputs.out_file = 'new_corrected_file.mgz' + assert mni.cmdline == ( + 'mri_nu_correct.mni --i %s --n 4 --o new_corrected_file.mgz' % (filelist[0])) + + # constructor based tests + mni2 = freesurfer.MNIBiasCorrection( + in_file=filelist[0], out_file='bias_corrected_output', iterations=2) + assert mni2.cmdline == ( + 'mri_nu_correct.mni --i %s --n 2 --o bias_corrected_output' % + filelist[0]) + + +@pytest.mark.skipif( + freesurfer.no_freesurfer(), reason="freesurfer is not installed") +def test_bbregister(create_files_in_directory): + filelist, outdir = create_files_in_directory + bbr = freesurfer.BBRegister() + + # make sure command gets called + assert bbr.cmd == "bbregister" + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + bbr.cmdline + + bbr.inputs.subject_id = 'fsaverage' + bbr.inputs.source_file = filelist[0] + bbr.inputs.contrast_type = 't2' + + # Check that 'init' is mandatory in FS < 6, but not in 6+ + if Info.looseversion() < LooseVersion("6.0.0"): + with pytest.raises(ValueError): + bbr.cmdline + else: + bbr.cmdline + + bbr.inputs.init = 'fsl' + + base, ext = os.path.splitext(os.path.basename(filelist[0])) + if ext == '.gz': + base, _ = os.path.splitext(base) + + assert bbr.cmdline == ('bbregister --t2 --init-fsl ' + '--reg {base}_bbreg_fsaverage.dat ' + '--mov {full} --s fsaverage'.format( + full=filelist[0], base=base)) + + +def test_FSVersion(): + """Check that FSVersion is a string that can be compared with LooseVersion + """ + assert isinstance(freesurfer.preprocess.FSVersion, str) + assert LooseVersion(freesurfer.preprocess.FSVersion) >= LooseVersion("0") diff --git a/nipype/interfaces/freesurfer/tests/test_utils.py b/nipype/interfaces/freesurfer/tests/test_utils.py new file mode 100644 index 0000000000..f3ff8fd5ee --- /dev/null +++ b/nipype/interfaces/freesurfer/tests/test_utils.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; 
indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open +import os +import os.path as op +import pytest +from nipype.testing.fixtures import (create_files_in_directory_plus_dummy_file, + create_surf_file_in_directory) + +from nipype.pipeline import engine as pe +from nipype.interfaces import freesurfer as fs +from nipype.interfaces.base import TraitError +from nipype.interfaces.io import FreeSurferSource + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_sample2surf(create_files_in_directory_plus_dummy_file): + + s2s = fs.SampleToSurface() + # Test underlying command + assert s2s.cmd == 'mri_vol2surf' + + # Test mandatory args exception + with pytest.raises(ValueError): + s2s.run() + + # Create testing files + files, cwd = create_files_in_directory_plus_dummy_file + + # Test input settings + s2s.inputs.source_file = files[0] + s2s.inputs.reference_file = files[1] + s2s.inputs.hemi = "lh" + s2s.inputs.reg_file = files[2] + s2s.inputs.sampling_range = .5 + s2s.inputs.sampling_units = "frac" + s2s.inputs.sampling_method = "point" + + # Test a basic command line + assert s2s.cmdline == ( + "mri_vol2surf " + "--hemi lh --o %s --ref %s --reg reg.dat --projfrac 0.500 --mov %s" % + (os.path.join(cwd, "lh.a.mgz"), files[1], files[0])) + + # Test identity + s2sish = fs.SampleToSurface( + source_file=files[1], reference_file=files[0], hemi="rh") + assert s2s != s2sish + + # Test hits file name creation + s2s.inputs.hits_file = True + assert s2s._get_outfilename("hits_file") == os.path.join( + cwd, "lh.a_hits.mgz") + + # Test that a 2-tuple range raises an error + def set_illegal_range(): + s2s.inputs.sampling_range = (.2, .5) + + with pytest.raises(TraitError): + set_illegal_range() + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_surfsmooth(create_surf_file_in_directory): + + smooth = fs.SurfaceSmooth() + + # Test underlying command + assert smooth.cmd == "mri_surf2surf" + + # Test mandatory args exception + with pytest.raises(ValueError): + smooth.run() + + # Create testing files + surf, cwd = create_surf_file_in_directory + + # Test input settings + smooth.inputs.in_file = surf + smooth.inputs.subject_id = "fsaverage" + fwhm = 5 + smooth.inputs.fwhm = fwhm + smooth.inputs.hemi = "lh" + + # Test the command line + assert smooth.cmdline == \ + ("mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval %s --tval %s/lh.a_smooth%d.nii --s fsaverage" % + (surf, cwd, fwhm)) + + # Test identity + shmooth = fs.SurfaceSmooth( + subject_id="fsaverage", + fwhm=6, + in_file=surf, + hemi="lh", + out_file="lh.a_smooth.nii") + assert smooth != shmooth + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_surfxfm(create_surf_file_in_directory): + + xfm = fs.SurfaceTransform() + + # Test underlying command + assert xfm.cmd == "mri_surf2surf" + + # Test mandatory args exception + with pytest.raises(ValueError): + xfm.run() + + # Create testing files + surf, cwd = create_surf_file_in_directory + + # Test input settings + xfm.inputs.source_file = surf + xfm.inputs.source_subject = "my_subject" + xfm.inputs.target_subject = "fsaverage" + xfm.inputs.hemi = "lh" + + # Test the command line + assert xfm.cmdline == \ + ("mri_surf2surf --hemi lh --tval %s/lh.a.fsaverage.nii --sval %s --srcsubject my_subject --trgsubject fsaverage" % + (cwd, surf)) + + # Test identity + xfmish = 
fs.SurfaceTransform( + source_subject="fsaverage", + target_subject="my_subject", + source_file=surf, + hemi="lh") + assert xfm != xfmish + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_surfshots(create_files_in_directory_plus_dummy_file): + + fotos = fs.SurfaceSnapshots() + + # Test underlying command + assert fotos.cmd == "tksurfer" + + # Test mandatory args exception + with pytest.raises(ValueError): + fotos.run() + + # Create testing files + files, cwd = create_files_in_directory_plus_dummy_file + + # Test input settings + fotos.inputs.subject_id = "fsaverage" + fotos.inputs.hemi = "lh" + fotos.inputs.surface = "pial" + + # Test a basic command line + assert fotos.cmdline == "tksurfer fsaverage lh pial -tcl snapshots.tcl" + + # Test identity + schmotos = fs.SurfaceSnapshots( + subject_id="mysubject", hemi="rh", surface="white") + assert fotos != schmotos + + # Test that the tcl script gets written + fotos._write_tcl_script() + assert os.path.exists("snapshots.tcl") + + # Test that we can use a different tcl script + open("other.tcl", "w").close() + fotos.inputs.tcl_script = "other.tcl" + assert fotos.cmdline == "tksurfer fsaverage lh pial -tcl other.tcl" + + # Test that the interface crashes politely if graphics aren't enabled + try: + hold_display = os.environ["DISPLAY"] + del os.environ["DISPLAY"] + with pytest.raises(RuntimeError): + fotos.run() + os.environ["DISPLAY"] = hold_display + except KeyError: + pass + + +@pytest.mark.skipif(fs.no_freesurfer(), reason="freesurfer is not installed") +def test_mrisexpand(tmpdir): + fssrc = FreeSurferSource( + subjects_dir=fs.Info.subjectsdir(), subject_id='fsaverage', hemi='lh') + + fsavginfo = fssrc.run().outputs.get() + + # dt=60 to ensure very short runtime + expand_if = fs.MRIsExpand( + in_file=fsavginfo['smoothwm'], out_name='expandtmp', distance=1, dt=60) + + expand_nd = pe.Node( + fs.MRIsExpand( + in_file=fsavginfo['smoothwm'], + out_name='expandtmp', + distance=1, + dt=60), + name='expand_node') + + # Interfaces should have same command line at instantiation + orig_cmdline = 'mris_expand -T 60 {} 1 expandtmp'.format( + fsavginfo['smoothwm']) + assert expand_if.cmdline == orig_cmdline + assert expand_nd.interface.cmdline == orig_cmdline + + # Run Node interface + nd_res = expand_nd.run() + + # Commandlines differ + node_cmdline = 'mris_expand -T 60 -pial {cwd}/lh.pial {cwd}/lh.smoothwm ' \ '1 expandtmp'.format(cwd=nd_res.runtime.cwd) + assert nd_res.runtime.cmdline == node_cmdline + + # Check output + if_out_file = expand_if._list_outputs()['out_file'] + nd_out_file = nd_res.outputs.get()['out_file'] + # Same filename + assert op.basename(if_out_file) == op.basename(nd_out_file) + # Interface places output in source directory + assert op.dirname(if_out_file) == op.dirname(fsavginfo['smoothwm']) + # Node places output in working directory + assert op.dirname(nd_out_file) == nd_res.runtime.cwd diff --git a/nipype/interfaces/freesurfer/utils.py b/nipype/interfaces/freesurfer/utils.py new file mode 100644 index 0000000000..b258d41720 --- /dev/null +++ b/nipype/interfaces/freesurfer/utils.py @@ -0,0 +1,3970 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Interfaces to assorted Freesurfer utility programs. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, open + +import os +import re +import shutil + +from ...
import logging +from ...utils.filemanip import fname_presuffix, split_filename +from ..base import (TraitedSpec, File, traits, OutputMultiPath, isdefined, + CommandLine, CommandLineInputSpec) +from .base import (FSCommand, FSTraitedSpec, FSSurfaceCommand, FSScriptCommand, + FSScriptOutputSpec, FSTraitedSpecOpenMP, FSCommandOpenMP) +__docformat__ = 'restructuredtext' + +filemap = dict( + cor='cor', + mgh='mgh', + mgz='mgz', + minc='mnc', + afni='brik', + brik='brik', + bshort='bshort', + spm='img', + analyze='img', + analyze4d='img', + bfloat='bfloat', + nifti1='img', + nii='nii', + niigz='nii.gz', + gii='gii') + +filetypes = [ + 'cor', 'mgh', 'mgz', 'minc', 'analyze', 'analyze4d', 'spm', 'afni', 'brik', + 'bshort', 'bfloat', 'sdt', 'outline', 'otl', 'gdf', 'nifti1', 'nii', + 'niigz' +] +implicit_filetypes = ['gii'] + +logger = logging.getLogger('nipype.interface') + + +def copy2subjdir(cls, in_file, folder=None, basename=None, subject_id=None): + """Method to copy an input to the subjects directory""" + # check that the input is defined + if not isdefined(in_file): + return in_file + # check that subjects_dir is defined + if isdefined(cls.inputs.subjects_dir): + subjects_dir = cls.inputs.subjects_dir + else: + subjects_dir = os.getcwd() # if not use cwd + # check for subject_id + if not subject_id: + if isdefined(cls.inputs.subject_id): + subject_id = cls.inputs.subject_id + else: + subject_id = 'subject_id' # default + # check for basename + if basename is None: + basename = os.path.basename(in_file) + # check which folder to put the file in + if folder is not None: + out_dir = os.path.join(subjects_dir, subject_id, folder) + else: + out_dir = os.path.join(subjects_dir, subject_id) + # make the output folder if it does not exist + if not os.path.isdir(out_dir): + os.makedirs(out_dir) + out_file = os.path.join(out_dir, basename) + if not os.path.isfile(out_file): + shutil.copy(in_file, out_file) + return out_file + + +def createoutputdirs(outputs): + """create all output directories. 
If not created, some freesurfer interfaces fail""" + for output in list(outputs.values()): + dirname = os.path.dirname(output) + if not os.path.isdir(dirname): + os.makedirs(dirname) + + +class SampleToSurfaceInputSpec(FSTraitedSpec): + + source_file = File( + exists=True, + mandatory=True, + argstr="--mov %s", + desc="volume to sample values from") + reference_file = File( + exists=True, + argstr="--ref %s", + desc="reference volume (default is orig.mgz)") + + hemi = traits.Enum( + "lh", + "rh", + mandatory=True, + argstr="--hemi %s", + desc="target hemisphere") + surface = traits.String( + argstr="--surf %s", desc="target surface (default is white)") + + reg_xors = ["reg_file", "reg_header", "mni152reg"] + reg_file = File( + exists=True, + argstr="--reg %s", + mandatory=True, + xor=reg_xors, + desc="source-to-reference registration file") + reg_header = traits.Bool( + argstr="--regheader %s", + requires=["subject_id"], + mandatory=True, + xor=reg_xors, + desc="register based on header geometry") + mni152reg = traits.Bool( + argstr="--mni152reg", + mandatory=True, + xor=reg_xors, + desc="source volume is in MNI152 space") + + apply_rot = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr="--rot %.3f %.3f %.3f", + desc="rotation angles (in degrees) to apply to reg matrix") + apply_trans = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr="--trans %.3f %.3f %.3f", + desc="translation (in mm) to apply to reg matrix") + override_reg_subj = traits.Bool( + argstr="--srcsubject %s", + requires=["subject_id"], + desc="override the subject in the reg file header") + + sampling_method = traits.Enum( + "point", + "max", + "average", + mandatory=True, + argstr="%s", + xor=["projection_stem"], + requires=["sampling_range", "sampling_units"], + desc="how to sample -- at a point or at the max or average over a range" + ) + sampling_range = traits.Either( + traits.Float, + traits.Tuple(traits.Float, traits.Float, traits.Float), + desc="sampling range - a point or a tuple of (min, max, step)") + sampling_units = traits.Enum( + "mm", "frac", desc="sampling range type -- either 'mm' or 'frac'") + projection_stem = traits.String( + mandatory=True, + xor=["sampling_method"], + desc="stem for precomputed linear estimates and volume fractions") + + smooth_vol = traits.Float( + argstr="--fwhm %.3f", desc="smooth input volume (mm fwhm)") + smooth_surf = traits.Float( + argstr="--surf-fwhm %.3f", desc="smooth output surface (mm fwhm)") + + interp_method = traits.Enum( + "nearest", + "trilinear", + argstr="--interp %s", + desc="interpolation method") + + cortex_mask = traits.Bool( + argstr="--cortex", + xor=["mask_label"], + desc="mask the target surface with hemi.cortex.label") + mask_label = File( + exists=True, + argstr="--mask %s", + xor=["cortex_mask"], + desc="label file to mask output with") + + float2int_method = traits.Enum( + "round", + "tkregister", + argstr="--float2int %s", + desc="method to convert reg matrix values (default is round)") + fix_tk_reg = traits.Bool( + argstr="--fixtkreg", desc="make reg matrix round-compatible") + + subject_id = traits.String(desc="subject id") + target_subject = traits.String( + argstr="--trgsubject %s", + desc="sample to surface of different subject than source") + surf_reg = traits.Either( + traits.Bool, + traits.Str(), + argstr="--surfreg %s", + requires=["target_subject"], + desc="use surface registration to target subject") + ico_order = traits.Int( + argstr="--icoorder %d", + requires=["target_subject"], + 
desc="icosahedron order when target_subject is 'ico'") + + reshape = traits.Bool( + argstr="--reshape", + xor=["no_reshape"], + desc="reshape surface vector to fit in non-mgh format") + no_reshape = traits.Bool( + argstr="--noreshape", + xor=["reshape"], + desc="do not reshape surface vector (default)") + reshape_slices = traits.Int( + argstr="--rf %d", desc="number of 'slices' for reshaping") + scale_input = traits.Float( + argstr="--scale %.3f", desc="multiple all intensities by scale factor") + frame = traits.Int( + argstr="--frame %d", desc="save only one frame (0-based)") + + out_file = File( + argstr="--o %s", genfile=True, desc="surface file to write") + out_type = traits.Enum( + filetypes + implicit_filetypes, + argstr="--out_type %s", + desc="output file type") + hits_file = traits.Either( + traits.Bool, + File(exists=True), + argstr="--srchit %s", + desc="save image with number of hits at each voxel") + hits_type = traits.Enum( + filetypes, argstr="--srchit_type", desc="hits file type") + vox_file = traits.Either( + traits.Bool, + File, + argstr="--nvox %s", + desc="text file with the number of voxels intersecting the surface") + + +class SampleToSurfaceOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="surface file") + hits_file = File( + exists=True, desc="image with number of hits at each voxel") + vox_file = File( + exists=True, + desc="text file with the number of voxels intersecting the surface") + + +class SampleToSurface(FSCommand): + """Sample a volume to the cortical surface using Freesurfer's mri_vol2surf. + + You must supply a sampling method, range, and units. You can project + either a given distance (in mm) or a given fraction of the cortical + thickness at that vertex along the surface normal from the target surface, + and then set the value of that vertex to be either the value at that point + or the average or maximum value found along the projection vector. + + By default, the surface will be saved as a vector with a length equal to the + number of vertices on the target surface. This is not a problem for Freesurfer + programs, but if you intend to use the file with interfaces to another package, + you must set the ``reshape`` input to True, which will factor the surface vector + into a matrix with dimensions compatible with proper Nifti files. 
+ + Examples + -------- + + >>> import nipype.interfaces.freesurfer as fs + >>> sampler = fs.SampleToSurface(hemi="lh") + >>> sampler.inputs.source_file = "cope1.nii.gz" + >>> sampler.inputs.reg_file = "register.dat" + >>> sampler.inputs.sampling_method = "average" + >>> sampler.inputs.sampling_range = 1 + >>> sampler.inputs.sampling_units = "frac" + >>> sampler.cmdline # doctest: +ELLIPSIS + 'mri_vol2surf --hemi lh --o ...lh.cope1.mgz --reg register.dat --projfrac-avg 1.000 --mov cope1.nii.gz' + >>> res = sampler.run() # doctest: +SKIP + + """ + _cmd = "mri_vol2surf" + input_spec = SampleToSurfaceInputSpec + output_spec = SampleToSurfaceOutputSpec + + def _format_arg(self, name, spec, value): + if name == "sampling_method": + range = self.inputs.sampling_range + units = self.inputs.sampling_units + if units == "mm": + units = "dist" + if isinstance(range, tuple): + range = "%.3f %.3f %.3f" % range + else: + range = "%.3f" % range + method = dict(point="", max="-max", average="-avg")[value] + return "--proj%s%s %s" % (units, method, range) + + if name == "reg_header": + return spec.argstr % self.inputs.subject_id + if name == "override_reg_subj": + return spec.argstr % self.inputs.subject_id + if name in ["hits_file", "vox_file"]: + return spec.argstr % self._get_outfilename(name) + if name == "out_type": + if isdefined(self.inputs.out_file): + _, base, ext = split_filename(self._get_outfilename()) + if ext != filemap[value]: + if ext in filemap.values(): + raise ValueError( + "Cannot create {} file with extension " + "{}".format(value, ext)) + else: + logger.warn('Creating %s file with extension %s: %s%s', + value, ext, base, ext) + + if value in implicit_filetypes: + return "" + if name == 'surf_reg': + if value is True: + return spec.argstr % 'sphere.reg' + + return super(SampleToSurface, self)._format_arg(name, spec, value) + + def _get_outfilename(self, opt="out_file"): + outfile = getattr(self.inputs, opt) + if not isdefined(outfile) or isinstance(outfile, bool): + if isdefined(self.inputs.out_type): + if opt == "hits_file": + suffix = '_hits.' + filemap[self.inputs.out_type] + else: + suffix = '.' 
+ filemap[self.inputs.out_type] + elif opt == "hits_file": + suffix = "_hits.mgz" + else: + suffix = '.mgz' + outfile = fname_presuffix( + self.inputs.source_file, + newpath=os.getcwd(), + prefix=self.inputs.hemi + ".", + suffix=suffix, + use_ext=False) + return outfile + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self._get_outfilename()) + hitsfile = self.inputs.hits_file + if isdefined(hitsfile): + outputs["hits_file"] = hitsfile + if isinstance(hitsfile, bool): + hitsfile = self._get_outfilename("hits_file") + voxfile = self.inputs.vox_file + if isdefined(voxfile): + if isinstance(voxfile, bool): + voxfile = fname_presuffix( + self.inputs.source_file, + newpath=os.getcwd(), + prefix=self.inputs.hemi + ".", + suffix="_vox.txt", + use_ext=False) + outputs["vox_file"] = voxfile + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()[name] + return None + + +class SurfaceSmoothInputSpec(FSTraitedSpec): + + in_file = File( + mandatory=True, argstr="--sval %s", desc="source surface file") + subject_id = traits.String( + mandatory=True, argstr="--s %s", desc="subject id of surface file") + hemi = traits.Enum( + "lh", + "rh", + argstr="--hemi %s", + mandatory=True, + desc="hemisphere to operate on") + fwhm = traits.Float( + argstr="--fwhm %.4f", + xor=["smooth_iters"], + desc="effective FWHM of the smoothing process") + smooth_iters = traits.Int( + argstr="--smooth %d", + xor=["fwhm"], + desc="iterations of the smoothing process") + cortex = traits.Bool( + True, + argstr="--cortex", + usedefault=True, + desc="only smooth within $hemi.cortex.label") + reshape = traits.Bool( + argstr="--reshape", + desc="reshape surface vector to fit in non-mgh format") + out_file = File( + argstr="--tval %s", genfile=True, desc="surface file to write") + + +class SurfaceSmoothOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="smoothed surface file") + + +class SurfaceSmooth(FSCommand): + """Smooth a surface image with mri_surf2surf. + + The surface is smoothed by an iterative process of averaging the + value at each vertex with those of its adjacent neighbors. You may supply + either the number of iterations to run or a desired effective FWHM of the + smoothing process. If the latter, the underlying program will calculate + the correct number of iterations internally. + + .. seealso:: + + SmoothTessellation() Interface + For smoothing a tessellated surface (e.g.
in gifti or .stl) + + Examples + -------- + + >>> import nipype.interfaces.freesurfer as fs + >>> smoother = fs.SurfaceSmooth() + >>> smoother.inputs.in_file = "lh.cope1.mgz" + >>> smoother.inputs.subject_id = "subj_1" + >>> smoother.inputs.hemi = "lh" + >>> smoother.inputs.fwhm = 5 + >>> smoother.cmdline # doctest: +ELLIPSIS + 'mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1' + >>> smoother.run() # doctest: +SKIP + + """ + _cmd = "mri_surf2surf" + input_spec = SurfaceSmoothInputSpec + output_spec = SurfaceSmoothOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + in_file = self.inputs.in_file + if isdefined(self.inputs.fwhm): + kernel = self.inputs.fwhm + else: + kernel = self.inputs.smooth_iters + outputs["out_file"] = fname_presuffix( + in_file, suffix="_smooth%d" % kernel, newpath=os.getcwd()) + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()[name] + return None + + +class SurfaceTransformInputSpec(FSTraitedSpec): + source_file = File( + exists=True, + mandatory=True, + argstr="--sval %s", + xor=['source_annot_file'], + desc="surface file with source values") + source_annot_file = File( + exists=True, + mandatory=True, + argstr="--sval-annot %s", + xor=['source_file'], + desc="surface annotation file") + source_subject = traits.String( + mandatory=True, + argstr="--srcsubject %s", + desc="subject id for source surface") + hemi = traits.Enum( + "lh", + "rh", + argstr="--hemi %s", + mandatory=True, + desc="hemisphere to transform") + target_subject = traits.String( + mandatory=True, + argstr="--trgsubject %s", + desc="subject id of target surface") + target_ico_order = traits.Enum( + 1, + 2, + 3, + 4, + 5, + 6, + 7, + argstr="--trgicoorder %d", + desc=("order of the icosahedron if " + "target_subject is 'ico'")) + source_type = traits.Enum( + filetypes, + argstr='--sfmt %s', + requires=['source_file'], + desc="source file format") + target_type = traits.Enum( + filetypes + implicit_filetypes, + argstr='--tfmt %s', + desc="output format") + reshape = traits.Bool( + argstr="--reshape", + desc="reshape output surface to conform with Nifti") + reshape_factor = traits.Int( + argstr="--reshape-factor", desc="number of slices in reshaped image") + out_file = File( + argstr="--tval %s", genfile=True, desc="surface file to write") + + +class SurfaceTransformOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="transformed surface file") + + +class SurfaceTransform(FSCommand): + """Transform a surface file from one subject to another via a spherical registration. + + Both the source and target subject must reside in your Subjects Directory, + and they must have been processed with recon-all, unless you are transforming + to one of the icosahedron meshes. 
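+ + When ``out_file`` is not supplied, the output name is derived from the + source file with the target subject id inserted before the extension + (``lh.cope1.fsaverage.nii.gz`` for the example below); recon-all files + that lack a conventional extension (e.g. ``lh.thickness``) are + special-cased in ``_list_outputs``.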
+ + Examples + -------- + + >>> from nipype.interfaces.freesurfer import SurfaceTransform + >>> sxfm = SurfaceTransform() + >>> sxfm.inputs.source_file = "lh.cope1.nii.gz" + >>> sxfm.inputs.source_subject = "my_subject" + >>> sxfm.inputs.target_subject = "fsaverage" + >>> sxfm.inputs.hemi = "lh" + >>> sxfm.run() # doctest: +SKIP + + """ + _cmd = "mri_surf2surf" + input_spec = SurfaceTransformInputSpec + output_spec = SurfaceTransformOutputSpec + + def _format_arg(self, name, spec, value): + if name == "target_type": + if isdefined(self.inputs.out_file): + _, base, ext = split_filename(self._list_outputs()['out_file']) + if ext != filemap[value]: + if ext in filemap.values(): + raise ValueError( + "Cannot create {} file with extension " + "{}".format(value, ext)) + else: + logger.warn('Creating %s file with extension %s: %s%s', + value, ext, base, ext) + if value in implicit_filetypes: + return "" + return super(SurfaceTransform, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = self.inputs.out_file + if not isdefined(outputs["out_file"]): + if isdefined(self.inputs.source_file): + source = self.inputs.source_file + else: + source = self.inputs.source_annot_file + + # Some recon-all files don't have a proper extension (e.g. "lh.thickness") + # so we have to account for that here + bad_extensions = [ + ".%s" % e for e in [ + "area", "mid", "pial", "avg_curv", "curv", "inflated", + "jacobian_white", "orig", "nofix", "smoothwm", "crv", + "sphere", "sulc", "thickness", "volume", "white" + ] + ] + use_ext = True + if split_filename(source)[2] in bad_extensions: + source = source + ".stripme" + use_ext = False + ext = "" + if isdefined(self.inputs.target_type): + ext = "." + filemap[self.inputs.target_type] + use_ext = False + outputs["out_file"] = fname_presuffix( + source, + suffix=".%s%s" % (self.inputs.target_subject, ext), + newpath=os.getcwd(), + use_ext=use_ext) + else: + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()[name] + return None + + +class Surface2VolTransformInputSpec(FSTraitedSpec): + source_file = File( + exists=True, + argstr='--surfval %s', + copyfile=False, + mandatory=True, + xor=['mkmask'], + desc='This is the source of the surface values') + hemi = traits.Str( + argstr='--hemi %s', mandatory=True, desc='hemisphere of data') + transformed_file = File( + name_template="%s_asVol.nii", + desc='Output volume', + argstr='--outvol %s', + name_source=['source_file'], + hash_files=False) + reg_file = File( + exists=True, + argstr='--volreg %s', + mandatory=True, + desc='tkRAS-to-tkRAS matrix (tkregister2 format)', + xor=['subject_id']) + template_file = File( + exists=True, argstr='--template %s', desc='Output template volume') + mkmask = traits.Bool( + desc='make a mask instead of loading surface values', + argstr='--mkmask', + xor=['source_file']) + vertexvol_file = File( + name_template="%s_asVol_vertex.nii", + desc=('Path name of the vertex output volume, which ' + 'is the same as output volume except that the ' + 'value of each voxel is the vertex-id that is ' + 'mapped to that voxel.'), + argstr='--vtxvol %s', + name_source=['source_file'], + hash_files=False) + surf_name = traits.Str( + argstr='--surf %s', desc='surfname (default is white)') + projfrac = traits.Float(argstr='--projfrac %s', desc='thickness fraction') + subjects_dir = traits.Str( + argstr='--sd %s', + 
desc=('freesurfer subjects directory defaults to ' + '$SUBJECTS_DIR')) + subject_id = traits.Str( + argstr='--identity %s', desc='subject id', xor=['reg_file']) + + +class Surface2VolTransformOutputSpec(TraitedSpec): + transformed_file = File( + exists=True, desc='Path to output file if used normally') + vertexvol_file = File(desc='vertex map volume path id. Optional') + + +class Surface2VolTransform(FSCommand): + """Use FreeSurfer mri_surf2vol to apply a transform. + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import Surface2VolTransform + >>> xfm2vol = Surface2VolTransform() + >>> xfm2vol.inputs.source_file = 'lh.cope1.mgz' + >>> xfm2vol.inputs.reg_file = 'register.mat' + >>> xfm2vol.inputs.hemi = 'lh' + >>> xfm2vol.inputs.template_file = 'cope1.nii.gz' + >>> xfm2vol.inputs.subjects_dir = '.' + >>> xfm2vol.cmdline + 'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . --template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii' + >>> res = xfm2vol.run()# doctest: +SKIP + + """ + + _cmd = 'mri_surf2vol' + input_spec = Surface2VolTransformInputSpec + output_spec = Surface2VolTransformOutputSpec + + +class ApplyMaskInputSpec(FSTraitedSpec): + + in_file = File( + exists=True, + mandatory=True, + position=-3, + argstr="%s", + desc="input image (will be masked)") + mask_file = File( + exists=True, + mandatory=True, + position=-2, + argstr="%s", + desc="image defining mask space") + out_file = File( + name_source=['in_file'], + name_template='%s_masked', + hash_files=True, + keep_extension=True, + position=-1, + argstr="%s", + desc="final image to write") + xfm_file = File( + exists=True, + argstr="-xform %s", + desc="LTA-format transformation matrix to align mask with input") + invert_xfm = traits.Bool(argstr="-invert", desc="invert transformation") + xfm_source = File( + exists=True, + argstr="-lta_src %s", + desc="image defining transform source space") + xfm_target = File( + exists=True, + argstr="-lta_dst %s", + desc="image defining transform target space") + use_abs = traits.Bool( + argstr="-abs", desc="take absolute value of mask before applying") + mask_thresh = traits.Float( + argstr="-T %.4f", desc="threshold mask before applying") + keep_mask_deletion_edits = traits.Bool( + argstr="-keep_mask_deletion_edits", + desc="transfer voxel-deletion edits (voxels=1) from mask to out vol") + transfer = traits.Int( + argstr="-transfer %d", + desc="transfer only voxel value # from mask to out") + + +class ApplyMaskOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="masked image") + + +class ApplyMask(FSCommand): + """Use Freesurfer's mri_mask to apply a mask to an image. + + The mask file need not be binarized; it can be thresholded above a given + value before application. It can also optionally be transformed into input + space with an LTA matrix. 
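+ + Examples + -------- + + A minimal usage sketch; the file names are placeholders, and the rendered + command assumes the default ``%s_masked`` output template: + + >>> import nipype.interfaces.freesurfer as fs + >>> masker = fs.ApplyMask(in_file="brain.mgz", mask_file="mask.mgz") + >>> masker.cmdline # doctest: +SKIP + 'mri_mask brain.mgz mask.mgz brain_masked.mgz' + >>> res = masker.run() # doctest: +SKIP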
+ + """ + _cmd = "mri_mask" + input_spec = ApplyMaskInputSpec + output_spec = ApplyMaskOutputSpec + + +class SurfaceSnapshotsInputSpec(FSTraitedSpec): + + subject_id = traits.String( + position=1, argstr="%s", mandatory=True, desc="subject to visualize") + hemi = traits.Enum( + "lh", + "rh", + position=2, + argstr="%s", + mandatory=True, + desc="hemisphere to visualize") + surface = traits.String( + position=3, argstr="%s", mandatory=True, desc="surface to visualize") + + show_curv = traits.Bool( + argstr="-curv", desc="show curvature", xor=["show_gray_curv"]) + show_gray_curv = traits.Bool( + argstr="-gray", desc="show curvature in gray", xor=["show_curv"]) + + overlay = File( + exists=True, + argstr="-overlay %s", + desc="load an overlay volume/surface", + requires=["overlay_range"]) + reg_xors = ["overlay_reg", "identity_reg", "mni152_reg"] + overlay_reg = traits.File( + exists=True, + argstr="-overlay-reg %s", + xor=reg_xors, + desc="registration matrix file to register overlay to surface") + identity_reg = traits.Bool( + argstr="-overlay-reg-identity", + xor=reg_xors, + desc="use the identity matrix to register the overlay to the surface") + mni152_reg = traits.Bool( + argstr="-mni152reg", + xor=reg_xors, + desc="use to display a volume in MNI152 space on the average subject") + + overlay_range = traits.Either( + traits.Float, + traits.Tuple(traits.Float, traits.Float), + traits.Tuple(traits.Float, traits.Float, traits.Float), + desc="overlay range--either min, (min, max) or (min, mid, max)", + argstr="%s") + overlay_range_offset = traits.Float( + argstr="-foffset %.3f", + desc="overlay range will be symettric around offset value") + + truncate_overlay = traits.Bool( + argstr="-truncphaseflag 1", desc="truncate the overlay display") + reverse_overlay = traits.Bool( + argstr="-revphaseflag 1", desc="reverse the overlay display") + invert_overlay = traits.Bool( + argstr="-invphaseflag 1", desc="invert the overlay display") + demean_overlay = traits.Bool(argstr="-zm", desc="remove mean from overlay") + + annot_file = File( + exists=True, + argstr="-annotation %s", + xor=["annot_name"], + desc="path to annotation file to display") + annot_name = traits.String( + argstr="-annotation %s", + xor=["annot_file"], + desc= + "name of annotation to display (must be in $subject/label directory") + + label_file = File( + exists=True, + argstr="-label %s", + xor=["label_name"], + desc="path to label file to display") + label_name = traits.String( + argstr="-label %s", + xor=["label_file"], + desc="name of label to display (must be in $subject/label directory") + + colortable = File( + exists=True, argstr="-colortable %s", desc="load colortable file") + label_under = traits.Bool( + argstr="-labels-under", desc="draw label/annotation under overlay") + label_outline = traits.Bool( + argstr="-label-outline", desc="draw label/annotation as outline") + + patch_file = File(exists=True, argstr="-patch %s", desc="load a patch") + + orig_suffix = traits.String( + argstr="-orig %s", desc="set the orig surface suffix string") + sphere_suffix = traits.String( + argstr="-sphere %s", desc="set the sphere.reg suffix string") + + show_color_scale = traits.Bool( + argstr="-colscalebarflag 1", desc="display the color scale bar") + show_color_text = traits.Bool( + argstr="-colscaletext 1", desc="display text in the color scale bar") + + six_images = traits.Bool(desc="also take anterior and posterior snapshots") + screenshot_stem = traits.String( + desc="stem to use for screenshot file names") + stem_template_args = 
traits.List( + traits.String, + requires=["screenshot_stem"], + desc= + "input names to use as arguments for a string-formatted stem template") + tcl_script = File( + exists=True, + argstr="%s", + genfile=True, + desc="override default screenshot script") + + +class SurfaceSnapshotsOutputSpec(TraitedSpec): + + snapshots = OutputMultiPath( + File(exists=True), + desc="tiff images of the surface from different perspectives") + + +class SurfaceSnapshots(FSCommand): + """Use Tksurfer to save pictures of the cortical surface. + + By default, this takes snapshots of the lateral, medial, ventral, + and dorsal surfaces. See the ``six_images`` option to add the + anterior and posterior surfaces. + + You may also supply your own tcl script (see the Freesurfer wiki for + information on scripting tksurfer). The screenshot stem is set as the + environment variable "_SNAPSHOT_STEM", which you can use in your + own scripts. + + Note that this interface will not run if you do not have graphics + enabled on your system. + + Examples + -------- + + >>> import nipype.interfaces.freesurfer as fs + >>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial") + >>> shots.inputs.overlay = "zstat1.nii.gz" + >>> shots.inputs.overlay_range = (2.3, 6) + >>> shots.inputs.overlay_reg = "register.dat" + >>> res = shots.run() # doctest: +SKIP + + """ + _cmd = "tksurfer" + input_spec = SurfaceSnapshotsInputSpec + output_spec = SurfaceSnapshotsOutputSpec + + def _format_arg(self, name, spec, value): + if name == "tcl_script": + if not isdefined(value): + return "-tcl snapshots.tcl" + else: + return "-tcl %s" % value + elif name == "overlay_range": + if isinstance(value, float): + return "-fthresh %.3f" % value + else: + if len(value) == 2: + return "-fminmax %.3f %.3f" % value + else: + return "-fminmax %.3f %.3f -fmid %.3f" % (value[0], + value[2], + value[1]) + elif name == "annot_name" and isdefined(value): + # Matching annot by name needs to strip the leading hemi and trailing + # extension strings + if value.endswith(".annot"): + value = value[:-6] + if re.match("%s[\.\-_]" % self.inputs.hemi, value[:3]): + value = value[3:] + return "-annotation %s" % value + return super(SurfaceSnapshots, self)._format_arg(name, spec, value) + + def _run_interface(self, runtime): + if not isdefined(self.inputs.screenshot_stem): + stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, + self.inputs.surface) + else: + stem = self.inputs.screenshot_stem + stem_args = self.inputs.stem_template_args + if isdefined(stem_args): + args = tuple([getattr(self.inputs, arg) for arg in stem_args]) + stem = stem % args + # Check if the DISPLAY variable is set -- should avoid crashes (might not?) + if "DISPLAY" not in os.environ: + raise RuntimeError( + "Graphics are not enabled -- cannot run tksurfer") + runtime.environ["_SNAPSHOT_STEM"] = stem + self._write_tcl_script() + runtime = super(SurfaceSnapshots, self)._run_interface(runtime) + # If a display window can't be opened, this will crash on + # aggregate_outputs. Let's try to parse stderr and raise a + # better exception here if that happened. + errors = [ + "surfer: failed, no suitable display found", + "Fatal Error in tksurfer.bin: could not open display" + ] + for err in errors: + if err in runtime.stderr: + self.raise_exception(runtime) + # Tksurfer always (or at least always when you run a tcl script) + # exits with a nonzero returncode. We have to force it to 0 here.
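+ # (Real failures must therefore be detected via the stderr patterns + # checked above, rather than via the exit status.)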
+ runtime.returncode = 0 + return runtime + + def _write_tcl_script(self): + fid = open("snapshots.tcl", "w") + script = [ + "save_tiff $env(_SNAPSHOT_STEM)-lat.tif", "make_lateral_view", + "rotate_brain_y 180", "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-med.tif", "make_lateral_view", + "rotate_brain_x 90", "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-ven.tif", "make_lateral_view", + "rotate_brain_x -90", "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-dor.tif" + ] + if isdefined(self.inputs.six_images) and self.inputs.six_images: + script.extend([ + "make_lateral_view", "rotate_brain_y 90", "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-pos.tif", "make_lateral_view", + "rotate_brain_y -90", "redraw", + "save_tiff $env(_SNAPSHOT_STEM)-ant.tif" + ]) + + script.append("exit") + fid.write("\n".join(script)) + fid.close() + + def _list_outputs(self): + outputs = self._outputs().get() + if not isdefined(self.inputs.screenshot_stem): + stem = "%s_%s_%s" % (self.inputs.subject_id, self.inputs.hemi, + self.inputs.surface) + else: + stem = self.inputs.screenshot_stem + stem_args = self.inputs.stem_template_args + if isdefined(stem_args): + args = tuple([getattr(self.inputs, arg) for arg in stem_args]) + stem = stem % args + snapshots = ["%s-lat.tif", "%s-med.tif", "%s-dor.tif", "%s-ven.tif"] + if self.inputs.six_images: + snapshots.extend(["%s-pos.tif", "%s-ant.tif"]) + snapshots = [self._gen_fname(f % stem, suffix="") for f in snapshots] + outputs["snapshots"] = snapshots + return outputs + + def _gen_filename(self, name): + if name == "tcl_script": + return "snapshots.tcl" + return None + + +class ImageInfoInputSpec(FSTraitedSpec): + + in_file = File(exists=True, position=1, argstr="%s", desc="image to query") + + +class ImageInfoOutputSpec(TraitedSpec): + + info = traits.Any(desc="output of mri_info") + out_file = File(exists=True, desc="text file with image information") + data_type = traits.String(desc="image data type") + file_format = traits.String(desc="file format") + TE = traits.String(desc="echo time (msec)") + TR = traits.String(desc="repetition time(msec)") + TI = traits.String(desc="inversion time (msec)") + dimensions = traits.Tuple(desc="image dimensions (voxels)") + vox_sizes = traits.Tuple(desc="voxel sizes (mm)") + orientation = traits.String(desc="image orientation") + ph_enc_dir = traits.String(desc="phase encode direction") + + +class ImageInfo(FSCommand): + + _cmd = "mri_info" + input_spec = ImageInfoInputSpec + output_spec = ImageInfoOutputSpec + + def info_regexp(self, info, field, delim="\n"): + m = re.search("%s\s*:\s+(.+?)%s" % (field, delim), info) + if m: + return m.group(1) + else: + return None + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + info = runtime.stdout + outputs.info = info + + # Pulse sequence parameters + for field in ["TE", "TR", "TI"]: + fieldval = self.info_regexp(info, field, ", ") + if fieldval.endswith(" msec"): + fieldval = fieldval[:-5] + setattr(outputs, field, fieldval) + + # Voxel info + vox = self.info_regexp(info, "voxel sizes") + vox = tuple(vox.split(", ")) + outputs.vox_sizes = vox + dim = self.info_regexp(info, "dimensions") + dim = tuple([int(d) for d in dim.split(" x ")]) + outputs.dimensions = dim + + outputs.orientation = self.info_regexp(info, "Orientation") + outputs.ph_enc_dir = self.info_regexp(info, "PhEncDir") + + # File format and datatype are both keyed by "type" + ftype, dtype = re.findall("%s\s*:\s+(.+?)\n" % "type", info) + outputs.file_format = ftype + outputs.data_type = dtype + 
+ return outputs + + +class MRIsConvertInputSpec(FSTraitedSpec): + """ + Uses Freesurfer's mris_convert to convert surface files to various formats + """ + annot_file = File( + exists=True, + argstr="--annot %s", + desc="input is annotation or gifti label data") + + parcstats_file = File( + exists=True, + argstr="--parcstats %s", + desc="infile is name of text file containing label/val pairs") + + label_file = File( + exists=True, + argstr="--label %s", + desc="infile is .label file, label is name of this label") + + scalarcurv_file = File( + exists=True, + argstr="-c %s", + desc="input is scalar curv overlay file (must still specify surface)") + + functional_file = File( + exists=True, + argstr="-f %s", + desc= + "input is functional time-series or other multi-frame data (must specify surface)" + ) + + labelstats_outfile = File( + exists=False, + argstr="--labelstats %s", + desc= + "outfile is name of gifti file to which label stats will be written") + + patch = traits.Bool( + argstr="-p", desc="input is a patch, not a full surface") + rescale = traits.Bool( + argstr="-r", + desc="rescale vertex xyz so total area is same as group average") + normal = traits.Bool( + argstr="-n", desc="output is an ascii file where vertex data") + xyz_ascii = traits.Bool( + argstr="-a", desc="Print only surface xyz to ascii file") + vertex = traits.Bool( + argstr="-v", desc="Writes out neighbors of a vertex in each row") + + scale = traits.Float(argstr="-s %.3f", desc="scale vertex xyz by scale") + dataarray_num = traits.Int( + argstr="--da_num %d", + desc="if input is gifti, 'num' specifies which data array to use") + + talairachxfm_subjid = traits.String( + argstr="-t %s", desc="apply talairach xfm of subject to vertex xyz") + origname = traits.String(argstr="-o %s", desc="read orig positions") + + in_file = File( + exists=True, + mandatory=True, + position=-2, + argstr='%s', + desc='File to read/convert') + out_file = File( + argstr='%s', + position=-1, + genfile=True, + xor=['out_datatype'], + mandatory=True, + desc='output filename or True to generate one') + + out_datatype = traits.Enum( + "asc", + "ico", + "tri", + "stl", + "vtk", + "gii", + "mgh", + "mgz", + xor=['out_file'], + mandatory=True, + desc="These file formats are supported: ASCII: .asc" + "ICO: .ico, .tri GEO: .geo STL: .stl VTK: .vtk GIFTI: .gii MGH surface-encoded 'volume': .mgh, .mgz" + ) + to_scanner = traits.Bool( + argstr="--to-scanner", + desc="convert coordinates from native FS (tkr) coords to scanner coords" + ) + to_tkr = traits.Bool( + argstr="--to-tkr", + desc="convert coordinates from scanner coords to native FS (tkr) coords" + ) + + +class MRIsConvertOutputSpec(TraitedSpec): + """ + Uses Freesurfer's mris_convert to convert surface files to various formats + """ + converted = File(exists=True, desc='converted output surface') + + +class MRIsConvert(FSCommand): + """ + Uses Freesurfer's mris_convert to convert surface files to various formats + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> mris = fs.MRIsConvert() + >>> mris.inputs.in_file = 'lh.pial' + >>> mris.inputs.out_datatype = 'gii' + >>> mris.run() # doctest: +SKIP + """ + _cmd = 'mris_convert' + input_spec = MRIsConvertInputSpec + output_spec = MRIsConvertOutputSpec + + def _format_arg(self, name, spec, value): + if name == "out_file" and not os.path.isabs(value): + value = os.path.abspath(value) + return super(MRIsConvert, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + 
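+        # The converted surface name is derived from whichever input is
+        # defined (see _gen_outfilename below) and reported as an
+        # absolute path.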
outputs["converted"] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return os.path.abspath(self._gen_outfilename()) + else: + return None + + def _gen_outfilename(self): + if isdefined(self.inputs.out_file): + return self.inputs.out_file + elif isdefined(self.inputs.annot_file): + _, name, ext = split_filename(self.inputs.annot_file) + elif isdefined(self.inputs.parcstats_file): + _, name, ext = split_filename(self.inputs.parcstats_file) + elif isdefined(self.inputs.label_file): + _, name, ext = split_filename(self.inputs.label_file) + elif isdefined(self.inputs.scalarcurv_file): + _, name, ext = split_filename(self.inputs.scalarcurv_file) + elif isdefined(self.inputs.functional_file): + _, name, ext = split_filename(self.inputs.functional_file) + elif isdefined(self.inputs.in_file): + _, name, ext = split_filename(self.inputs.in_file) + + return name + ext + "_converted." + self.inputs.out_datatype + + +class MRIsCombineInputSpec(FSTraitedSpec): + """ + Uses Freesurfer's mris_convert to combine two surface files into one. + """ + in_files = traits.List( + File(Exists=True), + maxlen=2, + minlen=2, + mandatory=True, + position=1, + argstr='--combinesurfs %s', + desc='Two surfaces to be combined.') + out_file = File( + argstr='%s', + position=-1, + genfile=True, + mandatory=True, + desc='Output filename. Combined surfaces from in_files.') + + +class MRIsCombineOutputSpec(TraitedSpec): + """ + Uses Freesurfer's mris_convert to combine two surface files into one. + """ + out_file = File( + exists=True, + desc='Output filename. Combined surfaces from ' + 'in_files.') + + +class MRIsCombine(FSSurfaceCommand): + """ + Uses Freesurfer's ``mris_convert`` to combine two surface files into one. + + For complete details, see the `mris_convert Documentation. + `_ + + If given an ``out_file`` that does not begin with ``'lh.'`` or ``'rh.'``, + ``mris_convert`` will prepend ``'lh.'`` to the file name. + To avoid this behavior, consider setting ``out_file = './'``, or + leaving out_file blank. + + In a Node/Workflow, ``out_file`` is interpreted literally. + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> mris = fs.MRIsCombine() + >>> mris.inputs.in_files = ['lh.pial', 'rh.pial'] + >>> mris.inputs.out_file = 'bh.pial' + >>> mris.cmdline + 'mris_convert --combinesurfs lh.pial rh.pial bh.pial' + >>> mris.run() # doctest: +SKIP + """ + _cmd = 'mris_convert' + input_spec = MRIsCombineInputSpec + output_spec = MRIsCombineOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + + # mris_convert --combinesurfs uses lh. as the default prefix + # regardless of input file names, except when path info is + # specified + path, base = os.path.split(self.inputs.out_file) + if path == '' and base[:3] not in ('lh.', 'rh.'): + base = 'lh.' + base + outputs['out_file'] = os.path.abspath(os.path.join(path, base)) + + return outputs + + def _normalize_filenames(self): + """ In a Node context, interpret out_file as a literal path to + reduce surprise. 
+ """ + if isdefined(self.inputs.out_file): + self.inputs.out_file = os.path.abspath(self.inputs.out_file) + + +class MRITessellateInputSpec(FSTraitedSpec): + """ + Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume + """ + + in_file = File( + exists=True, + mandatory=True, + position=-3, + argstr='%s', + desc='Input volume to tesselate voxels from.') + label_value = traits.Int( + position=-2, + argstr='%d', + mandatory=True, + desc= + 'Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)' + ) + out_file = File( + argstr='%s', + position=-1, + genfile=True, + desc='output filename or True to generate one') + tesselate_all_voxels = traits.Bool( + argstr='-a', + desc='Tessellate the surface of all voxels with different labels') + use_real_RAS_coordinates = traits.Bool( + argstr='-n', + desc='Saves surface with real RAS coordinates where c_(r,a,s) != 0') + + +class MRITessellateOutputSpec(TraitedSpec): + """ + Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume + """ + surface = File(exists=True, desc='binary surface of the tessellation ') + + +class MRITessellate(FSCommand): + """ + Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> tess = fs.MRITessellate() + >>> tess.inputs.in_file = 'aseg.mgz' + >>> tess.inputs.label_value = 17 + >>> tess.inputs.out_file = 'lh.hippocampus' + >>> tess.run() # doctest: +SKIP + """ + _cmd = 'mri_tessellate' + input_spec = MRITessellateInputSpec + output_spec = MRITessellateOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['surface'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + if isdefined(self.inputs.out_file): + return self.inputs.out_file + else: + _, name, ext = split_filename(self.inputs.in_file) + return name + ext + '_' + str(self.inputs.label_value) + + +class MRIPretessInputSpec(FSTraitedSpec): + in_filled = File( + exists=True, + mandatory=True, + position=-4, + argstr='%s', + desc=('filled volume, usually wm.mgz')) + label = traits.Either( + traits.Str('wm'), + traits.Int(1), + argstr='%s', + default='wm', + mandatory=True, + usedefault=True, + position=-3, + desc=('label to be picked up, can be a Freesurfer\'s string like ' + '\'wm\' or a label value (e.g. 127 for rh or 255 for lh)')) + in_norm = File( + exists=True, + mandatory=True, + position=-2, + argstr='%s', + desc=('the normalized, brain-extracted T1w image. Usually norm.mgz')) + out_file = File( + position=-1, + argstr='%s', + name_source=['in_filled'], + name_template='%s_pretesswm', + keep_extension=True, + desc='the output file after mri_pretess.') + + nocorners = traits.Bool( + False, + argstr='-nocorners', + desc=('do not remove corner configurations' + ' in addition to edge ones.')) + keep = traits.Bool(False, argstr='-keep', desc=('keep WM edits')) + test = traits.Bool( + False, + argstr='-test', + desc= + ('adds a voxel that should be removed by ' + 'mri_pretess. The value of the voxel is set to that of an ON-edited WM, ' + 'so it should be kept with -keep. 
The output will NOT be saved.')) + + +class MRIPretessOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output file after mri_pretess') + + +class MRIPretess(FSCommand): + """ + Uses Freesurfer's mri_pretess to prepare volumes to be tessellated. + + Description + ----------- + + Changes white matter (WM) segmentation so that the neighbors of all + voxels labeled as WM have a face in common - no edges or corners + allowed. + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> pretess = fs.MRIPretess() + >>> pretess.inputs.in_filled = 'wm.mgz' + >>> pretess.inputs.in_norm = 'norm.mgz' + >>> pretess.inputs.nocorners = True + >>> pretess.cmdline + 'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz' + >>> pretess.run() # doctest: +SKIP + + """ + _cmd = 'mri_pretess' + input_spec = MRIPretessInputSpec + output_spec = MRIPretessOutputSpec + + +class MRIMarchingCubesInputSpec(FSTraitedSpec): + """ + Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume + """ + + in_file = File( + exists=True, + mandatory=True, + position=1, + argstr='%s', + desc='Input volume to tesselate voxels from.') + label_value = traits.Int( + position=2, + argstr='%d', + mandatory=True, + desc= + 'Label value which to tesselate from the input volume. (integer, if input is "filled.mgz" volume, 127 is rh, 255 is lh)' + ) + connectivity_value = traits.Int( + 1, + position=-1, + argstr='%d', + usedefault=True, + desc= + 'Alter the marching cubes connectivity: 1=6+,2=18,3=6,4=26 (default=1)' + ) + out_file = File( + argstr='./%s', + position=-2, + genfile=True, + desc='output filename or True to generate one') + + +class MRIMarchingCubesOutputSpec(TraitedSpec): + """ + Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume + """ + surface = File(exists=True, desc='binary surface of the tessellation ') + + +class MRIMarchingCubes(FSCommand): + """ + Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> mc = fs.MRIMarchingCubes() + >>> mc.inputs.in_file = 'aseg.mgz' + >>> mc.inputs.label_value = 17 + >>> mc.inputs.out_file = 'lh.hippocampus' + >>> mc.run() # doctest: +SKIP + """ + _cmd = 'mri_mc' + input_spec = MRIMarchingCubesInputSpec + output_spec = MRIMarchingCubesOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['surface'] = self._gen_outfilename() + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + if isdefined(self.inputs.out_file): + return os.path.abspath(self.inputs.out_file) + else: + _, name, ext = split_filename(self.inputs.in_file) + return os.path.abspath( + name + ext + '_' + str(self.inputs.label_value)) + + +class SmoothTessellationInputSpec(FSTraitedSpec): + """ + This program smooths the tessellation of a surface using 'mris_smooth' + """ + + in_file = File( + exists=True, + mandatory=True, + argstr='%s', + position=-2, + copyfile=True, + desc='Input volume to tesselate voxels from.') + curvature_averaging_iterations = traits.Int( + argstr='-a %d', + desc='Number of curvature averaging iterations (default=10)') + smoothing_iterations = traits.Int( + argstr='-n %d', desc='Number of smoothing iterations (default=10)') + snapshot_writing_iterations = traits.Int( + argstr='-w %d', desc='Write snapshot every "n" iterations') + + use_gaussian_curvature_smoothing = 
traits.Bool(
+        argstr='-g', desc='Use Gaussian curvature smoothing')
+    gaussian_curvature_norm_steps = traits.Int(
+        argstr='%d ',
+        desc='Number of norm steps for Gaussian curvature smoothing')
+    gaussian_curvature_smoothing_steps = traits.Int(
+        argstr='%d',
+        desc='Number of smoothing steps for Gaussian curvature smoothing')
+
+    disable_estimates = traits.Bool(
+        argstr='-nw',
+        desc='Disables the writing of curvature and area estimates')
+    normalize_area = traits.Bool(
+        argstr='-area', desc='Normalizes the area after smoothing')
+    use_momentum = traits.Bool(argstr='-m', desc='Uses momentum')
+
+    out_file = File(
+        argstr='%s',
+        position=-1,
+        genfile=True,
+        desc='output filename or True to generate one')
+    out_curvature_file = File(
+        argstr='-c %s', desc='Write curvature to ?h.curvname (default "curv")')
+    out_area_file = File(
+        argstr='-b %s', desc='Write area to ?h.areaname (default "area")')
+    seed = traits.Int(
+        argstr="-seed %d", desc="Seed for setting random number generator")
+
+
+class SmoothTessellationOutputSpec(TraitedSpec):
+    """
+    This program smooths the tessellation of a surface using 'mris_smooth'
+    """
+    surface = File(exists=True, desc='Smoothed surface file')
+
+
+class SmoothTessellation(FSCommand):
+    """
+    This program smooths the tessellation of a surface using 'mris_smooth'
+
+    .. seealso::
+
+        SurfaceSmooth() Interface
+            For smoothing a scalar field along a surface manifold
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.freesurfer as fs
+    >>> smooth = fs.SmoothTessellation()
+    >>> smooth.inputs.in_file = 'lh.hippocampus.stl'
+    >>> smooth.run() # doctest: +SKIP
+    """
+    _cmd = 'mris_smooth'
+    input_spec = SmoothTessellationInputSpec
+    output_spec = SmoothTessellationOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['surface'] = self._gen_outfilename()
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_file':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        if isdefined(self.inputs.out_file):
+            return os.path.abspath(self.inputs.out_file)
+        else:
+            _, name, ext = split_filename(self.inputs.in_file)
+            return os.path.abspath(name + '_smoothed' + ext)
+
+    def _run_interface(self, runtime):
+        # mris_smooth's returncode is not a reliable failure signal, so
+        # check stderr and raise if the command reported a failure.
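+        # Any stderr containing the word "failed" (e.g. a hypothetical
+        # "mris_smooth: smoothing failed" message) is treated as an error
+        # by the check below.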
+ runtime = super(SmoothTessellation, self)._run_interface(runtime) + if "failed" in runtime.stderr: + self.raise_exception(runtime) + return runtime + + +class MakeAverageSubjectInputSpec(FSTraitedSpec): + subjects_ids = traits.List( + traits.Str(), + argstr='--subjects %s', + desc='freesurfer subjects ids to average', + mandatory=True, + sep=' ') + out_name = File( + 'average', + argstr='--out %s', + desc='name for the average subject', + usedefault=True) + + +class MakeAverageSubjectOutputSpec(TraitedSpec): + average_subject_name = traits.Str(desc='Output registration file') + + +class MakeAverageSubject(FSCommand): + """Make an average freesurfer subject + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import MakeAverageSubject + >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2']) + >>> avg.cmdline + 'make_average_subject --out average --subjects s1 s2' + + """ + + _cmd = 'make_average_subject' + input_spec = MakeAverageSubjectInputSpec + output_spec = MakeAverageSubjectOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['average_subject_name'] = self.inputs.out_name + return outputs + + +class ExtractMainComponentInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr='%s', + position=1, + desc='input surface file') + out_file = File( + name_template='%s.maincmp', + name_source='in_file', + argstr='%s', + position=2, + desc='surface containing main component') + + +class ExtractMainComponentOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='surface containing main component') + + +class ExtractMainComponent(CommandLine): + """Extract the main component of a tesselated surface + + Examples + -------- + + >>> from nipype.interfaces.freesurfer import ExtractMainComponent + >>> mcmp = ExtractMainComponent(in_file='lh.pial') + >>> mcmp.cmdline + 'mris_extract_main_component lh.pial lh.maincmp' + + """ + + _cmd = 'mris_extract_main_component' + input_spec = ExtractMainComponentInputSpec + output_spec = ExtractMainComponentOutputSpec + + +class Tkregister2InputSpec(FSTraitedSpec): + target_image = File( + exists=True, argstr="--targ %s", xor=['fstarg'], desc='target volume') + fstarg = traits.Bool( + False, + argstr='--fstarg', + xor=['target_image'], + desc='use subject\'s T1 as reference') + + moving_image = File( + exists=True, mandatory=True, argstr="--mov %s", desc='moving volume') + # Input registration file options + fsl_in_matrix = File( + exists=True, + argstr="--fsl %s", + desc='fsl-style registration input matrix') + xfm = File( + exists=True, + argstr='--xfm %s', + desc='use a matrix in MNI coordinates as initial registration') + lta_in = File( + exists=True, + argstr='--lta %s', + desc='use a matrix in MNI coordinates as initial registration') + invert_lta_in = traits.Bool( + requires=['lta_in'], desc='Invert input LTA before applying') + # Output registration file options + fsl_out = traits.Either( + True, + File, + argstr='--fslregout %s', + desc='compute an FSL-compatible resgitration matrix') + lta_out = traits.Either( + True, + File, + argstr='--ltaout %s', + desc='output registration file (LTA format)') + invert_lta_out = traits.Bool( + argstr='--ltaout-inv', + requires=['lta_in'], + desc='Invert input LTA before applying') + + subject_id = traits.String(argstr="--s %s", desc='freesurfer subject ID') + noedit = traits.Bool( + True, + argstr="--noedit", + usedefault=True, + desc='do not open edit window (exit)') + reg_file = File( + 'register.dat', + usedefault=True, + 
mandatory=True,
+        argstr='--reg %s',
+        desc='freesurfer-style registration file')
+    reg_header = traits.Bool(
+        False, argstr='--regheader', desc='compute registration from headers')
+    fstal = traits.Bool(
+        False,
+        argstr='--fstal',
+        xor=['target_image', 'moving_image', 'reg_file'],
+        desc='set mov to be tal and reg to be tal xfm')
+    movscale = traits.Float(
+        argstr='--movscale %f', desc='adjust registration matrix to scale mov')
+
+
+class Tkregister2OutputSpec(TraitedSpec):
+    reg_file = File(exists=True, desc='freesurfer-style registration file')
+    fsl_file = File(desc='FSL-style registration file')
+    lta_file = File(desc='LTA-style registration file')
+
+
+class Tkregister2(FSCommand):
+    """
+
+    Examples
+    --------
+
+    Get transform matrix between orig (*tkRAS*) and native (*scannerRAS*)
+    coordinates in Freesurfer. Implements the first step of mapping surfaces
+    to native space in `this guide
+    <https://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat>`_.
+
+    >>> from nipype.interfaces.freesurfer import Tkregister2
+    >>> tk2 = Tkregister2(reg_file='T1_to_native.dat')
+    >>> tk2.inputs.moving_image = 'T1.mgz'
+    >>> tk2.inputs.target_image = 'structural.nii'
+    >>> tk2.inputs.reg_header = True
+    >>> tk2.cmdline
+    'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader \
+--targ structural.nii'
+    >>> tk2.run() # doctest: +SKIP
+
+    The example below uses tkregister2 without the manual editing
+    stage to convert an FSL-style registration matrix (.mat) to a
+    FreeSurfer-style registration matrix (.dat)
+
+    >>> from nipype.interfaces.freesurfer import Tkregister2
+    >>> tk2 = Tkregister2()
+    >>> tk2.inputs.moving_image = 'epi.nii'
+    >>> tk2.inputs.fsl_in_matrix = 'flirt.mat'
+    >>> tk2.cmdline
+    'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat'
+    >>> tk2.run() # doctest: +SKIP
+    """
+    _cmd = "tkregister2"
+    input_spec = Tkregister2InputSpec
+    output_spec = Tkregister2OutputSpec
+
+    def _format_arg(self, name, spec, value):
+        if name == 'lta_in' and self.inputs.invert_lta_in:
+            # '--lta-inv' loads the input LTA inverted; format the argument
+            # directly rather than clobbering the trait spec object
+            return '--lta-inv %s' % value
+        if name in ('fsl_out', 'lta_out') and value is True:
+            value = self._list_outputs()[name]
+        return super(Tkregister2, self)._format_arg(name, spec, value)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        reg_file = os.path.abspath(self.inputs.reg_file)
+        outputs['reg_file'] = reg_file
+
+        cwd = os.getcwd()
+        fsl_out = self.inputs.fsl_out
+        if isdefined(fsl_out):
+            if fsl_out is True:
+                outputs['fsl_file'] = fname_presuffix(
+                    reg_file, suffix='.mat', newpath=cwd, use_ext=False)
+            else:
+                outputs['fsl_file'] = os.path.abspath(self.inputs.fsl_out)
+
+        lta_out = self.inputs.lta_out
+        if isdefined(lta_out):
+            if lta_out is True:
+                outputs['lta_file'] = fname_presuffix(
+                    reg_file, suffix='.lta', newpath=cwd, use_ext=False)
+            else:
+                outputs['lta_file'] = os.path.abspath(self.inputs.lta_out)
+        return outputs
+
+
+class AddXFormToHeaderInputSpec(FSTraitedSpec):
+
+    # required
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        position=-2,
+        argstr="%s",
+        desc="input volume")
+    # transform file does NOT need to exist at the time if using copy_name
+    transform = File(
+        exists=False,
+        mandatory=True,
+        position=-3,
+        argstr="%s",
+        desc="xfm file")
+    out_file = File(
+        'output.mgz',
+        position=-1,
+        argstr="%s",
+        usedefault=True,
+        desc="output volume")
+    # optional
+    copy_name = 
traits.Bool( + argstr="-c", desc="do not try to load the xfmfile, just copy name") + verbose = traits.Bool(argstr="-v", desc="be verbose") + + +class AddXFormToHeaderOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="output volume") + + +class AddXFormToHeader(FSCommand): + """ Just adds specified xform to the volume header + + (!) WARNING: transform input **MUST** be an absolute path to a DataSink'ed transform or + the output will reference a transform in the workflow cache directory! + + >>> from nipype.interfaces.freesurfer import AddXFormToHeader + >>> adder = AddXFormToHeader() + >>> adder.inputs.in_file = 'norm.mgz' + >>> adder.inputs.transform = 'trans.mat' + >>> adder.cmdline + 'mri_add_xform_to_header trans.mat norm.mgz output.mgz' + + >>> adder.inputs.copy_name = True + >>> adder.cmdline + 'mri_add_xform_to_header -c trans.mat norm.mgz output.mgz' + + >>> adder.run() # doctest: +SKIP + + References: + ---------- + [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_add_xform_to_header] + + """ + _cmd = "mri_add_xform_to_header" + input_spec = AddXFormToHeaderInputSpec + output_spec = AddXFormToHeaderOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'transform': + return value # os.path.abspath(value) + # if name == 'copy_name' and value: + # self.input_spec.transform + return super(AddXFormToHeader, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + +class CheckTalairachAlignmentInputSpec(FSTraitedSpec): + in_file = File( + argstr='-xfm %s', + xor=['subject'], + exists=True, + mandatory=True, + position=-1, + desc="specify the talairach.xfm file to check") + subject = traits.String( + argstr='-subj %s', + xor=['in_file'], + mandatory=True, + position=-1, + desc="specify subject's name") + # optional + threshold = traits.Float( + default_value=0.010, + usedefault=True, + argstr='-T %.3f', + desc="Talairach transforms for subjects with p-values <= T " + + "are considered as very unlikely default=0.010") + + +class CheckTalairachAlignmentOutputSpec(TraitedSpec): + out_file = traits.File( + exists=True, desc="The input file for CheckTalairachAlignment") + + +class CheckTalairachAlignment(FSCommand): + """ + This program detects Talairach alignment failures + + Examples + ======== + + >>> from nipype.interfaces.freesurfer import CheckTalairachAlignment + >>> checker = CheckTalairachAlignment() + + >>> checker.inputs.in_file = 'trans.mat' + >>> checker.inputs.threshold = 0.005 + >>> checker.cmdline + 'talairach_afd -T 0.005 -xfm trans.mat' + + >>> checker.run() # doctest: +SKIP + """ + _cmd = "talairach_afd" + input_spec = CheckTalairachAlignmentInputSpec + output_spec = CheckTalairachAlignmentOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self.inputs.in_file + return outputs + + +class TalairachAVIInputSpec(FSTraitedSpec): + in_file = File( + argstr='--i %s', exists=True, mandatory=True, desc="input volume") + out_file = File( + argstr='--xfm %s', + mandatory=True, + exists=False, + desc="output xfm file") + # optional + atlas = traits.String( + argstr='--atlas %s', + desc="alternate target atlas (in freesurfer/average dir)") + + +class TalairachAVIOutputSpec(TraitedSpec): + out_file = traits.File( + exists=False, desc="The output transform for TalairachAVI") + out_log = traits.File( + exists=False, desc="The output log file for TalairachAVI") + out_txt = traits.File( + 
exists=False, desc="The output text file for TalairachAVI")
+
+
+class TalairachAVI(FSCommand):
+    """
+    Front-end for Avi Snyder's image registration tool. Computes the
+    talairach transform that maps the input volume to the MNI average_305.
+    This does not add the xfm to the header of the input file. When called
+    by recon-all, the xfm is added to the header after the transform is
+    computed.
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces.freesurfer import TalairachAVI
+    >>> example = TalairachAVI()
+    >>> example.inputs.in_file = 'norm.mgz'
+    >>> example.inputs.out_file = 'trans.mat'
+    >>> example.cmdline
+    'talairach_avi --i norm.mgz --xfm trans.mat'
+
+    >>> example.run() # doctest: +SKIP
+    """
+    _cmd = "talairach_avi"
+    input_spec = TalairachAVIInputSpec
+    output_spec = TalairachAVIOutputSpec
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        outputs['out_log'] = os.path.abspath('talairach_avi.log')
+        outputs['out_txt'] = os.path.join(
+            os.path.dirname(self.inputs.out_file),
+            'talsrcimg_to_' + str(self.inputs.atlas) + 't4_vox2vox.txt')
+        return outputs
+
+
+class TalairachQCInputSpec(FSTraitedSpec):
+    log_file = File(
+        argstr='%s',
+        mandatory=True,
+        exists=True,
+        position=0,
+        desc="The log file for TalairachQC")
+
+
+class TalairachQC(FSScriptCommand):
+    """
+    Examples
+    ========
+
+    >>> from nipype.interfaces.freesurfer import TalairachQC
+    >>> qc = TalairachQC()
+    >>> qc.inputs.log_file = 'dirs.txt'
+    >>> qc.cmdline
+    'tal_QC_AZS dirs.txt'
+    """
+    _cmd = "tal_QC_AZS"
+    input_spec = TalairachQCInputSpec
+    output_spec = FSScriptOutputSpec
+
+
+class RemoveNeckInputSpec(FSTraitedSpec):
+    in_file = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-4,
+        desc="Input file for RemoveNeck")
+    out_file = File(
+        argstr="%s",
+        exists=False,
+        name_source=['in_file'],
+        name_template="%s_noneck",
+        hash_files=False,
+        keep_extension=True,
+        position=-1,
+        desc="Output file for RemoveNeck")
+    transform = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-3,
+        desc="Input transform file for RemoveNeck")
+    template = File(
+        argstr="%s",
+        exists=True,
+        mandatory=True,
+        position=-2,
+        desc="Input template file for RemoveNeck")
+    # optional
+    radius = traits.Int(argstr="-radius %d", desc="Radius")
+
+
+class RemoveNeckOutputSpec(TraitedSpec):
+    out_file = File(exists=False, desc="Output file with neck removed")
+
+
+class RemoveNeck(FSCommand):
+    """
+    Crops the neck out of the mri image
+
+    Examples
+    ========
+
+    >>> from nipype.interfaces.freesurfer import RemoveNeck
+    >>> remove_neck = RemoveNeck()
+    >>> remove_neck.inputs.in_file = 'norm.mgz'
+    >>> remove_neck.inputs.transform = 'trans.mat'
+    >>> remove_neck.inputs.template = 'trans.mat'
+    >>> remove_neck.cmdline
+    'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz'
+    """
+    _cmd = "mri_remove_neck"
+    input_spec = RemoveNeckInputSpec
+    output_spec = RemoveNeckOutputSpec
+
+    def _gen_fname(self, name):
+        if name == 'out_file':
+            return os.path.abspath('nu_noneck.mgz')
+        return None
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+
+class MRIFillInputSpec(FSTraitedSpec):
+    in_file = File(
+        argstr="%s",
+        mandatory=True,
+        exists=True,
+        position=-2,
+        desc="Input white matter file")
+    out_file = File(
+        argstr="%s",
+        mandatory=True,
+        exists=False,
+        position=-1,
+        desc="Output filled volume file name for MRIFill")
+    # 
optional + segmentation = File( + argstr="-segmentation %s", + exists=True, + desc="Input segmentation file for MRIFill") + transform = File( + argstr="-xform %s", + exists=True, + desc="Input transform file for MRIFill") + log_file = File(argstr="-a %s", desc="Output log file for MRIFill") + + +class MRIFillOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output file from MRIFill") + log_file = File(desc="Output log file from MRIFill") + + +class MRIFill(FSCommand): + """ + This program creates hemispheric cutting planes and fills white matter + with specific values for subsequent surface tesselation. + + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRIFill + >>> fill = MRIFill() + >>> fill.inputs.in_file = 'wm.mgz' # doctest: +SKIP + >>> fill.inputs.out_file = 'filled.mgz' # doctest: +SKIP + >>> fill.cmdline # doctest: +SKIP + 'mri_fill wm.mgz filled.mgz' + """ + + _cmd = "mri_fill" + input_spec = MRIFillInputSpec + output_spec = MRIFillOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + if isdefined(self.inputs.log_file): + outputs["log_file"] = os.path.abspath(self.inputs.log_file) + return outputs + + +class MRIsInflateInputSpec(FSTraitedSpec): + in_file = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + copyfile=True, + desc="Input file for MRIsInflate") + out_file = File( + argstr="%s", + position=-1, + exists=False, + name_source=['in_file'], + name_template="%s.inflated", + hash_files=False, + keep_extension=True, + desc="Output file for MRIsInflate") + # optional + out_sulc = File( + exists=False, xor=['no_save_sulc'], desc="Output sulc file") + no_save_sulc = traits.Bool( + argstr='-no-save-sulc', + xor=['out_sulc'], + desc="Do not save sulc file as output") + + +class MRIsInflateOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output file for MRIsInflate") + out_sulc = File(exists=False, desc="Output sulc file") + + +class MRIsInflate(FSCommand): + """ + This program will inflate a cortical surface. + + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRIsInflate + >>> inflate = MRIsInflate() + >>> inflate.inputs.in_file = 'lh.pial' + >>> inflate.inputs.no_save_sulc = True + >>> inflate.cmdline # doctest: +SKIP + 'mris_inflate -no-save-sulc lh.pial lh.inflated' + """ + + _cmd = 'mris_inflate' + input_spec = MRIsInflateInputSpec + output_spec = MRIsInflateOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + if not self.inputs.no_save_sulc: + # if the sulc file will be saved + outputs["out_sulc"] = os.path.abspath(self.inputs.out_sulc) + return outputs + + +class SphereInputSpec(FSTraitedSpecOpenMP): + in_file = File( + argstr="%s", + position=-2, + copyfile=True, + mandatory=True, + exists=True, + desc="Input file for Sphere") + out_file = File( + argstr="%s", + position=-1, + exists=False, + name_source=['in_file'], + hash_files=False, + name_template='%s.sphere', + desc="Output file for Sphere") + # optional + seed = traits.Int( + argstr="-seed %d", desc="Seed for setting random number generator") + magic = traits.Bool( + argstr="-q", + desc= + "No documentation. 
Direct questions to analysis-bugs@nmr.mgh.harvard.edu"
+    )
+    in_smoothwm = File(
+        exists=True,
+        copyfile=True,
+        desc="Input surface required when -q flag is not selected")
+
+
+class SphereOutputSpec(TraitedSpec):
+    out_file = File(exists=False, desc="Output file for Sphere")
+
+
+class Sphere(FSCommandOpenMP):
+    """
+    This program will inflate a cortical surface to a sphere
+
+    Examples
+    ========
+    >>> from nipype.interfaces.freesurfer import Sphere
+    >>> sphere = Sphere()
+    >>> sphere.inputs.in_file = 'lh.pial'
+    >>> sphere.cmdline
+    'mris_sphere lh.pial lh.sphere'
+    """
+    _cmd = 'mris_sphere'
+    input_spec = SphereInputSpec
+    output_spec = SphereOutputSpec
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs["out_file"] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+
+class FixTopologyInputSpec(FSTraitedSpec):
+    in_orig = File(
+        exists=True,
+        mandatory=True,
+        desc="Undocumented input file <hemisphere>.orig")
+    in_inflated = File(
+        exists=True,
+        mandatory=True,
+        desc="Undocumented input file <hemisphere>.inflated")
+    in_brain = File(
+        exists=True, mandatory=True, desc="Implicit input brain.mgz")
+    in_wm = File(exists=True, mandatory=True, desc="Implicit input wm.mgz")
+    hemisphere = traits.String(
+        position=-1,
+        argstr="%s",
+        mandatory=True,
+        desc="Hemisphere being processed")
+    subject_id = traits.String(
+        'subject_id',
+        position=-2,
+        argstr="%s",
+        mandatory=True,
+        usedefault=True,
+        desc="Subject being processed")
+    copy_inputs = traits.Bool(
+        mandatory=True,
+        desc="If running as a node, set this to True; "
+             "otherwise, the topology fixing will be done "
+             "in place.")
+
+    # optional
+    seed = traits.Int(
+        argstr="-seed %d", desc="Seed for setting random number generator")
+    ga = traits.Bool(
+        argstr="-ga",
+        desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
+    mgz = traits.Bool(
+        argstr="-mgz",
+        desc="No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu")
+    sphere = traits.File(argstr="-sphere %s", desc="Sphere input file")
+
+
+class FixTopologyOutputSpec(TraitedSpec):
+    out_file = File(exists=False, desc="Output file for FixTopology")
+
+
+class FixTopology(FSCommand):
+    """
+    This program computes a mapping from the unit sphere onto the surface
+    of the cortex from a previously generated approximation of the
+    cortical surface, thus guaranteeing a topologically correct surface.
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import FixTopology + >>> ft = FixTopology() + >>> ft.inputs.in_orig = 'lh.orig' # doctest: +SKIP + >>> ft.inputs.in_inflated = 'lh.inflated' # doctest: +SKIP + >>> ft.inputs.sphere = 'lh.qsphere.nofix' # doctest: +SKIP + >>> ft.inputs.hemisphere = 'lh' + >>> ft.inputs.subject_id = '10335' + >>> ft.inputs.mgz = True + >>> ft.inputs.ga = True + >>> ft.cmdline # doctest: +SKIP + 'mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh' + """ + + _cmd = 'mris_fix_topology' + input_spec = FixTopologyInputSpec + output_spec = FixTopologyOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + hemi = self.inputs.hemisphere + copy2subjdir(self, self.inputs.sphere, folder='surf') + # the orig file is edited in place + self.inputs.in_orig = copy2subjdir( + self, + self.inputs.in_orig, + folder='surf', + basename='{0}.orig'.format(hemi)) + copy2subjdir( + self, + self.inputs.in_inflated, + folder='surf', + basename='{0}.inflated'.format(hemi)) + copy2subjdir( + self, self.inputs.in_brain, folder='mri', basename='brain.mgz') + copy2subjdir( + self, self.inputs.in_wm, folder='mri', basename='wm.mgz') + return super(FixTopology, self).run(**inputs) + + def _format_arg(self, name, spec, value): + if name == 'sphere': + # get the basename and take out the hemisphere + suffix = os.path.basename(value).split('.', 1)[1] + return spec.argstr % suffix + return super(FixTopology, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.in_orig) + return outputs + + +class EulerNumberInputSpec(FSTraitedSpec): + in_file = File( + argstr="%s", + position=-1, + mandatory=True, + exists=True, + desc="Input file for EulerNumber") + + +class EulerNumberOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output file for EulerNumber") + + +class EulerNumber(FSCommand): + """ + This program computes EulerNumber for a cortical surface + + Examples + ======== + >>> from nipype.interfaces.freesurfer import EulerNumber + >>> ft = EulerNumber() + >>> ft.inputs.in_file = 'lh.pial' + >>> ft.cmdline + 'mris_euler_number lh.pial' + """ + _cmd = 'mris_euler_number' + input_spec = EulerNumberInputSpec + output_spec = EulerNumberOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.in_file) + return outputs + + +class RemoveIntersectionInputSpec(FSTraitedSpec): + in_file = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + copyfile=True, + desc="Input file for RemoveIntersection") + out_file = File( + argstr="%s", + position=-1, + exists=False, + name_source=['in_file'], + name_template='%s', + hash_files=False, + keep_extension=True, + desc="Output file for RemoveIntersection") + + +class RemoveIntersectionOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output file for RemoveIntersection") + + +class RemoveIntersection(FSCommand): + """ + This program removes the intersection of the given MRI + + Examples + ======== + >>> from nipype.interfaces.freesurfer import RemoveIntersection + >>> ri = RemoveIntersection() + >>> ri.inputs.in_file = 'lh.pial' + >>> ri.cmdline + 'mris_remove_intersection lh.pial lh.pial' + """ + + _cmd = 'mris_remove_intersection' + input_spec = RemoveIntersectionInputSpec + output_spec = 
RemoveIntersectionOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + +class MakeSurfacesInputSpec(FSTraitedSpec): + # required + hemisphere = traits.Enum( + 'lh', + 'rh', + position=-1, + argstr="%s", + mandatory=True, + desc="Hemisphere being processed") + subject_id = traits.String( + 'subject_id', + usedefault=True, + position=-2, + argstr="%s", + mandatory=True, + desc="Subject being processed") + # implicit + in_orig = File( + exists=True, + mandatory=True, + argstr='-orig %s', + desc="Implicit input file .orig") + in_wm = File( + exists=True, mandatory=True, desc="Implicit input file wm.mgz") + in_filled = File( + exists=True, mandatory=True, desc="Implicit input file filled.mgz") + # optional + in_white = File(exists=True, desc="Implicit input that is sometimes used") + in_label = File( + exists=True, + xor=['noaparc'], + desc="Implicit input label/.aparc.annot") + orig_white = File( + argstr="-orig_white %s", + exists=True, + desc="Specify a white surface to start with") + orig_pial = File( + argstr="-orig_pial %s", + exists=True, + requires=['in_label'], + desc="Specify a pial surface to start with") + fix_mtl = traits.Bool(argstr="-fix_mtl", desc="Undocumented flag") + no_white = traits.Bool(argstr="-nowhite", desc="Undocumented flag") + white_only = traits.Bool(argstr="-whiteonly", desc="Undocumented flage") + in_aseg = File( + argstr="-aseg %s", exists=True, desc="Input segmentation file") + in_T1 = File(argstr="-T1 %s", exists=True, desc="Input brain or T1 file") + mgz = traits.Bool( + argstr="-mgz", + desc= + "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + ) + noaparc = traits.Bool( + argstr="-noaparc", + xor=['in_label'], + desc= + "No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu" + ) + maximum = traits.Float( + argstr="-max %.1f", + desc="No documentation (used for longitudinal processing)") + longitudinal = traits.Bool( + argstr="-long", + desc="No documentation (used for longitudinal processing)") + white = traits.String(argstr="-white %s", desc="White surface name") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + "directory.") + + +class MakeSurfacesOutputSpec(TraitedSpec): + out_white = File( + exists=False, desc="Output white matter hemisphere surface") + out_curv = File(exists=False, desc="Output curv file for MakeSurfaces") + out_area = File(exists=False, desc="Output area file for MakeSurfaces") + out_cortex = File(exists=False, desc="Output cortex file for MakeSurfaces") + out_pial = File(exists=False, desc="Output pial surface for MakeSurfaces") + out_thickness = File( + exists=False, desc="Output thickness file for MakeSurfaces") + + +class MakeSurfaces(FSCommand): + """ + This program positions the tessellation of the cortical surface at the + white matter surface, then the gray matter surface and generate + surface files for these surfaces as well as a 'curvature' file for the + cortical thickness, and a surface file which approximates layer IV of + the cortical sheet. 
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import MakeSurfaces + >>> makesurfaces = MakeSurfaces() + >>> makesurfaces.inputs.hemisphere = 'lh' + >>> makesurfaces.inputs.subject_id = '10335' + >>> makesurfaces.inputs.in_orig = 'lh.pial' + >>> makesurfaces.inputs.in_wm = 'wm.mgz' + >>> makesurfaces.inputs.in_filled = 'norm.mgz' + >>> makesurfaces.inputs.in_label = 'aparc+aseg.nii' + >>> makesurfaces.inputs.in_T1 = 'T1.mgz' + >>> makesurfaces.inputs.orig_pial = 'lh.pial' + >>> makesurfaces.cmdline + 'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh' + """ + + _cmd = 'mris_make_surfaces' + input_spec = MakeSurfacesInputSpec + output_spec = MakeSurfacesOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir( + self, self.inputs.in_wm, folder='mri', basename='wm.mgz') + copy2subjdir( + self, + self.inputs.in_filled, + folder='mri', + basename='filled.mgz') + copy2subjdir(self, self.inputs.in_white, 'surf', + '{0}.white'.format(self.inputs.hemisphere)) + for originalfile in [self.inputs.in_aseg, self.inputs.in_T1]: + copy2subjdir(self, originalfile, folder='mri') + for originalfile in [ + self.inputs.orig_white, self.inputs.orig_pial, + self.inputs.in_orig + ]: + copy2subjdir(self, originalfile, folder='surf') + if isdefined(self.inputs.in_label): + copy2subjdir(self, self.inputs.in_label, 'label', + '{0}.aparc.annot'.format(self.inputs.hemisphere)) + else: + os.makedirs( + os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label')) + return super(MakeSurfaces, self).run(**inputs) + + def _format_arg(self, name, spec, value): + if name in ['in_T1', 'in_aseg']: + # These inputs do not take full paths as inputs or even basenames + basename = os.path.basename(value) + # whent the -mgz flag is specified, it assumes the mgz extension + if self.inputs.mgz: + prefix = os.path.splitext(basename)[0] + else: + prefix = basename + if prefix == 'aseg': + return # aseg is already the default + return spec.argstr % prefix + elif name in ['orig_white', 'orig_pial']: + # these inputs do take full file paths or even basenames + basename = os.path.basename(value) + suffix = basename.split('.')[1] + return spec.argstr % suffix + elif name == 'in_orig': + if value.endswith('lh.orig') or value.endswith('rh.orig'): + # {lh,rh}.orig inputs are not sepcified on command line + return + else: + # if the input orig file is different than lh.orig or rh.orig + # these inputs do take full file paths or even basenames + basename = os.path.basename(value) + suffix = basename.split('.')[1] + return spec.argstr % suffix + return super(MakeSurfaces, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + # Outputs are saved in the surf directory + dest_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'surf') + # labels are saved in the label directory + label_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if not self.inputs.no_white: + outputs["out_white"] = os.path.join( + dest_dir, + str(self.inputs.hemisphere) + '.white') + # The curv and area files must have the hemisphere names as a prefix + outputs["out_curv"] = os.path.join( + dest_dir, + str(self.inputs.hemisphere) + '.curv') + outputs["out_area"] = os.path.join( + dest_dir, + str(self.inputs.hemisphere) + '.area') + # Something determines when a pial surface and 
thickness file is generated
+        # but documentation doesn't say what.
+        # The orig_pial input is just a guess
+        if isdefined(self.inputs.orig_pial) or self.inputs.white == 'NOWRITE':
+            outputs["out_curv"] = outputs["out_curv"] + ".pial"
+            outputs["out_area"] = outputs["out_area"] + ".pial"
+            outputs["out_pial"] = os.path.join(
+                dest_dir,
+                str(self.inputs.hemisphere) + '.pial')
+            outputs["out_thickness"] = os.path.join(
+                dest_dir,
+                str(self.inputs.hemisphere) + '.thickness')
+        else:
+            # when a pial surface is not generated, a cortex label file is
+            outputs["out_cortex"] = os.path.join(
+                label_dir,
+                str(self.inputs.hemisphere) + '.cortex.label')
+        return outputs
+
+
+class CurvatureInputSpec(FSTraitedSpec):
+    in_file = File(
+        argstr="%s",
+        position=-2,
+        mandatory=True,
+        exists=True,
+        copyfile=True,
+        desc="Input file for Curvature")
+    # optional
+    threshold = traits.Float(
+        argstr="-thresh %.3f", desc="Undocumented input threshold")
+    n = traits.Bool(argstr="-n", desc="Undocumented boolean flag")
+    averages = traits.Int(
+        argstr="-a %d",
+        desc="Perform this number of iterative averages of curvature measure before saving")
+    save = traits.Bool(
+        argstr="-w",
+        desc="Save curvature files (will only generate screen output without this option)")
+    distances = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        argstr="-distances %d %d",
+        desc="Undocumented input integer distances")
+    copy_input = traits.Bool(desc="Copy input file to current directory")
+
+
+class CurvatureOutputSpec(TraitedSpec):
+    out_mean = File(exists=False, desc="Mean curvature output file")
+    out_gauss = File(exists=False, desc="Gaussian curvature output file")
+
+
+class Curvature(FSCommand):
+    """
+    This program will compute the second fundamental form of a cortical
+    surface. It will create two new files <hemi>.<surface>.H and
+    <hemi>.<surface>.K with the mean and Gaussian curvature respectively.
+
+    Examples
+    ========
+    >>> from nipype.interfaces.freesurfer import Curvature
+    >>> curv = Curvature()
+    >>> curv.inputs.in_file = 'lh.pial'
+    >>> curv.inputs.save = True
+    >>> curv.cmdline
+    'mris_curvature -w lh.pial'
+    """
+
+    _cmd = 'mris_curvature'
+    input_spec = CurvatureInputSpec
+    output_spec = CurvatureOutputSpec
+
+    def _format_arg(self, name, spec, value):
+        if self.inputs.copy_input:
+            if name == 'in_file':
+                basename = os.path.basename(value)
+                return spec.argstr % basename
+        return super(Curvature, self)._format_arg(name, spec, value)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        if self.inputs.copy_input:
+            in_file = os.path.basename(self.inputs.in_file)
+        else:
+            in_file = self.inputs.in_file
+        outputs["out_mean"] = os.path.abspath(in_file) + '.H'
+        outputs["out_gauss"] = os.path.abspath(in_file) + '.K'
+        return outputs
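+
+# A hedged sketch of the output naming implemented above (hypothetical
+# paths): mris_curvature writes the curvatures next to the input surface,
+# so for in_file='lh.pial' with save=True one should expect
+#   res = Curvature(in_file='lh.pial', save=True).run()  # doctest: +SKIP
+#   res.outputs.out_mean  -> .../lh.pial.H
+#   res.outputs.out_gauss -> .../lh.pial.K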
+
+
+class CurvatureStatsInputSpec(FSTraitedSpec):
+    surface = File(
+        argstr="-F %s",
+        exists=True,
+        desc="Specify surface file for CurvatureStats")
+    curvfile1 = File(
+        argstr="%s",
+        position=-2,
+        mandatory=True,
+        exists=True,
+        desc="Input file for CurvatureStats")
+    curvfile2 = File(
+        argstr="%s",
+        position=-1,
+        mandatory=True,
+        exists=True,
+        desc="Input file for CurvatureStats")
+    hemisphere = traits.Enum(
+        'lh',
+        'rh',
+        position=-3,
+        argstr="%s",
+        mandatory=True,
+        desc="Hemisphere being processed")
+    subject_id = traits.String(
+        'subject_id',
+        usedefault=True,
+        position=-4,
+        argstr="%s",
+        mandatory=True,
+        desc="Subject being processed")
+    out_file = File(
+        argstr="-o %s",
+        exists=False,
+        name_source=['hemisphere'],
+        name_template='%s.curv.stats',
+        hash_files=False,
+        desc="Output curvature stats file")
+    # optional
+    min_max = traits.Bool(
+        argstr="-m",
+        desc="Output min / max information for the processed curvature.")
+    values = traits.Bool(
+        argstr="-G", desc="Triggers a series of derived curvature values")
+    write = traits.Bool(
+        argstr="--writeCurvatureFiles", desc="Write curvature files")
+    copy_inputs = traits.Bool(
+        desc="If running as a node, set this to True. "
+             "This will copy the input files to the node "
+             "directory.")
+
+
+class CurvatureStatsOutputSpec(TraitedSpec):
+    out_file = File(exists=False, desc="Output curvature stats file")
+
+
+class CurvatureStats(FSCommand):
+    """
+    In its simplest usage, 'mris_curvature_stats' will compute a set
+    of statistics on its input <curvFile>. These statistics are the
+    mean and standard deviation of the particular curvature on the
+    surface, as well as the results from several surface-based
+    integrals.
+
+    Additionally, 'mris_curvature_stats' can report the max/min
+    curvature values, and compute a simple histogram based on
+    all curvature values.
+
+    Curvatures can also be normalised and constrained to a given
+    range before computation.
+
+    Principal curvature (K, H, k1 and k2) calculations on a surface
+    structure can also be performed, as well as several functions
+    derived from k1 and k2.
+
+    Finally, all output to the console, as well as any new
+    curvatures that result from the above calculations can be
+    saved to a series of text and binary-curvature files.
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import CurvatureStats + >>> curvstats = CurvatureStats() + >>> curvstats.inputs.hemisphere = 'lh' + >>> curvstats.inputs.curvfile1 = 'lh.pial' + >>> curvstats.inputs.curvfile2 = 'lh.pial' + >>> curvstats.inputs.surface = 'lh.pial' + >>> curvstats.inputs.out_file = 'lh.curv.stats' + >>> curvstats.inputs.values = True + >>> curvstats.inputs.min_max = True + >>> curvstats.inputs.write = True + >>> curvstats.cmdline + 'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial' + """ + + _cmd = 'mris_curvature_stats' + input_spec = CurvatureStatsInputSpec + output_spec = CurvatureStatsOutputSpec + + def _format_arg(self, name, spec, value): + if name in ['surface', 'curvfile1', 'curvfile2']: + prefix = os.path.basename(value).split('.')[1] + return spec.argstr % prefix + return super(CurvatureStats, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.surface, 'surf') + copy2subjdir(self, self.inputs.curvfile1, 'surf') + copy2subjdir(self, self.inputs.curvfile2, 'surf') + return super(CurvatureStats, self).run(**inputs) + + +class JacobianInputSpec(FSTraitedSpec): + # required + in_origsurf = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Original surface") + in_mappedsurf = File( + argstr="%s", + position=-2, + mandatory=True, + exists=True, + desc="Mapped surface") + # optional + out_file = File( + argstr="%s", + exists=False, + position=-1, + name_source=['in_origsurf'], + hash_files=False, + name_template='%s.jacobian', + keep_extension=False, + desc="Output Jacobian of the surface mapping") + + +class JacobianOutputSpec(TraitedSpec): + out_file = File( + exists=False, desc="Output Jacobian of the surface mapping") + + +class Jacobian(FSCommand): + """ + This program computes the Jacobian of a surface mapping. 
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import Jacobian + >>> jacobian = Jacobian() + >>> jacobian.inputs.in_origsurf = 'lh.pial' + >>> jacobian.inputs.in_mappedsurf = 'lh.pial' + >>> jacobian.cmdline + 'mris_jacobian lh.pial lh.pial lh.jacobian' + """ + + _cmd = 'mris_jacobian' + input_spec = JacobianInputSpec + output_spec = JacobianOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class MRIsCalcInputSpec(FSTraitedSpec): + # required + in_file1 = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Input file 1") + action = traits.String( + argstr="%s", + position=-2, + mandatory=True, + desc="Action to perform on input file(s)") + out_file = File( + argstr="-o %s", mandatory=True, desc="Output file after calculation") + + # optional + in_file2 = File( + argstr="%s", + exists=True, + position=-1, + xor=['in_float', 'in_int'], + desc="Input file 2") + in_float = traits.Float( + argstr="%f", + position=-1, + xor=['in_file2', 'in_int'], + desc="Input float") + in_int = traits.Int( + argstr="%d", + position=-1, + xor=['in_file2', 'in_float'], + desc="Input integer") + + +class MRIsCalcOutputSpec(TraitedSpec): + out_file = File(exists=False, desc="Output file after calculation") + + +class MRIsCalc(FSCommand): + """ + 'mris_calc' is a simple calculator that operates on FreeSurfer + curvatures and volumes. In most cases, the calculator functions with + three arguments: two inputs and an action linking them. Some + actions, however, operate with only one input. In all cases, + the first input is the name of a FreeSurfer curvature overlay + (e.g. rh.curv) or volume file (e.g. orig.mgz). For two inputs, the + calculator first assumes that the second input is a file. If, however, + this second input file doesn't exist, the calculator assumes it refers + to a float number, which is then processed according to the action. + Note: both inputs should typically be generated on the same subject.
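[Editor's note: the float fallback described above maps to the in_float trait; a minimal sketch, with hypothetical file names, where the comment shows the expected command form rather than a captured run:]

from nipype.interfaces.freesurfer import MRIsCalc

calc = MRIsCalc(in_file1='lh.area', action='mul',
                in_float=2.0, out_file='lh.area.scaled')
# expected: 'mris_calc -o lh.area.scaled lh.area mul 2.000000'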
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRIsCalc + >>> example = MRIsCalc() + >>> example.inputs.in_file1 = 'lh.area' # doctest: +SKIP + >>> example.inputs.in_file2 = 'lh.area.pial' # doctest: +SKIP + >>> example.inputs.action = 'add' + >>> example.inputs.out_file = 'area.mid' + >>> example.cmdline # doctest: +SKIP + 'mris_calc -o area.mid lh.area add lh.area.pial' + """ + + _cmd = 'mris_calc' + input_spec = MRIsCalcInputSpec + output_spec = MRIsCalcOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class VolumeMaskInputSpec(FSTraitedSpec): + left_whitelabel = traits.Int( + argstr="--label_left_white %d", + mandatory=True, + desc="Left white matter label") + left_ribbonlabel = traits.Int( + argstr="--label_left_ribbon %d", + mandatory=True, + desc="Left cortical ribbon label") + right_whitelabel = traits.Int( + argstr="--label_right_white %d", + mandatory=True, + desc="Right white matter label") + right_ribbonlabel = traits.Int( + argstr="--label_right_ribbon %d", + mandatory=True, + desc="Right cortical ribbon label") + lh_pial = File( + mandatory=True, exists=True, desc="Implicit input left pial surface") + rh_pial = File( + mandatory=True, exists=True, desc="Implicit input right pial surface") + lh_white = File( + mandatory=True, + exists=True, + desc="Implicit input left white matter surface") + rh_white = File( + mandatory=True, + exists=True, + desc="Implicit input right white matter surface") + aseg = File( + exists=True, + xor=['in_aseg'], + desc="Implicit aseg.mgz segmentation. " + + "Specify a different aseg by using the 'in_aseg' input.") + subject_id = traits.String( + 'subject_id', + usedefault=True, + position=-1, + argstr="%s", + mandatory=True, + desc="Subject being processed") + # optional + in_aseg = File( + argstr="--aseg_name %s", + exists=True, + xor=['aseg'], + desc="Input aseg file for VolumeMask") + save_ribbon = traits.Bool( + argstr="--save_ribbon", + desc="option to save just the ribbon for the " + + "hemispheres in the format ?h.ribbon.mgz") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the implicit input files to the " + "node directory.") + + +class VolumeMaskOutputSpec(TraitedSpec): + out_ribbon = File(exists=False, desc="Output cortical ribbon mask") + lh_ribbon = File(exists=False, desc="Output left cortical ribbon mask") + rh_ribbon = File(exists=False, desc="Output right cortical ribbon mask") + + +class VolumeMask(FSCommand): + """ + Computes a volume mask, at the same resolution as the + <subject>/mri/brain.mgz. The volume mask contains 4 values: LH_WM + (default 10), LH_GM (default 100), RH_WM (default 20), RH_GM (default + 200). + The algorithm uses the 4 surfaces situated in + <subject>/surf/[lh|rh].[white|pial] and labels voxels based on the + signed-distance function from the surface.
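[Editor's note: one way to see the four-label convention described above is to inspect the output mask; a hedged sketch assuming nibabel is available and that the command has already produced ribbon.mgz at the path shown:]

import numpy as np
import nibabel as nb

ribbon = nb.load('ribbon.mgz')           # hypothetical output path
print(np.unique(ribbon.get_fdata()))     # e.g. [0., 10., 20., 100., 200.] with the default labels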
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import VolumeMask + >>> volmask = VolumeMask() + >>> volmask.inputs.left_whitelabel = 2 + >>> volmask.inputs.left_ribbonlabel = 3 + >>> volmask.inputs.right_whitelabel = 41 + >>> volmask.inputs.right_ribbonlabel = 42 + >>> volmask.inputs.lh_pial = 'lh.pial' + >>> volmask.inputs.rh_pial = 'lh.pial' + >>> volmask.inputs.lh_white = 'lh.pial' + >>> volmask.inputs.rh_white = 'lh.pial' + >>> volmask.inputs.subject_id = '10335' + >>> volmask.inputs.save_ribbon = True + >>> volmask.cmdline + 'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335' + """ + + _cmd = 'mris_volmask' + input_spec = VolumeMaskInputSpec + output_spec = VolumeMaskOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') + copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') + copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') + copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') + copy2subjdir(self, self.inputs.in_aseg, 'mri') + copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.mgz') + + return super(VolumeMask, self).run(**inputs) + + def _format_arg(self, name, spec, value): + if name == 'in_aseg': + # use replace, not rstrip: rstrip('.mgz') strips a character + # set and can mangle names (e.g. 'myseg.mgz' -> 'myse') + return spec.argstr % os.path.basename(value).replace('.mgz', '') + return super(VolumeMask, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'mri') + outputs["out_ribbon"] = os.path.join(out_dir, 'ribbon.mgz') + if self.inputs.save_ribbon: + outputs["rh_ribbon"] = os.path.join(out_dir, 'rh.ribbon.mgz') + outputs["lh_ribbon"] = os.path.join(out_dir, 'lh.ribbon.mgz') + return outputs + + +class ParcellationStatsInputSpec(FSTraitedSpec): + # required + subject_id = traits.String( + 'subject_id', + usedefault=True, + position=-3, + argstr="%s", + mandatory=True, + desc="Subject being processed") + hemisphere = traits.Enum( + 'lh', + 'rh', + position=-2, + argstr="%s", + mandatory=True, + desc="Hemisphere being processed") + # implicit + wm = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/mri/wm.mgz") + lh_white = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/lh.white") + rh_white = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/rh.white") + lh_pial = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/lh.pial") + rh_pial = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/rh.pial") + transform = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/mri/transforms/talairach.xfm") + thickness = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/?h.thickness") + brainmask = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/mri/brainmask.mgz") + aseg = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/mri/aseg.presurf.mgz") + ribbon = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/mri/ribbon.mgz") + cortex_label = File( + exists=True, desc="implicit input file {hemi}.cortex.label") + # optional + surface = traits.String( + position=-1, argstr="%s", desc="Input surface (e.g.
'white')") + mgz = traits.Bool(argstr="-mgz", desc="Look for mgz files") + in_cortex = traits.File( + argstr="-cortex %s", exists=True, desc="Input cortex label") + in_annotation = traits.File( + argstr="-a %s", + exists=True, + xor=['in_label'], + desc= + "compute properties for each label in the annotation file separately") + in_label = traits.File( + argstr="-l %s", + exists=True, + xor=['in_annotation', 'out_color'], + desc="limit calculations to specified label") + tabular_output = traits.Bool(argstr="-b", desc="Tabular output") + out_table = traits.File( + argstr="-f %s", + exists=False, + genfile=True, + requires=['tabular_output'], + desc="Table output to tablefile") + out_color = traits.File( + argstr="-c %s", + exists=False, + genfile=True, + xor=['in_label'], + desc="Output annotation file's colortable to text file") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + "directory.") + th3 = traits.Bool( + argstr="-th3", + requires=["cortex_label"], + desc="turns on new vertex-wise volume calc for mris_anat_stats") + + +class ParcellationStatsOutputSpec(TraitedSpec): + out_table = File(exists=False, desc="Table output to tablefile") + out_color = File( + exists=False, desc="Output annotation file's colortable to text file") + + +class ParcellationStats(FSCommand): + """ + This program computes a number of anatomical properties. + + Examples + ======== + >>> from nipype.interfaces.freesurfer import ParcellationStats + >>> import os + >>> parcstats = ParcellationStats() + >>> parcstats.inputs.subject_id = '10335' + >>> parcstats.inputs.hemisphere = 'lh' + >>> parcstats.inputs.wm = './../mri/wm.mgz' # doctest: +SKIP + >>> parcstats.inputs.transform = './../mri/transforms/talairach.xfm' # doctest: +SKIP + >>> parcstats.inputs.brainmask = './../mri/brainmask.mgz' # doctest: +SKIP + >>> parcstats.inputs.aseg = './../mri/aseg.presurf.mgz' # doctest: +SKIP + >>> parcstats.inputs.ribbon = './../mri/ribbon.mgz' # doctest: +SKIP + >>> parcstats.inputs.lh_pial = 'lh.pial' # doctest: +SKIP + >>> parcstats.inputs.rh_pial = 'lh.pial' # doctest: +SKIP + >>> parcstats.inputs.lh_white = 'lh.white' # doctest: +SKIP + >>> parcstats.inputs.rh_white = 'rh.white' # doctest: +SKIP + >>> parcstats.inputs.thickness = 'lh.thickness' # doctest: +SKIP + >>> parcstats.inputs.surface = 'white' + >>> parcstats.inputs.out_table = 'lh.test.stats' + >>> parcstats.inputs.out_color = 'test.ctab' + >>> parcstats.cmdline # doctest: +SKIP + 'mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white' + """ + + _cmd = 'mris_anatomical_stats' + input_spec = ParcellationStatsInputSpec + output_spec = ParcellationStatsOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') + copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') + copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') + copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') + copy2subjdir(self, self.inputs.wm, 'mri', 'wm.mgz') + copy2subjdir(self, self.inputs.transform, + os.path.join('mri', 'transforms'), 'talairach.xfm') + copy2subjdir(self, self.inputs.brainmask, 'mri', 'brainmask.mgz') + copy2subjdir(self, self.inputs.aseg, 'mri', 'aseg.presurf.mgz') + copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') + copy2subjdir(self, self.inputs.thickness,
'surf', + '{0}.thickness'.format(self.inputs.hemisphere)) + if isdefined(self.inputs.cortex_label): + copy2subjdir(self, self.inputs.cortex_label, 'label', + '{0}.cortex.label'.format(self.inputs.hemisphere)) + createoutputdirs(self._list_outputs()) + return super(ParcellationStats, self).run(**inputs) + + def _gen_filename(self, name): + if name in ['out_table', 'out_color']: + return self._list_outputs()[name] + return None + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.out_table): + outputs["out_table"] = os.path.abspath(self.inputs.out_table) + else: + # subject stats directory + stats_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'stats') + if isdefined(self.inputs.in_annotation): + # if out_table is not defined just tag .stats on the end + # instead of .annot + if self.inputs.surface == 'pial': + basename = os.path.basename( + self.inputs.in_annotation).replace( + '.annot', '.pial.stats') + else: + basename = os.path.basename( + self.inputs.in_annotation).replace('.annot', '.stats') + elif isdefined(self.inputs.in_label): + # if out_table is not defined just tag .stats on the end + # instead of .label + if self.inputs.surface == 'pial': + basename = os.path.basename(self.inputs.in_label).replace( + '.label', '.pial.stats') + else: + basename = os.path.basename(self.inputs.in_label).replace( + '.label', '.stats') + else: + basename = str(self.inputs.hemisphere) + '.aparc.annot.stats' + outputs["out_table"] = os.path.join(stats_dir, basename) + if isdefined(self.inputs.out_color): + outputs["out_color"] = os.path.abspath(self.inputs.out_color) + else: + # subject label directory + out_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id, 'label') + if isdefined(self.inputs.in_annotation): + # find the annotation name (if it exists) + basename = os.path.basename(self.inputs.in_annotation) + for item in ['lh.', 'rh.', 'aparc.', 'annot']: + basename = basename.replace(item, '') + annot = basename + # if the out_color table is not defined, one with the annotation + # name will be created + if 'BA' in annot: + outputs["out_color"] = os.path.join( + out_dir, annot + 'ctab') + else: + outputs["out_color"] = os.path.join( + out_dir, 'aparc.annot.' + annot + 'ctab') + else: + outputs["out_color"] = os.path.join(out_dir, + 'aparc.annot.ctab') + return outputs + + +class ContrastInputSpec(FSTraitedSpec): + # required + subject_id = traits.String( + 'subject_id', + argstr="--s %s", + usedefault=True, + mandatory=True, + desc="Subject being processed") + hemisphere = traits.Enum( + 'lh', + 'rh', + argstr="--%s-only", + mandatory=True, + desc="Hemisphere being processed") + # implicit + thickness = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/?h.thickness") + white = File( + mandatory=True, + exists=True, + desc="Input file must be <subject_id>/surf/<hemisphere>.white") + annotation = traits.File( + mandatory=True, + exists=True, + desc= + "Input annotation file must be <subject_id>/label/<hemisphere>.aparc.annot" + ) + cortex = traits.File( + mandatory=True, + exists=True, + desc= + "Input cortex label must be <subject_id>/label/<hemisphere>.cortex.label" + ) + orig = File( + exists=True, mandatory=True, desc="Implicit input file mri/orig.mgz") + rawavg = File( + exists=True, mandatory=True, desc="Implicit input file mri/rawavg.mgz") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True."
+ + "This will copy the input files to the node " + "directory.") + + +class ContrastOutputSpec(TraitedSpec): + out_contrast = File( + exists=False, desc="Output contrast file from Contrast") + out_stats = File(exists=False, desc="Output stats file from Contrast") + out_log = File(exists=True, desc="Output log from Contrast") + + +class Contrast(FSCommand): + """ + Compute surface-wise gray/white contrast + + Examples + ======== + >>> from nipype.interfaces.freesurfer import Contrast + >>> contrast = Contrast() + >>> contrast.inputs.subject_id = '10335' + >>> contrast.inputs.hemisphere = 'lh' + >>> contrast.inputs.white = 'lh.white' # doctest: +SKIP + >>> contrast.inputs.thickness = 'lh.thickness' # doctest: +SKIP + >>> contrast.inputs.annotation = '../label/lh.aparc.annot' # doctest: +SKIP + >>> contrast.inputs.cortex = '../label/lh.cortex.label' # doctest: +SKIP + >>> contrast.inputs.rawavg = '../mri/rawavg.mgz' # doctest: +SKIP + >>> contrast.inputs.orig = '../mri/orig.mgz' # doctest: +SKIP + >>> contrast.cmdline # doctest: +SKIP + 'pctsurfcon --lh-only --s 10335' + """ + + _cmd = 'pctsurfcon' + input_spec = ContrastInputSpec + output_spec = ContrastOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + hemi = self.inputs.hemisphere + copy2subjdir(self, self.inputs.annotation, 'label', + '{0}.aparc.annot'.format(hemi)) + copy2subjdir(self, self.inputs.cortex, 'label', + '{0}.cortex.label'.format(hemi)) + copy2subjdir(self, self.inputs.white, 'surf', + '{0}.white'.format(hemi)) + copy2subjdir(self, self.inputs.thickness, 'surf', + '{0}.thickness'.format(hemi)) + copy2subjdir(self, self.inputs.orig, 'mri', 'orig.mgz') + copy2subjdir(self, self.inputs.rawavg, 'mri', 'rawavg.mgz') + # need to create output directories + createoutputdirs(self._list_outputs()) + return super(Contrast, self).run(**inputs) + + def _list_outputs(self): + outputs = self._outputs().get() + subject_dir = os.path.join(self.inputs.subjects_dir, + self.inputs.subject_id) + outputs["out_contrast"] = os.path.join( + subject_dir, 'surf', + str(self.inputs.hemisphere) + '.w-g.pct.mgh') + outputs["out_stats"] = os.path.join( + subject_dir, 'stats', + str(self.inputs.hemisphere) + '.w-g.pct.stats') + outputs["out_log"] = os.path.join(subject_dir, 'scripts', + 'pctsurfcon.log') + return outputs + + +class RelabelHypointensitiesInputSpec(FSTraitedSpec): + # required + lh_white = File( + mandatory=True, + exists=True, + copyfile=True, + desc="Implicit input file must be lh.white") + rh_white = File( + mandatory=True, + exists=True, + copyfile=True, + desc="Implicit input file must be rh.white") + aseg = File( + argstr="%s", + position=-3, + mandatory=True, + exists=True, + desc="Input aseg file") + surf_directory = traits.Directory( + '.', + argstr="%s", + position=-2, + exists=True, + usedefault=True, + desc="Directory containing lh.white and rh.white") + out_file = File( + argstr="%s", + position=-1, + exists=False, + name_source=['aseg'], + name_template='%s.hypos.mgz', + hash_files=False, + keep_extension=False, + desc="Output aseg file") + + +class RelabelHypointensitiesOutputSpec(TraitedSpec): + out_file = File(argstr="%s", exists=False, desc="Output aseg file") + + +class RelabelHypointensities(FSCommand): + """ + Relabel Hypointensities + + Examples + ======== + >>> from nipype.interfaces.freesurfer import RelabelHypointensities + >>> relabelhypos = RelabelHypointensities() + >>> 
relabelhypos.inputs.lh_white = 'lh.pial' + >>> relabelhypos.inputs.rh_white = 'lh.pial' + >>> relabelhypos.inputs.surf_directory = '.' + >>> relabelhypos.inputs.aseg = 'aseg.mgz' + >>> relabelhypos.cmdline + 'mri_relabel_hypointensities aseg.mgz . aseg.hypos.mgz' + """ + + _cmd = 'mri_relabel_hypointensities' + input_spec = RelabelHypointensitiesInputSpec + output_spec = RelabelHypointensitiesOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + +class Aparc2AsegInputSpec(FSTraitedSpec): + # required + subject_id = traits.String( + 'subject_id', + argstr="--s %s", + usedefault=True, + mandatory=True, + desc="Subject being processed") + out_file = File( + argstr="--o %s", + exists=False, + mandatory=True, + desc="Full path of file to save the output segmentation in") + # implicit + lh_white = File( + mandatory=True, + exists=True, + desc="Input file must be /surf/lh.white") + rh_white = File( + mandatory=True, + exists=True, + desc="Input file must be /surf/rh.white") + lh_pial = File( + mandatory=True, + exists=True, + desc="Input file must be /surf/lh.pial") + rh_pial = File( + mandatory=True, + exists=True, + desc="Input file must be /surf/rh.pial") + lh_ribbon = File( + mandatory=True, + exists=True, + desc="Input file must be /mri/lh.ribbon.mgz") + rh_ribbon = File( + mandatory=True, + exists=True, + desc="Input file must be /mri/rh.ribbon.mgz") + ribbon = File( + mandatory=True, + exists=True, + desc="Input file must be /mri/ribbon.mgz") + lh_annotation = File( + mandatory=True, + exists=True, + desc="Input file must be /label/lh.aparc.annot") + rh_annotation = File( + mandatory=True, + exists=True, + desc="Input file must be /label/rh.aparc.annot") + # optional + filled = File( + exists=True, + desc="Implicit input filled file. Only required with FS v5.3.") + aseg = File(argstr="--aseg %s", exists=True, desc="Input aseg file") + volmask = traits.Bool(argstr="--volmask", desc="Volume mask flag") + ctxseg = File(argstr="--ctxseg %s", exists=True, desc="") + label_wm = traits.Bool( + argstr="--labelwm", + desc=""" + For each voxel labeled as white matter in the aseg, re-assign + its label to be that of the closest cortical point if its + distance is less than dmaxctx + """) + hypo_wm = traits.Bool( + argstr="--hypo-as-wm", desc="Label hypointensities as WM") + rip_unknown = traits.Bool( + argstr="--rip-unknown", + desc="Do not label WM based on 'unknown' corical label") + a2009s = traits.Bool(argstr="--a2009s", desc="Using the a2009s atlas") + copy_inputs = traits.Bool( + desc="If running as a node, set this to True." + + "This will copy the input files to the node " + "directory.") + + +class Aparc2AsegOutputSpec(TraitedSpec): + out_file = File(argstr="%s", desc="Output aseg file") + + +class Aparc2Aseg(FSCommand): + """ + Maps the cortical labels from the automatic cortical parcellation + (aparc) to the automatic segmentation volume (aseg). The result can be + used as the aseg would. The algorithm is to find each aseg voxel + labeled as cortex (3 and 42) and assign it the label of the closest + cortical vertex. If the voxel is not in the ribbon (as defined by mri/ + lh.ribbon and rh.ribbon), then the voxel is marked as unknown (0). + This can be turned off with --noribbon. The cortical parcellation is + obtained from subject/label/hemi.aparc.annot which should be based on + the curvature.buckner40.filled.desikan_killiany.gcs atlas. 
The aseg is + obtained from subject/mri/aseg.mgz and should be based on the + RB40_talairach_2005-07-20.gca atlas. If these atlases are used, then the + segmentations can be viewed with tkmedit and the + FreeSurferColorLUT.txt color table found in $FREESURFER_HOME. These + are the default atlases used by recon-all. + + Examples + ======== + >>> from nipype.interfaces.freesurfer import Aparc2Aseg + >>> aparc2aseg = Aparc2Aseg() + >>> aparc2aseg.inputs.lh_white = 'lh.pial' + >>> aparc2aseg.inputs.rh_white = 'lh.pial' + >>> aparc2aseg.inputs.lh_pial = 'lh.pial' + >>> aparc2aseg.inputs.rh_pial = 'lh.pial' + >>> aparc2aseg.inputs.lh_ribbon = 'label.mgz' + >>> aparc2aseg.inputs.rh_ribbon = 'label.mgz' + >>> aparc2aseg.inputs.ribbon = 'label.mgz' + >>> aparc2aseg.inputs.lh_annotation = 'lh.pial' + >>> aparc2aseg.inputs.rh_annotation = 'lh.pial' + >>> aparc2aseg.inputs.out_file = 'aparc+aseg.mgz' + >>> aparc2aseg.inputs.label_wm = True + >>> aparc2aseg.inputs.rip_unknown = True + >>> aparc2aseg.cmdline # doctest: +SKIP + 'mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id' + """ + + _cmd = 'mri_aparc2aseg' + input_spec = Aparc2AsegInputSpec + output_spec = Aparc2AsegOutputSpec + + def run(self, **inputs): + if self.inputs.copy_inputs: + self.inputs.subjects_dir = os.getcwd() + if 'subjects_dir' in inputs: + inputs['subjects_dir'] = self.inputs.subjects_dir + copy2subjdir(self, self.inputs.lh_white, 'surf', 'lh.white') + copy2subjdir(self, self.inputs.lh_pial, 'surf', 'lh.pial') + copy2subjdir(self, self.inputs.rh_white, 'surf', 'rh.white') + copy2subjdir(self, self.inputs.rh_pial, 'surf', 'rh.pial') + copy2subjdir(self, self.inputs.lh_ribbon, 'mri', 'lh.ribbon.mgz') + copy2subjdir(self, self.inputs.rh_ribbon, 'mri', 'rh.ribbon.mgz') + copy2subjdir(self, self.inputs.ribbon, 'mri', 'ribbon.mgz') + copy2subjdir(self, self.inputs.aseg, 'mri') + copy2subjdir(self, self.inputs.filled, 'mri', 'filled.mgz') + copy2subjdir(self, self.inputs.lh_annotation, 'label') + copy2subjdir(self, self.inputs.rh_annotation, 'label') + + return super(Aparc2Aseg, self).run(**inputs) + + def _format_arg(self, name, spec, value): + if name == 'aseg': + # aseg does not take a full filename + basename = os.path.basename(value).replace('.mgz', '') + return spec.argstr % basename + elif name == 'out_file': + return spec.argstr % os.path.abspath(value) + + return super(Aparc2Aseg, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + +class Apas2AsegInputSpec(FSTraitedSpec): + # required + in_file = File( + argstr="--i %s", + mandatory=True, + exists=True, + desc="Input aparc+aseg.mgz") + out_file = File(argstr="--o %s", mandatory=True, desc="Output aseg file") + + +class Apas2AsegOutputSpec(TraitedSpec): + out_file = File(argstr="%s", exists=False, desc="Output aseg file") + + +class Apas2Aseg(FSCommand): + """ + Converts aparc+aseg.mgz into something like aseg.mgz by replacing the + cortical segmentations 1000-1035 with 3 and 2000-2035 with 42. The + advantage of this output is that the cortical label conforms to the + actual surface (this is not the case with aseg.mgz). 
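[Editor's note: the label remapping described above is simple enough to state directly; an illustration of the mapping with numpy (hypothetical labels, not the tool's implementation):]

import numpy as np

labels = np.array([1002, 1035, 2010, 17])         # hypothetical aparc+aseg labels
aseg = labels.copy()
aseg[(labels >= 1000) & (labels <= 1035)] = 3     # left cerebral cortex
aseg[(labels >= 2000) & (labels <= 2035)] = 42    # right cerebral cortex
print(aseg)                                       # [ 3  3 42 17]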
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import Apas2Aseg + >>> apas2aseg = Apas2Aseg() + >>> apas2aseg.inputs.in_file = 'aseg.mgz' + >>> apas2aseg.inputs.out_file = 'output.mgz' + >>> apas2aseg.cmdline + 'apas2aseg --i aseg.mgz --o output.mgz' + """ + + _cmd = 'apas2aseg' + input_spec = Apas2AsegInputSpec + output_spec = Apas2AsegOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.abspath(self.inputs.out_file) + return outputs + + +class MRIsExpandInputSpec(FSTraitedSpec): + # Input spec derived from + # https://github.com/freesurfer/freesurfer/blob/102e053/mris_expand/mris_expand.c + in_file = File( + exists=True, + mandatory=True, + argstr='%s', + position=-3, + copyfile=False, + desc='Surface to expand') + distance = traits.Float( + mandatory=True, + argstr='%g', + position=-2, + desc='Distance in mm or fraction of cortical thickness') + out_name = traits.Str( + 'expanded', + argstr='%s', + position=-1, + usedefault=True, + desc=('Output surface file\n' + 'If no path, uses directory of `in_file`\n' + 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + thickness = traits.Bool( + argstr='-thickness', + desc='Expand by fraction of cortical thickness, not mm') + thickness_name = traits.Str( + argstr="-thickness_name %s", + copyfile=False, + desc=('Name of thickness file (implicit: "thickness")\n' + 'If no path, uses directory of `in_file`\n' + 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + pial = traits.Str( + argstr='-pial %s', + copyfile=False, + desc=('Name of pial file (implicit: "pial")\n' + 'If no path, uses directory of `in_file`\n' + 'If no path AND missing "lh." or "rh.", derive from `in_file`')) + sphere = traits.Str( + 'sphere', + copyfile=False, + usedefault=True, + desc='WARNING: Do not change this trait') + spring = traits.Float(argstr='-S %g', desc="Spring term (implicit: 0.05)") + dt = traits.Float(argstr='-T %g', desc='dt (implicit: 0.25)') + write_iterations = traits.Int( + argstr='-W %d', desc='Write snapshots of expansion every N iterations') + smooth_averages = traits.Int( + argstr='-A %d', + desc='Smooth surface with N iterations after expansion') + nsurfaces = traits.Int( + argstr='-N %d', desc='Number of surfacces to write during expansion') + # # Requires dev version - Re-add when min_ver/max_ver support this + # # https://github.com/freesurfer/freesurfer/blob/9730cb9/mris_expand/mris_expand.c + # navgs = traits.Tuple( + # traits.Int, traits.Int, + # argstr='-navgs %d %d', + # desc=('Tuple of (n_averages, min_averages) parameters ' + # '(implicit: (16, 0))')) + # target_intensity = traits.Tuple( + # traits.Float, traits.File(exists=True), + # argstr='-intensity %g %s', + # desc='Tuple of intensity and brain volume to crop to target intensity') + + +class MRIsExpandOutputSpec(TraitedSpec): + out_file = File(desc='Output surface file') + + +class MRIsExpand(FSSurfaceCommand): + """ + Expands a surface (typically ?h.white) outwards while maintaining + smoothness and self-intersection constraints. 
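[Editor's note: as the out_name description above explains, a bare name is resolved against in_file; a minimal sketch, with hypothetical paths, of generating a mid-thickness surface:]

from nipype.interfaces.freesurfer import MRIsExpand

# expand lh.white halfway through the cortical thickness; 'graymid'
# carries no path or hemisphere prefix, so lh.graymid is written
# next to the input surface
expand = MRIsExpand(in_file='surf/lh.white', thickness=True,
                    distance=0.5, out_name='graymid')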
+ + Examples + ======== + >>> from nipype.interfaces.freesurfer import MRIsExpand + >>> mris_expand = MRIsExpand(thickness=True, distance=0.5) + >>> mris_expand.inputs.in_file = 'lh.white' + >>> mris_expand.cmdline + 'mris_expand -thickness lh.white 0.5 expanded' + >>> mris_expand.inputs.out_name = 'graymid' + >>> mris_expand.cmdline + 'mris_expand -thickness lh.white 0.5 graymid' + """ + _cmd = 'mris_expand' + input_spec = MRIsExpandInputSpec + output_spec = MRIsExpandOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._associated_file(self.inputs.in_file, + self.inputs.out_name) + return outputs + + def _normalize_filenames(self): + """ Find full paths for pial, thickness and sphere files for copying + """ + in_file = self.inputs.in_file + + pial = self.inputs.pial + if not isdefined(pial): + pial = 'pial' + self.inputs.pial = self._associated_file(in_file, pial) + + if isdefined(self.inputs.thickness) and self.inputs.thickness: + thickness_name = self.inputs.thickness_name + if not isdefined(thickness_name): + thickness_name = 'thickness' + self.inputs.thickness_name = self._associated_file( + in_file, thickness_name) + + self.inputs.sphere = self._associated_file(in_file, self.inputs.sphere) + + +class LTAConvertInputSpec(CommandLineInputSpec): + # Inputs + _in_xor = ('in_lta', 'in_fsl', 'in_mni', 'in_reg', 'in_niftyreg', 'in_itk') + in_lta = traits.Either( + File(exists=True), + 'identity.nofile', + argstr='--inlta %s', + mandatory=True, + xor=_in_xor, + desc='input transform of LTA type') + in_fsl = File( + exists=True, + argstr='--infsl %s', + mandatory=True, + xor=_in_xor, + desc='input transform of FSL type') + in_mni = File( + exists=True, + argstr='--inmni %s', + mandatory=True, + xor=_in_xor, + desc='input transform of MNI/XFM type') + in_reg = File( + exists=True, + argstr='--inreg %s', + mandatory=True, + xor=_in_xor, + desc='input transform of TK REG type (deprecated format)') + in_niftyreg = File( + exists=True, + argstr='--inniftyreg %s', + mandatory=True, + xor=_in_xor, + desc='input transform of Nifty Reg type (inverse RAS2RAS)') + in_itk = File( + exists=True, + argstr='--initk %s', + mandatory=True, + xor=_in_xor, + desc='input transform of ITK type') + # Outputs + out_lta = traits.Either( + traits.Bool, + File, + argstr='--outlta %s', + desc='output linear transform (LTA Freesurfer format)') + out_fsl = traits.Either( + traits.Bool, + File, + argstr='--outfsl %s', + desc='output transform in FSL format') + out_mni = traits.Either( + traits.Bool, + File, + argstr='--outmni %s', + desc='output transform in MNI/XFM format') + out_reg = traits.Either( + traits.Bool, + File, + argstr='--outreg %s', + desc='output transform in reg dat format') + out_itk = traits.Either( + traits.Bool, + File, + argstr='--outitk %s', + desc='output transform in ITK format') + # Optional flags + invert = traits.Bool(argstr='--invert') + ltavox2vox = traits.Bool(argstr='--ltavox2vox', requires=['out_lta']) + source_file = File(exists=True, argstr='--src %s') + target_file = File(exists=True, argstr='--trg %s') + target_conform = traits.Bool(argstr='--trgconform') + + +class LTAConvertOutputSpec(TraitedSpec): + out_lta = File( + exists=True, desc='output linear transform (LTA Freesurfer format)') + out_fsl = File(exists=True, desc='output transform in FSL format') + out_mni = File(exists=True, desc='output transform in MNI/XFM format') + out_reg = File(exists=True, desc='output transform in reg dat format') + out_itk = File(exists=True, 
desc='output transform in ITK format') + + +class LTAConvert(CommandLine): + """Convert different transformation formats. + Some formats may require you to pass an image if the geometry information + is missing form the transform file format. + + For complete details, see the `lta_convert documentation. + `_ + """ + input_spec = LTAConvertInputSpec + output_spec = LTAConvertOutputSpec + _cmd = 'lta_convert' + + def _format_arg(self, name, spec, value): + if name.startswith('out_') and value is True: + value = self._list_outputs()[name] + return super(LTAConvert, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + for name, default in (('out_lta', 'out.lta'), ('out_fsl', 'out.mat'), + ('out_mni', 'out.xfm'), ('out_reg', 'out.dat'), + ('out_itk', 'out.txt')): + attr = getattr(self.inputs, name) + if attr: + fname = default if attr is True else attr + outputs[name] = os.path.abspath(fname) + + return outputs diff --git a/nipype/interfaces/fsl/__init__.py b/nipype/interfaces/fsl/__init__.py new file mode 100644 index 0000000000..e8f192f4f2 --- /dev/null +++ b/nipype/interfaces/fsl/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. + +Top-level namespace for fsl. +""" + +from .base import (FSLCommand, Info, check_fsl, no_fsl, no_fsl_course_data) +from .preprocess import (FAST, FLIRT, ApplyXFM, BET, MCFLIRT, FNIRT, ApplyWarp, + SliceTimer, SUSAN, PRELUDE, FUGUE, FIRST) +from .model import (Level1Design, FEAT, FEATModel, FILMGLS, FEATRegister, + FLAMEO, ContrastMgr, MultipleRegressDesign, L2Model, SMM, + MELODIC, SmoothEstimate, Cluster, Randomise, GLM) +from .utils import ( + AvScale, Smooth, Slice, Merge, ExtractROI, Split, ImageMaths, ImageMeants, + ImageStats, FilterRegressor, Overlay, Slicer, PlotTimeSeries, + PlotMotionParams, ConvertXFM, SwapDimensions, PowerSpectrum, Reorient2Std, + Complex, InvWarp, WarpUtils, ConvertWarp, WarpPoints, WarpPointsToStd, + WarpPointsFromStd, RobustFOV, CopyGeom, MotionOutliers) + +from .epi import (PrepareFieldmap, TOPUP, ApplyTOPUP, Eddy, EPIDeWarp, SigLoss, + EddyCorrect, EpiReg) +from .dti import (BEDPOSTX, XFibres, DTIFit, ProbTrackX, ProbTrackX2, VecReg, + ProjThresh, FindTheBiggest, DistanceMap, TractSkeleton, + MakeDyadicVectors, BEDPOSTX5, XFibres5) +from .maths import (ChangeDataType, Threshold, MeanImage, ApplyMask, + IsotropicSmooth, TemporalFilter, DilateImage, ErodeImage, + SpatialFilter, UnaryMaths, BinaryMaths, MultiImageMaths, + MaxnImage, MinImage, MedianImage, PercentileImage, + AR1Image) +from .possum import B0Calc +from .fix import (AccuracyTester, Classifier, Cleaner, FeatureExtractor, + Training, TrainingSetCreator) +from .aroma import ICA_AROMA diff --git a/nipype/interfaces/fsl/aroma.py b/nipype/interfaces/fsl/aroma.py new file mode 100644 index 0000000000..a86763e3e5 --- /dev/null +++ b/nipype/interfaces/fsl/aroma.py @@ -0,0 +1,146 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""This commandline module provides classes for interfacing with the +`ICA-AROMA.py`_ command line tool. 
+""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, File, + Directory, traits, isdefined) +import os + + +class ICA_AROMAInputSpec(CommandLineInputSpec): + feat_dir = Directory( + exists=True, + mandatory=True, + argstr='-feat %s', + xor=['in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters'], + desc='If a feat directory exists and temporal filtering ' + 'has not been run yet, ICA_AROMA can use the files in ' + 'this directory.') + in_file = File( + exists=True, + mandatory=True, + argstr='-i %s', + xor=['feat_dir'], + desc='volume to be denoised') + out_dir = Directory( + 'out', usedefault=True, mandatory=True, + argstr='-o %s', desc='output directory') + mask = File( + exists=True, + argstr='-m %s', + xor=['feat_dir'], + desc='path/name volume mask') + dim = traits.Int( + argstr='-dim %d', + desc='Dimensionality reduction when running ' + 'MELODIC (defualt is automatic estimation)') + TR = traits.Float( + argstr='-tr %.3f', + desc='TR in seconds. If this is not specified ' + 'the TR will be extracted from the ' + 'header of the fMRI nifti file.') + melodic_dir = Directory( + exists=True, + argstr='-meldir %s', + desc='path to MELODIC directory if MELODIC has already been run') + mat_file = File( + exists=True, + argstr='-affmat %s', + xor=['feat_dir'], + desc='path/name of the mat-file describing the ' + 'affine registration (e.g. FSL FLIRT) of the ' + 'functional data to structural space (.mat file)') + fnirt_warp_file = File( + exists=True, + argstr='-warp %s', + xor=['feat_dir'], + desc='File name of the warp-file describing ' + 'the non-linear registration (e.g. FSL FNIRT) ' + 'of the structural data to MNI152 space (.nii.gz)') + motion_parameters = File( + exists=True, + mandatory=True, + argstr='-mc %s', + xor=['feat_dir'], + desc='motion parameters file') + denoise_type = traits.Enum( + 'nonaggr', + 'aggr', + 'both', + 'no', + usedefault=True, + mandatory=True, + argstr='-den %s', + desc='Type of denoising strategy:\n' + '-no: only classification, no denoising\n' + '-nonaggr (default): non-aggresssive denoising, i.e. partial component regression\n' + '-aggr: aggressive denoising, i.e. full component regression\n' + '-both: both aggressive and non-aggressive denoising (two outputs)') + + +class ICA_AROMAOutputSpec(TraitedSpec): + aggr_denoised_file = File( + exists=True, desc='if generated: aggressively denoised volume') + nonaggr_denoised_file = File( + exists=True, desc='if generated: non aggressively denoised volume') + out_dir = Directory( + exists=True, + desc='directory contains (in addition to the denoised files): ' + 'melodic.ica + classified_motion_components + ' + 'classification_overview + feature_scores + melodic_ic_mni)') + + +class ICA_AROMA(CommandLine): + """ + Interface for the ICA_AROMA.py script. + + ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns + a data-driven method to identify and remove motion-related independent + components from fMRI data. To that end it exploits a small, but robust + set of theoretically motivated features, preventing the need for classifier + re-training and therefore providing direct and easy applicability. 
+ + See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA + + Example + ------- + + >>> from nipype.interfaces.fsl import ICA_AROMA + >>> from nipype.testing import example_data + >>> AROMA_obj = ICA_AROMA() + >>> AROMA_obj.inputs.in_file = 'functional.nii' + >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat' + >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii' + >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt' + >>> AROMA_obj.inputs.mask = 'mask.nii.gz' + >>> AROMA_obj.inputs.denoise_type = 'both' + >>> AROMA_obj.inputs.out_dir = 'ICA_testout' + >>> AROMA_obj.cmdline # doctest: +ELLIPSIS + 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o .../ICA_testout' + """ + _cmd = 'ICA_AROMA.py' + input_spec = ICA_AROMAInputSpec + output_spec = ICA_AROMAOutputSpec + + def _format_arg(self, name, trait_spec, value): + if name == 'out_dir': + return trait_spec.argstr % os.path.abspath(value) + return super(ICA_AROMA, self)._format_arg(name, trait_spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + out_dir = outputs['out_dir'] + + if self.inputs.denoise_type in ('aggr', 'both'): + outputs['aggr_denoised_file'] = os.path.join( + out_dir, 'denoised_func_data_aggr.nii.gz') + if self.inputs.denoise_type in ('nonaggr', 'both'): + outputs['nonaggr_denoised_file'] = os.path.join( + out_dir, 'denoised_func_data_nonaggr.nii.gz') + return outputs diff --git a/nipype/interfaces/fsl/base.py b/nipype/interfaces/fsl/base.py new file mode 100644 index 0000000000..db74e900b8 --- /dev/null +++ b/nipype/interfaces/fsl/base.py @@ -0,0 +1,282 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 4.1.4. + +These are the base tools for working with FSL. +Preprocessing tools are found in fsl/preprocess.py +Model tools are found in fsl/model.py +DTI tools are found in fsl/dti.py + +XXX Make this doc current! + +Currently these tools are supported: + +* BET v2.1: brain extraction +* FAST v4.1: segmentation and bias correction +* FLIRT v5.5: linear registration +* MCFLIRT: motion correction +* FNIRT v1.0: non-linear warp + +Examples +-------- +See the docstrings of the individual classes for examples. + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from glob import glob +import os + +from ... import logging +from ...utils.filemanip import fname_presuffix +from ..base import (traits, isdefined, CommandLine, CommandLineInputSpec, + PackageInfo) +from ...external.due import BibTeX + +IFLOGGER = logging.getLogger('nipype.interface') + + +class Info(PackageInfo): + """ + Handle FSL ``output_type`` and version information. 
+ + output type refers to the type of file fsl defaults to writing + eg, NIFTI, NIFTI_GZ + + Examples + -------- + + >>> from nipype.interfaces.fsl import Info + >>> Info.version() # doctest: +SKIP + >>> Info.output_type() # doctest: +SKIP + + + """ + + ftypes = { + 'NIFTI': '.nii', + 'NIFTI_PAIR': '.img', + 'NIFTI_GZ': '.nii.gz', + 'NIFTI_PAIR_GZ': '.img.gz' + } + + if os.getenv('FSLDIR'): + version_file = os.path.join(os.getenv('FSLDIR'), 'etc', 'fslversion') + + @staticmethod + def parse_version(raw_info): + return raw_info.splitlines()[0] + + @classmethod + def output_type_to_ext(cls, output_type): + """Get the file extension for the given output type. + + Parameters + ---------- + output_type : {'NIFTI', 'NIFTI_GZ', 'NIFTI_PAIR', 'NIFTI_PAIR_GZ'} + String specifying the output type. + + Returns + ------- + extension : str + The file extension for the output type. + """ + + try: + return cls.ftypes[output_type] + except KeyError: + msg = 'Invalid FSLOUTPUTTYPE: ', output_type + raise KeyError(msg) + + @classmethod + def output_type(cls): + """Get the global FSL output file type FSLOUTPUTTYPE. + + This returns the value of the environment variable + FSLOUTPUTTYPE. An exception is raised if it is not defined. + + Returns + ------- + fsl_ftype : string + Represents the current environment setting of FSLOUTPUTTYPE + """ + try: + return os.environ['FSLOUTPUTTYPE'] + except KeyError: + IFLOGGER.warn('FSLOUTPUTTYPE environment variable is not set. ' + 'Setting FSLOUTPUTTYPE=NIFTI') + return 'NIFTI' + + @staticmethod + def standard_image(img_name=None): + '''Grab an image from the standard location. + + Returns a list of standard images if called without arguments. + + Could be made more fancy to allow for more relocatability''' + try: + fsldir = os.environ['FSLDIR'] + except KeyError: + raise Exception('FSL environment variables not set') + stdpath = os.path.join(fsldir, 'data', 'standard') + if img_name is None: + return [ + filename.replace(stdpath + '/', '') + for filename in glob(os.path.join(stdpath, '*nii*')) + ] + return os.path.join(stdpath, img_name) + + +class FSLCommandInputSpec(CommandLineInputSpec): + """ + Base Input Specification for all FSL Commands + + All command support specifying FSLOUTPUTTYPE dynamically + via output_type. + + Example + ------- + fsl.ExtractRoi(tmin=42, tsize=1, output_type='NIFTI') + """ + output_type = traits.Enum( + 'NIFTI', list(Info.ftypes.keys()), desc='FSL output type') + + +class FSLCommand(CommandLine): + """Base support for FSL commands. + + """ + + input_spec = FSLCommandInputSpec + _output_type = None + + references_ = [{ + 'entry': + BibTeX('@article{JenkinsonBeckmannBehrensWoolrichSmith2012,' + 'author={M. Jenkinson, C.F. Beckmann, T.E. Behrens, ' + 'M.W. Woolrich, and S.M. Smith},' + 'title={FSL},' + 'journal={NeuroImage},' + 'volume={62},' + 'pages={782-790},' + 'year={2012},' + '}'), + 'tags': ['implementation'], + }] + + def __init__(self, **inputs): + super(FSLCommand, self).__init__(**inputs) + self.inputs.on_trait_change(self._output_update, 'output_type') + + if self._output_type is None: + self._output_type = Info.output_type() + + if not isdefined(self.inputs.output_type): + self.inputs.output_type = self._output_type + else: + self._output_update() + + def _output_update(self): + self._output_type = self.inputs.output_type + self.inputs.environ.update({'FSLOUTPUTTYPE': self.inputs.output_type}) + + @classmethod + def set_default_output_type(cls, output_type): + """Set the default output type for FSL classes. 
+ + This method is used to set the default output type for all FSL + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.output_type. + """ + + if output_type in Info.ftypes: + cls._output_type = output_type + else: + raise AttributeError('Invalid FSL output_type: %s' % output_type) + + @property + def version(self): + return Info.version() + + def _gen_fname(self, + basename, + cwd=None, + suffix=None, + change_ext=True, + ext=None): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. + If change_ext is True, it will use the extensions specified in + inputs.output_type. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (default is '' ) + change_ext : bool + Flag to change the filename extension to the FSL output type. + (default True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if ext is None: + ext = Info.output_type_to_ext(self.inputs.output_type) + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix( + basename, suffix=suffix, use_ext=False, newpath=cwd) + return fname + + def _overload_extension(self, value, name=None): + return value + Info.output_type_to_ext(self.inputs.output_type) + + +def check_fsl(): + ver = Info.version() + if ver: + return 0 + else: + return 1 + + +def no_fsl(): + """Checks if FSL is NOT installed + used with skipif to skip tests that will + fail if FSL is not installed""" + + if Info.version() is None: + return True + else: + return False + + +def no_fsl_course_data(): + """check if fsl_course data is present""" + return not ('FSL_COURSE_DATA' in os.environ and os.path.isdir( + os.path.abspath(os.environ['FSL_COURSE_DATA']))) diff --git a/nipype/interfaces/fsl/dti.py b/nipype/interfaces/fsl/dti.py new file mode 100644 index 0000000000..d8812cec6c --- /dev/null +++ b/nipype/interfaces/fsl/dti.py @@ -0,0 +1,1464 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 4.1.4.
+""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +import os +import warnings + +from ...utils.filemanip import fname_presuffix, split_filename, copyfile +from ..base import (TraitedSpec, isdefined, File, Directory, InputMultiPath, + OutputMultiPath, traits) +from .base import (FSLCommand, FSLCommandInputSpec, Info) + + +class DTIFitInputSpec(FSLCommandInputSpec): + dwi = File( + exists=True, + desc='diffusion weighted image data file', + argstr='-k %s', + position=0, + mandatory=True) + base_name = traits.Str( + "dtifit_", + desc=('base_name that all output files ' + 'will start with'), + argstr='-o %s', + position=1, + usedefault=True) + mask = File( + exists=True, + desc='bet binary mask file', + argstr='-m %s', + position=2, + mandatory=True) + bvecs = File( + exists=True, + desc='b vectors file', + argstr='-r %s', + position=3, + mandatory=True) + bvals = File( + exists=True, + desc='b values file', + argstr='-b %s', + position=4, + mandatory=True) + min_z = traits.Int(argstr='-z %d', desc='min z') + max_z = traits.Int(argstr='-Z %d', desc='max z') + min_y = traits.Int(argstr='-y %d', desc='min y') + max_y = traits.Int(argstr='-Y %d', desc='max y') + min_x = traits.Int(argstr='-x %d', desc='min x') + max_x = traits.Int(argstr='-X %d', desc='max x') + save_tensor = traits.Bool( + desc='save the elements of the tensor', argstr='--save_tensor') + sse = traits.Bool(desc='output sum of squared errors', argstr='--sse') + cni = File( + exists=True, desc='input counfound regressors', argstr='--cni=%s') + little_bit = traits.Bool( + desc='only process small area of brain', argstr='--littlebit') + gradnonlin = File( + exists=True, argstr='--gradnonlin=%s', desc='gradient non linearities') + + +class DTIFitOutputSpec(TraitedSpec): + V1 = File(exists=True, desc='path/name of file with the 1st eigenvector') + V2 = File(exists=True, desc='path/name of file with the 2nd eigenvector') + V3 = File(exists=True, desc='path/name of file with the 3rd eigenvector') + L1 = File(exists=True, desc='path/name of file with the 1st eigenvalue') + L2 = File(exists=True, desc='path/name of file with the 2nd eigenvalue') + L3 = File(exists=True, desc='path/name of file with the 3rd eigenvalue') + MD = File(exists=True, desc='path/name of file with the mean diffusivity') + FA = File( + exists=True, desc='path/name of file with the fractional anisotropy') + MO = File( + exists=True, desc='path/name of file with the mode of anisotropy') + S0 = File( + exists=True, + desc=('path/name of file with the raw T2 signal with no ' + 'diffusion weighting')) + tensor = File( + exists=True, desc='path/name of file with the 4D tensor volume') + + +class DTIFit(FSLCommand): + """ Use FSL dtifit command for fitting a diffusion tensor model at each + voxel + + Example + ------- + + >>> from nipype.interfaces import fsl + >>> dti = fsl.DTIFit() + >>> dti.inputs.dwi = 'diffusion.nii' + >>> dti.inputs.bvecs = 'bvecs' + >>> dti.inputs.bvals = 'bvals' + >>> dti.inputs.base_name = 'TP' + >>> dti.inputs.mask = 'mask.nii' + >>> dti.cmdline + 'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals' + + """ + + _cmd = 'dtifit' + input_spec = DTIFitInputSpec + output_spec = DTIFitOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + for k in list(outputs.keys()): + if k not in ('outputtype', 'environ', 'args'): + if k != 'tensor' or (isdefined(self.inputs.save_tensor) + and self.inputs.save_tensor): + outputs[k] = self._gen_fname( + 
self.inputs.base_name, suffix='_' + k) + return outputs + + +class FSLXCommandInputSpec(FSLCommandInputSpec): + dwi = File( + exists=True, + argstr='--data=%s', + mandatory=True, + desc='diffusion weighted image data file') + mask = File( + exists=True, + argstr='--mask=%s', + mandatory=True, + desc='brain binary mask file (i.e. from BET)') + bvecs = File( + exists=True, + argstr='--bvecs=%s', + mandatory=True, + desc='b vectors file') + bvals = File( + exists=True, argstr='--bvals=%s', mandatory=True, desc='b values file') + + logdir = Directory('.', argstr='--logdir=%s', usedefault=True) + n_fibres = traits.Range( + usedefault=True, + low=1, + value=2, + argstr='--nfibres=%d', + desc=('Maximum number of fibres to fit in each voxel'), + mandatory=True) + model = traits.Enum( + 1, + 2, + 3, + argstr='--model=%d', + desc=('use monoexponential (1, default, required for ' + 'single-shell) or multiexponential (2, multi-' + 'shell) model')) + fudge = traits.Int(argstr='--fudge=%d', desc='ARD fudge factor') + n_jumps = traits.Int( + 5000, usedefault=True, + argstr='--njumps=%d', desc='Num of jumps to be made by MCMC') + burn_in = traits.Range( + low=0, + value=0, + usedefault=True, + argstr='--burnin=%d', + desc=('Total num of jumps at start of MCMC to be ' + 'discarded')) + burn_in_no_ard = traits.Range( + low=0, + value=0, + usedefault=True, + argstr='--burnin_noard=%d', + desc=('num of burnin jumps before the ard is' + ' imposed')) + sample_every = traits.Range( + low=0, + value=1, + usedefault=True, + argstr='--sampleevery=%d', + desc='Num of jumps for each sample (MCMC)') + update_proposal_every = traits.Range( + low=1, + value=40, + usedefault=True, + argstr='--updateproposalevery=%d', + desc=('Num of jumps for each update ' + 'to the proposal density std ' + '(MCMC)')) + seed = traits.Int( + argstr='--seed=%d', desc='seed for pseudo random number generator') + + _xor_inputs1 = ('no_ard', 'all_ard') + no_ard = traits.Bool( + argstr='--noard', xor=_xor_inputs1, desc='Turn ARD off on all fibres') + all_ard = traits.Bool( + argstr='--allard', xor=_xor_inputs1, desc='Turn ARD on on all fibres') + + _xor_inputs2 = ('no_spat', 'non_linear', 'cnlinear') + no_spat = traits.Bool( + argstr='--nospat', + xor=_xor_inputs2, + desc='Initialise with tensor, not spatially') + non_linear = traits.Bool( + argstr='--nonlinear', + xor=_xor_inputs2, + desc='Initialise with nonlinear fitting') + cnlinear = traits.Bool( + argstr='--cnonlinear', + xor=_xor_inputs2, + desc=('Initialise with constrained nonlinear ' + 'fitting')) + rician = traits.Bool(argstr='--rician', desc=('use Rician noise modeling')) + + _xor_inputs3 = ['f0_noard', 'f0_ard'] + f0_noard = traits.Bool( + argstr='--f0', + xor=_xor_inputs3, + desc=('Noise floor model: add to the model an ' + 'unattenuated signal compartment f0')) + f0_ard = traits.Bool( + argstr='--f0 --ardf0', + xor=_xor_inputs3 + ['all_ard'], + desc=('Noise floor model: add to the model an ' + 'unattenuated signal compartment f0')) + force_dir = traits.Bool( + True, + argstr='--forcedir', + usedefault=True, + desc=('use the actual directory name given ' + '(do not add + to make a new directory)')) + + +class FSLXCommandOutputSpec(TraitedSpec): + dyads = OutputMultiPath( + File(exists=True), + desc=('Mean of PDD distribution' + ' in vector form.')) + fsamples = OutputMultiPath( + File(exists=True), + desc=('Samples from the ' + 'distribution on f ' + 'anisotropy')) + mean_dsamples = File( + exists=True, desc='Mean of distribution on diffusivity d') + mean_fsamples = 
OutputMultiPath( + File(exists=True), desc=('Mean of distribution on f ' + 'anisotropy')) + mean_S0samples = File( + exists=True, + desc=('Mean of distribution on T2w' + 'baseline signal intensity S0')) + mean_tausamples = File( + exists=True, + desc=('Mean of distribution on ' + 'tau samples (only with rician ' + 'noise)')) + phsamples = OutputMultiPath( + File(exists=True), desc=('phi samples, per fiber')) + thsamples = OutputMultiPath( + File(exists=True), desc=('theta samples, per fiber')) + + +class FSLXCommand(FSLCommand): + """ + Base support for ``xfibres`` and ``bedpostx`` + """ + input_spec = FSLXCommandInputSpec + output_spec = FSLXCommandOutputSpec + + def _run_interface(self, runtime): + self._out_dir = os.getcwd() + runtime = super(FSLXCommand, self)._run_interface(runtime) + if runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _list_outputs(self, out_dir=None): + outputs = self.output_spec().get() + n_fibres = self.inputs.n_fibres + if not out_dir: + if isdefined(self.inputs.logdir): + out_dir = os.path.abspath(self.inputs.logdir) + else: + out_dir = os.path.abspath('logdir') + + multi_out = [ + 'dyads', 'fsamples', 'mean_fsamples', 'phsamples', 'thsamples' + ] + single_out = ['mean_dsamples', 'mean_S0samples'] + + for k in single_out: + outputs[k] = self._gen_fname(k, cwd=out_dir) + + if isdefined(self.inputs.rician) and self.inputs.rician: + outputs['mean_tausamples'] = self._gen_fname( + 'mean_tausamples', cwd=out_dir) + + for k in multi_out: + outputs[k] = [] + + for i in range(1, n_fibres + 1): + outputs['fsamples'].append( + self._gen_fname('f%dsamples' % i, cwd=out_dir)) + outputs['mean_fsamples'].append( + self._gen_fname('mean_f%dsamples' % i, cwd=out_dir)) + + for i in range(1, n_fibres + 1): + outputs['dyads'].append( + self._gen_fname('dyads%d' % i, cwd=out_dir)) + outputs['phsamples'].append( + self._gen_fname('ph%dsamples' % i, cwd=out_dir)) + outputs['thsamples'].append( + self._gen_fname('th%dsamples' % i, cwd=out_dir)) + + return outputs + + +class BEDPOSTX5InputSpec(FSLXCommandInputSpec): + dwi = File( + exists=True, desc='diffusion weighted image data file', mandatory=True) + mask = File(exists=True, desc='bet binary mask file', mandatory=True) + bvecs = File(exists=True, desc='b vectors file', mandatory=True) + bvals = File(exists=True, desc='b values file', mandatory=True) + logdir = Directory(argstr='--logdir=%s') + n_fibres = traits.Range( + usedefault=True, + low=1, + value=2, + argstr='-n %d', + desc=('Maximum number of fibres to fit in each voxel'), + mandatory=True) + model = traits.Enum( + 1, + 2, + 3, + argstr='-model %d', + desc=('use monoexponential (1, default, required for ' + 'single-shell) or multiexponential (2, multi-' + 'shell) model')) + fudge = traits.Int(argstr='-w %d', desc='ARD fudge factor') + n_jumps = traits.Int( + 5000, usedefault=True, + argstr='-j %d', desc='Num of jumps to be made by MCMC') + burn_in = traits.Range( + low=0, + value=0, + usedefault=True, + argstr='-b %d', + desc=('Total num of jumps at start of MCMC to be ' + 'discarded')) + sample_every = traits.Range( + low=0, + value=1, + usedefault=True, + argstr='-s %d', + desc='Num of jumps for each sample (MCMC)') + out_dir = Directory( + 'bedpostx', + mandatory=True, + desc='output directory', + usedefault=True, + position=1, + argstr='%s') + gradnonlin = traits.Bool( + False, + argstr='-g', + desc=('consider gradient nonlinearities, ' + 'default off')) + grad_dev = File( + exists=True, desc='grad_dev file, if gradnonlin, -g is True') + use_gpu 
= traits.Bool(False, desc='Use the GPU version of bedpostx') + + +class BEDPOSTX5OutputSpec(TraitedSpec): + mean_dsamples = File( + exists=True, desc='Mean of distribution on diffusivity d') + mean_fsamples = OutputMultiPath( + File(exists=True), desc=('Mean of distribution on f ' + 'anisotropy')) + mean_S0samples = File( + exists=True, + desc=('Mean of distribution on T2w' + 'baseline signal intensity S0')) + mean_phsamples = OutputMultiPath( + File(exists=True), desc='Mean of distribution on phi') + mean_thsamples = OutputMultiPath( + File(exists=True), desc='Mean of distribution on theta') + merged_thsamples = OutputMultiPath( + File(exists=True), desc=('Samples from the distribution ' + 'on theta')) + merged_phsamples = OutputMultiPath( + File(exists=True), desc=('Samples from the distribution ' + 'on phi')) + merged_fsamples = OutputMultiPath( + File(exists=True), + desc=('Samples from the distribution on ' + 'anisotropic volume fraction')) + dyads = OutputMultiPath( + File(exists=True), desc='Mean of PDD distribution in vector form.') + dyads_dispersion = OutputMultiPath(File(exists=True), desc=('Dispersion')) + + +class BEDPOSTX5(FSLXCommand): + """ + BEDPOSTX stands for Bayesian Estimation of Diffusion Parameters Obtained + using Sampling Techniques. The X stands for modelling Crossing Fibres. + bedpostx runs Markov Chain Monte Carlo sampling to build up distributions + on diffusion parameters at each voxel. It creates all the files necessary + for running probabilistic tractography. For an overview of the modelling + carried out within bedpostx see this `technical report + `_. + + + .. note:: Consider using + :func:`nipype.workflows.fsl.dmri.create_bedpostx_pipeline` instead. + + + Example + ------- + + >>> from nipype.interfaces import fsl + >>> bedp = fsl.BEDPOSTX5(bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', + ... 
mask='mask.nii', n_fibres=1) + >>> bedp.cmdline + 'bedpostx bedpostx -b 0 --burnin_noard=0 --forcedir -n 1 -j 5000 \ +-s 1 --updateproposalevery=40' + + """ + + _cmd = 'bedpostx' + _default_cmd = _cmd + input_spec = BEDPOSTX5InputSpec + output_spec = BEDPOSTX5OutputSpec + _can_resume = True + + def __init__(self, **inputs): + super(BEDPOSTX5, self).__init__(**inputs) + self.inputs.on_trait_change(self._cuda_update, 'use_gpu') + + def _cuda_update(self): + if isdefined(self.inputs.use_gpu) and self.inputs.use_gpu: + self._cmd = 'bedpostx_gpu' + else: + self._cmd = self._default_cmd + + def _run_interface(self, runtime): + + subjectdir = os.path.abspath(self.inputs.out_dir) + if not os.path.exists(subjectdir): + os.makedirs(subjectdir) + _, _, ext = split_filename(self.inputs.mask) + copyfile(self.inputs.mask, + os.path.join(subjectdir, 'nodif_brain_mask' + ext)) + _, _, ext = split_filename(self.inputs.dwi) + copyfile(self.inputs.dwi, os.path.join(subjectdir, 'data' + ext)) + copyfile(self.inputs.bvals, os.path.join(subjectdir, 'bvals')) + copyfile(self.inputs.bvecs, os.path.join(subjectdir, 'bvecs')) + if isdefined(self.inputs.grad_dev): + _, _, ext = split_filename(self.inputs.grad_dev) + copyfile(self.inputs.grad_dev, + os.path.join(subjectdir, 'grad_dev' + ext)) + + retval = super(BEDPOSTX5, self)._run_interface(runtime) + + self._out_dir = subjectdir + '.bedpostX' + return retval + + def _list_outputs(self): + outputs = self.output_spec().get() + n_fibres = self.inputs.n_fibres + + multi_out = [ + 'merged_thsamples', 'merged_fsamples', 'merged_phsamples', + 'mean_phsamples', 'mean_thsamples', 'mean_fsamples', + 'dyads_dispersion', 'dyads' + ] + + single_out = ['mean_dsamples', 'mean_S0samples'] + + for k in single_out: + outputs[k] = self._gen_fname(k, cwd=self._out_dir) + + for k in multi_out: + outputs[k] = [] + + for i in range(1, n_fibres + 1): + outputs['merged_thsamples'].append( + self._gen_fname('merged_th%dsamples' % i, cwd=self._out_dir)) + outputs['merged_fsamples'].append( + self._gen_fname('merged_f%dsamples' % i, cwd=self._out_dir)) + outputs['merged_phsamples'].append( + self._gen_fname('merged_ph%dsamples' % i, cwd=self._out_dir)) + outputs['mean_thsamples'].append( + self._gen_fname('mean_th%dsamples' % i, cwd=self._out_dir)) + outputs['mean_phsamples'].append( + self._gen_fname('mean_ph%dsamples' % i, cwd=self._out_dir)) + outputs['mean_fsamples'].append( + self._gen_fname('mean_f%dsamples' % i, cwd=self._out_dir)) + outputs['dyads'].append( + self._gen_fname('dyads%d' % i, cwd=self._out_dir)) + outputs['dyads_dispersion'].append( + self._gen_fname('dyads%d_dispersion' % i, cwd=self._out_dir)) + return outputs + + +class XFibres5InputSpec(FSLXCommandInputSpec): + gradnonlin = File( + exists=True, + argstr='--gradnonlin=%s', + desc='gradient file corresponding to slice') + + +class XFibres5(FSLXCommand): + """ + Perform model parameters estimation for local (voxelwise) diffusion + parameters + """ + _cmd = 'xfibres' + input_spec = XFibres5InputSpec + output_spec = FSLXCommandOutputSpec + + +XFibres = XFibres5 +BEDPOSTX = BEDPOSTX5 + + +class ProbTrackXBaseInputSpec(FSLCommandInputSpec): + thsamples = InputMultiPath(File(exists=True), mandatory=True) + phsamples = InputMultiPath(File(exists=True), mandatory=True) + fsamples = InputMultiPath(File(exists=True), mandatory=True) + samples_base_name = traits.Str( + "merged", + desc=('the rootname/base_name for samples ' + 'files'), + argstr='--samples=%s', + usedefault=True) + mask = File( + exists=True, + desc='bet binary 
mask file in diffusion space',
+        argstr='-m %s',
+        mandatory=True)
+    seed = traits.Either(
+        File(exists=True),
+        traits.List(File(exists=True)),
+        traits.List(traits.List(traits.Int(), minlen=3, maxlen=3)),
+        desc=('seed volume(s), or voxel(s) or freesurfer '
+              'label file'),
+        argstr='--seed=%s',
+        mandatory=True)
+    target_masks = InputMultiPath(
+        File(exists=True),
+        desc=('list of target masks - required for '
+              'seeds_to_targets classification'),
+        argstr='--targetmasks=%s')
+    waypoints = File(
+        exists=True,
+        desc=('waypoint mask or ascii list of waypoint masks - '
+              'only keep paths going through ALL the masks'),
+        argstr='--waypoints=%s')
+    network = traits.Bool(
+        desc=('activate network mode - only keep paths '
+              'going through at least one seed mask '
+              '(required if multiple seed masks)'),
+        argstr='--network')
+    seed_ref = File(
+        exists=True,
+        desc=('reference vol to define seed space in simple mode '
+              '- diffusion space assumed if absent'),
+        argstr='--seedref=%s')
+    out_dir = Directory(
+        exists=True,
+        argstr='--dir=%s',
+        desc='directory to put the final volumes in',
+        genfile=True)
+    force_dir = traits.Bool(
+        True,
+        desc=('use the actual directory name given - i.e. '
+              'do not add + to make a new directory'),
+        argstr='--forcedir',
+        usedefault=True)
+    opd = traits.Bool(
+        True,
+        desc='outputs path distributions',
+        argstr='--opd',
+        usedefault=True)
+    correct_path_distribution = traits.Bool(
+        desc=('correct path distribution '
+              'for the length of the '
+              'pathways'),
+        argstr='--pd')
+    os2t = traits.Bool(desc='Outputs seeds to targets', argstr='--os2t')
+    # paths_file = File('nipype_fdtpaths', usedefault=True, argstr='--out=%s',
+    #                   desc='produces an output file (default is fdt_paths)')
+    avoid_mp = File(
+        exists=True,
+        desc=('reject pathways passing through locations given by '
+              'this mask'),
+        argstr='--avoid=%s')
+    stop_mask = File(
+        exists=True,
+        argstr='--stop=%s',
+        desc='stop tracking at locations given by this mask file')
+    xfm = File(
+        exists=True,
+        argstr='--xfm=%s',
+        desc=('transformation matrix taking seed space to DTI space '
+              '(either FLIRT matrix or FNIRT warp_field) - default is '
+              'identity'))
+    inv_xfm = File(
+        argstr='--invxfm=%s',
+        desc=('transformation matrix taking DTI space to seed '
+              'space (compulsory when using a warp_field for '
+              'seeds_to_dti)'))
+    n_samples = traits.Int(
+        5000,
+        argstr='--nsamples=%d',
+        desc='number of samples - default=5000',
+        usedefault=True)
+    n_steps = traits.Int(
+        argstr='--nsteps=%d', desc='number of steps per sample - default=2000')
+    dist_thresh = traits.Float(
+        argstr='--distthresh=%.3f',
+        desc=('discards samples shorter than this '
+              'threshold (in mm - default=0)'))
+    c_thresh = traits.Float(
+        argstr='--cthr=%.3f', desc='curvature threshold - default=0.2')
+    sample_random_points = traits.Bool(
+        argstr='--sampvox',
+        desc=('sample random points within '
+              'seed voxels'))
+    step_length = traits.Float(
+        argstr='--steplength=%.3f', desc='step_length in mm - default=0.5')
+    loop_check = traits.Bool(
+        argstr='--loopcheck',
+        desc=('perform loop_checks on paths - slower, '
+              'but allows lower curvature threshold'))
+    use_anisotropy = traits.Bool(
+        argstr='--usef', desc='use anisotropy to constrain tracking')
+    rand_fib = traits.Enum(
+        0,
+        1,
+        2,
+        3,
+        argstr='--randfib=%d',
+        desc=('options: 0 - default, 1 - to randomly '
+              'sample initial fibres (with f > fibthresh), '
+              '2 - to sample in proportion fibres (with '
+              'f>fibthresh) to f, 3 - to sample ALL '
+              'populations at random 
(even if ' + 'f>> from nipype.interfaces import fsl
+    >>> pbx = fsl.ProbTrackX(samples_base_name='merged', mask='mask.nii', \
+    seed='MASK_average_thal_right.nii', mode='seedmask', \
+    xfm='trans.mat', n_samples=3, n_steps=10, force_dir=True, opd=True, \
+    os2t=True, target_masks = ['targets_MASK1.nii', 'targets_MASK2.nii'], \
+    thsamples='merged_thsamples.nii', fsamples='merged_fsamples.nii', \
+    phsamples='merged_phsamples.nii', out_dir='.')
+    >>> pbx.cmdline
+    'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. --samples=merged --seed=MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat'
+
+    """
+
+    _cmd = 'probtrackx'
+    input_spec = ProbTrackXInputSpec
+    output_spec = ProbTrackXOutputSpec
+
+    def __init__(self, **inputs):
+        warnings.warn(("Deprecated: Please use create_bedpostx_pipeline "
+                       "instead"), DeprecationWarning)
+        return super(ProbTrackX, self).__init__(**inputs)
+
+    def _run_interface(self, runtime):
+        for i in range(1, len(self.inputs.thsamples) + 1):
+            _, _, ext = split_filename(self.inputs.thsamples[i - 1])
+            copyfile(
+                self.inputs.thsamples[i - 1],
+                self.inputs.samples_base_name + "_th%dsamples" % i + ext,
+                copy=False)
+            _, _, ext = split_filename(self.inputs.phsamples[i - 1])
+            copyfile(
+                self.inputs.phsamples[i - 1],
+                self.inputs.samples_base_name + "_ph%dsamples" % i + ext,
+                copy=False)
+            _, _, ext = split_filename(self.inputs.fsamples[i - 1])
+            copyfile(
+                self.inputs.fsamples[i - 1],
+                self.inputs.samples_base_name + "_f%dsamples" % i + ext,
+                copy=False)
+
+        if isdefined(self.inputs.target_masks):
+            f = open("targets.txt", "w")
+            for target in self.inputs.target_masks:
+                f.write("%s\n" % target)
+            f.close()
+        if isinstance(self.inputs.seed, list):
+            f = open("seeds.txt", "w")
+            for seed in self.inputs.seed:
+                if isinstance(seed, list):
+                    f.write("%s\n" % (" ".join([str(s) for s in seed])))
+                else:
+                    f.write("%s\n" % seed)
+            f.close()
+
+        runtime = super(ProbTrackX, self)._run_interface(runtime)
+        if runtime.stderr:
+            self.raise_exception(runtime)
+        return runtime
+
+    def _format_arg(self, name, spec, value):
+        if name == 'target_masks' and isdefined(value):
+            fname = "targets.txt"
+            return super(ProbTrackX, self)._format_arg(name, spec, [fname])
+        elif name == 'seed' and isinstance(value, list):
+            fname = "seeds.txt"
+            return super(ProbTrackX, self)._format_arg(name, spec, fname)
+        else:
+            return super(ProbTrackX, self)._format_arg(name, spec, value)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if not isdefined(self.inputs.out_dir):
+            out_dir = self._gen_filename("out_dir")
+        else:
+            out_dir = self.inputs.out_dir
+
+        outputs['log'] = os.path.abspath(
+            os.path.join(out_dir, 'probtrackx.log'))
+        # outputs['way_total'] = os.path.abspath(os.path.join(out_dir,
+        #                                        'waytotal'))
+        if isdefined(self.inputs.opd) and self.inputs.opd:
+            if (isinstance(self.inputs.seed, list)
+                    and isinstance(self.inputs.seed[0], list)):
+                outputs['fdt_paths'] = []
+                for seed in self.inputs.seed:
+                    outputs['fdt_paths'].append(
+                        os.path.abspath(
+                            self._gen_fname(
+                                ("fdt_paths_%s" %
+                                 ("_".join([str(s) for s in seed]))),
+                                cwd=out_dir,
+                                suffix='')))
+            else:
+                outputs['fdt_paths'] = os.path.abspath(
+                    self._gen_fname("fdt_paths", cwd=out_dir, suffix=''))
+
+        # handle seeds-to-target output files
+        if isdefined(self.inputs.target_masks):
+            outputs['targets'] = []
+            for target in self.inputs.target_masks:
+                outputs['targets'].append(
+                    os.path.abspath(
+                        self._gen_fname(
+                            'seeds_to_' + os.path.split(target)[1],
+                            cwd=out_dir,
+                            suffix='')))
+        if isdefined(self.inputs.verbose) and self.inputs.verbose == 2:
+            outputs['particle_files'] = [
+                os.path.abspath(os.path.join(out_dir, 'particle%d' % i))
+                for i in range(self.inputs.n_samples)
+            ]
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == "out_dir":
+            return os.getcwd()
+        elif name == "mode":
+            if (isinstance(self.inputs.seed, list)
+                    and isinstance(self.inputs.seed[0], list)):
+                return "simple"
+            else:
+                return "seedmask"
+
+
+class ProbTrackX2InputSpec(ProbTrackXBaseInputSpec):
+    simple = traits.Bool(
+        desc=('Track from a list of voxels (seed must be an '
+              'ASCII list of coordinates)'),
+        argstr='--simple')
+    fopd = File(
+        exists=True,
+        desc='Other mask for binning tract distribution',
+        argstr='--fopd=%s')
+    waycond = traits.Enum(
+        "OR",
+        "AND",
+        argstr='--waycond=%s',
+        desc=('Waypoint condition. Either "AND" (default) '
+              'or "OR"'))
+    wayorder = traits.Bool(
+        desc=('Reject streamlines that do not hit '
+              'waypoints in given order. Only valid if '
+              'waycond=AND'),
+        argstr='--wayorder')
+    onewaycondition = traits.Bool(
+        desc=('Apply waypoint conditions to each '
+              'half tract separately'),
+        argstr='--onewaycondition')
+    omatrix1 = traits.Bool(
+        desc='Output matrix1 - SeedToSeed Connectivity', argstr='--omatrix1')
+    distthresh1 = traits.Float(
+        argstr='--distthresh1=%.3f',
+        desc=('Discards samples (in matrix1) shorter '
+              'than this threshold (in mm - '
+              'default=0)'))
+    omatrix2 = traits.Bool(
+        desc='Output matrix2 - SeedToLowResMask',
+        argstr='--omatrix2',
+        requires=['target2'])
+    target2 = File(
+        exists=True,
+        desc=('Low resolution binary brain mask for storing '
+              'connectivity distribution in matrix2 mode'),
+        argstr='--target2=%s')
+    omatrix3 = traits.Bool(
+        desc='Output matrix3 (NxN connectivity matrix)',
+        argstr='--omatrix3',
+        requires=['target3', 'lrtarget3'])
+    target3 = File(
+        exists=True,
+        desc=('Mask used for NxN connectivity matrix (or Nxn if '
+              'lrtarget3 is set)'),
+        argstr='--target3=%s')
+    lrtarget3 = File(
+        exists=True,
+        desc='Column-space mask used for Nxn connectivity matrix',
+        argstr='--lrtarget3=%s')
+    distthresh3 = traits.Float(
+        argstr='--distthresh3=%.3f',
+        desc=('Discards samples (in matrix3) shorter '
+              'than this threshold (in mm - '
+              'default=0)'))
+    omatrix4 = traits.Bool(
+        desc=('Output matrix4 - DtiMaskToSeed (special '
+              'Oxford Sparse Format)'),
+        argstr='--omatrix4')
+    colmask4 = File(
+        exists=True,
+        desc='Mask for columns of matrix4 (default=seed mask)',
+        argstr='--colmask4=%s')
+    target4 = File(
+        exists=True, desc='Brain mask in DTI space', argstr='--target4=%s')
+    meshspace = traits.Enum(
+        "caret",
+        "freesurfer",
+        "first",
+        "vox",
+        argstr='--meshspace=%s',
+        desc=('Mesh reference space - either "caret" '
+              '(default) or "freesurfer" or "first" or '
+              '"vox"'))
+
+
+class ProbTrackX2OutputSpec(ProbTrackXOutputSpec):
+    network_matrix = File(
+        exists=True,
+        desc=('the network matrix generated by --omatrix1 '
+              'option'))
+    matrix1_dot = File(
+        exists=True, desc='Output matrix1.dot - SeedToSeed Connectivity')
+    lookup_tractspace = File(
+        exists=True,
+        desc=('lookup_tractspace generated by '
+              '--omatrix2 option'))
+    matrix2_dot = File(
+        exists=True, desc='Output matrix2.dot - SeedToLowResMask')
+    matrix3_dot = File(
+        exists=True, desc='Output matrix3 - NxN connectivity matrix')
+
+
+class ProbTrackX2(ProbTrackX):
+    """ Use FSL probtrackx2 for tractography on bedpostx results
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces import fsl
+    >>> pbx2 
= fsl.ProbTrackX2() + >>> pbx2.inputs.seed = 'seed_source.nii.gz' + >>> pbx2.inputs.thsamples = 'merged_th1samples.nii.gz' + >>> pbx2.inputs.fsamples = 'merged_f1samples.nii.gz' + >>> pbx2.inputs.phsamples = 'merged_ph1samples.nii.gz' + >>> pbx2.inputs.mask = 'nodif_brain_mask.nii.gz' + >>> pbx2.inputs.out_dir = '.' + >>> pbx2.inputs.n_samples = 3 + >>> pbx2.inputs.n_steps = 10 + >>> pbx2.cmdline + 'probtrackx2 --forcedir -m nodif_brain_mask.nii.gz --nsamples=3 --nsteps=10 --opd --dir=. --samples=merged --seed=seed_source.nii.gz' + """ + _cmd = 'probtrackx2' + input_spec = ProbTrackX2InputSpec + output_spec = ProbTrackX2OutputSpec + + def _list_outputs(self): + outputs = super(ProbTrackX2, self)._list_outputs() + + if not isdefined(self.inputs.out_dir): + out_dir = os.getcwd() + else: + out_dir = self.inputs.out_dir + + outputs['way_total'] = os.path.abspath( + os.path.join(out_dir, 'waytotal')) + + if isdefined(self.inputs.omatrix1): + outputs['network_matrix'] = os.path.abspath( + os.path.join(out_dir, 'matrix_seeds_to_all_targets')) + outputs['matrix1_dot'] = os.path.abspath( + os.path.join(out_dir, 'fdt_matrix1.dot')) + + if isdefined(self.inputs.omatrix2): + outputs['lookup_tractspace'] = os.path.abspath( + os.path.join(out_dir, 'lookup_tractspace_fdt_matrix2.nii.gz')) + outputs['matrix2_dot'] = os.path.abspath( + os.path.join(out_dir, 'fdt_matrix2.dot')) + + if isdefined(self.inputs.omatrix3): + outputs['matrix3_dot'] = os.path.abspath( + os.path.join(out_dir, 'fdt_matrix3.dot')) + return outputs + + +class VecRegInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='-i %s', + desc='filename for input vector or tensor field', + mandatory=True) + out_file = File( + argstr='-o %s', + desc=('filename for output registered vector or tensor ' + 'field'), + genfile=True, + hash_files=False) + ref_vol = File( + exists=True, + argstr='-r %s', + desc='filename for reference (target) volume', + mandatory=True) + affine_mat = File( + exists=True, + argstr='-t %s', + desc='filename for affine transformation matrix') + warp_field = File( + exists=True, + argstr='-w %s', + desc=('filename for 4D warp field for nonlinear ' + 'registration')) + rotation_mat = File( + exists=True, + argstr='--rotmat=%s', + desc=('filename for secondary affine matrix if set, ' + 'this will be used for the rotation of the ' + 'vector/tensor field')) + rotation_warp = File( + exists=True, + argstr='--rotwarp=%s', + desc=('filename for secondary warp field if set, ' + 'this will be used for the rotation of the ' + 'vector/tensor field')) + interpolation = traits.Enum( + "nearestneighbour", + "trilinear", + "sinc", + "spline", + argstr='--interp=%s', + desc=('interpolation method : ' + 'nearestneighbour, trilinear (default), ' + 'sinc or spline')) + mask = File(exists=True, argstr='-m %s', desc='brain mask in input space') + ref_mask = File( + exists=True, + argstr='--refmask=%s', + desc=('brain mask in output space (useful for speed up of ' + 'nonlinear reg)')) + + +class VecRegOutputSpec(TraitedSpec): + out_file = File( + exists=True, + desc=('path/name of filename for the registered vector or ' + 'tensor field')) + + +class VecReg(FSLCommand): + """Use FSL vecreg for registering vector data + For complete details, see the FDT Documentation + + + Example + ------- + + >>> from nipype.interfaces import fsl + >>> vreg = fsl.VecReg(in_file='diffusion.nii', \ + affine_mat='trans.mat', \ + ref_vol='mni.nii', \ + out_file='diffusion_vreg.nii') + >>> vreg.cmdline + 'vecreg -t trans.mat -i diffusion.nii -o 
diffusion_vreg.nii -r mni.nii' + + """ + + _cmd = 'vecreg' + input_spec = VecRegInputSpec + output_spec = VecRegOutputSpec + + def _run_interface(self, runtime): + if not isdefined(self.inputs.out_file): + pth, base_name = os.path.split(self.inputs.in_file) + self.inputs.out_file = self._gen_fname( + base_name, cwd=os.path.abspath(pth), suffix='_vreg') + return super(VecReg, self)._run_interface(runtime) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if (not isdefined(outputs['out_file']) + and isdefined(self.inputs.in_file)): + pth, base_name = os.path.split(self.inputs.in_file) + outputs['out_file'] = self._gen_fname( + base_name, cwd=os.path.abspath(pth), suffix='_vreg') + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + else: + return None + + +class ProjThreshInputSpec(FSLCommandInputSpec): + in_files = traits.List( + File(exists=True), + argstr='%s', + desc='a list of input volumes', + mandatory=True, + position=0) + threshold = traits.Int( + argstr='%d', + desc=('threshold indicating minimum number of seed ' + 'voxels entering this mask region'), + mandatory=True, + position=1) + + +class ProjThreshOuputSpec(TraitedSpec): + out_files = traits.List( + File(exists=True), + desc=('path/name of output volume after ' + 'thresholding')) + + +class ProjThresh(FSLCommand): + """Use FSL proj_thresh for thresholding some outputs of probtrack + For complete details, see the FDT Documentation + + + Example + ------- + + >>> from nipype.interfaces import fsl + >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] + >>> pThresh = fsl.ProjThresh(in_files=ldir, threshold=3) + >>> pThresh.cmdline + 'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3' + + """ + + _cmd = 'proj_thresh' + input_spec = ProjThreshInputSpec + output_spec = ProjThreshOuputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_files'] = [] + for name in self.inputs.in_files: + cwd, base_name = os.path.split(name) + outputs['out_files'].append( + self._gen_fname( + base_name, + cwd=cwd, + suffix='_proj_seg_thr_{}'.format(self.inputs.threshold))) + return outputs + + +class FindTheBiggestInputSpec(FSLCommandInputSpec): + in_files = traits.List( + File(exists=True), + argstr='%s', + desc=('a list of input volumes or a ' + 'singleMatrixFile'), + position=0, + mandatory=True) + out_file = File( + argstr='%s', + desc='file with the resulting segmentation', + position=2, + genfile=True, + hash_files=False) + + +class FindTheBiggestOutputSpec(TraitedSpec): + out_file = File( + exists=True, + argstr='%s', + desc='output file indexed in order of input files') + + +class FindTheBiggest(FSLCommand): + """ + Use FSL find_the_biggest for performing hard segmentation on + the outputs of connectivity-based thresholding in probtrack. + For complete details, see the `FDT + Documentation. 
`_
+
+    Example
+    -------
+
+    >>> from nipype.interfaces import fsl
+    >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii']
+    >>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation')
+    >>> fBig.cmdline
+    'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation'
+
+    """
+    _cmd = 'find_the_biggest'
+    input_spec = FindTheBiggestInputSpec
+    output_spec = FindTheBiggestOutputSpec
+
+    def _run_interface(self, runtime):
+        if not isdefined(self.inputs.out_file):
+            self.inputs.out_file = self._gen_fname(
+                'biggestSegmentation', suffix='')
+        return super(FindTheBiggest, self)._run_interface(runtime)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = self.inputs.out_file
+        if not isdefined(outputs['out_file']):
+            outputs['out_file'] = self._gen_fname(
+                'biggestSegmentation', suffix='')
+        outputs['out_file'] = os.path.abspath(outputs['out_file'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_file':
+            return self._list_outputs()[name]
+        else:
+            return None
+
+
+class TractSkeletonInputSpec(FSLCommandInputSpec):
+
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        argstr="-i %s",
+        desc="input image (typically mean FA volume)")
+    _proj_inputs = ["threshold", "distance_map", "data_file"]
+    project_data = traits.Bool(
+        argstr="-p %.3f %s %s %s %s",
+        requires=_proj_inputs,
+        desc="project data onto skeleton")
+    threshold = traits.Float(desc="skeleton threshold value")
+    distance_map = File(exists=True, desc="distance map image")
+    search_mask_file = File(
+        exists=True,
+        xor=["use_cingulum_mask"],
+        desc="mask in which to use alternate search rule")
+    use_cingulum_mask = traits.Bool(
+        True,
+        usedefault=True,
+        xor=["search_mask_file"],
+        desc=("perform alternate search using "
+              "built-in cingulum mask"))
+    data_file = File(
+        exists=True, desc="4D data to project onto skeleton (usually FA)")
+    alt_data_file = File(
+        exists=True,
+        argstr="-a %s",
+        desc="4D non-FA data to project onto skeleton")
+    alt_skeleton = File(
+        exists=True, argstr="-s %s", desc="alternate skeleton to use")
+    projected_data = File(desc="input data projected onto skeleton")
+    skeleton_file = traits.Either(
+        traits.Bool, File, argstr="-o %s", desc="write out skeleton image")
+
+
+class TractSkeletonOutputSpec(TraitedSpec):
+
+    projected_data = File(desc="input data projected onto skeleton")
+    skeleton_file = File(desc="tract skeleton image")
+
+
+class TractSkeleton(FSLCommand):
+    """Use FSL's tbss_skeleton to skeletonise an FA image or project arbitrary
+    values onto a skeleton.
+
+    There are two ways to use this interface. To create a skeleton from an FA
+    image, just supply the ``in_file`` and set ``skeleton_file`` to True (or
+    specify a skeleton filename). To project values onto a skeleton, you must
+    set ``project_data`` to True, and then also supply values for
+    ``threshold``, ``distance_map``, and ``data_file``. The
+    ``search_mask_file`` and ``use_cingulum_mask`` inputs are also used in data
+    projection, but ``use_cingulum_mask`` is set to True by default. This mask
+    controls where the projection algorithm searches within a circular space
+    around a tract, rather than in a single perpendicular direction.
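+
+    A minimal sketch of the projection mode (the distance-map and FA
+    filenames here are hypothetical; any TBSS-style pipeline produces
+    equivalents)::
+
+        import nipype.interfaces.fsl as fsl
+        proj = fsl.TractSkeleton()
+        proj.inputs.in_file = "mean_FA.nii.gz"       # mean FA volume
+        proj.inputs.project_data = True              # enables -p
+        proj.inputs.threshold = 0.2                  # skeleton FA threshold
+        proj.inputs.distance_map = "mean_FA_skeleton_dst.nii.gz"
+        proj.inputs.data_file = "all_FA.nii.gz"      # 4D values to project
+        proj.run()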
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.fsl as fsl
+    >>> skeletor = fsl.TractSkeleton()
+    >>> skeletor.inputs.in_file = "all_FA.nii.gz"
+    >>> skeletor.inputs.skeleton_file = True
+    >>> skeletor.run() # doctest: +SKIP
+
+    """
+
+    _cmd = "tbss_skeleton"
+    input_spec = TractSkeletonInputSpec
+    output_spec = TractSkeletonOutputSpec
+
+    def _format_arg(self, name, spec, value):
+        if name == "project_data":
+            if isdefined(value) and value:
+                _si = self.inputs
+                if isdefined(_si.use_cingulum_mask) and _si.use_cingulum_mask:
+                    mask_file = Info.standard_image("LowerCingulum_1mm.nii.gz")
+                else:
+                    mask_file = _si.search_mask_file
+                if not isdefined(_si.projected_data):
+                    proj_file = self._list_outputs()["projected_data"]
+                else:
+                    proj_file = _si.projected_data
+                return spec.argstr % (_si.threshold, _si.distance_map,
+                                      mask_file, _si.data_file, proj_file)
+        elif name == "skeleton_file":
+            if isinstance(value, bool):
+                return spec.argstr % self._list_outputs()["skeleton_file"]
+            else:
+                return spec.argstr % value
+        return super(TractSkeleton, self)._format_arg(name, spec, value)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        _si = self.inputs
+        if isdefined(_si.project_data) and _si.project_data:
+            proj_data = _si.projected_data
+            outputs["projected_data"] = proj_data
+            if not isdefined(proj_data):
+                stem = _si.data_file
+                if isdefined(_si.alt_data_file):
+                    stem = _si.alt_data_file
+                outputs["projected_data"] = fname_presuffix(
+                    stem,
+                    suffix="_skeletonised",
+                    newpath=os.getcwd(),
+                    use_ext=True)
+        if isdefined(_si.skeleton_file) and _si.skeleton_file:
+            outputs["skeleton_file"] = _si.skeleton_file
+            if isinstance(_si.skeleton_file, bool):
+                outputs["skeleton_file"] = fname_presuffix(
+                    _si.in_file,
+                    suffix="_skeleton",
+                    newpath=os.getcwd(),
+                    use_ext=True)
+        return outputs
+
+
+class DistanceMapInputSpec(FSLCommandInputSpec):
+
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        argstr="--in=%s",
+        desc="image to calculate distance values for")
+    mask_file = File(
+        exists=True,
+        argstr="--mask=%s",
+        desc="binary mask to constrain calculations")
+    invert_input = traits.Bool(argstr="--invert", desc="invert input image")
+    local_max_file = traits.Either(
+        traits.Bool,
+        File,
+        argstr="--localmax=%s",
+        desc="write an image of the local maxima",
+        hash_files=False)
+    distance_map = File(
+        genfile=True,
+        argstr="--out=%s",
+        desc="distance map to write",
+        hash_files=False)
+
+
+class DistanceMapOutputSpec(TraitedSpec):
+
+    distance_map = File(
+        exists=True, desc="value is distance to nearest nonzero voxels")
+    local_max_file = File(desc="image of local maxima")
+
+
+class DistanceMap(FSLCommand):
+    """Use FSL's distancemap to generate a map of the distance to the nearest
+    nonzero voxel.
+ + Example + ------- + + >>> import nipype.interfaces.fsl as fsl + >>> mapper = fsl.DistanceMap() + >>> mapper.inputs.in_file = "skeleton_mask.nii.gz" + >>> mapper.run() # doctest: +SKIP + + """ + + _cmd = "distancemap" + input_spec = DistanceMapInputSpec + output_spec = DistanceMapOutputSpec + + def _format_arg(self, name, spec, value): + if name == "local_max_file": + if isinstance(value, bool): + return spec.argstr % self._list_outputs()["local_max_file"] + return super(DistanceMap, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + _si = self.inputs + outputs["distance_map"] = _si.distance_map + if not isdefined(_si.distance_map): + outputs["distance_map"] = fname_presuffix( + _si.in_file, + suffix="_dstmap", + use_ext=True, + newpath=os.getcwd()) + outputs["distance_map"] = os.path.abspath(outputs["distance_map"]) + if isdefined(_si.local_max_file): + outputs["local_max_file"] = _si.local_max_file + if isinstance(_si.local_max_file, bool): + outputs["local_max_file"] = fname_presuffix( + _si.in_file, + suffix="_lclmax", + use_ext=True, + newpath=os.getcwd()) + outputs["local_max_file"] = os.path.abspath( + outputs["local_max_file"]) + return outputs + + def _gen_filename(self, name): + if name == "distance_map": + return self._list_outputs()["distance_map"] + return None + + +class MakeDyadicVectorsInputSpec(FSLCommandInputSpec): + theta_vol = File(exists=True, mandatory=True, position=0, argstr="%s") + phi_vol = File(exists=True, mandatory=True, position=1, argstr="%s") + mask = File(exists=True, position=2, argstr="%s") + output = File( + "dyads", position=3, usedefault=True, argstr="%s", hash_files=False) + perc = traits.Float( + desc=("the {perc}% angle of the output cone of " + "uncertainty (output will be in degrees)"), + position=4, + argstr="%f") + + +class MakeDyadicVectorsOutputSpec(TraitedSpec): + dyads = File(exists=True) + dispersion = File(exists=True) + + +class MakeDyadicVectors(FSLCommand): + """Create vector volume representing mean principal diffusion direction + and its uncertainty (dispersion)""" + + _cmd = "make_dyadic_vectors" + input_spec = MakeDyadicVectorsInputSpec + output_spec = MakeDyadicVectorsOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["dyads"] = self._gen_fname(self.inputs.output) + outputs["dispersion"] = self._gen_fname( + self.inputs.output, suffix="_dispersion") + + return outputs diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py new file mode 100644 index 0000000000..5b36f2cb5e --- /dev/null +++ b/nipype/interfaces/fsl/epi.py @@ -0,0 +1,1224 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 5.0.4. 
+""" +from __future__ import print_function, division, unicode_literals, \ + absolute_import +from builtins import str + +import os +import numpy as np +import nibabel as nb +import warnings + +from ...utils.filemanip import split_filename +from ...utils import NUMPY_MMAP + +from ..base import (traits, TraitedSpec, InputMultiPath, File, isdefined) +from .base import FSLCommand, FSLCommandInputSpec, Info + + +class PrepareFieldmapInputSpec(FSLCommandInputSpec): + scanner = traits.String( + 'SIEMENS', + argstr='%s', + position=1, + desc='must be SIEMENS', + usedefault=True) + in_phase = File( + exists=True, + argstr='%s', + position=2, + mandatory=True, + desc=('Phase difference map, in SIEMENS format range from ' + '0-4096 or 0-8192)')) + in_magnitude = File( + exists=True, + argstr='%s', + position=3, + mandatory=True, + desc='Magnitude difference map, brain extracted') + delta_TE = traits.Float( + 2.46, + usedefault=True, + mandatory=True, + argstr='%f', + position=-2, + desc=('echo time difference of the ' + 'fieldmap sequence in ms. (usually 2.46ms in' + ' Siemens)')) + nocheck = traits.Bool( + False, + position=-1, + argstr='--nocheck', + usedefault=True, + desc=('do not perform sanity checks for image ' + 'size/range/dimensions')) + out_fieldmap = File( + argstr='%s', position=4, desc='output name for prepared fieldmap') + + +class PrepareFieldmapOutputSpec(TraitedSpec): + out_fieldmap = File(exists=True, desc='output name for prepared fieldmap') + + +class PrepareFieldmap(FSLCommand): + """ + Interface for the fsl_prepare_fieldmap script (FSL 5.0) + + Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in + rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS + images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```). 
+ + + Examples + -------- + + >>> from nipype.interfaces.fsl import PrepareFieldmap + >>> prepare = PrepareFieldmap() + >>> prepare.inputs.in_phase = "phase.nii" + >>> prepare.inputs.in_magnitude = "magnitude.nii" + >>> prepare.inputs.output_type = "NIFTI_GZ" + >>> prepare.cmdline # doctest: +ELLIPSIS + 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii \ +.../phase_fslprepared.nii.gz 2.460000' + >>> res = prepare.run() # doctest: +SKIP + + + """ + _cmd = 'fsl_prepare_fieldmap' + input_spec = PrepareFieldmapInputSpec + output_spec = PrepareFieldmapOutputSpec + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + + if not isdefined(self.inputs.out_fieldmap): + self.inputs.out_fieldmap = self._gen_fname( + self.inputs.in_phase, suffix='_fslprepared') + + if not isdefined(self.inputs.nocheck) or not self.inputs.nocheck: + skip += ['nocheck'] + + return super(PrepareFieldmap, self)._parse_inputs(skip=skip) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_fieldmap'] = self.inputs.out_fieldmap + return outputs + + def _run_interface(self, runtime): + runtime = super(PrepareFieldmap, self)._run_interface(runtime) + + if runtime.returncode == 0: + out_file = self.inputs.out_fieldmap + im = nb.load(out_file, mmap=NUMPY_MMAP) + dumb_img = nb.Nifti1Image(np.zeros(im.shape), im.affine, im.header) + out_nii = nb.funcs.concat_images((im, dumb_img)) + nb.save(out_nii, out_file) + + return runtime + + +class TOPUPInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + mandatory=True, + desc='name of 4D file with images', + argstr='--imain=%s') + encoding_file = File( + exists=True, + mandatory=True, + xor=['encoding_direction'], + desc='name of text file with PE directions/times', + argstr='--datain=%s') + encoding_direction = traits.List( + traits.Enum('y', 'x', 'z', 'x-', 'y-', 'z-'), + mandatory=True, + xor=['encoding_file'], + requires=['readout_times'], + argstr='--datain=%s', + desc=('encoding direction for automatic ' + 'generation of encoding_file')) + readout_times = InputMultiPath( + traits.Float, + requires=['encoding_direction'], + xor=['encoding_file'], + mandatory=True, + desc=('readout times (dwell times by # ' + 'phase-encode steps minus 1)')) + out_base = File( + desc=('base-name of output files (spline ' + 'coefficients (Hz) and movement parameters)'), + name_source=['in_file'], + name_template='%s_base', + argstr='--out=%s', + hash_files=False) + out_field = File( + argstr='--fout=%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_field', + desc='name of image file with field (Hz)') + out_warp_prefix = traits.Str( + "warpfield", + argstr='--dfout=%s', + hash_files=False, + desc='prefix for the warpfield images (in mm)', + usedefault=True) + out_mat_prefix = traits.Str( + "xfm", + argstr='--rbmout=%s', + hash_files=False, + desc='prefix for the realignment matrices', + usedefault=True) + out_jac_prefix = traits.Str( + "jac", + argstr='--jacout=%s', + hash_files=False, + desc='prefix for the warpfield images', + usedefault=True) + out_corrected = File( + argstr='--iout=%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_corrected', + desc='name of 4D image file with unwarped images') + out_logfile = File( + argstr='--logout=%s', + desc='name of log-file', + name_source=['in_file'], + name_template='%s_topup.log', + keep_extension=True, + hash_files=False) + + # TODO: the following traits admit values separated by commas, one value + # per registration level inside topup. 
+    warp_res = traits.Float(
+        10.0, usedefault=False,
+        argstr='--warpres=%f',
+        desc=('(approximate) resolution (in mm) of warp '
+              'basis for the different sub-sampling levels.'))
+    subsamp = traits.Int(1, usedefault=False,
+                         argstr='--subsamp=%d', desc='sub-sampling scheme')
+    fwhm = traits.Float(
+        8.0,
+        usedefault=False,
+        argstr='--fwhm=%f',
+        desc='FWHM (in mm) of gaussian smoothing kernel')
+    config = traits.String(
+        'b02b0.cnf',
+        argstr='--config=%s',
+        usedefault=True,
+        desc=('Name of config file specifying command line '
+              'arguments'))
+    max_iter = traits.Int(
+        5, usedefault=False,
+        argstr='--miter=%d', desc='max # of non-linear iterations')
+    reg_lambda = traits.Float(
+        1.0, usedefault=False,
+        argstr='--lambda=%0.f',
+        desc=('lambda weighting value of the '
+              'regularisation term'))
+    ssqlambda = traits.Enum(
+        1,
+        0,
+        argstr='--ssqlambda=%d',
+        desc=('Weight lambda by the current value of the '
+              'ssd. If used (=1), the effective weight of '
+              'regularisation term becomes higher for the '
+              'initial iterations, therefore initial steps '
+              'are a little smoother than they would be '
+              'without weighting. This reduces the '
+              'risk of finding a local minimum.'))
+    regmod = traits.Enum(
+        'bending_energy',
+        'membrane_energy',
+        argstr='--regmod=%s',
+        desc=('Regularisation term implementation. Defaults '
+              'to bending_energy. Note that the two functions '
+              'have vastly different scales. The membrane '
+              'energy is based on the first derivatives and '
+              'the bending energy on the second derivatives. '
+              'The second derivatives will typically be much '
+              'smaller than the first derivatives, so input '
+              'lambda will have to be larger for '
+              'bending_energy to yield approximately the same '
+              'level of regularisation.'))
+    estmov = traits.Enum(
+        1, 0, argstr='--estmov=%d', desc='estimate movements if set')
+    minmet = traits.Enum(
+        0,
+        1,
+        argstr='--minmet=%d',
+        desc=('Minimisation method 0=Levenberg-Marquardt, '
+              '1=Scaled Conjugate Gradient'))
+    splineorder = traits.Int(
+        3, usedefault=False,
+        argstr='--splineorder=%d',
+        desc=('order of spline, 2->Quadratic spline, '
+              '3->Cubic spline'))
+    numprec = traits.Enum(
+        'double',
+        'float',
+        argstr='--numprec=%s',
+        desc=('Precision for representing Hessian, double '
+              'or float.'))
+    interp = traits.Enum(
+        'spline',
+        'linear',
+        argstr='--interp=%s',
+        desc='Image interpolation model, linear or spline.')
+    scale = traits.Enum(
+        0,
+        1,
+        argstr='--scale=%d',
+        desc=('If set (=1), the images are individually scaled '
+              'to a common mean'))
+    regrid = traits.Enum(
+        1,
+        0,
+        argstr='--regrid=%d',
+        desc=('If set (=1), the calculations are done in a '
+              'different grid'))
+
+
+class TOPUPOutputSpec(TraitedSpec):
+    out_fieldcoef = File(
+        exists=True, desc='file containing the field coefficients')
+    out_movpar = File(exists=True, desc='movpar.txt output file')
+    out_enc_file = File(desc='encoding directions file output for applytopup')
+    out_field = File(desc='name of image file with field (Hz)')
+    out_warps = traits.List(File(exists=True), desc='warpfield images')
+    out_jacs = traits.List(File(exists=True), desc='Jacobian images')
+    out_mats = traits.List(File(exists=True), desc='realignment matrices')
+    out_corrected = File(desc='name of 4D image file with unwarped images')
+    out_logfile = File(desc='name of log-file')
+
+
+class TOPUP(FSLCommand):
+    """
+    Interface for FSL topup, a tool for estimating and correcting
+    susceptibility induced distortions. See FSL documentation for
+    `reference `_,
+    `usage examples
+    `_,
+    and `exemplary config files
+    `_.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import TOPUP
+    >>> topup = TOPUP()
+    >>> topup.inputs.in_file = "b0_b0rev.nii"
+    >>> topup.inputs.encoding_file = "topup_encoding.txt"
+    >>> topup.inputs.output_type = "NIFTI_GZ"
+    >>> topup.cmdline # doctest: +ELLIPSIS
+    'topup --config=b02b0.cnf --datain=topup_encoding.txt \
+--fwhm=8.000000 --imain=b0_b0rev.nii --miter=5 \
+--out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz \
+--fout=b0_b0rev_field.nii.gz --jacout=jac --logout=b0_b0rev_topup.log \
+--rbmout=xfm --dfout=warpfield --lambda=1 --splineorder=3 --subsamp=1 \
+--warpres=10.000000'
+    >>> res = topup.run() # doctest: +SKIP
+
+    """
+    _cmd = 'topup'
+    input_spec = TOPUPInputSpec
+    output_spec = TOPUPOutputSpec
+
+    def _format_arg(self, name, trait_spec, value):
+        if name == 'encoding_direction':
+            return trait_spec.argstr % self._generate_encfile()
+        if name == 'out_base':
+            path, name, ext = split_filename(value)
+            if path != '':
+                if not os.path.exists(path):
+                    raise ValueError('out_base path must exist if provided')
+        return super(TOPUP, self)._format_arg(name, trait_spec, value)
+
+    def _list_outputs(self):
+        outputs = super(TOPUP, self)._list_outputs()
+        del outputs['out_base']
+        base_path = None
+        if isdefined(self.inputs.out_base):
+            base_path, base, _ = split_filename(self.inputs.out_base)
+            if base_path == '':
+                base_path = None
+        else:
+            base = split_filename(self.inputs.in_file)[1] + '_base'
+        outputs['out_fieldcoef'] = self._gen_fname(
+            base, suffix='_fieldcoef', cwd=base_path)
+        outputs['out_movpar'] = self._gen_fname(
+            base, suffix='_movpar', ext='.txt', cwd=base_path)
+
+        n_vols = nb.load(self.inputs.in_file).shape[-1]
+        ext = Info.output_type_to_ext(self.inputs.output_type)
+        fmt = os.path.abspath('{prefix}_{i:02d}{ext}').format
+        outputs['out_warps'] = [
+            fmt(prefix=self.inputs.out_warp_prefix, i=i, ext=ext)
+            for i in range(1, n_vols + 1)
+        ]
+        outputs['out_jacs'] = [
+            fmt(prefix=self.inputs.out_jac_prefix, i=i, ext=ext)
+            for i in range(1, n_vols + 1)
+        ]
+        outputs['out_mats'] = [
+            fmt(prefix=self.inputs.out_mat_prefix, i=i, ext=".mat")
+            for i in range(1, n_vols + 1)
+        ]
+
+        if isdefined(self.inputs.encoding_direction):
+            outputs['out_enc_file'] = self._get_encfilename()
+        return outputs
+
+    def _get_encfilename(self):
+        out_file = os.path.join(
+            os.getcwd(),
+            ('%s_encfile.txt' % split_filename(self.inputs.in_file)[1]))
+        return out_file
+
+    def _generate_encfile(self):
+        """Generate a topup compatible encoding file based on given directions
+        """
+        out_file = self._get_encfilename()
+        durations = self.inputs.readout_times
+        if len(self.inputs.encoding_direction) != len(durations):
+            if len(self.inputs.readout_times) != 1:
+                raise ValueError(('Readout time must be a float or match '
+                                  'the length of encoding directions'))
+            durations = durations * len(self.inputs.encoding_direction)
+
+        lines = []
+        for idx, encdir in enumerate(self.inputs.encoding_direction):
+            direction = 1.0
+            if encdir.endswith('-'):
+                direction = -1.0
+            line = [
+                float(val[0] == encdir[0]) * direction
+                for val in ['x', 'y', 'z']
+            ] + [durations[idx]]
+            lines.append(line)
+        np.savetxt(out_file, np.array(lines), fmt=b'%d %d %d %.8f')
+        return out_file
+
+    def _overload_extension(self, value, name=None):
+        if name == 'out_base':
+            return value
+        return super(TOPUP, self)._overload_extension(value, name)
+
+
+class ApplyTOPUPInputSpec(FSLCommandInputSpec):
+    in_files = 
InputMultiPath( + File(exists=True), + mandatory=True, + desc='name of file with images', + argstr='--imain=%s', + sep=',') + encoding_file = File( + exists=True, + mandatory=True, + desc='name of text file with PE directions/times', + argstr='--datain=%s') + in_index = traits.List( + traits.Int, + argstr='--inindex=%s', + sep=',', + desc='comma separated list of indices corresponding to --datain') + in_topup_fieldcoef = File( + exists=True, + argstr="--topup=%s", + copyfile=False, + requires=['in_topup_movpar'], + desc=('topup file containing the field ' + 'coefficients')) + in_topup_movpar = File( + exists=True, + requires=['in_topup_fieldcoef'], + copyfile=False, + desc='topup movpar.txt file') + out_corrected = File( + desc='output (warped) image', + name_source=['in_files'], + name_template='%s_corrected', + argstr='--out=%s') + method = traits.Enum( + 'jac', + 'lsr', + argstr='--method=%s', + desc=('use jacobian modulation (jac) or least-squares' + ' resampling (lsr)')) + interp = traits.Enum( + 'trilinear', + 'spline', + argstr='--interp=%s', + desc='interpolation method') + datatype = traits.Enum( + 'char', + 'short', + 'int', + 'float', + 'double', + argstr='-d=%s', + desc='force output data type') + + +class ApplyTOPUPOutputSpec(TraitedSpec): + out_corrected = File( + exists=True, desc=('name of 4D image file with ' + 'unwarped images')) + + +class ApplyTOPUP(FSLCommand): + """ + Interface for FSL topup, a tool for estimating and correcting + susceptibility induced distortions. + `General reference + `_ + and `use example + `_. + + + Examples + -------- + + >>> from nipype.interfaces.fsl import ApplyTOPUP + >>> applytopup = ApplyTOPUP() + >>> applytopup.inputs.in_files = ["epi.nii", "epi_rev.nii"] + >>> applytopup.inputs.encoding_file = "topup_encoding.txt" + >>> applytopup.inputs.in_topup_fieldcoef = "topup_fieldcoef.nii.gz" + >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt" + >>> applytopup.inputs.output_type = "NIFTI_GZ" + >>> applytopup.cmdline # doctest: +ELLIPSIS + 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii \ +--inindex=1,2 --topup=topup --out=epi_corrected.nii.gz' + >>> res = applytopup.run() # doctest: +SKIP + + """ + _cmd = 'applytopup' + input_spec = ApplyTOPUPInputSpec + output_spec = ApplyTOPUPOutputSpec + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + + # If not defined, assume index are the first N entries in the + # parameters file, for N input images. 
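+        # e.g. for in_files=['epi.nii', 'epi_rev.nii'] this default yields
+        # in_index=[1, 2], i.e. the first two rows of the --datain file.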
+ if not isdefined(self.inputs.in_index): + self.inputs.in_index = list( + range(1, + len(self.inputs.in_files) + 1)) + + return super(ApplyTOPUP, self)._parse_inputs(skip=skip) + + def _format_arg(self, name, spec, value): + if name == 'in_topup_fieldcoef': + return spec.argstr % value.split('_fieldcoef')[0] + return super(ApplyTOPUP, self)._format_arg(name, spec, value) + + +class EddyInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr='--imain=%s', + desc=('File containing all the images to estimate ' + 'distortions for')) + in_mask = File( + exists=True, + mandatory=True, + argstr='--mask=%s', + desc='Mask to indicate brain') + in_index = File( + exists=True, + mandatory=True, + argstr='--index=%s', + desc=('File containing indices for all volumes in --imain ' + 'into --acqp and --topup')) + in_acqp = File( + exists=True, + mandatory=True, + argstr='--acqp=%s', + desc='File containing acquisition parameters') + in_bvec = File( + exists=True, + mandatory=True, + argstr='--bvecs=%s', + desc=('File containing the b-vectors for all volumes in ' + '--imain')) + in_bval = File( + exists=True, + mandatory=True, + argstr='--bvals=%s', + desc=('File containing the b-values for all volumes in ' + '--imain')) + out_base = traits.Str( + 'eddy_corrected', + argstr='--out=%s', + usedefault=True, + desc=('basename for output (warped) image')) + session = File( + exists=True, + argstr='--session=%s', + desc=('File containing session indices for all volumes in ' + '--imain')) + in_topup_fieldcoef = File( + exists=True, + argstr="--topup=%s", + requires=['in_topup_movpar'], + desc=('topup file containing the field ' + 'coefficients')) + in_topup_movpar = File( + exists=True, + requires=['in_topup_fieldcoef'], + desc='topup movpar.txt file') + + flm = traits.Enum( + 'linear', + 'quadratic', + 'cubic', + argstr='--flm=%s', + desc='First level EC model') + + slm = traits.Enum( + 'none', + 'linear', + 'quadratic', + argstr='--slm=%s', + desc='Second level EC model') + + fep = traits.Bool( + False, argstr='--fep', desc='Fill empty planes in x- or y-directions') + + interp = traits.Enum( + 'spline', + 'trilinear', + argstr='--interp=%s', + desc='Interpolation model for estimation step') + + nvoxhp = traits.Int( + 1000, usedefault=True, + argstr='--nvoxhp=%s', + desc=('# of voxels used to estimate the ' + 'hyperparameters')) + + fudge_factor = traits.Float( + 10.0, usedefault=True, + argstr='--ff=%s', + desc=('Fudge factor for hyperparameter ' + 'error variance')) + + dont_sep_offs_move = traits.Bool( + False, + argstr='--dont_sep_offs_move', + desc=('Do NOT attempt to separate ' + 'field offset from subject ' + 'movement')) + + dont_peas = traits.Bool( + False, + argstr='--dont_peas', + desc="Do NOT perform a post-eddy alignment of " + "shells") + + fwhm = traits.Float( + desc=('FWHM for conditioning filter when estimating ' + 'the parameters'), + argstr='--fwhm=%s') + + niter = traits.Int(5, usedefault=True, + argstr='--niter=%s', desc='Number of iterations') + + method = traits.Enum( + 'jac', + 'lsr', + argstr='--resamp=%s', + desc=('Final resampling method (jacobian/least ' + 'squares)')) + repol = traits.Bool( + False, argstr='--repol', desc='Detect and replace outlier slices') + num_threads = traits.Int( + 1, + usedefault=True, + nohash=True, + desc="Number of openmp threads to use") + is_shelled = traits.Bool( + False, + argstr='--data_is_shelled', + desc="Override internal check to ensure that " + "date are acquired on a set of b-value " + "shells") + field = 
traits.Str( + argstr='--field=%s', + desc="NonTOPUP fieldmap scaled in Hz - filename has " + "to be provided without an extension. TOPUP is " + "strongly recommended") + field_mat = File( + exists=True, + argstr='--field_mat=%s', + desc="Matrix that specifies the relative locations of " + "the field specified by --field and first volume " + "in file --imain") + use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu") + + +class EddyOutputSpec(TraitedSpec): + out_corrected = File( + exists=True, desc='4D image file containing all the corrected volumes') + out_parameter = File( + exists=True, + desc=('text file with parameters definining the field and' + 'movement for each scan')) + out_rotated_bvecs = File( + exists=True, desc='File containing rotated b-values for all volumes') + out_movement_rms = File( + exists=True, desc='Summary of the "total movement" in each volume') + out_restricted_movement_rms = File( + exists=True, + desc=('Summary of the "total movement" in each volume ' + 'disregarding translation in the PE direction')) + out_shell_alignment_parameters = File( + exists=True, + desc=('File containing rigid body movement parameters ' + 'between the different shells as estimated by a ' + 'post-hoc mutual information based registration')) + out_outlier_report = File( + exists=True, + desc=('Text-file with a plain language report on what ' + 'outlier slices eddy has found')) + + +class Eddy(FSLCommand): + """ + Interface for FSL eddy, a tool for estimating and correcting eddy + currents induced distortions. `User guide + `_ and + `more info regarding acqp file + `_. + + Examples + -------- + + >>> from nipype.interfaces.fsl import Eddy + >>> eddy = Eddy() + >>> eddy.inputs.in_file = 'epi.nii' + >>> eddy.inputs.in_mask = 'epi_mask.nii' + >>> eddy.inputs.in_index = 'epi_index.txt' + >>> eddy.inputs.in_acqp = 'epi_acqp.txt' + >>> eddy.inputs.in_bvec = 'bvecs.scheme' + >>> eddy.inputs.in_bval = 'bvals.scheme' + >>> eddy.inputs.use_cuda = True + >>> eddy.cmdline # doctest: +ELLIPSIS + 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ +--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ +--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' + >>> eddy.inputs.use_cuda = False + >>> eddy.cmdline # doctest: +ELLIPSIS + 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ +--bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ +--mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' + >>> res = eddy.run() # doctest: +SKIP + + """ + _cmd = 'eddy_openmp' + input_spec = EddyInputSpec + output_spec = EddyOutputSpec + + _num_threads = 1 + + def __init__(self, **inputs): + super(Eddy, self).__init__(**inputs) + self.inputs.on_trait_change(self._num_threads_update, 'num_threads') + if not isdefined(self.inputs.num_threads): + self.inputs.num_threads = self._num_threads + else: + self._num_threads_update() + self.inputs.on_trait_change(self._use_cuda, 'use_cuda') + if isdefined(self.inputs.use_cuda): + self._use_cuda() + + def _num_threads_update(self): + self._num_threads = self.inputs.num_threads + if not isdefined(self.inputs.num_threads): + if 'OMP_NUM_THREADS' in self.inputs.environ: + del self.inputs.environ['OMP_NUM_THREADS'] + else: + self.inputs.environ['OMP_NUM_THREADS'] = str( + self.inputs.num_threads) + + def _use_cuda(self): + self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp' + + def _run_interface(self, runtime): + # If 'eddy_openmp' is missing, use 'eddy' + FSLDIR = os.getenv('FSLDIR', '') + 
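+        # (some FSL installations ship only a single 'eddy' binary; the
+        # check below falls back to it when 'eddy_openmp' is not present
+        # under $FSLDIR/bin)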
cmd = self._cmd + if all((FSLDIR != '', cmd == 'eddy_openmp', + not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))): + self._cmd = 'eddy' + runtime = super(Eddy, self)._run_interface(runtime) + + # Restore command to avoid side-effects + self._cmd = cmd + return runtime + + def _format_arg(self, name, spec, value): + if name == 'in_topup_fieldcoef': + return spec.argstr % value.split('_fieldcoef')[0] + if name == 'out_base': + return spec.argstr % os.path.abspath(value) + return super(Eddy, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_corrected'] = os.path.abspath( + '%s.nii.gz' % self.inputs.out_base) + outputs['out_parameter'] = os.path.abspath( + '%s.eddy_parameters' % self.inputs.out_base) + + # File generation might depend on the version of EDDY + out_rotated_bvecs = os.path.abspath( + '%s.eddy_rotated_bvecs' % self.inputs.out_base) + out_movement_rms = os.path.abspath( + '%s.eddy_movement_rms' % self.inputs.out_base) + out_restricted_movement_rms = os.path.abspath( + '%s.eddy_restricted_movement_rms' % self.inputs.out_base) + out_shell_alignment_parameters = os.path.abspath( + '%s.eddy_post_eddy_shell_alignment_parameters' % + self.inputs.out_base) + out_outlier_report = os.path.abspath( + '%s.eddy_outlier_report' % self.inputs.out_base) + + if os.path.exists(out_rotated_bvecs): + outputs['out_rotated_bvecs'] = out_rotated_bvecs + if os.path.exists(out_movement_rms): + outputs['out_movement_rms'] = out_movement_rms + if os.path.exists(out_restricted_movement_rms): + outputs['out_restricted_movement_rms'] = \ + out_restricted_movement_rms + if os.path.exists(out_shell_alignment_parameters): + outputs['out_shell_alignment_parameters'] = \ + out_shell_alignment_parameters + if os.path.exists(out_outlier_report): + outputs['out_outlier_report'] = out_outlier_report + + return outputs + + +class SigLossInputSpec(FSLCommandInputSpec): + in_file = File( + mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') + out_file = File( + argstr='-s %s', desc='output signal loss estimate file', genfile=True) + + mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') + echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') + slice_direction = traits.Enum( + 'x', 'y', 'z', argstr='-d %s', desc='slicing direction') + + +class SigLossOuputSpec(TraitedSpec): + out_file = File(exists=True, desc='signal loss estimate file') + + +class SigLoss(FSLCommand): + """ + Estimates signal loss from a field map (in rad/s) + + Examples + -------- + + >>> from nipype.interfaces.fsl import SigLoss + >>> sigloss = SigLoss() + >>> sigloss.inputs.in_file = "phase.nii" + >>> sigloss.inputs.echo_time = 0.03 + >>> sigloss.inputs.output_type = "NIFTI_GZ" + >>> sigloss.cmdline # doctest: +ELLIPSIS + 'sigloss --te=0.030000 -i phase.nii -s .../phase_sigloss.nii.gz' + >>> res = sigloss.run() # doctest: +SKIP + + + """ + input_spec = SigLossInputSpec + output_spec = SigLossOuputSpec + _cmd = 'sigloss' + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if ((not isdefined(outputs['out_file'])) + and (isdefined(self.inputs.in_file))): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_sigloss') + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class EpiRegInputSpec(FSLCommandInputSpec): + epi = File( + exists=True, + argstr='--epi=%s', + 
mandatory=True,
+        position=-4,
+        desc='EPI image')
+    t1_head = File(
+        exists=True,
+        argstr='--t1=%s',
+        mandatory=True,
+        position=-3,
+        desc='wholehead T1 image')
+    t1_brain = File(
+        exists=True,
+        argstr='--t1brain=%s',
+        mandatory=True,
+        position=-2,
+        desc='brain extracted T1 image')
+    out_base = traits.String(
+        "epi2struct",
+        desc='output base name',
+        argstr='--out=%s',
+        position=-1,
+        usedefault=True)
+    fmap = File(
+        exists=True, argstr='--fmap=%s', desc='fieldmap image (in rad/s)')
+    fmapmag = File(
+        exists=True,
+        argstr='--fmapmag=%s',
+        desc='fieldmap magnitude image - wholehead')
+    fmapmagbrain = File(
+        exists=True,
+        argstr='--fmapmagbrain=%s',
+        desc='fieldmap magnitude image - brain extracted')
+    wmseg = File(
+        exists=True,
+        argstr='--wmseg=%s',
+        desc='white matter segmentation of T1 image, has to be named \
+        like the t1brain and end with _wmseg')
+    echospacing = traits.Float(
+        argstr='--echospacing=%f',
+        desc='Effective EPI echo spacing \
+        (sometimes called dwell time) - in seconds')
+    pedir = traits.Enum(
+        'x',
+        'y',
+        'z',
+        '-x',
+        '-y',
+        '-z',
+        argstr='--pedir=%s',
+        desc='phase encoding direction, dir = x/y/z/-x/-y/-z')
+
+    weight_image = File(
+        exists=True,
+        argstr='--weight=%s',
+        desc='weighting image (in T1 space)')
+    no_fmapreg = traits.Bool(
+        False,
+        argstr='--nofmapreg',
+        desc='do not perform registration of fmap to T1 \
+        (use if fmap already registered)')
+    no_clean = traits.Bool(
+        True,
+        argstr='--noclean',
+        usedefault=True,
+        desc='do not clean up intermediate files')
+
+
+class EpiRegOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='unwarped and coregistered epi input')
+    out_1vol = File(
+        exists=True, desc='unwarped and coregistered single volume')
+    fmap2str_mat = File(
+        exists=True, desc='rigid fieldmap-to-structural transform')
+    fmap2epi_mat = File(exists=True, desc='rigid fieldmap-to-epi transform')
+    fmap_epi = File(exists=True, desc='fieldmap in epi space')
+    fmap_str = File(exists=True, desc='fieldmap in structural space')
+    fmapmag_str = File(
+        exists=True, desc='fieldmap magnitude image in structural space')
+    epi2str_inv = File(exists=True, desc='rigid structural-to-epi transform')
+    epi2str_mat = File(exists=True, desc='rigid epi-to-structural transform')
+    shiftmap = File(exists=True, desc='shiftmap in epi space')
+    fullwarp = File(
+        exists=True,
+        desc='warpfield to unwarp epi and transform into \
+        structural space')
+    wmseg = File(
+        exists=True, desc='white matter segmentation used in flirt bbr')
+    seg = File(
+        exists=True, desc='wm/gm/csf segmentation used in flirt bbr')
+    wmedge = File(exists=True, desc='white matter edges for visualization')
+
+
+class EpiReg(FSLCommand):
+    """
+    Runs FSL epi_reg script for simultaneous coregistration and fieldmap
+    unwarping.
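+
+    Without any fieldmap inputs, epi_reg performs the BBR coregistration
+    only (no distortion correction). A minimal sketch of that call (file
+    names are illustrative):
+
+    >>> from nipype.interfaces.fsl import EpiReg
+    >>> simple = EpiReg(epi='epi.nii', t1_head='T1.nii',
+    ...                 t1_brain='T1_brain.nii')  # doctest: +SKIP
+    >>> simple.cmdline  # doctest: +SKIP
+    'epi_reg --noclean --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct'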
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import EpiReg
+    >>> epireg = EpiReg()
+    >>> epireg.inputs.epi='epi.nii'
+    >>> epireg.inputs.t1_head='T1.nii'
+    >>> epireg.inputs.t1_brain='T1_brain.nii'
+    >>> epireg.inputs.out_base='epi2struct'
+    >>> epireg.inputs.fmap='fieldmap_phase_fslprepared.nii'
+    >>> epireg.inputs.fmapmag='fieldmap_mag.nii'
+    >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii'
+    >>> epireg.inputs.echospacing=0.00067
+    >>> epireg.inputs.pedir='y'
+    >>> epireg.cmdline # doctest: +ELLIPSIS
+    'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii \
+--fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --noclean \
+--pedir=y --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct'
+    >>> epireg.run() # doctest: +SKIP
+
+    """
+    _cmd = 'epi_reg'
+    input_spec = EpiRegInputSpec
+    output_spec = EpiRegOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = os.path.join(os.getcwd(),
+                                           self.inputs.out_base + '.nii.gz')
+        if (not (isdefined(self.inputs.no_fmapreg) and self.inputs.no_fmapreg)
+                and isdefined(self.inputs.fmap)):
+            outputs['out_1vol'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_1vol.nii.gz')
+            outputs['fmap2str_mat'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_fieldmap2str.mat')
+            outputs['fmap2epi_mat'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.mat')
+            outputs['fmap_epi'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.nii.gz')
+            outputs['fmap_str'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_fieldmaprads2str.nii.gz')
+            outputs['fmapmag_str'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_fieldmap2str.nii.gz')
+            outputs['shiftmap'] = os.path.join(
+                os.getcwd(),
+                self.inputs.out_base + '_fieldmaprads2epi_shift.nii.gz')
+            outputs['fullwarp'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_warp.nii.gz')
+            outputs['epi2str_inv'] = os.path.join(
+                os.getcwd(), self.inputs.out_base + '_inv.mat')
+
+        outputs['epi2str_mat'] = os.path.join(os.getcwd(),
+                                              self.inputs.out_base + '.mat')
+        outputs['wmedge'] = os.path.join(
+            os.getcwd(), self.inputs.out_base + '_fast_wmedge.nii.gz')
+        outputs['wmseg'] = os.path.join(
+            os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz')
+        outputs['seg'] = os.path.join(
+            os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz')
+
+        return outputs
+
+
+#######################################
+# deprecated interfaces
+#######################################
+
+
+class EPIDeWarpInputSpec(FSLCommandInputSpec):
+    mag_file = File(
+        exists=True,
+        desc='Magnitude file',
+        argstr='--mag %s',
+        position=0,
+        mandatory=True)
+    dph_file = File(
+        exists=True,
+        desc='Phase file assumed to be scaled from 0 to 4095',
+        argstr='--dph %s',
+        mandatory=True)
+    exf_file = File(
+        exists=True,
+        desc='example func volume (or use epi)',
+        argstr='--exf %s')
+    epi_file = File(
+        exists=True, desc='EPI volume to unwarp', argstr='--epi %s')
+    tediff = traits.Float(
+        2.46,
+        usedefault=True,
+        desc='difference in B0 field map TEs',
+        argstr='--tediff %s')
+    esp = traits.Float(
+        0.58, desc='EPI echo spacing', argstr='--esp %s', usedefault=True)
+    sigma = traits.Int(
+        2,
+        usedefault=True,
+        argstr='--sigma %s',
+        desc="2D spatial gaussian smoothing \
+        stdev (default = 2mm)")
+    vsm = traits.String(
+        genfile=True, desc='voxel shift map', argstr='--vsm %s')
+    exfdw = traits.String(
+        desc='dewarped example func volume', genfile=True, argstr='--exfdw %s')
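+    # unlike exfdw/vsm/tmpdir, epidw is not auto-generated (genfile=False);
+    # _list_outputs derives the unwarped file name from epi_file instead
+    # when no explicit name is given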
epidw = traits.String( + desc='dewarped epi volume', genfile=False, argstr='--epidw %s') + tmpdir = traits.String(genfile=True, desc='tmpdir', argstr='--tmpdir %s') + nocleanup = traits.Bool( + True, usedefault=True, desc='no cleanup', argstr='--nocleanup') + cleanup = traits.Bool(desc='cleanup', argstr='--cleanup') + + +class EPIDeWarpOutputSpec(TraitedSpec): + unwarped_file = File(desc="unwarped epi file") + vsm_file = File(desc="voxel shift map") + exfdw = File(desc="dewarped functional volume example") + exf_mask = File(desc="Mask from example functional volume") + + +class EPIDeWarp(FSLCommand): + """ + Wraps the unwarping script `epidewarp.fsl + `_. + + .. warning:: deprecated in FSL, please use + :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead. + + Examples + -------- + + >>> from nipype.interfaces.fsl import EPIDeWarp + >>> dewarp = EPIDeWarp() + >>> dewarp.inputs.epi_file = "functional.nii" + >>> dewarp.inputs.mag_file = "magnitude.nii" + >>> dewarp.inputs.dph_file = "phase.nii" + >>> dewarp.inputs.output_type = "NIFTI_GZ" + >>> dewarp.cmdline # doctest: +ELLIPSIS + 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii \ +--esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 \ +--tmpdir .../temp --vsm .../vsm.nii.gz' + >>> res = dewarp.run() # doctest: +SKIP + + + """ + _cmd = 'epidewarp.fsl' + input_spec = EPIDeWarpInputSpec + output_spec = EPIDeWarpOutputSpec + + def __init__(self, **inputs): + warnings.warn(("Deprecated: Please use " + "nipype.workflows.dmri.preprocess.epi.sdc_fmb instead"), + DeprecationWarning) + return super(EPIDeWarp, self).__init__(**inputs) + + def _run_interface(self, runtime): + runtime = super(EPIDeWarp, self)._run_interface(runtime) + if runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _gen_filename(self, name): + if name == 'exfdw': + if isdefined(self.inputs.exf_file): + return self._gen_fname(self.inputs.exf_file, suffix="_exfdw") + else: + return self._gen_fname("exfdw") + if name == 'epidw': + if isdefined(self.inputs.epi_file): + return self._gen_fname(self.inputs.epi_file, suffix="_epidw") + if name == 'vsm': + return self._gen_fname('vsm') + if name == 'tmpdir': + return os.path.join(os.getcwd(), 'temp') + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + if not isdefined(self.inputs.exfdw): + outputs['exfdw'] = self._gen_filename('exfdw') + else: + outputs['exfdw'] = self.inputs.exfdw + if isdefined(self.inputs.epi_file): + if isdefined(self.inputs.epidw): + outputs['unwarped_file'] = self.inputs.epidw + else: + outputs['unwarped_file'] = self._gen_filename('epidw') + if not isdefined(self.inputs.vsm): + outputs['vsm_file'] = self._gen_filename('vsm') + else: + outputs['vsm_file'] = self._gen_fname(self.inputs.vsm) + if not isdefined(self.inputs.tmpdir): + outputs['exf_mask'] = self._gen_fname( + cwd=self._gen_filename('tmpdir'), basename='maskexf') + else: + outputs['exf_mask'] = self._gen_fname( + cwd=self.inputs.tmpdir, basename='maskexf') + return outputs + + +class EddyCorrectInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + desc='4D input file', + argstr='%s', + position=0, + mandatory=True) + out_file = File( + desc='4D output file', + argstr='%s', + position=1, + name_source=['in_file'], + name_template='%s_edc', + output_name='eddy_corrected') + ref_num = traits.Int( + 0, + argstr='%d', + position=2, + desc='reference number', + mandatory=True, + usedefault=True) + + +class EddyCorrectOutputSpec(TraitedSpec): + 
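+    # single output field; the name matches output_name declared on
+    # EddyCorrectInputSpec.out_file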
eddy_corrected = File( + exists=True, desc='path/name of 4D eddy corrected output file') + + +class EddyCorrect(FSLCommand): + """ + + .. warning:: Deprecated in FSL. Please use + :class:`nipype.interfaces.fsl.epi.Eddy` instead + + Example + ------- + + >>> from nipype.interfaces.fsl import EddyCorrect + >>> eddyc = EddyCorrect(in_file='diffusion.nii', + ... out_file="diffusion_edc.nii", ref_num=0) + >>> eddyc.cmdline + 'eddy_correct diffusion.nii diffusion_edc.nii 0' + + """ + _cmd = 'eddy_correct' + input_spec = EddyCorrectInputSpec + output_spec = EddyCorrectOutputSpec + + def __init__(self, **inputs): + warnings.warn(("Deprecated: Please use nipype.interfaces.fsl.epi.Eddy " + "instead"), DeprecationWarning) + return super(EddyCorrect, self).__init__(**inputs) + + def _run_interface(self, runtime): + runtime = super(EddyCorrect, self)._run_interface(runtime) + if runtime.stderr: + self.raise_exception(runtime) + return runtime diff --git a/nipype/interfaces/fsl/fix.py b/nipype/interfaces/fsl/fix.py new file mode 100644 index 0000000000..ebe986eb79 --- /dev/null +++ b/nipype/interfaces/fsl/fix.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fix module provides classes for interfacing with the `FSL FIX +` command line tools. + +This was written to work with FSL version v5.0 + +The following example assumes that melodic has already been run, so +the datagrabber is configured to start from there (a list of melodic +output directories). If no hand_labels_noise.txt exists already, this +will fail and comment on that. + +EXAMPLE: +subject_list = ['1', '2', '3'] + +fix_pipeline = pe.Workflow(name='fix_pipeline') +fix_pipeline.base_dir = os.path.abspath('./') + +info = dict(mel_ica=[['subject_id']]) + +datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'], outfields=['mel_ica']), name='datasource') +datasource.inputs.base_directory = os.path.abspath('') +datasource.inputs.template = '%s/' +datasource.inputs.template_args = info +datasource.inputs.subject_id = subject_list +datasource.inputs.sort_filelist = True +datasource.iterables = ('subject_id', subject_list) + +# create training set by looking into which mel_icas have hand_labels_noise.txt files in them +create_training_set = pe.JoinNode(interface=fix.TrainingSetCreator(), joinfield=['mel_icas_in'], joinsource='datasource', name='trainingset_creator') + +# train the classifier +train_node = pe.Node(interface=fix.Training(trained_wts_filestem='foo'), name='train_node') + +# test accuracy. 
Probably not necessary, and may also fail depending on the local fix installation (fix itself exits without an error msg)
+accuracy_tester = pe.Node(interface=fix.AccuracyTester(output_directory='more_foo'), name='accuracy_tester')
+
+# classify components
+classify_node = pe.Node(interface=fix.Classifier(), name='classify')
+
+# apply cleanup
+cleaner_node = pe.Node(interface=fix.Cleaner(), name='cleaner')
+
+fix_pipeline.connect(datasource, 'mel_ica', create_training_set, 'mel_icas_in')
+fix_pipeline.connect(create_training_set, 'mel_icas_out', train_node, 'mel_icas')
+fix_pipeline.connect(train_node, 'trained_wts_file', accuracy_tester, 'trained_wts_file')
+fix_pipeline.connect(datasource, 'mel_ica', accuracy_tester, 'mel_icas')
+fix_pipeline.connect(train_node, 'trained_wts_file', classify_node, 'trained_wts_file')
+fix_pipeline.connect(datasource, 'mel_ica', classify_node, 'mel_ica')
+fix_pipeline.connect(classify_node, 'artifacts_list_file', cleaner_node, 'artifacts_list_file')
+
+fix_pipeline.write_graph()
+outgraph = fix_pipeline.run()
+
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,
+                    InputMultiPath, OutputMultiPath, BaseInterface,
+                    BaseInterfaceInputSpec, traits, Directory, File, isdefined)
+import os
+
+
+class TrainingSetCreatorInputSpec(BaseInterfaceInputSpec):
+    mel_icas_in = InputMultiPath(
+        Directory(exists=True),
+        copyfile=False,
+        desc='Melodic output directories',
+        argstr='%s',
+        position=-1)
+
+
+class TrainingSetCreatorOutputSpec(TraitedSpec):
+    mel_icas_out = OutputMultiPath(
+        Directory(exists=True),
+        copyfile=False,
+        desc='Hand labels for noise vs signal',
+        argstr='%s',
+        position=-1)
+
+
+class TrainingSetCreator(BaseInterface):
+    '''Goes through the set of provided melodic output directories to find
+    all the ones that have a hand_labels_noise.txt file in them.
+
+    This is outsourced as a separate class, so that the pipeline is
+    rerun every time a hand-labeled file has been changed, or a new
+    one created.
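+
+    A minimal sketch (directory names are illustrative):
+
+    >>> from nipype.interfaces.fsl import fix
+    >>> creator = fix.TrainingSetCreator()
+    >>> creator.inputs.mel_icas_in = ['sub01.ica', 'sub02.ica']  # doctest: +SKIP
+    >>> res = creator.run()  # doctest: +SKIP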
+
+    '''
+    input_spec = TrainingSetCreatorInputSpec
+    output_spec = TrainingSetCreatorOutputSpec
+    _always_run = True
+
+    def _run_interface(self, runtime):
+        mel_icas = []
+        for item in self.inputs.mel_icas_in:
+            if os.path.exists(os.path.join(item, 'hand_labels_noise.txt')):
+                mel_icas.append(item)
+
+        if len(mel_icas) == 0:
+            raise Exception(
+                '%s did not find any hand_labels_noise.txt files in the following directories: %s'
+                % (self.__class__.__name__, self.inputs.mel_icas_in))
+
+        return runtime
+
+    def _list_outputs(self):
+        mel_icas = []
+        for item in self.inputs.mel_icas_in:
+            if os.path.exists(os.path.join(item, 'hand_labels_noise.txt')):
+                mel_icas.append(item)
+        outputs = self._outputs().get()
+        outputs['mel_icas_out'] = mel_icas
+        return outputs
+
+
+class FeatureExtractorInputSpec(CommandLineInputSpec):
+    mel_ica = Directory(
+        exists=True,
+        copyfile=False,
+        desc='Melodic output directory or directories',
+        argstr='%s',
+        position=-1)
+
+
+class FeatureExtractorOutputSpec(TraitedSpec):
+    mel_ica = Directory(
+        exists=True,
+        copyfile=False,
+        desc='Melodic output directory or directories',
+        argstr='%s',
+        position=-1)
+
+
+class FeatureExtractor(CommandLine):
+    '''
+    Extract features (for later training and/or classifying)
+    '''
+    input_spec = FeatureExtractorInputSpec
+    output_spec = FeatureExtractorOutputSpec
+    cmd = 'fix -f'
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['mel_ica'] = self.inputs.mel_ica
+        return outputs
+
+
+class TrainingInputSpec(CommandLineInputSpec):
+    mel_icas = InputMultiPath(
+        Directory(exists=True),
+        copyfile=False,
+        desc='Melodic output directories',
+        argstr='%s',
+        position=-1)
+
+    trained_wts_filestem = traits.Str(
+        desc='trained-weights filestem, used for trained_wts_file and output directories',
+        argstr='%s',
+        position=1)
+
+    loo = traits.Bool(
+        argstr='-l',
+        desc='full leave-one-out test with classifier training',
+        position=2)
+
+
+class TrainingOutputSpec(TraitedSpec):
+    trained_wts_file = File(exists=True, desc='Trained-weights file')
+
+
+class Training(CommandLine):
+    '''
+    Train the classifier based on your own FEAT/MELODIC output directory.
+    '''
+    input_spec = TrainingInputSpec
+    output_spec = TrainingOutputSpec
+    cmd = 'fix -t'
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if isdefined(self.inputs.trained_wts_filestem):
+            outputs['trained_wts_file'] = os.path.abspath(
+                self.inputs.trained_wts_filestem + '.RData')
+        else:
+            outputs['trained_wts_file'] = os.path.abspath(
+                'trained_wts_file.RData')
+        return outputs
+
+
+class AccuracyTesterInputSpec(CommandLineInputSpec):
+    mel_icas = InputMultiPath(
+        Directory(exists=True),
+        copyfile=False,
+        desc='Melodic output directories',
+        argstr='%s',
+        position=3,
+        mandatory=True)
+
+    trained_wts_file = File(
+        desc='trained-weights file', argstr='%s', position=1, mandatory=True)
+
+    output_directory = Directory(
+        desc='Path to folder in which to store the results of the accuracy test.',
+        argstr='%s',
+        position=2,
+        mandatory=True)
+
+
+class AccuracyTesterOutputSpec(TraitedSpec):
+    output_directory = Directory(
+        desc='Path to folder in which to store the results of the accuracy test.',
+        argstr='%s',
+        position=1)
+
+
+class AccuracyTester(CommandLine):
+    '''
+    Test the accuracy of an existing training dataset on a set of
+    hand-labelled subjects.
+
+    Note: This may or may not be working. It could not presently be
+    confirmed, because fix fails on this (even outside of nipype)
+    without leaving an error msg.
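+
+    Given the note above, the following is an untested sketch (paths are
+    illustrative):
+
+    >>> from nipype.interfaces.fsl import fix
+    >>> tester = fix.AccuracyTester(mel_icas=['sub01.ica'],
+    ...                             trained_wts_file='foo.RData',
+    ...                             output_directory='acc_out')  # doctest: +SKIP
+    >>> res = tester.run()  # doctest: +SKIP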
+    '''
+    input_spec = AccuracyTesterInputSpec
+    output_spec = AccuracyTesterOutputSpec
+    cmd = 'fix -C'
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if isdefined(self.inputs.output_directory):
+            outputs['output_directory'] = Directory(
+                exists=False, value=self.inputs.output_directory)
+        else:
+            outputs['output_directory'] = Directory(
+                exists=False, value='accuracy_test')
+        return outputs
+
+
+class ClassifierInputSpec(CommandLineInputSpec):
+    mel_ica = Directory(
+        exists=True,
+        copyfile=False,
+        desc='Melodic output directory or directories',
+        argstr='%s',
+        position=1)
+
+    trained_wts_file = File(
+        exists=True,
+        desc='trained-weights file',
+        argstr='%s',
+        position=2,
+        mandatory=True,
+        copyfile=False)
+
+    thresh = traits.Int(
+        argstr='%d',
+        desc='Threshold for cleanup.',
+        position=-1,
+        mandatory=True)
+
+    artifacts_list_file = File(
+        desc='Text file listing which ICs are artifacts; can be the output '
+        'from classification or can be created manually')
+
+
+class ClassifierOutputSpec(TraitedSpec):
+    artifacts_list_file = File(
+        desc='Text file listing which ICs are artifacts; can be the output '
+        'from classification or can be created manually')
+
+
+class Classifier(CommandLine):
+    '''
+    Classify ICA components using a specific training dataset
+    (<thresh> is in the range 0-100, typically 5-20).
+    '''
+    input_spec = ClassifierInputSpec
+    output_spec = ClassifierOutputSpec
+    cmd = 'fix -c'
+
+    def _gen_artifacts_list_file(self, mel_ica, thresh):
+        _, trained_wts_file = os.path.split(self.inputs.trained_wts_file)
+        trained_wts_filestem = trained_wts_file.split('.')[0]
+        filestem = 'fix4melview_' + trained_wts_filestem + '_thr'
+
+        fname = os.path.join(mel_ica, filestem + str(thresh) + '.txt')
+        return fname
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['artifacts_list_file'] = self._gen_artifacts_list_file(
+            self.inputs.mel_ica, self.inputs.thresh)
+
+        return outputs
+
+
+class CleanerInputSpec(CommandLineInputSpec):
+    artifacts_list_file = File(
+        exists=True,
+        argstr='%s',
+        position=1,
+        mandatory=True,
+        desc='Text file listing which ICs are artifacts; can be the output '
+        'from classification or can be created manually')
+
+    cleanup_motion = traits.Bool(
+        argstr='-m',
+        desc='cleanup motion confounds, looks for design.fsf for highpass '
+        'filter cut-off',
+        position=2)
+
+    highpass = traits.Float(
+        100,
+        argstr='-m -h %f',
+        usedefault=True,
+        desc='cleanup motion confounds',
+        position=2)
+
+    aggressive = traits.Bool(
+        argstr='-A',
+        desc='Apply aggressive (full variance) cleanup, instead of the '
+        'default less-aggressive (unique variance) cleanup.',
+        position=3)
+
+    confound_file = traits.File(
+        argstr='-x %s', desc='Include additional confound file.', position=4)
+
+    confound_file_1 = traits.File(
+        argstr='-x %s', desc='Include additional confound file.', position=5)
+
+    confound_file_2 = traits.File(
+        argstr='-x %s', desc='Include additional confound file.', position=6)
+
+
+class CleanerOutputSpec(TraitedSpec):
+    cleaned_functional_file = File(exists=True, desc='Cleaned session data')
+
+
+class Cleaner(CommandLine):
+    '''
+    Apply cleanup, by removing artifactual ICA components from the data.
+    '''
+    input_spec = CleanerInputSpec
+    output_spec = CleanerOutputSpec
+    cmd = 'fix -a'
+
+    def _get_cleaned_functional_filename(self, artifacts_list_filename):
+        ''' extract the proper filename from the first line of the artifacts file '''
+        artifacts_list_file = open(artifacts_list_filename, 'r')
+        functional_filename,
extension = artifacts_list_file.readline().split( + '.') + artifacts_list_file_path, artifacts_list_filename = os.path.split( + artifacts_list_filename) + + return (os.path.join(artifacts_list_file_path, + functional_filename + '_clean.nii.gz')) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs[ + 'cleaned_functional_file'] = self._get_cleaned_functional_filename( + self.inputs.artifacts_list_file) + return outputs diff --git a/nipype/interfaces/fsl/maths.py b/nipype/interfaces/fsl/maths.py new file mode 100644 index 0000000000..3862cea8c7 --- /dev/null +++ b/nipype/interfaces/fsl/maths.py @@ -0,0 +1,621 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The maths module provides higher-level interfaces to some of the operations +that can be performed with the fslmaths command-line program. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +import numpy as np + +from ..base import (TraitedSpec, File, traits, InputMultiPath, isdefined) +from .base import FSLCommand, FSLCommandInputSpec + + +class MathsInput(FSLCommandInputSpec): + + in_file = File( + position=2, + argstr="%s", + exists=True, + mandatory=True, + desc="image to operate on") + out_file = File( + genfile=True, + position=-2, + argstr="%s", + desc="image to write", + hash_files=False) + _dtypes = ["float", "char", "int", "short", "double", "input"] + internal_datatype = traits.Enum( + *_dtypes, + position=1, + argstr="-dt %s", + desc=("datatype to use for calculations " + "(default is float)")) + output_datatype = traits.Enum( + *_dtypes, + position=-1, + argstr="-odt %s", + desc=("datatype to use for output (default " + "uses input type)")) + + nan2zeros = traits.Bool( + position=3, + argstr='-nan', + desc='change NaNs to zeros before doing anything') + + +class MathsOutput(TraitedSpec): + + out_file = File(exists=True, desc="image written after calculations") + + +class MathsCommand(FSLCommand): + + _cmd = "fslmaths" + input_spec = MathsInput + output_spec = MathsOutput + _suffix = "_maths" + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs["out_file"] = self.inputs.out_file + if not isdefined(self.inputs.out_file): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix=self._suffix) + outputs["out_file"] = os.path.abspath(outputs["out_file"]) + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()["out_file"] + return None + + +class ChangeDataTypeInput(MathsInput): + + _dtypes = ["float", "char", "int", "short", "double", "input"] + output_datatype = traits.Enum( + *_dtypes, + position=-1, + argstr="-odt %s", + mandatory=True, + desc="output data type") + + +class ChangeDataType(MathsCommand): + """Use fslmaths to change the datatype of an image. 
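+
+    A short sketch (the output name is auto-generated unless given):
+
+    >>> from nipype.interfaces.fsl.maths import ChangeDataType
+    >>> chdt = ChangeDataType(in_file="functional.nii", output_datatype="char")
+    >>> chdt.cmdline  # doctest: +SKIP
+    'fslmaths functional.nii .../functional_chdt.nii.gz -odt char'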
+
+    """
+    input_spec = ChangeDataTypeInput
+    _suffix = "_chdt"
+
+
+class ThresholdInputSpec(MathsInput):
+
+    thresh = traits.Float(
+        mandatory=True, position=4, argstr="%s", desc="threshold value")
+    direction = traits.Enum(
+        "below",
+        "above",
+        usedefault=True,
+        desc="zero-out either below or above thresh value")
+    use_robust_range = traits.Bool(
+        desc="interpret thresh as percentage (0-100) of robust range")
+    use_nonzero_voxels = traits.Bool(
+        desc="use nonzero voxels to calculate robust range",
+        requires=["use_robust_range"])
+
+
+class Threshold(MathsCommand):
+    """Use fslmaths to apply a threshold to an image in a variety of ways.
+
+    """
+    input_spec = ThresholdInputSpec
+    _suffix = "_thresh"
+
+    def _format_arg(self, name, spec, value):
+        if name == "thresh":
+            arg = "-"
+            _si = self.inputs
+            if self.inputs.direction == "above":
+                arg += "u"
+            arg += "thr"
+            if isdefined(_si.use_robust_range) and _si.use_robust_range:
+                if (isdefined(_si.use_nonzero_voxels)
+                        and _si.use_nonzero_voxels):
+                    arg += "P"
+                else:
+                    arg += "p"
+            arg += " %.10f" % value
+            return arg
+        return super(Threshold, self)._format_arg(name, spec, value)
+
+
+class StdImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%sstd",
+        position=4,
+        desc="dimension to take the standard deviation across")
+
+
+class StdImage(MathsCommand):
+    """Use fslmaths to generate a standard deviation image across a given
+    dimension.
+    """
+    input_spec = StdImageInput
+    _suffix = "_std"
+
+
+class MeanImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%smean",
+        position=4,
+        desc="dimension to mean across")
+
+
+class MeanImage(MathsCommand):
+    """Use fslmaths to generate a mean image across a given dimension.
+
+    """
+    input_spec = MeanImageInput
+    _suffix = "_mean"
+
+
+class MaxImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%smax",
+        position=4,
+        desc="dimension to max across")
+
+
+class MaxImage(MathsCommand):
+    """Use fslmaths to generate a max image across a given dimension.
+
+    Examples
+    --------
+    >>> from nipype.interfaces.fsl.maths import MaxImage
+    >>> maxer = MaxImage()
+    >>> maxer.inputs.in_file = "functional.nii"  # doctest: +SKIP
+    >>> maxer.inputs.dimension = "T"
+    >>> maxer.cmdline  # doctest: +SKIP
+    'fslmaths functional.nii -Tmax functional_max.nii'
+
+    """
+    input_spec = MaxImageInput
+    _suffix = "_max"
+
+
+class PercentileImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%sperc",
+        position=4,
+        desc="dimension to percentile across")
+    perc = traits.Range(
+        low=0,
+        high=100,
+        argstr="%f",
+        position=5,
+        desc=("nth percentile (0-100) of FULL RANGE "
+              "across dimension"))
+
+
+class PercentileImage(MathsCommand):
+    """Use fslmaths to generate a percentile image across a given dimension.
+
+    Examples
+    --------
+    >>> from nipype.interfaces.fsl.maths import PercentileImage
+    >>> percer = PercentileImage()
+    >>> percer.inputs.in_file = "functional.nii"  # doctest: +SKIP
+    >>> percer.inputs.dimension = "T"
+    >>> percer.inputs.perc = 90
+    >>> percer.cmdline  # doctest: +SKIP
+    'fslmaths functional.nii -Tperc 90 functional_perc.nii'
+
+    """
+    input_spec = PercentileImageInput
+    _suffix = "_perc"
+
+
+class MaxnImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%smaxn",
+        position=4,
+        desc="dimension to index max across")
+
+
+class MaxnImage(MathsCommand):
+    """Use fslmaths to generate an image of the index of the max across
+    a given dimension.
+
+    """
+    input_spec = MaxnImageInput
+    _suffix = "_maxn"
+
+
+class MinImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%smin",
+        position=4,
+        desc="dimension to min across")
+
+
+class MinImage(MathsCommand):
+    """Use fslmaths to generate a minimum image across a given dimension.
+
+    """
+    input_spec = MinImageInput
+    _suffix = "_min"
+
+
+class MedianImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%smedian",
+        position=4,
+        desc="dimension to median across")
+
+
+class MedianImage(MathsCommand):
+    """Use fslmaths to generate a median image across a given dimension.
+
+    """
+    input_spec = MedianImageInput
+    _suffix = "_median"
+
+
+class AR1ImageInput(MathsInput):
+
+    dimension = traits.Enum(
+        "T",
+        "X",
+        "Y",
+        "Z",
+        usedefault=True,
+        argstr="-%sar1",
+        position=4,
+        desc=("dimension to find AR(1) coefficient "
+              "across"))
+
+
+class AR1Image(MathsCommand):
+    """Use fslmaths to generate an AR1 coefficient image across a
+    given dimension. (Should use -odt float and probably demean first.)
+
+    """
+    input_spec = AR1ImageInput
+    _suffix = "_ar1"
+
+
+class IsotropicSmoothInput(MathsInput):
+
+    fwhm = traits.Float(
+        mandatory=True,
+        xor=["sigma"],
+        position=4,
+        argstr="-s %.5f",
+        desc="fwhm of smoothing kernel [mm]")
+    sigma = traits.Float(
+        mandatory=True,
+        xor=["fwhm"],
+        position=4,
+        argstr="-s %.5f",
+        desc="sigma of smoothing kernel [mm]")
+
+
+class IsotropicSmooth(MathsCommand):
+    """Use fslmaths to spatially smooth an image with a gaussian kernel.
+
+    """
+    input_spec = IsotropicSmoothInput
+    _suffix = "_smooth"
+
+    def _format_arg(self, name, spec, value):
+        if name == "fwhm":
+            sigma = float(value) / np.sqrt(8 * np.log(2))
+            return spec.argstr % sigma
+        return super(IsotropicSmooth, self)._format_arg(name, spec, value)
+
+
+class ApplyMaskInput(MathsInput):
+
+    mask_file = File(
+        exists=True,
+        mandatory=True,
+        argstr="-mas %s",
+        position=4,
+        desc="binary image defining mask space")
+
+
+class ApplyMask(MathsCommand):
+    """Use fslmaths to apply a binary mask to another image.
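+
+    A short sketch (file names are illustrative):
+
+    >>> from nipype.interfaces.fsl.maths import ApplyMask
+    >>> masker = ApplyMask(in_file="functional.nii", mask_file="mask.nii")  # doctest: +SKIP
+    >>> masker.cmdline  # doctest: +SKIP
+    'fslmaths functional.nii -mas mask.nii functional_masked.nii.gz'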
+
+    """
+    input_spec = ApplyMaskInput
+    _suffix = "_masked"
+
+
+class KernelInput(MathsInput):
+
+    kernel_shape = traits.Enum(
+        "3D",
+        "2D",
+        "box",
+        "boxv",
+        "gauss",
+        "sphere",
+        "file",
+        argstr="-kernel %s",
+        position=4,
+        desc="kernel shape to use")
+    kernel_size = traits.Float(
+        argstr="%.4f",
+        position=5,
+        xor=["kernel_file"],
+        desc=("kernel size - voxels for box/boxv, mm "
+              "for sphere, mm sigma for gauss"))
+    kernel_file = File(
+        exists=True,
+        argstr="%s",
+        position=5,
+        xor=["kernel_size"],
+        desc="use external file for kernel")
+
+
+class DilateInput(KernelInput):
+
+    operation = traits.Enum(
+        "mean",
+        "modal",
+        "max",
+        argstr="-dil%s",
+        position=6,
+        mandatory=True,
+        desc="filtering operation to perform in dilation")
+
+
+class DilateImage(MathsCommand):
+    """Use fslmaths to perform a spatial dilation of an image.
+
+    """
+    input_spec = DilateInput
+    _suffix = "_dil"
+
+    def _format_arg(self, name, spec, value):
+        if name == "operation":
+            return spec.argstr % dict(mean="M", modal="D", max="F")[value]
+        return super(DilateImage, self)._format_arg(name, spec, value)
+
+
+class ErodeInput(KernelInput):
+
+    minimum_filter = traits.Bool(
+        argstr="%s",
+        position=6,
+        usedefault=True,
+        default_value=False,
+        desc=("if true, minimum filter rather than "
+              "erosion by zeroing-out"))
+
+
+class ErodeImage(MathsCommand):
+    """Use fslmaths to perform a spatial erosion of an image.
+
+    """
+    input_spec = ErodeInput
+    _suffix = "_ero"
+
+    def _format_arg(self, name, spec, value):
+        if name == "minimum_filter":
+            if value:
+                return "-eroF"
+            return "-ero"
+        return super(ErodeImage, self)._format_arg(name, spec, value)
+
+
+class SpatialFilterInput(KernelInput):
+
+    operation = traits.Enum(
+        "mean",
+        "median",
+        "meanu",
+        argstr="-f%s",
+        position=6,
+        mandatory=True,
+        desc="operation to filter with")
+
+
+class SpatialFilter(MathsCommand):
+    """Use fslmaths to spatially filter an image.
+
+    """
+    input_spec = SpatialFilterInput
+    _suffix = "_filt"
+
+
+class UnaryMathsInput(MathsInput):
+
+    operation = traits.Enum(
+        "exp",
+        "log",
+        "sin",
+        "cos",
+        "tan",
+        "asin",
+        "acos",
+        "atan",
+        "sqr",
+        "sqrt",
+        "recip",
+        "abs",
+        "bin",
+        "binv",
+        "fillh",
+        "fillh26",
+        "index",
+        "edge",
+        "nan",
+        "nanm",
+        "rand",
+        "randn",
+        "range",
+        argstr="-%s",
+        position=4,
+        mandatory=True,
+        desc="operation to perform")
+
+
+class UnaryMaths(MathsCommand):
+    """Use fslmaths to perform a variety of mathematical operations on an image.
+
+    """
+    input_spec = UnaryMathsInput
+
+    def _list_outputs(self):
+        self._suffix = "_" + self.inputs.operation
+        return super(UnaryMaths, self)._list_outputs()
+
+
+class BinaryMathsInput(MathsInput):
+
+    operation = traits.Enum(
+        "add",
+        "sub",
+        "mul",
+        "div",
+        "rem",
+        "max",
+        "min",
+        mandatory=True,
+        argstr="-%s",
+        position=4,
+        desc="operation to perform")
+    operand_file = File(
+        exists=True,
+        argstr="%s",
+        mandatory=True,
+        position=5,
+        xor=["operand_value"],
+        desc="second image to perform operation with")
+    operand_value = traits.Float(
+        argstr="%.8f",
+        mandatory=True,
+        position=5,
+        xor=["operand_file"],
+        desc="value to perform operation with")
+
+
+class BinaryMaths(MathsCommand):
+    """Use fslmaths to perform mathematical operations using a second image or
+    a numeric value.
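+
+    A short sketch using a numeric operand (values are illustrative):
+
+    >>> from nipype.interfaces.fsl.maths import BinaryMaths
+    >>> binmaths = BinaryMaths(in_file="functional.nii", operation="mul",
+    ...                        operand_value=2.0)
+    >>> binmaths.cmdline  # doctest: +SKIP
+    'fslmaths functional.nii -mul 2.00000000 functional_maths.nii.gz'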
+ + """ + input_spec = BinaryMathsInput + + +class MultiImageMathsInput(MathsInput): + + op_string = traits.String( + position=4, + argstr="%s", + mandatory=True, + desc=("python formatted string of operations " + "to perform")) + operand_files = InputMultiPath( + File(exists=True), + mandatory=True, + desc=("list of file names to plug into op " + "string")) + + +class MultiImageMaths(MathsCommand): + """Use fslmaths to perform a sequence of mathematical operations. + + Examples + -------- + >>> from nipype.interfaces.fsl import MultiImageMaths + >>> maths = MultiImageMaths() + >>> maths.inputs.in_file = "functional.nii" + >>> maths.inputs.op_string = "-add %s -mul -1 -div %s" + >>> maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] + >>> maths.inputs.out_file = "functional4.nii" + >>> maths.cmdline + 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii' + + """ + input_spec = MultiImageMathsInput + + def _format_arg(self, name, spec, value): + if name == "op_string": + return value % tuple(self.inputs.operand_files) + return super(MultiImageMaths, self)._format_arg(name, spec, value) + + +class TemporalFilterInput(MathsInput): + + lowpass_sigma = traits.Float( + -1, + argstr="%.6f", + position=5, + usedefault=True, + desc="lowpass filter sigma (in volumes)") + highpass_sigma = traits.Float( + -1, + argstr="-bptf %.6f", + position=4, + usedefault=True, + desc="highpass filter sigma (in volumes)") + + +class TemporalFilter(MathsCommand): + """Use fslmaths to apply a low, high, or bandpass temporal filter to a + timeseries. + + """ + input_spec = TemporalFilterInput + _suffix = "_filt" diff --git a/nipype/interfaces/fsl/model.py b/nipype/interfaces/fsl/model.py new file mode 100644 index 0000000000..113f785120 --- /dev/null +++ b/nipype/interfaces/fsl/model.py @@ -0,0 +1,2432 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 4.1.4. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +import os +from glob import glob +from shutil import rmtree +from string import Template + +import numpy as np +from nibabel import load + +from ... 
import LooseVersion +from ...utils.filemanip import simplify_list, ensure_list +from ...utils.misc import human_order_sorted +from ...external.due import BibTeX +from ..base import (File, traits, isdefined, TraitedSpec, BaseInterface, + Directory, InputMultiPath, OutputMultiPath, + BaseInterfaceInputSpec) +from .base import FSLCommand, FSLCommandInputSpec, Info + + +class Level1DesignInputSpec(BaseInterfaceInputSpec): + interscan_interval = traits.Float( + mandatory=True, desc='Interscan interval (in secs)') + session_info = traits.Any( + mandatory=True, + desc=('Session specific information generated ' + 'by ``modelgen.SpecifyModel``')) + bases = traits.Either( + traits.Dict( + traits.Enum('dgamma'), + traits.Dict(traits.Enum('derivs'), traits.Bool)), + traits.Dict( + traits.Enum('gamma'), + traits.Dict(traits.Enum('derivs', 'gammasigma', 'gammadelay'))), + traits.Dict( + traits.Enum('custom'), + traits.Dict(traits.Enum('bfcustompath'), traits.Str)), + traits.Dict(traits.Enum('none'), traits.Dict()), + traits.Dict(traits.Enum('none'), traits.Enum(None)), + mandatory=True, + desc=("name of basis function and options e.g., " + "{'dgamma': {'derivs': True}}"), + ) + orthogonalization = traits.Dict( + traits.Int, + traits.Dict(traits.Int, traits.Either(traits.Bool, traits.Int)), + desc=("which regressors to make orthogonal e.g., " + "{1: {0:0,1:0,2:0}, 2: {0:1,1:1,2:0}} to make the second " + "regressor in a 2-regressor model orthogonal to the first."), + usedefault=True) + model_serial_correlations = traits.Bool( + desc="Option to model serial correlations using an \ +autoregressive estimator (order 1). Setting this option is only \ +useful in the context of the fsf file. If you set this to False, you need to \ +repeat this option for FILMGLS by setting autocorr_noestimate to True", + mandatory=True) + contrasts = traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float), traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('F'), + traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float)))))), + desc="List of contrasts with each contrast being a list of the form - \ +[('name', 'stat', [condition list], [weight list], [session list])]. if \ +session list is None or not provided, all sessions are used. 
For F \ +contrasts, the condition list should contain previously defined \ +T-contrasts.") + + +class Level1DesignOutputSpec(TraitedSpec): + fsf_files = OutputMultiPath( + File(exists=True), desc='FSL feat specification files') + ev_files = OutputMultiPath( + traits.List(File(exists=True)), desc='condition information files') + + +class Level1Design(BaseInterface): + """Generate FEAT specific files + + Examples + -------- + + >>> level1design = Level1Design() + >>> level1design.inputs.interscan_interval = 2.5 + >>> level1design.inputs.bases = {'dgamma':{'derivs': False}} + >>> level1design.inputs.session_info = 'session_info.npz' + >>> level1design.run() # doctest: +SKIP + + """ + + input_spec = Level1DesignInputSpec + output_spec = Level1DesignOutputSpec + + def _create_ev_file(self, evfname, evinfo): + f = open(evfname, 'wt') + for i in evinfo: + if len(i) == 3: + f.write('%f %f %f\n' % (i[0], i[1], i[2])) + else: + f.write('%f\n' % i[0]) + f.close() + + def _create_ev_files(self, cwd, runinfo, runidx, ev_parameters, + orthogonalization, contrasts, do_tempfilter, + basis_key): + """Creates EV files from condition and regressor information. + + Parameters: + ----------- + + runinfo : dict + Generated by `SpecifyModel` and contains information + about events and other regressors. + runidx : int + Index to run number + ev_parameters : dict + A dictionary containing the model parameters for the + given design type. + orthogonalization : dict + A dictionary of dictionaries specifying orthogonal EVs. + contrasts : list of lists + Information on contrasts to be evaluated + """ + conds = {} + evname = [] + if basis_key == "dgamma": + basis_key = "hrf" + elif basis_key == "gamma": + try: + _ = ev_parameters['gammasigma'] + except KeyError: + ev_parameters['gammasigma'] = 3 + try: + _ = ev_parameters['gammadelay'] + except KeyError: + ev_parameters['gammadelay'] = 6 + ev_template = load_template('feat_ev_' + basis_key + '.tcl') + ev_none = load_template('feat_ev_none.tcl') + ev_ortho = load_template('feat_ev_ortho.tcl') + ev_txt = '' + # generate sections for conditions and other nuisance + # regressors + num_evs = [0, 0] + for field in ['cond', 'regress']: + for i, cond in enumerate(runinfo[field]): + name = cond['name'] + evname.append(name) + evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx, + len(evname))) + evinfo = [] + num_evs[0] += 1 + num_evs[1] += 1 + if field == 'cond': + for j, onset in enumerate(cond['onset']): + try: + amplitudes = cond['amplitudes'] + if len(amplitudes) > 1: + amp = amplitudes[j] + else: + amp = amplitudes[0] + except KeyError: + amp = 1 + if len(cond['duration']) > 1: + evinfo.insert(j, [onset, cond['duration'][j], amp]) + else: + evinfo.insert(j, [onset, cond['duration'][0], amp]) + ev_parameters['cond_file'] = evfname + ev_parameters['ev_num'] = num_evs[0] + ev_parameters['ev_name'] = name + ev_parameters['tempfilt_yn'] = do_tempfilter + if 'basisorth' not in ev_parameters: + ev_parameters['basisorth'] = 1 + if 'basisfnum' not in ev_parameters: + ev_parameters['basisfnum'] = 1 + try: + ev_parameters['fsldir'] = os.environ['FSLDIR'] + except KeyError: + if basis_key == 'flobs': + raise Exception( + 'FSL environment variables not set') + else: + ev_parameters['fsldir'] = '/usr/share/fsl' + ev_parameters['temporalderiv'] = int( + bool(ev_parameters.get('derivs', False))) + if ev_parameters['temporalderiv']: + evname.append(name + 'TD') + num_evs[1] += 1 + ev_txt += ev_template.substitute(ev_parameters) + elif field == 'regress': + evinfo = [[j] for j in 
cond['val']] + ev_txt += ev_none.substitute( + ev_num=num_evs[0], + ev_name=name, + tempfilt_yn=do_tempfilter, + cond_file=evfname) + ev_txt += "\n" + conds[name] = evfname + self._create_ev_file(evfname, evinfo) + # add ev orthogonalization + for i in range(1, num_evs[0] + 1): + initial = ev_ortho.substitute(c0=i, c1=0, orthogonal=1) + for j in range(0, num_evs[0] + 1): + try: + orthogonal = int(orthogonalization[i][j]) + except (KeyError, TypeError, ValueError, IndexError): + orthogonal = 0 + if orthogonal == 1 and initial not in ev_txt: + ev_txt += initial + "\n" + ev_txt += ev_ortho.substitute(c0=i, c1=j, + orthogonal=orthogonal) + ev_txt += "\n" + # add contrast info to fsf file + if isdefined(contrasts): + contrast_header = load_template('feat_contrast_header.tcl') + contrast_prolog = load_template('feat_contrast_prolog.tcl') + contrast_element = load_template('feat_contrast_element.tcl') + contrast_ftest_element = load_template( + 'feat_contrast_ftest_element.tcl') + contrastmask_header = load_template('feat_contrastmask_header.tcl') + contrastmask_footer = load_template('feat_contrastmask_footer.tcl') + contrastmask_element = load_template( + 'feat_contrastmask_element.tcl') + # add t/f contrast info + ev_txt += contrast_header.substitute() + con_names = [] + for j, con in enumerate(contrasts): + con_names.append(con[0]) + con_map = {} + ftest_idx = [] + ttest_idx = [] + for j, con in enumerate(contrasts): + if con[1] == 'F': + ftest_idx.append(j) + for c in con[2]: + if c[0] not in list(con_map.keys()): + con_map[c[0]] = [] + con_map[c[0]].append(j) + else: + ttest_idx.append(j) + + for ctype in ['real', 'orig']: + for j, con in enumerate(contrasts): + if con[1] == 'F': + continue + tidx = ttest_idx.index(j) + 1 + ev_txt += contrast_prolog.substitute( + cnum=tidx, ctype=ctype, cname=con[0]) + count = 0 + for c in range(1, len(evname) + 1): + if evname[c - 1].endswith('TD') and ctype == 'orig': + continue + count = count + 1 + if evname[c - 1] in con[2]: + val = con[3][con[2].index(evname[c - 1])] + else: + val = 0.0 + ev_txt += contrast_element.substitute( + cnum=tidx, element=count, ctype=ctype, val=val) + ev_txt += "\n" + + for fconidx in ftest_idx: + fval = 0 + if (con[0] in con_map.keys() + and fconidx in con_map[con[0]]): + fval = 1 + ev_txt += contrast_ftest_element.substitute( + cnum=ftest_idx.index(fconidx) + 1, + element=tidx, + ctype=ctype, + val=fval) + ev_txt += "\n" + + # add contrast mask info + ev_txt += contrastmask_header.substitute() + for j, _ in enumerate(contrasts): + for k, _ in enumerate(contrasts): + if j != k: + ev_txt += contrastmask_element.substitute( + c1=j + 1, c2=k + 1) + ev_txt += contrastmask_footer.substitute() + return num_evs, ev_txt + + def _format_session_info(self, session_info): + if isinstance(session_info, dict): + session_info = [session_info] + return session_info + + def _get_func_files(self, session_info): + """Returns functional files in the order of runs + """ + func_files = [] + for i, info in enumerate(session_info): + func_files.insert(i, info['scans']) + return func_files + + def _run_interface(self, runtime): + cwd = os.getcwd() + fsf_header = load_template('feat_header_l1.tcl') + fsf_postscript = load_template('feat_nongui.tcl') + + prewhiten = 0 + if isdefined(self.inputs.model_serial_correlations): + prewhiten = int(self.inputs.model_serial_correlations) + basis_key = list(self.inputs.bases.keys())[0] + ev_parameters = dict(self.inputs.bases[basis_key]) + session_info = self._format_session_info(self.inputs.session_info) + 
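+        # gather the per-run functional files and count T/F contrasts; the
+        # totals feed the num_tcon/num_fcon fields of the feat header template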
func_files = self._get_func_files(session_info)
+        n_tcon = 0
+        n_fcon = 0
+        if isdefined(self.inputs.contrasts):
+            for i, c in enumerate(self.inputs.contrasts):
+                if c[1] == 'T':
+                    n_tcon += 1
+                elif c[1] == 'F':
+                    n_fcon += 1
+
+        for i, info in enumerate(session_info):
+            do_tempfilter = 1
+            if info['hpf'] == np.inf:
+                do_tempfilter = 0
+            num_evs, cond_txt = self._create_ev_files(
+                cwd, info, i, ev_parameters, self.inputs.orthogonalization,
+                self.inputs.contrasts, do_tempfilter, basis_key)
+            nim = load(func_files[i])
+            (_, _, _, timepoints) = nim.shape
+            fsf_txt = fsf_header.substitute(
+                run_num=i,
+                interscan_interval=self.inputs.interscan_interval,
+                num_vols=timepoints,
+                prewhiten=prewhiten,
+                num_evs=num_evs[0],
+                num_evs_real=num_evs[1],
+                num_tcon=n_tcon,
+                num_fcon=n_fcon,
+                high_pass_filter_cutoff=info['hpf'],
+                temphp_yn=do_tempfilter,
+                func_file=func_files[i])
+            fsf_txt += cond_txt
+            fsf_txt += fsf_postscript.substitute(overwrite=1)
+
+            f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w')
+            f.write(fsf_txt)
+            f.close()
+
+        return runtime
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        cwd = os.getcwd()
+        outputs['fsf_files'] = []
+        outputs['ev_files'] = []
+        basis_key = list(self.inputs.bases.keys())[0]
+        ev_parameters = dict(self.inputs.bases[basis_key])
+        for runno, runinfo in enumerate(
+                self._format_session_info(self.inputs.session_info)):
+            outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno))
+            outputs['ev_files'].insert(runno, [])
+            evname = []
+            for field in ['cond', 'regress']:
+                for i, cond in enumerate(runinfo[field]):
+                    name = cond['name']
+                    evname.append(name)
+                    evfname = os.path.join(cwd,
+                                           'ev_%s_%d_%d.txt' % (name, runno,
+                                                                len(evname)))
+                    if field == 'cond':
+                        ev_parameters['temporalderiv'] = int(
+                            bool(ev_parameters.get('derivs', False)))
+                        if ev_parameters['temporalderiv']:
+                            evname.append(name + 'TD')
+                    outputs['ev_files'][runno].append(
+                        os.path.join(cwd, evfname))
+        return outputs
+
+
+class FEATInputSpec(FSLCommandInputSpec):
+    fsf_file = File(
+        exists=True,
+        mandatory=True,
+        argstr="%s",
+        position=0,
+        desc="File specifying the feat design spec file")
+
+
+class FEATOutputSpec(TraitedSpec):
+    feat_dir = Directory(exists=True)
+
+
+class FEAT(FSLCommand):
+    """Uses FSL feat to calculate first level stats
+    """
+    _cmd = 'feat'
+    input_spec = FEATInputSpec
+    output_spec = FEATOutputSpec
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        is_ica = False
+        outputs['feat_dir'] = None
+        with open(self.inputs.fsf_file, 'rt') as fp:
+            text = fp.read()
+            if "set fmri(inmelodic) 1" in text:
+                is_ica = True
+            for line in text.split('\n'):
+                if line.find("set fmri(outputdir)") > -1:
+                    try:
+                        outputdir_spec = line.split('"')[-2]
+                        if os.path.exists(outputdir_spec):
+                            outputs['feat_dir'] = outputdir_spec
+                    except IndexError:
+                        pass
+        if not outputs['feat_dir']:
+            if is_ica:
+                outputs['feat_dir'] = glob(os.path.join(os.getcwd(),
+                                                        '*ica'))[0]
+            else:
+                outputs['feat_dir'] = glob(os.path.join(os.getcwd(),
+                                                        '*feat'))[0]
+        print('Outputs from FEAT:', outputs)
+        return outputs
+
+
+class FEATModelInputSpec(FSLCommandInputSpec):
+    fsf_file = File(
+        exists=True,
+        mandatory=True,
+        argstr="%s",
+        position=0,
+        desc="File specifying the feat design spec file",
+        copyfile=False)
+    ev_files = traits.List(
+        File(exists=True),
+        mandatory=True,
+        argstr="%s",
+        desc="Event spec files generated by level1design",
+        position=1,
+        copyfile=False)
+
+
+class FEATModelOutpuSpec(TraitedSpec):
+    design_file = File(
+        exists=True,
desc='Mat file containing ascii matrix for design')
+    design_image = File(
+        exists=True, desc='Graphical representation of design matrix')
+    design_cov = File(
+        exists=True, desc='Graphical representation of design covariance')
+    con_file = File(
+        exists=True, desc='Contrast file containing contrast vectors')
+    fcon_file = File(desc='Contrast file containing contrast vectors')
+
+
+class FEATModel(FSLCommand):
+    """Uses FSL feat_model to generate design.mat files
+    """
+    _cmd = 'feat_model'
+    input_spec = FEATModelInputSpec
+    output_spec = FEATModelOutpuSpec
+
+    def _format_arg(self, name, trait_spec, value):
+        if name == 'fsf_file':
+            return super(FEATModel,
+                         self)._format_arg(name, trait_spec,
+                                           self._get_design_root(value))
+        elif name == 'ev_files':
+            return ''
+        else:
+            return super(FEATModel, self)._format_arg(name, trait_spec, value)
+
+    def _get_design_root(self, infile):
+        _, fname = os.path.split(infile)
+        return fname.split('.')[0]
+
+    def _list_outputs(self):
+        # TODO: figure out file names and get rid of the globs
+        outputs = self._outputs().get()
+        root = self._get_design_root(simplify_list(self.inputs.fsf_file))
+        design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root))
+        assert len(design_file) == 1, 'No mat file generated by FEAT Model'
+        outputs['design_file'] = design_file[0]
+        design_image = glob(os.path.join(os.getcwd(), '%s.png' % root))
+        assert len(
+            design_image) == 1, 'No design image generated by FEAT Model'
+        outputs['design_image'] = design_image[0]
+        design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root))
+        assert len(
+            design_cov) == 1, 'No covariance image generated by FEAT Model'
+        outputs['design_cov'] = design_cov[0]
+        con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root))
+        assert len(con_file) == 1, 'No con file generated by FEAT Model'
+        outputs['con_file'] = con_file[0]
+        fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root))
+        if fcon_file:
+            assert len(fcon_file) == 1, 'No fts file generated by FEAT Model'
+            outputs['fcon_file'] = fcon_file[0]
+        return outputs
+
+
+class FILMGLSInputSpec(FSLCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        position=-3,
+        argstr='%s',
+        desc='input data file')
+    design_file = File(
+        exists=True, position=-2, argstr='%s', desc='design matrix file')
+    threshold = traits.Range(
+        value=1000.,
+        low=0.0,
+        argstr='%f',
+        position=-1,
+        usedefault=True,
+        desc='threshold')
+    smooth_autocorr = traits.Bool(
+        argstr='-sa', desc='Smooth auto corr estimates')
+    mask_size = traits.Int(argstr='-ms %d', desc="susan mask size")
+    brightness_threshold = traits.Range(
+        low=0,
+        argstr='-epith %d',
+        desc=('susan brightness threshold, '
+              'otherwise it is estimated'))
+    full_data = traits.Bool(argstr='-v', desc='output full data')
+    _estimate_xor = [
+        'autocorr_estimate_only', 'fit_armodel', 'tukey_window',
+        'multitaper_product', 'use_pava', 'autocorr_noestimate'
+    ]
+    autocorr_estimate_only = traits.Bool(
+        argstr='-ac',
+        xor=_estimate_xor,
+        desc=('perform autocorrelation '
+              'estimation only'))
+    fit_armodel = traits.Bool(
+        argstr='-ar',
+        xor=_estimate_xor,
+        desc=('fits autoregressive model - default is '
+              'to use tukey with M=sqrt(numvols)'))
+    tukey_window = traits.Int(
+        argstr='-tukey %d',
+        xor=_estimate_xor,
+        desc='tukey window size to estimate autocorr')
+    multitaper_product = traits.Int(
+        argstr='-mt %d',
+        xor=_estimate_xor,
+        desc=('multitapering with slepian tapers '
+              'and num is the time-bandwidth '
+              'product'))
+    use_pava = traits.Bool(
argstr='-pava', desc='estimates autocorr using PAVA')
+    autocorr_noestimate = traits.Bool(
+        argstr='-noest', xor=_estimate_xor, desc='do not estimate autocorrs')
+    output_pwdata = traits.Bool(
+        argstr='-output_pwdata',
+        desc=('output prewhitened data and average '
+              'design matrix'))
+    results_dir = Directory(
+        'results',
+        argstr='-rn %s',
+        usedefault=True,
+        desc='directory to store results in')
+
+
+class FILMGLSInputSpec505(FSLCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        position=-3,
+        argstr='--in=%s',
+        desc='input data file')
+    design_file = File(
+        exists=True, position=-2, argstr='--pd=%s', desc='design matrix file')
+    threshold = traits.Range(
+        value=1000.,
+        low=0.0,
+        argstr='--thr=%f',
+        position=-1,
+        usedefault=True,
+        desc='threshold')
+    smooth_autocorr = traits.Bool(
+        argstr='--sa', desc='Smooth auto corr estimates')
+    mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size")
+    brightness_threshold = traits.Range(
+        low=0,
+        argstr='--epith=%d',
+        desc=('susan brightness threshold, '
+              'otherwise it is estimated'))
+    full_data = traits.Bool(argstr='-v', desc='output full data')
+    _estimate_xor = [
+        'autocorr_estimate_only', 'fit_armodel', 'tukey_window',
+        'multitaper_product', 'use_pava', 'autocorr_noestimate'
+    ]
+    autocorr_estimate_only = traits.Bool(
+        argstr='--ac',
+        xor=_estimate_xor,
+        desc=('perform autocorrelation '
+              'estimation only'))
+    fit_armodel = traits.Bool(
+        argstr='--ar',
+        xor=_estimate_xor,
+        desc=('fits autoregressive model - default is '
+              'to use tukey with M=sqrt(numvols)'))
+    tukey_window = traits.Int(
+        argstr='--tukey=%d',
+        xor=_estimate_xor,
+        desc='tukey window size to estimate autocorr')
+    multitaper_product = traits.Int(
+        argstr='--mt=%d',
+        xor=_estimate_xor,
+        desc=('multitapering with slepian tapers '
+              'and num is the time-bandwidth '
+              'product'))
+    use_pava = traits.Bool(
+        argstr='--pava', desc='estimates autocorr using PAVA')
+    autocorr_noestimate = traits.Bool(
+        argstr='--noest', xor=_estimate_xor, desc='do not estimate autocorrs')
+    output_pwdata = traits.Bool(
+        argstr='--outputPWdata',
+        desc=('output prewhitened data and average '
+              'design matrix'))
+    results_dir = Directory(
+        'results',
+        argstr='--rn=%s',
+        usedefault=True,
+        desc='directory to store results in')
+
+
+class FILMGLSInputSpec507(FILMGLSInputSpec505):
+    threshold = traits.Float(
+        default_value=-1000.,
+        argstr='--thr=%f',
+        position=-1,
+        usedefault=True,
+        desc='threshold')
+    tcon_file = File(
+        exists=True,
+        argstr='--con=%s',
+        desc='contrast file containing T-contrasts')
+    fcon_file = File(
+        exists=True,
+        argstr='--fcon=%s',
+        desc='contrast file containing F-contrasts')
+    mode = traits.Enum(
+        'volumetric',
+        'surface',
+        argstr="--mode=%s",
+        desc="Type of analysis to be done")
+    surface = File(
+        exists=True,
+        argstr="--in2=%s",
+        desc=("input surface for autocorr smoothing in "
+              "surface-based analyses"))
+
+
+class FILMGLSOutputSpec(TraitedSpec):
+    param_estimates = OutputMultiPath(
+        File(exists=True),
+        desc=('Parameter estimates for each '
+              'column of the design matrix'))
+    residual4d = File(
+        exists=True,
+        desc=('Model fit residual mean-squared error for each '
+              'time point'))
+    dof_file = File(exists=True, desc='degrees of freedom')
+    sigmasquareds = File(
+        exists=True, desc='summary of residuals, see Woolrich et al., 2001')
+    results_dir = Directory(
+        exists=True, desc='directory storing model estimation output')
+    corrections = File(
+        exists=True,
+        desc=('statistical corrections used within FILM '
+              'modeling'))
+    thresholdac = File(exists=True, desc='The FILM autocorrelation parameters')
+    logfile = File(exists=True, desc='FILM run logfile')
+
+
+class FILMGLSOutputSpec507(TraitedSpec):
+    param_estimates = OutputMultiPath(
+        File(exists=True),
+        desc=('Parameter estimates for each '
+              'column of the design matrix'))
+    residual4d = File(
+        exists=True,
+        desc=('Model fit residual mean-squared error for each '
+              'time point'))
+    dof_file = File(exists=True, desc='degrees of freedom')
+    sigmasquareds = File(
+        exists=True, desc='summary of residuals, see Woolrich et al., 2001')
+    results_dir = Directory(
+        exists=True, desc='directory storing model estimation output')
+    thresholdac = File(exists=True, desc='The FILM autocorrelation parameters')
+    logfile = File(exists=True, desc='FILM run logfile')
+    copes = OutputMultiPath(
+        File(exists=True), desc='Contrast estimates for each contrast')
+    varcopes = OutputMultiPath(
+        File(exists=True), desc='Variance estimates for each contrast')
+    zstats = OutputMultiPath(
+        File(exists=True), desc='z-stat file for each contrast')
+    tstats = OutputMultiPath(
+        File(exists=True), desc='t-stat file for each contrast')
+    fstats = OutputMultiPath(
+        File(exists=True), desc='f-stat file for each contrast')
+    zfstats = OutputMultiPath(
+        File(exists=True), desc='z-stat file for each F contrast')
+
+
+class FILMGLS(FSLCommand):
+    """Use FSL film_gls command to fit a design matrix to voxel timeseries
+
+    Examples
+    --------
+
+    Initialize with no options, assigning them when calling run:
+
+    >>> from nipype.interfaces import fsl
+    >>> fgls = fsl.FILMGLS()
+    >>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP
+
+    Assign options through the ``inputs`` attribute:
+
+    >>> fgls = fsl.FILMGLS()
+    >>> fgls.inputs.in_file = 'functional.nii'
+    >>> fgls.inputs.design_file = 'design.mat'
+    >>> fgls.inputs.threshold = 10
+    >>> fgls.inputs.results_dir = 'stats'
+    >>> res = fgls.run() #doctest: +SKIP
+
+    Specify options when creating an instance:
+
+    >>> fgls = fsl.FILMGLS(in_file='functional.nii', \
+design_file='design.mat', \
+threshold=10, results_dir='stats')
+    >>> res = fgls.run() #doctest: +SKIP
+
+    """
+
+    _cmd = 'film_gls'
+    input_spec = FILMGLSInputSpec
+    output_spec = FILMGLSOutputSpec
+    if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'):
+        input_spec = FILMGLSInputSpec507
+        output_spec = FILMGLSOutputSpec507
+    elif (Info.version()
+          and LooseVersion(Info.version()) > LooseVersion('5.0.4')):
+        input_spec = FILMGLSInputSpec505
+
+    def _get_pe_files(self, cwd):
+        files = None
+        if isdefined(self.inputs.design_file):
+            fp = open(self.inputs.design_file, 'rt')
+            for line in fp.readlines():
+                if line.startswith('/NumWaves'):
+                    numpes = int(line.split()[-1])
+                    files = []
+                    for i in range(numpes):
+                        files.append(
+                            self._gen_fname('pe%d.nii' % (i + 1), cwd=cwd))
+                    break
+            fp.close()
+        return files
+
+    def _get_numcons(self):
+        numtcons = 0
+        numfcons = 0
+        if isdefined(self.inputs.tcon_file):
+            fp = open(self.inputs.tcon_file, 'rt')
+            for line in fp.readlines():
+                if line.startswith('/NumContrasts'):
+                    numtcons = int(line.split()[-1])
+                    break
+            fp.close()
+        if isdefined(self.inputs.fcon_file):
+            fp = open(self.inputs.fcon_file, 'rt')
+            for line in fp.readlines():
+                if line.startswith('/NumContrasts'):
+                    numfcons =
int(line.split()[-1]) + break + fp.close() + return numtcons, numfcons + + def _list_outputs(self): + outputs = self._outputs().get() + cwd = os.getcwd() + results_dir = os.path.join(cwd, self.inputs.results_dir) + outputs['results_dir'] = results_dir + pe_files = self._get_pe_files(results_dir) + if pe_files: + outputs['param_estimates'] = pe_files + outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir) + outputs['dof_file'] = os.path.join(results_dir, 'dof') + outputs['sigmasquareds'] = self._gen_fname( + 'sigmasquareds.nii', cwd=results_dir) + outputs['thresholdac'] = self._gen_fname( + 'threshac1.nii', cwd=results_dir) + if (Info.version() + and LooseVersion(Info.version()) < LooseVersion('5.0.7')): + outputs['corrections'] = self._gen_fname( + 'corrections.nii', cwd=results_dir) + outputs['logfile'] = self._gen_fname( + 'logfile', change_ext=False, cwd=results_dir) + + if (Info.version() + and LooseVersion(Info.version()) > LooseVersion('5.0.6')): + pth = results_dir + numtcons, numfcons = self._get_numcons() + base_contrast = 1 + copes = [] + varcopes = [] + zstats = [] + tstats = [] + for i in range(numtcons): + copes.append( + self._gen_fname( + 'cope%d.nii' % (base_contrast + i), cwd=pth)) + varcopes.append( + self._gen_fname( + 'varcope%d.nii' % (base_contrast + i), cwd=pth)) + zstats.append( + self._gen_fname( + 'zstat%d.nii' % (base_contrast + i), cwd=pth)) + tstats.append( + self._gen_fname( + 'tstat%d.nii' % (base_contrast + i), cwd=pth)) + if copes: + outputs['copes'] = copes + outputs['varcopes'] = varcopes + outputs['zstats'] = zstats + outputs['tstats'] = tstats + fstats = [] + zfstats = [] + for i in range(numfcons): + fstats.append( + self._gen_fname( + 'fstat%d.nii' % (base_contrast + i), cwd=pth)) + zfstats.append( + self._gen_fname( + 'zfstat%d.nii' % (base_contrast + i), cwd=pth)) + if fstats: + outputs['fstats'] = fstats + outputs['zfstats'] = zfstats + return outputs + + +class FEATRegisterInputSpec(BaseInterfaceInputSpec): + feat_dirs = InputMultiPath( + Directory(exists=True), desc="Lower level feat dirs", mandatory=True) + reg_image = File( + exists=True, + desc="image to register to (will be treated as standard)", + mandatory=True) + reg_dof = traits.Int( + 12, desc="registration degrees of freedom", usedefault=True) + + +class FEATRegisterOutputSpec(TraitedSpec): + fsf_file = File(exists=True, desc="FSL feat specification file") + + +class FEATRegister(BaseInterface): + """Register feat directories to a specific standard + """ + input_spec = FEATRegisterInputSpec + output_spec = FEATRegisterOutputSpec + + def _run_interface(self, runtime): + fsf_header = load_template('featreg_header.tcl') + fsf_footer = load_template('feat_nongui.tcl') + fsf_dirs = load_template('feat_fe_featdirs.tcl') + + num_runs = len(self.inputs.feat_dirs) + fsf_txt = fsf_header.substitute( + num_runs=num_runs, + regimage=self.inputs.reg_image, + regdof=self.inputs.reg_dof) + for i, rundir in enumerate(ensure_list(self.inputs.feat_dirs)): + fsf_txt += fsf_dirs.substitute( + runno=i + 1, rundir=os.path.abspath(rundir)) + fsf_txt += fsf_footer.substitute() + f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt') + f.write(fsf_txt) + f.close() + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['fsf_file'] = os.path.abspath( + os.path.join(os.getcwd(), 'register.fsf')) + return outputs + + +class FLAMEOInputSpec(FSLCommandInputSpec): + cope_file = File( + exists=True, + argstr='--copefile=%s', + mandatory=True, + desc='cope 
regressor data file') + var_cope_file = File( + exists=True, + argstr='--varcopefile=%s', + desc='varcope weightings data file') + dof_var_cope_file = File( + exists=True, + argstr='--dofvarcopefile=%s', + desc='dof data file for varcope data') + mask_file = File( + exists=True, argstr='--maskfile=%s', mandatory=True, desc='mask file') + design_file = File( + exists=True, + argstr='--designfile=%s', + mandatory=True, + desc='design matrix file') + t_con_file = File( + exists=True, + argstr='--tcontrastsfile=%s', + mandatory=True, + desc='ascii matrix specifying t-contrasts') + f_con_file = File( + exists=True, + argstr='--fcontrastsfile=%s', + desc='ascii matrix specifying f-contrasts') + cov_split_file = File( + exists=True, + argstr='--covsplitfile=%s', + mandatory=True, + desc='ascii matrix specifying the groups the covariance is split into') + run_mode = traits.Enum( + 'fe', + 'ols', + 'flame1', + 'flame12', + argstr='--runmode=%s', + mandatory=True, + desc='inference to perform') + n_jumps = traits.Int( + argstr='--njumps=%d', desc='number of jumps made by mcmc') + burnin = traits.Int( + argstr='--burnin=%d', + desc=('number of jumps at start of mcmc to be ' + 'discarded')) + sample_every = traits.Int( + argstr='--sampleevery=%d', desc='number of jumps for each sample') + fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit') + infer_outliers = traits.Bool( + argstr='--inferoutliers', desc='infer outliers - not for fe') + no_pe_outputs = traits.Bool( + argstr='--nopeoutput', desc='do not output pe files') + sigma_dofs = traits.Int( + argstr='--sigma_dofs=%d', + desc=('sigma (in mm) to use for Gaussian ' + 'smoothing the DOFs in FLAME 2. Default is ' + '1mm, -1 indicates no smoothing')) + outlier_iter = traits.Int( + argstr='--ioni=%d', + desc=('Number of max iterations to use when ' + 'inferring outliers. 
Default is 12.')) + log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds + # no support for ven, vef + + +class FLAMEOOutputSpec(TraitedSpec): + pes = OutputMultiPath( + File(exists=True), + desc=("Parameter estimates for each column of the " + "design matrix for each voxel")) + res4d = OutputMultiPath( + File(exists=True), + desc=("Model fit residual mean-squared error for " + "each time point")) + copes = OutputMultiPath( + File(exists=True), desc="Contrast estimates for each contrast") + var_copes = OutputMultiPath( + File(exists=True), desc="Variance estimates for each contrast") + zstats = OutputMultiPath( + File(exists=True), desc="z-stat file for each contrast") + tstats = OutputMultiPath( + File(exists=True), desc="t-stat file for each contrast") + zfstats = OutputMultiPath( + File(exists=True), desc="z stat file for each f contrast") + fstats = OutputMultiPath( + File(exists=True), desc="f-stat file for each contrast") + mrefvars = OutputMultiPath( + File(exists=True), + desc=("mean random effect variances for each " + "contrast")) + tdof = OutputMultiPath( + File(exists=True), desc="temporal dof file for each contrast") + weights = OutputMultiPath( + File(exists=True), desc="weights file for each contrast") + stats_dir = Directory( + File(exists=True), desc="directory storing model estimation output") + + +class FLAMEO(FSLCommand): + """Use FSL flameo command to perform higher level model fits + + Examples + -------- + + Initialize FLAMEO with no options, assigning them when calling run: + + >>> from nipype.interfaces import fsl + >>> flameo = fsl.FLAMEO() + >>> flameo.inputs.cope_file = 'cope.nii.gz' + >>> flameo.inputs.var_cope_file = 'varcope.nii.gz' + >>> flameo.inputs.cov_split_file = 'cov_split.mat' + >>> flameo.inputs.design_file = 'design.mat' + >>> flameo.inputs.t_con_file = 'design.con' + >>> flameo.inputs.mask_file = 'mask.nii' + >>> flameo.inputs.run_mode = 'fe' + >>> flameo.cmdline + 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' + + """ + + _cmd = 'flameo' + input_spec = FLAMEOInputSpec + output_spec = FLAMEOOutputSpec + + references_ = [{ + 'entry': + BibTeX( + '@article{BeckmannJenkinsonSmith2003,' + 'author={C.F. Beckmann, M. Jenkinson, and S.M. Smith},' + 'title={General multilevel linear modeling for group analysis in FMRI.},' + 'journal={NeuroImage},' + 'volume={20},' + 'pages={1052-1063},' + 'year={2003},' + '}'), + 'tags': ['method'], + }, { + 'entry': + BibTeX( + '@article{WoolrichBehrensBeckmannJenkinsonSmith2004,' + 'author={M.W. Woolrich, T.E. Behrens, ' + 'C.F. Beckmann, M. Jenkinson, and S.M. 
Smith},' + 'title={Multilevel linear modelling for FMRI group analysis using Bayesian inference.},' + 'journal={NeuroImage},' + 'volume={21},' + 'pages={1732-1747},' + 'year={2004},' + '}'), + 'tags': ['method'], + }] + + # ohinds: 2010-04-06 + def _run_interface(self, runtime): + log_dir = self.inputs.log_dir + cwd = os.getcwd() + if os.access(os.path.join(cwd, log_dir), os.F_OK): + rmtree(os.path.join(cwd, log_dir)) + + return super(FLAMEO, self)._run_interface(runtime) + + # ohinds: 2010-04-06 + # made these compatible with flameo + def _list_outputs(self): + outputs = self._outputs().get() + pth = os.path.join(os.getcwd(), self.inputs.log_dir) + + pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*'))) + assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate' + outputs['pes'] = pes + + res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*'))) + assert len(res4d) == 1, 'No residual volume generated by FSL Estimate' + outputs['res4d'] = res4d[0] + + copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*'))) + assert len(copes) >= 1, 'No cope volumes generated by FSL CEstimate' + outputs['copes'] = copes + + var_copes = human_order_sorted( + glob(os.path.join(pth, 'varcope[0-9]*.*'))) + assert len( + var_copes) >= 1, 'No varcope volumes generated by FSL CEstimate' + outputs['var_copes'] = var_copes + + zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*'))) + assert len(zstats) >= 1, 'No zstat volumes generated by FSL CEstimate' + outputs['zstats'] = zstats + + if isdefined(self.inputs.f_con_file): + zfstats = human_order_sorted( + glob(os.path.join(pth, 'zfstat[0-9]*.*'))) + assert len( + zfstats) >= 1, 'No zfstat volumes generated by FSL CEstimate' + outputs['zfstats'] = zfstats + + fstats = human_order_sorted( + glob(os.path.join(pth, 'fstat[0-9]*.*'))) + assert len( + fstats) >= 1, 'No fstat volumes generated by FSL CEstimate' + outputs['fstats'] = fstats + + tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*'))) + assert len(tstats) >= 1, 'No tstat volumes generated by FSL CEstimate' + outputs['tstats'] = tstats + + mrefs = human_order_sorted( + glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*'))) + assert len( + mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO' + outputs['mrefvars'] = mrefs + + tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*'))) + assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO' + outputs['tdof'] = tdof + + weights = human_order_sorted( + glob(os.path.join(pth, 'weights[0-9]*.*'))) + assert len(weights) >= 1, 'No weight volumes generated by FLAMEO' + outputs['weights'] = weights + + outputs['stats_dir'] = pth + + return outputs + + +class ContrastMgrInputSpec(FSLCommandInputSpec): + tcon_file = File( + exists=True, + mandatory=True, + argstr='%s', + position=-1, + desc='contrast file containing T-contrasts') + fcon_file = File( + exists=True, + argstr='-f %s', + desc='contrast file containing F-contrasts') + param_estimates = InputMultiPath( + File(exists=True), + argstr='', + copyfile=False, + mandatory=True, + desc=('Parameter estimates for each ' + 'column of the design matrix')) + corrections = File( + exists=True, + copyfile=False, + mandatory=True, + desc='statistical corrections used within FILM modelling') + dof_file = File( + exists=True, + argstr='', + copyfile=False, + mandatory=True, + desc='degrees of freedom') + sigmasquareds = File( + exists=True, + argstr='', + position=-2, + copyfile=False, + mandatory=True, + desc=('summary of residuals, 
See Woolrich, et. al., ' + '2001')) + contrast_num = traits.Range( + low=1, + argstr='-cope', + desc=('contrast number to start labeling ' + 'copes from')) + suffix = traits.Str( + argstr='-suffix %s', + desc=('suffix to put on the end of the cope filename ' + 'before the contrast number, default is ' + 'nothing')) + + +class ContrastMgrOutputSpec(TraitedSpec): + copes = OutputMultiPath( + File(exists=True), desc='Contrast estimates for each contrast') + varcopes = OutputMultiPath( + File(exists=True), desc='Variance estimates for each contrast') + zstats = OutputMultiPath( + File(exists=True), desc='z-stat file for each contrast') + tstats = OutputMultiPath( + File(exists=True), desc='t-stat file for each contrast') + fstats = OutputMultiPath( + File(exists=True), desc='f-stat file for each contrast') + zfstats = OutputMultiPath( + File(exists=True), desc='z-stat file for each F contrast') + neffs = OutputMultiPath( + File(exists=True), desc='neff file ?? for each contrast') + + +class ContrastMgr(FSLCommand): + """Use FSL contrast_mgr command to evaluate contrasts + + In interface mode this file assumes that all the required inputs are in the + same location. This has deprecated for FSL versions 5.0.7+ as the necessary + corrections file is no longer generated by FILMGLS. + """ + if Info.version() and LooseVersion( + Info.version()) >= LooseVersion("5.0.7"): + DeprecationWarning("ContrastMgr is deprecated in FSL 5.0.7+") + _cmd = 'contrast_mgr' + input_spec = ContrastMgrInputSpec + output_spec = ContrastMgrOutputSpec + + def _run_interface(self, runtime): + # The returncode is meaningless in ContrastMgr. So check the output + # in stderr and if it's set, then update the returncode + # accordingly. + runtime = super(ContrastMgr, self)._run_interface(runtime) + if runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _format_arg(self, name, trait_spec, value): + if name in ['param_estimates', 'corrections', 'dof_file']: + return '' + elif name in ['sigmasquareds']: + path, _ = os.path.split(value) + return path + else: + return super(ContrastMgr, self)._format_arg( + name, trait_spec, value) + + def _get_design_root(self, infile): + _, fname = os.path.split(infile) + return fname.split('.')[0] + + def _get_numcons(self): + numtcons = 0 + numfcons = 0 + if isdefined(self.inputs.tcon_file): + fp = open(self.inputs.tcon_file, 'rt') + for line in fp.readlines(): + if line.startswith('/NumContrasts'): + numtcons = int(line.split()[-1]) + break + fp.close() + if isdefined(self.inputs.fcon_file): + fp = open(self.inputs.fcon_file, 'rt') + for line in fp.readlines(): + if line.startswith('/NumContrasts'): + numfcons = int(line.split()[-1]) + break + fp.close() + return numtcons, numfcons + + def _list_outputs(self): + outputs = self._outputs().get() + pth, _ = os.path.split(self.inputs.sigmasquareds) + numtcons, numfcons = self._get_numcons() + base_contrast = 1 + if isdefined(self.inputs.contrast_num): + base_contrast = self.inputs.contrast_num + copes = [] + varcopes = [] + zstats = [] + tstats = [] + neffs = [] + for i in range(numtcons): + copes.append( + self._gen_fname('cope%d.nii' % (base_contrast + i), cwd=pth)) + varcopes.append( + self._gen_fname( + 'varcope%d.nii' % (base_contrast + i), cwd=pth)) + zstats.append( + self._gen_fname('zstat%d.nii' % (base_contrast + i), cwd=pth)) + tstats.append( + self._gen_fname('tstat%d.nii' % (base_contrast + i), cwd=pth)) + neffs.append( + self._gen_fname('neff%d.nii' % (base_contrast + i), cwd=pth)) + if copes: + outputs['copes'] = 
copes + outputs['varcopes'] = varcopes + outputs['zstats'] = zstats + outputs['tstats'] = tstats + outputs['neffs'] = neffs + fstats = [] + zfstats = [] + for i in range(numfcons): + fstats.append( + self._gen_fname('fstat%d.nii' % (base_contrast + i), cwd=pth)) + zfstats.append( + self._gen_fname('zfstat%d.nii' % (base_contrast + i), cwd=pth)) + if fstats: + outputs['fstats'] = fstats + outputs['zfstats'] = zfstats + return outputs + + +class L2ModelInputSpec(BaseInterfaceInputSpec): + num_copes = traits.Range( + low=1, mandatory=True, desc='number of copes to be combined') + + +class L2ModelOutputSpec(TraitedSpec): + design_mat = File(exists=True, desc='design matrix file') + design_con = File(exists=True, desc='design contrast file') + design_grp = File(exists=True, desc='design group file') + + +class L2Model(BaseInterface): + """Generate subject specific second level model + + Examples + -------- + + >>> from nipype.interfaces.fsl import L2Model + >>> model = L2Model(num_copes=3) # 3 sessions + + """ + + input_spec = L2ModelInputSpec + output_spec = L2ModelOutputSpec + + def _run_interface(self, runtime): + cwd = os.getcwd() + mat_txt = [ + '/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), + '/PPheights 1', '', '/Matrix' + ] + for i in range(self.inputs.num_copes): + mat_txt += ['1'] + mat_txt = '\n'.join(mat_txt) + + con_txt = [ + '/ContrastName1 group mean', + '/NumWaves 1', + '/NumContrasts 1', + '/PPheights 1', + '/RequiredEffect 100', # XX where does this + # number come from + '', + '/Matrix', + '1' + ] + con_txt = '\n'.join(con_txt) + + grp_txt = [ + '/NumWaves 1', '/NumPoints {:d}'.format(self.inputs.num_copes), + '', '/Matrix' + ] + for i in range(self.inputs.num_copes): + grp_txt += ['1'] + grp_txt = '\n'.join(grp_txt) + + txt = { + 'design.mat': mat_txt, + 'design.con': con_txt, + 'design.grp': grp_txt + } + + # write design files + for i, name in enumerate(['design.mat', 'design.con', 'design.grp']): + f = open(os.path.join(cwd, name), 'wt') + f.write(txt[name]) + f.close() + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + for field in list(outputs.keys()): + outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) + return outputs + + +class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec): + contrasts = traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('F'), + traits.List( + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float)), ))), + mandatory=True, + desc="List of contrasts with each contrast being a list of the form - \ +[('name', 'stat', [condition list], [weight list])]. if \ +session list is None or not provided, all sessions are used. 
For F \ +contrasts, the condition list should contain previously defined \ +T-contrasts without any weight list.") + regressors = traits.Dict( + traits.Str, + traits.List(traits.Float), + mandatory=True, + desc=('dictionary containing named lists of ' + 'regressors')) + groups = traits.List( + traits.Int, + desc=('list of group identifiers (defaults to single ' + 'group)')) + + +class MultipleRegressDesignOutputSpec(TraitedSpec): + design_mat = File(exists=True, desc='design matrix file') + design_con = File(exists=True, desc='design t-contrast file') + design_fts = File(exists=True, desc='design f-contrast file') + design_grp = File(exists=True, desc='design group file') + + +class MultipleRegressDesign(BaseInterface): + """Generate multiple regression design + + .. note:: + FSL does not demean columns for higher level analysis. + + Please see `FSL documentation + `_ + for more details on model specification for higher level analysis. + + Examples + -------- + + >>> from nipype.interfaces.fsl import MultipleRegressDesign + >>> model = MultipleRegressDesign() + >>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]] + >>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3]) + >>> model.run() # doctest: +SKIP + + """ + + input_spec = MultipleRegressDesignInputSpec + output_spec = MultipleRegressDesignOutputSpec + + def _run_interface(self, runtime): + cwd = os.getcwd() + regs = sorted(self.inputs.regressors.keys()) + nwaves = len(regs) + npoints = len(self.inputs.regressors[regs[0]]) + ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T']) + nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) + # write mat file + mat_txt = [ + '/NumWaves %d' % nwaves, + '/NumPoints %d' % npoints + ] + ppheights = [] + for reg in regs: + maxreg = np.max(self.inputs.regressors[reg]) + minreg = np.min(self.inputs.regressors[reg]) + if np.sign(maxreg) == np.sign(minreg): + regheight = max([abs(minreg), abs(maxreg)]) + else: + regheight = abs(maxreg - minreg) + ppheights.append('%e' % regheight) + mat_txt += ['/PPheights ' + ' '.join(ppheights)] + mat_txt += ['', '/Matrix'] + for cidx in range(npoints): + mat_txt.append(' '.join( + ['%e' % self.inputs.regressors[key][cidx] for key in regs])) + mat_txt = '\n'.join(mat_txt) + '\n' + # write t-con file + con_txt = [] + counter = 0 + tconmap = {} + for conidx, con in enumerate(self.inputs.contrasts): + if con[1] == 'T': + tconmap[conidx] = counter + counter += 1 + con_txt += ['/ContrastName%d %s' % (counter, con[0])] + con_txt += [ + '/NumWaves %d' % nwaves, + '/NumContrasts %d' % ntcons, + '/PPheights %s' % ' '.join( + ['%e' % 1 for i in range(counter)]), + '/RequiredEffect %s' % ' '.join( + ['%.3f' % 100 for i in range(counter)]), '', '/Matrix' + ] + for idx in sorted(tconmap.keys()): + convals = np.zeros((nwaves, 1)) + for regidx, reg in enumerate(self.inputs.contrasts[idx][2]): + convals[regs.index(reg)] = self.inputs.contrasts[idx][3][ + regidx] + con_txt.append(' '.join(['%e' % val for val in convals])) + con_txt = '\n'.join(con_txt) + '\n' + # write f-con file + fcon_txt = '' + if nfcons: + fcon_txt = [ + '/NumWaves %d' % ntcons, + '/NumContrasts %d' % nfcons, '', '/Matrix' + ] + for conidx, con in enumerate(self.inputs.contrasts): + if con[1] == 'F': + convals = np.zeros((ntcons, 1)) + for tcon in con[2]: + convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1 + fcon_txt.append(' '.join(['%d' % val for val in convals])) + fcon_txt = '\n'.join(fcon_txt) + fcon_txt += '\n' + # write group file + grp_txt = [ + 
'/NumWaves 1', + '/NumPoints %d' % npoints, '', '/Matrix' + ] + for i in range(npoints): + if isdefined(self.inputs.groups): + grp_txt += ['%d' % self.inputs.groups[i]] + else: + grp_txt += ['1'] + grp_txt = '\n'.join(grp_txt) + '\n' + + txt = { + 'design.mat': mat_txt, + 'design.con': con_txt, + 'design.fts': fcon_txt, + 'design.grp': grp_txt + } + + # write design files + for key, val in list(txt.items()): + if ('fts' in key) and (nfcons == 0): + continue + filename = key.replace('_', '.') + f = open(os.path.join(cwd, filename), 'wt') + f.write(val) + f.close() + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F']) + for field in list(outputs.keys()): + if ('fts' in field) and (nfcons == 0): + continue + outputs[field] = os.path.join(os.getcwd(), field.replace('_', '.')) + return outputs + + +class SMMInputSpec(FSLCommandInputSpec): + spatial_data_file = File( + exists=True, + position=0, + argstr='--sdf="%s"', + mandatory=True, + desc="statistics spatial map", + copyfile=False) + mask = File( + exists=True, + position=1, + argstr='--mask="%s"', + mandatory=True, + desc="mask file", + copyfile=False) + no_deactivation_class = traits.Bool( + position=2, + argstr="--zfstatmode", + desc="enforces no deactivation class") + + +class SMMOutputSpec(TraitedSpec): + null_p_map = File(exists=True) + activation_p_map = File(exists=True) + deactivation_p_map = File(exists=True) + + +class SMM(FSLCommand): + ''' + Spatial Mixture Modelling. For more detail on the spatial mixture modelling + see Mixture Models with Adaptive Spatial Regularisation for Segmentation + with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., + and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. + ''' + _cmd = 'mm --ld=logdir' + input_spec = SMMInputSpec + output_spec = SMMOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + # TODO get the true logdir from the stdout + outputs['null_p_map'] = self._gen_fname( + basename="w1_mean", cwd="logdir") + outputs['activation_p_map'] = self._gen_fname( + basename="w2_mean", cwd="logdir") + if (not isdefined(self.inputs.no_deactivation_class) + or not self.inputs.no_deactivation_class): + outputs['deactivation_p_map'] = self._gen_fname( + basename="w3_mean", cwd="logdir") + return outputs + + +class MELODICInputSpec(FSLCommandInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr="-i %s", + mandatory=True, + position=0, + desc="input file names (either single file name or a list)", + sep=",") + out_dir = Directory( + argstr="-o %s", desc="output directory name", genfile=True) + mask = File( + exists=True, argstr="-m %s", desc="file name of mask for thresholding") + no_mask = traits.Bool(argstr="--nomask", desc="switch off masking") + update_mask = traits.Bool( + argstr="--update_mask", desc="switch off mask updating") + no_bet = traits.Bool(argstr="--nobet", desc="switch off BET") + bg_threshold = traits.Float( + argstr="--bgthreshold=%f", + desc=("brain/non-brain threshold used to mask non-brain voxels, as a " + "percentage (only if --nobet selected)")) + dim = traits.Int( + argstr="-d %d", + desc=("dimensionality reduction into #num dimensions (default: " + "automatic estimation)")) + dim_est = traits.Str( + argstr="--dimest=%s", + desc=("use specific dim. 
estimation technique: lap, " + "bic, mdl, aic, mean (default: lap)")) + sep_whiten = traits.Bool( + argstr="--sep_whiten", desc="switch on separate whitening") + sep_vn = traits.Bool( + argstr="--sep_vn", desc="switch off joined variance normalization") + migp = traits.Bool(argstr="--migp", desc="switch on MIGP data reduction") + migpN = traits.Int( + argstr="--migpN %d", desc="number of internal Eigenmaps") + migp_shuffle = traits.Bool( + argstr="--migp_shuffle", + desc="randomise MIGP file order (default: TRUE)") + migp_factor = traits.Int( + argstr="--migp_factor %d", + desc= + "Internal Factor of mem-threshold relative to number of Eigenmaps (default: 2)" + ) + num_ICs = traits.Int( + argstr="-n %d", + desc="number of IC's to extract (for deflation approach)") + approach = traits.Str( + argstr="-a %s", + desc="approach for decomposition, 2D: defl, symm (default), 3D: tica " + "(default), concat") + non_linearity = traits.Str( + argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4") + var_norm = traits.Bool( + argstr="--vn", desc="switch off variance normalization") + pbsc = traits.Bool( + argstr="--pbsc", + desc="switch off conversion to percent BOLD signal change") + cov_weight = traits.Float( + argstr="--covarweight=%f", + desc=("voxel-wise weights for the covariance matrix (e.g. " + "segmentation information)")) + epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change") + epsilonS = traits.Float( + argstr="--epsS=%f", + desc="minimum error change for rank-1 approximation in TICA") + maxit = traits.Int( + argstr="--maxit=%d", + desc="maximum number of iterations before restart") + max_restart = traits.Int( + argstr="--maxrestart=%d", desc="maximum number of restarts") + mm_thresh = traits.Float( + argstr="--mmthresh=%f", + desc="threshold for Mixture Model based inference") + no_mm = traits.Bool( + argstr="--no_mm", desc="switch off mixture modelling on IC maps") + ICs = File( + exists=True, + argstr="--ICs=%s", + desc="filename of the IC components file for mixture modelling") + mix = File( + exists=True, + argstr="--mix=%s", + desc="mixing matrix for mixture modelling / filtering") + smode = File( + exists=True, + argstr="--smode=%s", + desc="matrix of session modes for report generation") + rem_cmp = traits.List( + traits.Int, argstr="-f %d", desc="component numbers to remove") + report = traits.Bool(argstr="--report", desc="generate Melodic web report") + bg_image = File( + exists=True, + argstr="--bgimage=%s", + desc="specify background image for report (default: mean image)") + tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds") + log_power = traits.Bool( + argstr="--logPower", + desc="calculate log of power for frequency spectrum") + t_des = File( + exists=True, + argstr="--Tdes=%s", + desc="design matrix across time-domain") + t_con = File( + exists=True, + argstr="--Tcon=%s", + desc="t-contrast matrix across time-domain") + s_des = File( + exists=True, + argstr="--Sdes=%s", + desc="design matrix across subject-domain") + s_con = File( + exists=True, + argstr="--Scon=%s", + desc="t-contrast matrix across subject-domain") + out_all = traits.Bool(argstr="--Oall", desc="output everything") + out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix") + out_stats = traits.Bool( + argstr="--Ostats", desc="output thresholded maps and probability maps") + out_pca = traits.Bool(argstr="--Opca", desc="output PCA results") + out_white = traits.Bool( + argstr="--Owhite", desc="output whitening/dewhitening matrices") + out_orig = 
traits.Bool(argstr="--Oorig", desc="output the original ICs") + out_mean = traits.Bool(argstr="--Omean", desc="output mean volume") + report_maps = traits.Str( + argstr="--report_maps=%s", + desc="control string for spatial map images (see slicer)") + remove_deriv = traits.Bool( + argstr="--remove_deriv", + desc="removes every second entry in paradigm file (EV derivatives)") + + +class MELODICOutputSpec(TraitedSpec): + out_dir = Directory(exists=True) + report_dir = Directory(exists=True) + + +class MELODIC(FSLCommand): + """Multivariate Exploratory Linear Optimised Decomposition into Independent + Components + + Examples + -------- + + >>> melodic_setup = MELODIC() + >>> melodic_setup.inputs.approach = 'tica' + >>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii'] + >>> melodic_setup.inputs.no_bet = True + >>> melodic_setup.inputs.bg_threshold = 10 + >>> melodic_setup.inputs.tr_sec = 1.5 + >>> melodic_setup.inputs.mm_thresh = 0.5 + >>> melodic_setup.inputs.out_stats = True + >>> melodic_setup.inputs.t_des = 'timeDesign.mat' + >>> melodic_setup.inputs.t_con = 'timeDesign.con' + >>> melodic_setup.inputs.s_des = 'subjectDesign.mat' + >>> melodic_setup.inputs.s_con = 'subjectDesign.con' + >>> melodic_setup.inputs.out_dir = 'groupICA.out' + >>> melodic_setup.cmdline + 'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con --Sdes=subjectDesign.mat --Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000' + >>> melodic_setup.run() # doctest: +SKIP + + + """ + input_spec = MELODICInputSpec + output_spec = MELODICOutputSpec + _cmd = 'melodic' + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_dir): + outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + else: + outputs['out_dir'] = self._gen_filename("out_dir") + if isdefined(self.inputs.report) and self.inputs.report: + outputs['report_dir'] = os.path.join(outputs['out_dir'], "report") + return outputs + + def _gen_filename(self, name): + if name == "out_dir": + return os.getcwd() + + +class SmoothEstimateInputSpec(FSLCommandInputSpec): + dof = traits.Int( + argstr='--dof=%d', + mandatory=True, + xor=['zstat_file'], + desc='number of degrees of freedom') + mask_file = File( + argstr='--mask=%s', + exists=True, + mandatory=True, + desc='brain mask volume') + residual_fit_file = File( + argstr='--res=%s', + exists=True, + requires=['dof'], + desc='residual-fit image file') + zstat_file = File( + argstr='--zstat=%s', exists=True, xor=['dof'], desc='zstat image file') + + +class SmoothEstimateOutputSpec(TraitedSpec): + dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))') + volume = traits.Int(desc='number of voxels in mask') + resels = traits.Float(desc='number of resels') + + +class SmoothEstimate(FSLCommand): + """ Estimates the smoothness of an image + + Examples + -------- + + >>> est = SmoothEstimate() + >>> est.inputs.zstat_file = 'zstat1.nii.gz' + >>> est.inputs.mask_file = 'mask.nii' + >>> est.cmdline + 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' + + """ + + input_spec = SmoothEstimateInputSpec + output_spec = SmoothEstimateOutputSpec + _cmd = 'smoothest' + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + stdout = runtime.stdout.split('\n') + outputs.dlh = float(stdout[0].split()[1]) + outputs.volume = int(stdout[1].split()[1]) + outputs.resels = float(stdout[2].split()[1]) + 
return outputs + + +class ClusterInputSpec(FSLCommandInputSpec): + in_file = File( + argstr='--in=%s', mandatory=True, exists=True, desc='input volume') + threshold = traits.Float( + argstr='--thresh=%.10f', + mandatory=True, + desc='threshold for input volume') + out_index_file = traits.Either( + traits.Bool, + File, + argstr='--oindex=%s', + desc='output of cluster index (in size order)', + hash_files=False) + out_threshold_file = traits.Either( + traits.Bool, + File, + argstr='--othresh=%s', + desc='thresholded image', + hash_files=False) + out_localmax_txt_file = traits.Either( + traits.Bool, + File, + argstr='--olmax=%s', + desc='local maxima text file', + hash_files=False) + out_localmax_vol_file = traits.Either( + traits.Bool, + File, + argstr='--olmaxim=%s', + desc='output of local maxima volume', + hash_files=False) + out_size_file = traits.Either( + traits.Bool, + File, + argstr='--osize=%s', + desc='filename for output of size image', + hash_files=False) + out_max_file = traits.Either( + traits.Bool, + File, + argstr='--omax=%s', + desc='filename for output of max image', + hash_files=False) + out_mean_file = traits.Either( + traits.Bool, + File, + argstr='--omean=%s', + desc='filename for output of mean image', + hash_files=False) + out_pval_file = traits.Either( + traits.Bool, + File, + argstr='--opvals=%s', + desc='filename for image output of log pvals', + hash_files=False) + pthreshold = traits.Float( + argstr='--pthresh=%.10f', + requires=['dlh', 'volume'], + desc='p-threshold for clusters') + peak_distance = traits.Float( + argstr='--peakdist=%.10f', + desc='minimum distance between local maxima/minima, in mm (default 0)') + cope_file = traits.File(argstr='--cope=%s', desc='cope volume') + volume = traits.Int( + argstr='--volume=%d', desc='number of voxels in the mask') + dlh = traits.Float( + argstr='--dlh=%.10f', desc='smoothness estimate = sqrt(det(Lambda))') + fractional = traits.Bool( + False, + usedefault=True, + argstr='--fractional', + desc='interprets the threshold as a fraction of the robust range') + connectivity = traits.Int( + argstr='--connectivity=%d', + desc='the connectivity of voxels (default 26)') + use_mm = traits.Bool( + False, + usedefault=True, + argstr='--mm', + desc='use mm, not voxel, coordinates') + find_min = traits.Bool( + False, + usedefault=True, + argstr='--min', + desc='find minima instead of maxima') + no_table = traits.Bool( + False, + usedefault=True, + argstr='--no_table', + desc='suppresses printing of the table info') + minclustersize = traits.Bool( + False, + usedefault=True, + argstr='--minclustersize', + desc='prints out minimum significant cluster size') + xfm_file = File( + argstr='--xfm=%s', + desc=('filename for Linear: input->standard-space ' + 'transform. 
Non-linear: input->highres transform')) + std_space_file = File( + argstr='--stdvol=%s', desc='filename for standard-space volume') + num_maxima = traits.Int( + argstr='--num=%d', desc='no of local maxima to report') + warpfield_file = File( + argstr='--warpvol=%s', desc='file containing warpfield') + + +class ClusterOutputSpec(TraitedSpec): + index_file = File(desc='output of cluster index (in size order)') + threshold_file = File(desc='thresholded image') + localmax_txt_file = File(desc='local maxima text file') + localmax_vol_file = File(desc='output of local maxima volume') + size_file = File(desc='filename for output of size image') + max_file = File(desc='filename for output of max image') + mean_file = File(desc='filename for output of mean image') + pval_file = File(desc='filename for image output of log pvals') + + +class Cluster(FSLCommand): + """ Uses FSL cluster to perform clustering on statistical output + + Examples + -------- + + >>> cl = Cluster() + >>> cl.inputs.threshold = 2.3 + >>> cl.inputs.in_file = 'zstat1.nii.gz' + >>> cl.inputs.out_localmax_txt_file = 'stats.txt' + >>> cl.inputs.use_mm = True + >>> cl.cmdline + 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000 --mm' + + """ + input_spec = ClusterInputSpec + output_spec = ClusterOutputSpec + _cmd = 'cluster' + + filemap = { + 'out_index_file': 'index', + 'out_threshold_file': 'threshold', + 'out_localmax_txt_file': 'localmax.txt', + 'out_localmax_vol_file': 'localmax', + 'out_size_file': 'size', + 'out_max_file': 'max', + 'out_mean_file': 'mean', + 'out_pval_file': 'pval' + } + + def _list_outputs(self): + outputs = self.output_spec().get() + for key, suffix in list(self.filemap.items()): + outkey = key[4:] + inval = getattr(self.inputs, key) + if isdefined(inval): + if isinstance(inval, bool): + if inval: + change_ext = True + if suffix.endswith('.txt'): + change_ext = False + outputs[outkey] = self._gen_fname( + self.inputs.in_file, + suffix='_' + suffix, + change_ext=change_ext) + else: + outputs[outkey] = os.path.abspath(inval) + return outputs + + def _format_arg(self, name, spec, value): + if name in list(self.filemap.keys()): + if isinstance(value, bool): + fname = self._list_outputs()[name[4:]] + else: + fname = value + return spec.argstr % fname + return super(Cluster, self)._format_arg(name, spec, value) + + +class DualRegressionInputSpec(FSLCommandInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr="%s", + mandatory=True, + position=-1, + sep=" ", + desc="List all subjects' preprocessed, standard-space 4D datasets", + ) + group_IC_maps_4D = File( + exists=True, + argstr="%s", + mandatory=True, + position=1, + desc="4D image containing spatial IC maps (melodic_IC) from the " + "whole-group ICA analysis") + des_norm = traits.Bool( + True, + argstr="%i", + position=2, + usedefault=True, + desc="Whether to variance-normalise the timecourses used as the " + "stage-2 regressors; True is default and recommended") + one_sample_group_mean = traits.Bool( + argstr="-1", + position=3, + desc="perform 1-sample group-mean test instead of generic " + "permutation test") + design_file = File( + exists=True, + argstr="%s", + position=3, + desc="Design matrix for final cross-subject modelling with " + "randomise") + con_file = File( + exists=True, + argstr="%s", + position=4, + desc="Design contrasts for final cross-subject modelling with " + "randomise") + n_perm = traits.Int( + argstr="%i", + mandatory=True, + position=5, + desc="Number of permutations for randomise; set to 1 for just
raw " + "tstat output, set to 0 to not run randomise at all.") + out_dir = Directory( + "output", + argstr="%s", + usedefault=True, + position=6, + desc="This directory will be created to hold all output and logfiles", + genfile=True) + + +class DualRegressionOutputSpec(TraitedSpec): + out_dir = Directory(exists=True) + + +class DualRegression(FSLCommand): + """Wrapper Script for Dual Regression Workflow + + Examples + -------- + + >>> dual_regression = DualRegression() + >>> dual_regression.inputs.in_files = ["functional.nii", "functional2.nii", "functional3.nii"] + >>> dual_regression.inputs.group_IC_maps_4D = "allFA.nii" + >>> dual_regression.inputs.des_norm = False + >>> dual_regression.inputs.one_sample_group_mean = True + >>> dual_regression.inputs.n_perm = 10 + >>> dual_regression.inputs.out_dir = "my_output_directory" + >>> dual_regression.cmdline + 'dual_regression allFA.nii 0 -1 10 my_output_directory functional.nii functional2.nii functional3.nii' + >>> dual_regression.run() # doctest: +SKIP + + """ + input_spec = DualRegressionInputSpec + output_spec = DualRegressionOutputSpec + _cmd = 'dual_regression' + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_dir): + outputs['out_dir'] = os.path.abspath(self.inputs.out_dir) + else: + outputs['out_dir'] = self._gen_filename("out_dir") + return outputs + + def _gen_filename(self, name): + if name == "out_dir": + return os.getcwd() + + +class RandomiseInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + desc='4D input file', + argstr='-i %s', + position=0, + mandatory=True) + base_name = traits.Str( + 'randomise', + desc='the rootname that all generated files will have', + argstr='-o "%s"', + position=1, + usedefault=True) + design_mat = File( + exists=True, desc='design matrix file', argstr='-d %s', position=2) + tcon = File( + exists=True, desc='t contrasts file', argstr='-t %s', position=3) + fcon = File(exists=True, desc='f contrasts file', argstr='-f %s') + mask = File(exists=True, desc='mask image', argstr='-m %s') + x_block_labels = File( + exists=True, desc='exchangeability block labels file', argstr='-e %s') + demean = traits.Bool( + desc='demean data temporally before model fitting', argstr='-D') + one_sample_group_mean = traits.Bool( + desc=('perform 1-sample group-mean test instead of generic ' + 'permutation test'), + argstr='-1') + show_total_perms = traits.Bool( + desc=('print out how many unique permutations would be generated ' + 'and exit'), + argstr='-q') + show_info_parallel_mode = traits.Bool( + desc='print out information required for parallel mode and exit', + argstr='-Q') + vox_p_values = traits.Bool( + desc='output voxelwise (corrected and uncorrected) p-value images', + argstr='-x') + tfce = traits.Bool( + desc='carry out Threshold-Free Cluster Enhancement', argstr='-T') + tfce2D = traits.Bool( + desc=('carry out Threshold-Free Cluster Enhancement with 2D ' + 'optimisation'), + argstr='--T2') + f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only') + raw_stats_imgs = traits.Bool( + desc='output raw ( unpermuted ) statistic images', argstr='-R') + p_vec_n_dist_files = traits.Bool( + desc='output permutation vector and null distribution text files', + argstr='-P') + num_perm = traits.Int( + argstr='-n %d', + desc='number of permutations (default 5000, set to 0 for exhaustive)') + seed = traits.Int( + argstr='--seed=%d', + desc='specific integer seed for random number generator') + var_smooth = traits.Int( + argstr='-v %d', desc='use 
variance smoothing (std is in mm)') + c_thresh = traits.Float( + argstr='-c %.1f', desc='carry out cluster-based thresholding') + cm_thresh = traits.Float( + argstr='-C %.1f', desc='carry out cluster-mass-based thresholding') + f_c_thresh = traits.Float( + argstr='-F %.2f', desc='carry out f cluster thresholding') + f_cm_thresh = traits.Float( + argstr='-S %.2f', desc='carry out f cluster-mass thresholding') + tfce_H = traits.Float( + argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)') + tfce_E = traits.Float( + argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)') + tfce_C = traits.Float( + argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)') + + +class RandomiseOutputSpec(TraitedSpec): + tstat_files = traits.List( + File(exists=True), desc='t contrast raw statistic') + fstat_files = traits.List( + File(exists=True), desc='f contrast raw statistic') + t_p_files = traits.List( + File(exists=True), desc='t contrast uncorrected p values files') + f_p_files = traits.List( + File(exists=True), desc='f contrast uncorrected p values files') + t_corrected_p_files = traits.List( + File(exists=True), + desc='t contrast FWE (Family-wise error) corrected p values files') + f_corrected_p_files = traits.List( + File(exists=True), + desc='f contrast FWE (Family-wise error) corrected p values files') + + +class Randomise(FSLCommand): + """FSL Randomise: feeds the 4D projected FA data into GLM + modelling and thresholding + in order to find voxels which correlate with your model + + Example + ------- + >>> import nipype.interfaces.fsl as fsl + >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') + >>> rand.cmdline + 'randomise -i allFA.nii -o "randomise" -d design.mat -t design.con -m mask.nii' + + """ + + _cmd = 'randomise' + input_spec = RandomiseInputSpec + output_spec = RandomiseOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['tstat_files'] = glob( + self._gen_fname('%s_tstat*.nii' % self.inputs.base_name)) + outputs['fstat_files'] = glob( + self._gen_fname('%s_fstat*.nii' % self.inputs.base_name)) + prefix = False + if self.inputs.tfce or self.inputs.tfce2D: + prefix = 'tfce' + elif self.inputs.vox_p_values: + prefix = 'vox' + elif self.inputs.c_thresh or self.inputs.f_c_thresh: + prefix = 'clustere' + elif self.inputs.cm_thresh or self.inputs.f_cm_thresh: + prefix = 'clusterm' + if prefix: + outputs['t_p_files'] = glob( + self._gen_fname('%s_%s_p_tstat*' % (self.inputs.base_name, + prefix))) + outputs['t_corrected_p_files'] = glob( + self._gen_fname('%s_%s_corrp_tstat*.nii' % + (self.inputs.base_name, prefix))) + + outputs['f_p_files'] = glob( + self._gen_fname('%s_%s_p_fstat*.nii' % (self.inputs.base_name, + prefix))) + outputs['f_corrected_p_files'] = glob( + self._gen_fname('%s_%s_corrp_fstat*.nii' % + (self.inputs.base_name, prefix))) + return outputs + + +class GLMInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='-i %s', + mandatory=True, + position=1, + desc='input file name (text matrix or 3D/4D image file)') + out_file = File( + name_template="%s_glm", + argstr='-o %s', + position=3, + desc=('filename for GLM parameter estimates' + ' (GLM betas)'), + name_source="in_file", + keep_extension=True) + design = File( + exists=True, + argstr='-d %s', + mandatory=True, + position=2, + desc=('file name of the GLM design matrix (text time' + + ' courses for temporal regression or an image' + + ' file for spatial regression)')) +
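+    # fsl_glm infers the regression type from the design input given above: a
+    # text matrix triggers temporal regression, an image file triggers spatial
+    # regression, and the optional outputs below are written as text or image
+    # accordingly.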
contrasts = File( + exists=True, argstr='-c %s', desc=('matrix of t-statistics contrasts')) + mask = File( + exists=True, + argstr='-m %s', + desc=('mask image file name if input is image')) + dof = traits.Int( + argstr='--dof=%d', desc=('set degrees of freedom' + ' explicitly')) + des_norm = traits.Bool( + argstr='--des_norm', + desc=('switch on normalization of the design' + + ' matrix columns to unit std deviation')) + dat_norm = traits.Bool( + argstr='--dat_norm', + desc=('switch on normalization of the data time series to unit std ' + 'deviation')) + var_norm = traits.Bool( + argstr='--vn', desc=('perform MELODIC variance-normalisation on data')) + demean = traits.Bool( + argstr='--demean', desc=('switch on demeaning of design and data')) + out_cope = File( + argstr='--out_cope=%s', + desc='output file name for COPE (either as txt or image)') + out_z_name = File( + argstr='--out_z=%s', + desc='output file name for Z-stats (either as txt or image)') + out_t_name = File( + argstr='--out_t=%s', + desc='output file name for t-stats (either as txt or image)') + out_p_name = File( + argstr='--out_p=%s', + desc=('output file name for p-values of Z-stats (either as text file ' + 'or image)')) + out_f_name = File( + argstr='--out_f=%s', + desc='output file name for F-value of full model fit') + out_pf_name = File( + argstr='--out_pf=%s', + desc='output file name for p-value for full model fit') + out_res_name = File( + argstr='--out_res=%s', desc='output file name for residuals') + out_varcb_name = File( + argstr='--out_varcb=%s', desc='output file name for variance of COPEs') + out_sigsq_name = File( + argstr='--out_sigsq=%s', + desc=('output file name for residual noise variance sigma-square')) + out_data_name = File( + argstr='--out_data=%s', desc='output file name for pre-processed data') + out_vnscales_name = File( + argstr='--out_vnscales=%s', + desc=('output file name for scaling factors for variance ' + 'normalisation')) + + +class GLMOutputSpec(TraitedSpec): + out_file = File( + exists=True, desc=('file name of GLM parameters (if generated)')) + out_cope = OutputMultiPath( + File(exists=True), + desc=('output file name for COPEs (either as text file or image)')) + out_z = OutputMultiPath( + File(exists=True), + desc=('output file name for Z-stats (either as text file or image)')) + out_t = OutputMultiPath( + File(exists=True), + desc=('output file name for t-stats (either as text file or image)')) + out_p = OutputMultiPath( + File(exists=True), + desc=('output file name for p-values of Z-stats (either as text file ' + 'or image)')) + out_f = OutputMultiPath( + File(exists=True), + desc=('output file name for F-value of full model fit')) + out_pf = OutputMultiPath( + File(exists=True), + desc=('output file name for p-value for full model fit')) + out_res = OutputMultiPath( + File(exists=True), desc='output file name for residuals') + out_varcb = OutputMultiPath( + File(exists=True), desc='output file name for variance of COPEs') + out_sigsq = OutputMultiPath( + File(exists=True), + desc=('output file name for residual noise variance sigma-square')) + out_data = OutputMultiPath( + File(exists=True), desc='output file for preprocessed data') + out_vnscales = OutputMultiPath( + File(exists=True), + desc=('output file name for scaling factors for variance ' + 'normalisation')) + + +class GLM(FSLCommand): + """ + FSL GLM: + + Example + ------- + >>> import nipype.interfaces.fsl as fsl + >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') + >>> glm.cmdline + 'fsl_glm
-i functional.nii -d maps.nii -o functional_glm.nii' + + """ + _cmd = 'fsl_glm' + input_spec = GLMInputSpec + output_spec = GLMOutputSpec + + def _list_outputs(self): + outputs = super(GLM, self)._list_outputs() + + if isdefined(self.inputs.out_cope): + outputs['out_cope'] = os.path.abspath(self.inputs.out_cope) + + if isdefined(self.inputs.out_z_name): + outputs['out_z'] = os.path.abspath(self.inputs.out_z_name) + + if isdefined(self.inputs.out_t_name): + outputs['out_t'] = os.path.abspath(self.inputs.out_t_name) + + if isdefined(self.inputs.out_p_name): + outputs['out_p'] = os.path.abspath(self.inputs.out_p_name) + + if isdefined(self.inputs.out_f_name): + outputs['out_f'] = os.path.abspath(self.inputs.out_f_name) + + if isdefined(self.inputs.out_pf_name): + outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name) + + if isdefined(self.inputs.out_res_name): + outputs['out_res'] = os.path.abspath(self.inputs.out_res_name) + + if isdefined(self.inputs.out_varcb_name): + outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name) + + if isdefined(self.inputs.out_sigsq_name): + outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name) + + if isdefined(self.inputs.out_data_name): + outputs['out_data'] = os.path.abspath(self.inputs.out_data_name) + + if isdefined(self.inputs.out_vnscales_name): + outputs['out_vnscales'] = os.path.abspath( + self.inputs.out_vnscales_name) + + return outputs + + +def load_template(name): + """Load a template from the model_templates directory + + Parameters + ---------- + name : str + The name of the file to load + + Returns + ------- + template : string.Template + + """ + from pkg_resources import resource_filename as pkgrf + full_fname = pkgrf('nipype', + os.path.join('interfaces', 'fsl', 'model_templates', + name)) + with open(full_fname) as template_file: + template = Template(template_file.read()) + + return template diff --git a/nipype/interfaces/fsl/model_templates/feat_contrast_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_element.tcl new file mode 100644 index 0000000000..a246206cdf --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrast_element.tcl @@ -0,0 +1,2 @@ +# Real contrast_$ctype vector $cnum element $element +set fmri(con_$ctype$cnum.$element) $val diff --git a/nipype/interfaces/fsl/model_templates/feat_contrast_ftest_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_ftest_element.tcl new file mode 100644 index 0000000000..1d24e81dd2 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrast_ftest_element.tcl @@ -0,0 +1,2 @@ +# F-test $cnum element $element +set fmri(ftest_$ctype$cnum.$element) $val diff --git a/nipype/interfaces/fsl/model_templates/feat_contrast_header.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_header.tcl new file mode 100644 index 0000000000..42efa4d521 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrast_header.tcl @@ -0,0 +1,5 @@ +# Contrast & F-tests mode +# real : control real EVs +# orig : control original EVs +set fmri(con_mode_old) orig +set fmri(con_mode) orig diff --git a/nipype/interfaces/fsl/model_templates/feat_contrast_prolog.tcl b/nipype/interfaces/fsl/model_templates/feat_contrast_prolog.tcl new file mode 100644 index 0000000000..1a8edc00c5 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrast_prolog.tcl @@ -0,0 +1,5 @@ +# Display images for contrast_$ctype $cnum +set fmri(conpic_$ctype.$cnum) 1 + +# Title for contrast_$ctype $cnum +set fmri(conname_$ctype.$cnum) "$cname" 
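These .tcl fragments are plain string.Template files: every $name placeholder is filled in by load_template(...).substitute(...), the same mechanism FEATRegister uses for its fsf templates above. A minimal sketch of how feat_contrast_prolog.tcl would be rendered, assuming load_template lands in nipype.interfaces.fsl.model as in this hunk (the substitution values below are illustrative only, not part of the patch):

    >>> from nipype.interfaces.fsl.model import load_template
    >>> tpl = load_template('feat_contrast_prolog.tcl')  # a string.Template
    >>> print(tpl.substitute(ctype='real', cnum=1, cname='left>right'))
    # Display images for contrast_real 1
    set fmri(conpic_real.1) 1
    <BLANKLINE>
    # Title for contrast_real 1
    set fmri(conname_real.1) "left>right"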
diff --git a/nipype/interfaces/fsl/model_templates/feat_contrastmask_element.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_element.tcl new file mode 100644 index 0000000000..bde4f85479 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrastmask_element.tcl @@ -0,0 +1,3 @@ +# Mask real contrast/F-test $c1 with real contrast/F-test $c2? +set fmri(conmask${c1}_${c2}) 0 + diff --git a/nipype/interfaces/fsl/model_templates/feat_contrastmask_footer.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_footer.tcl new file mode 100644 index 0000000000..3c689901f9 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrastmask_footer.tcl @@ -0,0 +1,2 @@ +# Do contrast masking at all? +set fmri(conmask1_1) 0 diff --git a/nipype/interfaces/fsl/model_templates/feat_contrastmask_header.tcl b/nipype/interfaces/fsl/model_templates/feat_contrastmask_header.tcl new file mode 100644 index 0000000000..debd522273 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrastmask_header.tcl @@ -0,0 +1,2 @@ +# Contrast masking - use >0 instead of thresholding? +set fmri(conmask_zerothresh_yn) 0 diff --git a/nipype/interfaces/fsl/model_templates/feat_contrasts.tcl b/nipype/interfaces/fsl/model_templates/feat_contrasts.tcl new file mode 100644 index 0000000000..0d78360d57 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_contrasts.tcl @@ -0,0 +1,688 @@ +# Contrast & F-tests mode +# real : control real EVs +# orig : control original EVs +set fmri(con_mode_old) orig +set fmri(con_mode) orig + +### Needs iteration + +# Display images for contrast_real 1 +set fmri(conpic_real.1) 1 + +# Title for contrast_real 1 +set fmri(conname_real.1) "left>right" + +# Real contrast_real vector 1 element 1 +set fmri(con_real1.1) 1 + +# Real contrast_real vector 1 element 2 +set fmri(con_real1.2) -1.0 + +# Real contrast_real vector 1 element 3 +set fmri(con_real1.3) 1.0 + +# Real contrast_real vector 1 element 4 +set fmri(con_real1.4) -1.0 + +# Real contrast_real vector 1 element 5 +set fmri(con_real1.5) 1.0 + +# Real contrast_real vector 1 element 6 +set fmri(con_real1.6) -1.0 + +# Real contrast_real vector 1 element 7 +set fmri(con_real1.7) 1.0 + +# Real contrast_real vector 1 element 8 +set fmri(con_real1.8) -1.0 + +# Display images for contrast_real 2 +set fmri(conpic_real.2) 1 + +# Title for contrast_real 2 +set fmri(conname_real.2) "visual>vibe" + +# Real contrast_real vector 2 element 1 +set fmri(con_real2.1) -1.0 + +# Real contrast_real vector 2 element 2 +set fmri(con_real2.2) -1.0 + +# Real contrast_real vector 2 element 3 +set fmri(con_real2.3) -1.0 + +# Real contrast_real vector 2 element 4 +set fmri(con_real2.4) -1.0 + +# Real contrast_real vector 2 element 5 +set fmri(con_real2.5) 1.0 + +# Real contrast_real vector 2 element 6 +set fmri(con_real2.6) 1.0 + +# Real contrast_real vector 2 element 7 +set fmri(con_real2.7) 1.0 + +# Real contrast_real vector 2 element 8 +set fmri(con_real2.8) 1.0 + +# Display images for contrast_real 3 +set fmri(conpic_real.3) 1 + +# Title for contrast_real 3 +set fmri(conname_real.3) "seq>all" + +# Real contrast_real vector 3 element 1 +set fmri(con_real3.1) -1.0 + +# Real contrast_real vector 3 element 2 +set fmri(con_real3.2) -1.0 + +# Real contrast_real vector 3 element 3 +set fmri(con_real3.3) 1.0 + +# Real contrast_real vector 3 element 4 +set fmri(con_real3.4) 1.0 + +# Real contrast_real vector 3 element 5 +set fmri(con_real3.5) -1.0 + +# Real contrast_real vector 3 element 6 +set fmri(con_real3.6) -1.0 + +# 
Real contrast_real vector 3 element 7 +set fmri(con_real3.7) 1.0 + +# Real contrast_real vector 3 element 8 +set fmri(con_real3.8) 1.0 + +# Display images for contrast_real 4 +set fmri(conpic_real.4) 1 + +# Title for contrast_real 4 +set fmri(conname_real.4) "visual seq>all" + +# Real contrast_real vector 4 element 1 +set fmri(con_real4.1) 0 + +# Real contrast_real vector 4 element 2 +set fmri(con_real4.2) 0 + +# Real contrast_real vector 4 element 3 +set fmri(con_real4.3) 0 + +# Real contrast_real vector 4 element 4 +set fmri(con_real4.4) 0 + +# Real contrast_real vector 4 element 5 +set fmri(con_real4.5) -1.0 + +# Real contrast_real vector 4 element 6 +set fmri(con_real4.6) -1.0 + +# Real contrast_real vector 4 element 7 +set fmri(con_real4.7) 1.0 + +# Real contrast_real vector 4 element 8 +set fmri(con_real4.8) 1.0 + +# Display images for contrast_real 5 +set fmri(conpic_real.5) 1 + +# Title for contrast_real 5 +set fmri(conname_real.5) "vibe seq>all" + +# Real contrast_real vector 5 element 1 +set fmri(con_real5.1) -1.0 + +# Real contrast_real vector 5 element 2 +set fmri(con_real5.2) -1.0 + +# Real contrast_real vector 5 element 3 +set fmri(con_real5.3) 1.0 + +# Real contrast_real vector 5 element 4 +set fmri(con_real5.4) 1.0 + +# Real contrast_real vector 5 element 5 +set fmri(con_real5.5) 0 + +# Real contrast_real vector 5 element 6 +set fmri(con_real5.6) 0 + +# Real contrast_real vector 5 element 7 +set fmri(con_real5.7) 0 + +# Real contrast_real vector 5 element 8 +set fmri(con_real5.8) 0 + +# Display images for contrast_real 6 +set fmri(conpic_real.6) 1 + +# Title for contrast_real 6 +set fmri(conname_real.6) "visual seq>vibe seq" + +# Real contrast_real vector 6 element 1 +set fmri(con_real6.1) 0 + +# Real contrast_real vector 6 element 2 +set fmri(con_real6.2) 0 + +# Real contrast_real vector 6 element 3 +set fmri(con_real6.3) -1.0 + +# Real contrast_real vector 6 element 4 +set fmri(con_real6.4) -1.0 + +# Real contrast_real vector 6 element 5 +set fmri(con_real6.5) 0 + +# Real contrast_real vector 6 element 6 +set fmri(con_real6.6) 0 + +# Real contrast_real vector 6 element 7 +set fmri(con_real6.7) 1.0 + +# Real contrast_real vector 6 element 8 +set fmri(con_real6.8) 1.0 + +# Display images for contrast_real 7 +set fmri(conpic_real.7) 1 + +# Title for contrast_real 7 +set fmri(conname_real.7) "visual all>vibe all" + +# Real contrast_real vector 7 element 1 +set fmri(con_real7.1) -1.0 + +# Real contrast_real vector 7 element 2 +set fmri(con_real7.2) -1.0 + +# Real contrast_real vector 7 element 3 +set fmri(con_real7.3) 0 + +# Real contrast_real vector 7 element 4 +set fmri(con_real7.4) 0 + +# Real contrast_real vector 7 element 5 +set fmri(con_real7.5) 1.0 + +# Real contrast_real vector 7 element 6 +set fmri(con_real7.6) 1.0 + +# Real contrast_real vector 7 element 7 +set fmri(con_real7.7) 0 + +# Real contrast_real vector 7 element 8 +set fmri(con_real7.8) 0 + +# Display images for contrast_real 8 +set fmri(conpic_real.8) 1 + +# Title for contrast_real 8 +set fmri(conname_real.8) "mode x complexity" + +# Real contrast_real vector 8 element 1 +set fmri(con_real8.1) -1.0 + +# Real contrast_real vector 8 element 2 +set fmri(con_real8.2) -1.0 + +# Real contrast_real vector 8 element 3 +set fmri(con_real8.3) 1.0 + +# Real contrast_real vector 8 element 4 +set fmri(con_real8.4) 1.0 + +# Real contrast_real vector 8 element 5 +set fmri(con_real8.5) 1.0 + +# Real contrast_real vector 8 element 6 +set fmri(con_real8.6) 1.0 + +# Real contrast_real vector 8 element 7 +set fmri(con_real8.7) 
-1.0 + +# Real contrast_real vector 8 element 8 +set fmri(con_real8.8) -1.0 + +# Display images for contrast_orig 1 +set fmri(conpic_orig.1) 1 + +# Title for contrast_orig 1 +set fmri(conname_orig.1) "left>right" + +# Real contrast_orig vector 1 element 1 +set fmri(con_orig1.1) 1 + +# Real contrast_orig vector 1 element 2 +set fmri(con_orig1.2) -1.0 + +# Real contrast_orig vector 1 element 3 +set fmri(con_orig1.3) 1.0 + +# Real contrast_orig vector 1 element 4 +set fmri(con_orig1.4) -1.0 + +# Real contrast_orig vector 1 element 5 +set fmri(con_orig1.5) 1.0 + +# Real contrast_orig vector 1 element 6 +set fmri(con_orig1.6) -1.0 + +# Real contrast_orig vector 1 element 7 +set fmri(con_orig1.7) 1.0 + +# Real contrast_orig vector 1 element 8 +set fmri(con_orig1.8) -1.0 + +# Display images for contrast_orig 2 +set fmri(conpic_orig.2) 1 + +# Title for contrast_orig 2 +set fmri(conname_orig.2) "visual>vibe" + +# Real contrast_orig vector 2 element 1 +set fmri(con_orig2.1) -1.0 + +# Real contrast_orig vector 2 element 2 +set fmri(con_orig2.2) -1.0 + +# Real contrast_orig vector 2 element 3 +set fmri(con_orig2.3) -1.0 + +# Real contrast_orig vector 2 element 4 +set fmri(con_orig2.4) -1.0 + +# Real contrast_orig vector 2 element 5 +set fmri(con_orig2.5) 1.0 + +# Real contrast_orig vector 2 element 6 +set fmri(con_orig2.6) 1.0 + +# Real contrast_orig vector 2 element 7 +set fmri(con_orig2.7) 1.0 + +# Real contrast_orig vector 2 element 8 +set fmri(con_orig2.8) 1.0 + +# Display images for contrast_orig 3 +set fmri(conpic_orig.3) 1 + +# Title for contrast_orig 3 +set fmri(conname_orig.3) "seq>all" + +# Real contrast_orig vector 3 element 1 +set fmri(con_orig3.1) -1.0 + +# Real contrast_orig vector 3 element 2 +set fmri(con_orig3.2) -1.0 + +# Real contrast_orig vector 3 element 3 +set fmri(con_orig3.3) 1.0 + +# Real contrast_orig vector 3 element 4 +set fmri(con_orig3.4) 1.0 + +# Real contrast_orig vector 3 element 5 +set fmri(con_orig3.5) -1.0 + +# Real contrast_orig vector 3 element 6 +set fmri(con_orig3.6) -1.0 + +# Real contrast_orig vector 3 element 7 +set fmri(con_orig3.7) 1.0 + +# Real contrast_orig vector 3 element 8 +set fmri(con_orig3.8) 1.0 + +# Display images for contrast_orig 4 +set fmri(conpic_orig.4) 1 + +# Title for contrast_orig 4 +set fmri(conname_orig.4) "visual seq>all" + +# Real contrast_orig vector 4 element 1 +set fmri(con_orig4.1) 0 + +# Real contrast_orig vector 4 element 2 +set fmri(con_orig4.2) 0 + +# Real contrast_orig vector 4 element 3 +set fmri(con_orig4.3) 0 + +# Real contrast_orig vector 4 element 4 +set fmri(con_orig4.4) 0 + +# Real contrast_orig vector 4 element 5 +set fmri(con_orig4.5) -1.0 + +# Real contrast_orig vector 4 element 6 +set fmri(con_orig4.6) -1.0 + +# Real contrast_orig vector 4 element 7 +set fmri(con_orig4.7) 1.0 + +# Real contrast_orig vector 4 element 8 +set fmri(con_orig4.8) 1.0 + +# Display images for contrast_orig 5 +set fmri(conpic_orig.5) 1 + +# Title for contrast_orig 5 +set fmri(conname_orig.5) "vibe seq>all" + +# Real contrast_orig vector 5 element 1 +set fmri(con_orig5.1) -1.0 + +# Real contrast_orig vector 5 element 2 +set fmri(con_orig5.2) -1.0 + +# Real contrast_orig vector 5 element 3 +set fmri(con_orig5.3) 1.0 + +# Real contrast_orig vector 5 element 4 +set fmri(con_orig5.4) 1.0 + +# Real contrast_orig vector 5 element 5 +set fmri(con_orig5.5) 0 + +# Real contrast_orig vector 5 element 6 +set fmri(con_orig5.6) 0 + +# Real contrast_orig vector 5 element 7 +set fmri(con_orig5.7) 0 + +# Real contrast_orig vector 5 element 8 +set 
fmri(con_orig5.8) 0 + +# Display images for contrast_orig 6 +set fmri(conpic_orig.6) 1 + +# Title for contrast_orig 6 +set fmri(conname_orig.6) "visual seq>vibe seq" + +# Real contrast_orig vector 6 element 1 +set fmri(con_orig6.1) 0 + +# Real contrast_orig vector 6 element 2 +set fmri(con_orig6.2) 0 + +# Real contrast_orig vector 6 element 3 +set fmri(con_orig6.3) -1.0 + +# Real contrast_orig vector 6 element 4 +set fmri(con_orig6.4) -1.0 + +# Real contrast_orig vector 6 element 5 +set fmri(con_orig6.5) 0 + +# Real contrast_orig vector 6 element 6 +set fmri(con_orig6.6) 0 + +# Real contrast_orig vector 6 element 7 +set fmri(con_orig6.7) 1.0 + +# Real contrast_orig vector 6 element 8 +set fmri(con_orig6.8) 1.0 + +# Display images for contrast_orig 7 +set fmri(conpic_orig.7) 1 + +# Title for contrast_orig 7 +set fmri(conname_orig.7) "visual all>vibe all" + +# Real contrast_orig vector 7 element 1 +set fmri(con_orig7.1) -1.0 + +# Real contrast_orig vector 7 element 2 +set fmri(con_orig7.2) -1.0 + +# Real contrast_orig vector 7 element 3 +set fmri(con_orig7.3) 0 + +# Real contrast_orig vector 7 element 4 +set fmri(con_orig7.4) 0 + +# Real contrast_orig vector 7 element 5 +set fmri(con_orig7.5) 1.0 + +# Real contrast_orig vector 7 element 6 +set fmri(con_orig7.6) 1.0 + +# Real contrast_orig vector 7 element 7 +set fmri(con_orig7.7) 0 + +# Real contrast_orig vector 7 element 8 +set fmri(con_orig7.8) 0 + +# Display images for contrast_orig 8 +set fmri(conpic_orig.8) 1 + +# Title for contrast_orig 8 +set fmri(conname_orig.8) "mode x complexity" + +# Real contrast_orig vector 8 element 1 +set fmri(con_orig8.1) -1.0 + +# Real contrast_orig vector 8 element 2 +set fmri(con_orig8.2) -1.0 + +# Real contrast_orig vector 8 element 3 +set fmri(con_orig8.3) 1.0 + +# Real contrast_orig vector 8 element 4 +set fmri(con_orig8.4) 1.0 + +# Real contrast_orig vector 8 element 5 +set fmri(con_orig8.5) 1.0 + +# Real contrast_orig vector 8 element 6 +set fmri(con_orig8.6) 1.0 + +# Real contrast_orig vector 8 element 7 +set fmri(con_orig8.7) -1.0 + +# Real contrast_orig vector 8 element 8 +set fmri(con_orig8.8) -1.0 + +### This is fixed + +# Contrast masking - use >0 instead of thresholding? +set fmri(conmask_zerothresh_yn) 0 + +### These are set for the full combo of contrasts - needs iteration + +# Mask real contrast/F-test 1 with real contrast/F-test 2? +set fmri(conmask1_2) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 3? +set fmri(conmask1_3) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 4? +set fmri(conmask1_4) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 5? +set fmri(conmask1_5) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 6? +set fmri(conmask1_6) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 7? +set fmri(conmask1_7) 0 + +# Mask real contrast/F-test 1 with real contrast/F-test 8? +set fmri(conmask1_8) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 1? +set fmri(conmask2_1) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 3? +set fmri(conmask2_3) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 4? +set fmri(conmask2_4) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 5? +set fmri(conmask2_5) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 6? +set fmri(conmask2_6) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 7? +set fmri(conmask2_7) 0 + +# Mask real contrast/F-test 2 with real contrast/F-test 8? 
+set fmri(conmask2_8) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 1? +set fmri(conmask3_1) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 2? +set fmri(conmask3_2) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 4? +set fmri(conmask3_4) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 5? +set fmri(conmask3_5) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 6? +set fmri(conmask3_6) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 7? +set fmri(conmask3_7) 0 + +# Mask real contrast/F-test 3 with real contrast/F-test 8? +set fmri(conmask3_8) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 1? +set fmri(conmask4_1) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 2? +set fmri(conmask4_2) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 3? +set fmri(conmask4_3) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 5? +set fmri(conmask4_5) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 6? +set fmri(conmask4_6) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 7? +set fmri(conmask4_7) 0 + +# Mask real contrast/F-test 4 with real contrast/F-test 8? +set fmri(conmask4_8) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 1? +set fmri(conmask5_1) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 2? +set fmri(conmask5_2) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 3? +set fmri(conmask5_3) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 4? +set fmri(conmask5_4) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 6? +set fmri(conmask5_6) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 7? +set fmri(conmask5_7) 0 + +# Mask real contrast/F-test 5 with real contrast/F-test 8? +set fmri(conmask5_8) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 1? +set fmri(conmask6_1) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 2? +set fmri(conmask6_2) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 3? +set fmri(conmask6_3) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 4? +set fmri(conmask6_4) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 5? +set fmri(conmask6_5) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 7? +set fmri(conmask6_7) 0 + +# Mask real contrast/F-test 6 with real contrast/F-test 8? +set fmri(conmask6_8) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 1? +set fmri(conmask7_1) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 2? +set fmri(conmask7_2) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 3? +set fmri(conmask7_3) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 4? +set fmri(conmask7_4) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 5? +set fmri(conmask7_5) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 6? +set fmri(conmask7_6) 0 + +# Mask real contrast/F-test 7 with real contrast/F-test 8? +set fmri(conmask7_8) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 1? +set fmri(conmask8_1) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 2? +set fmri(conmask8_2) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 3? +set fmri(conmask8_3) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 4? +set fmri(conmask8_4) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 5? +set fmri(conmask8_5) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 6? 
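
An aside on this conmask block: the template writes out every ordered pair by hand, and the note above ("These are set for the full combo of contrasts - needs iteration") says as much. A minimal Python sketch of that iteration follows; the helper name `conmask_block` and the hard-coded 0 (masking disabled) are illustrative, not nipype API.

    # Emit the conmaskA_B setting for every ordered pair of contrasts/F-tests,
    # mirroring the hand-written 8-contrast block in this template.
    from itertools import permutations

    def conmask_block(n_contrasts):
        lines = []
        for a, b in permutations(range(1, n_contrasts + 1), 2):
            lines.append('# Mask real contrast/F-test %d with real '
                         'contrast/F-test %d?' % (a, b))
            lines.append('set fmri(conmask%d_%d) 0\n' % (a, b))
        return '\n'.join(lines)

    print(conmask_block(8))  # reproduces the 56 (8 x 7) pair settings shown here
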
+set fmri(conmask8_6) 0 + +# Mask real contrast/F-test 8 with real contrast/F-test 7? +set fmri(conmask8_7) 0 + +### The rest is just fixed + +# Do contrast masking at all? +set fmri(conmask1_1) 0 + +# Now options that don't appear in the GUI + +# Alternative example_func image (not derived from input 4D dataset) +set fmri(alternative_example_func) "" + +# Alternative (to BETting) mask image +set fmri(alternative_mask) "" + +# Initial structural space registration initialisation transform +set fmri(init_initial_highres) "" + +# Structural space registration initialisation transform +set fmri(init_highres) "" + +# Standard space registration initialisation transform +set fmri(init_standard) "" + + +# For full FEAT analysis: overwrite existing .feat output dir? +set fmri(overwrite_yn) 1 diff --git a/nipype/interfaces/fsl/model_templates/feat_ev_custom.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_custom.tcl new file mode 100644 index 0000000000..a4d3ec80df --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_ev_custom.tcl @@ -0,0 +1,40 @@ +# EV title +set fmri(evtitle$ev_num) "$ev_name" + +# Basic waveform shape (EV $ev_num) +# 0 : Square +# 1 : Sinusoid +# 2 : Custom (1 entry per volume) +# 3 : Custom (3 column format) +# 4 : Interaction +# 10 : Empty (all zeros) +set fmri(shape$ev_num) 3 + +# Convolution (EV $ev_num) +# 0 : None +# 1 : Gaussian +# 2 : Gamma +# 3 : Double-Gamma HRF +# 4 : Gamma basis functions +# 5 : Sine basis functions +# 6 : FIR basis functions +# 7 : Optimal/custom basis functions +set fmri(convolve$ev_num) 7 + +# Convolve phase (EV $ev_num) +set fmri(convolve_phase$ev_num) 0 + +# Apply temporal filtering (EV $ev_num) +set fmri(tempfilt_yn$ev_num) $tempfilt_yn + +# Add temporal derivative (EV $ev_num) +set fmri(deriv_yn$ev_num) $temporalderiv + +# Custom EV file (EV $ev_num) +set fmri(custom$ev_num) "$cond_file" + +# Optimal/custom HRF convolution file +set fmri(default_bfcustom) "${fsldir}/etc/default_flobs.flobs/hrfbasisfns.txt" +set fmri(basisorth$ev_num) $basisorth +set fmri(basisfnum$ev_num) $basisfnum +set fmri(bfcustom$ev_num) "$bfcustompath" diff --git a/nipype/interfaces/fsl/model_templates/feat_ev_gamma.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_gamma.tcl new file mode 100644 index 0000000000..321cc6f830 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_ev_gamma.tcl @@ -0,0 +1,40 @@ +# EV title +set fmri(evtitle$ev_num) "$ev_name" + +# Basic waveform shape +# 0 : Square +# 1 : Sinusoid +# 2 : Custom (1 entry per volume) +# 3 : Custom (3 column format) +# 4 : Interaction +# 10 : Empty (all zeros) +set fmri(shape$ev_num) 3 + +# Convolution +# 0 : None +# 1 : Gaussian +# 2 : Gamma +# 3 : Double-Gamma HRF +# 4 : Gamma basis functions +# 5 : Sine basis functions +# 6 : FIR basis functions +# 7 : Optimal/custom basis functions +set fmri(convolve$ev_num) 2 + +# Convolve phase +set fmri(convolve_phase$ev_num) 0 + +# Apply temporal filtering +set fmri(tempfilt_yn$ev_num) 1 + +# Add temporal derivative +set fmri(deriv_yn$ev_num) $temporalderiv + +# Custom EV file +set fmri(custom$ev_num) "$cond_file" + +# Gamma sigma +set fmri(gammasigma$ev_num) $gammasigma + +# Gamma delay +set fmri(gammadelay$ev_num) $gammadelay diff --git a/nipype/interfaces/fsl/model_templates/feat_ev_hrf.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_hrf.tcl new file mode 100644 index 0000000000..a2112a40b6 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_ev_hrf.tcl @@ -0,0 +1,34 @@ +# EV title +set fmri(evtitle$ev_num) "$ev_name" + +# Basic 
waveform shape (EV $ev_num) +# 0 : Square +# 1 : Sinusoid +# 2 : Custom (1 entry per volume) +# 3 : Custom (3 column format) +# 4 : Interaction +# 10 : Empty (all zeros) +set fmri(shape$ev_num) 3 + +# Convolution (EV $ev_num) +# 0 : None +# 1 : Gaussian +# 2 : Gamma +# 3 : Double-Gamma HRF +# 4 : Gamma basis functions +# 5 : Sine basis functions +# 6 : FIR basis functions +# 7 : Optimal/custom basis functions +set fmri(convolve$ev_num) 3 + +# Convolve phase (EV $ev_num) +set fmri(convolve_phase$ev_num) 0 + +# Apply temporal filtering (EV $ev_num) +set fmri(tempfilt_yn$ev_num) $tempfilt_yn + +# Add temporal derivative (EV $ev_num) +set fmri(deriv_yn$ev_num) $temporalderiv + +# Custom EV file (EV $ev_num) +set fmri(custom$ev_num) "$cond_file" diff --git a/nipype/interfaces/fsl/model_templates/feat_ev_none.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_none.tcl new file mode 100644 index 0000000000..2a7d3ecff6 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_ev_none.tcl @@ -0,0 +1,31 @@ +# EV title +set fmri(evtitle$ev_num) "$ev_name" + +# Basic waveform shape +# 0 : Square +# 1 : Sinusoid +# 2 : Custom (1 entry per volume) +# 3 : Custom (3 column format) +# 4 : Interaction +# 10 : Empty (all zeros) +set fmri(shape$ev_num) 2 + +# Convolution +# 0 : None +# 1 : Gaussian +# 2 : Gamma +# 3 : Double-Gamma HRF +# 4 : Gamma basis functions +# 5 : Sine basis functions +# 6 : FIR basis functions +# 7 : Optimal/custom basis functions +set fmri(convolve$ev_num) 0 + +# Apply temporal filtering +set fmri(tempfilt_yn$ev_num) $tempfilt_yn + +# Add temporal derivative +set fmri(deriv_yn$ev_num) 0 + +# Custom EV file +set fmri(custom$ev_num) "$cond_file" diff --git a/nipype/interfaces/fsl/model_templates/feat_ev_ortho.tcl b/nipype/interfaces/fsl/model_templates/feat_ev_ortho.tcl new file mode 100644 index 0000000000..f2b0912cdb --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_ev_ortho.tcl @@ -0,0 +1,2 @@ +# Orthogonalise EV $c0 wrt EV $c1 +set fmri(ortho$c0.$c1) $orthogonal diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_copes.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_copes.tcl new file mode 100644 index 0000000000..914d2ec4ff --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_copes.tcl @@ -0,0 +1,3 @@ +# Use lower-level cope $copeno for higher-level analysis +set fmri(copeinput.${copeno}) 1 + diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_ev_element.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_ev_element.tcl new file mode 100644 index 0000000000..d63d076411 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_ev_element.tcl @@ -0,0 +1,5 @@ +# Higher-level EV value for EV 1 and input $input +set fmri(evg${input}.1) 1 + +# Group membership for input $input +set fmri(groupmem.${input}) 1 diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_ev_header.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_ev_header.tcl new file mode 100644 index 0000000000..c83f8f442f --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_ev_header.tcl @@ -0,0 +1,45 @@ +# Add confound EVs text file +set fmri(confoundevs) 0 + +# EV 1 title +set fmri(evtitle1) "" + +# Basic waveform shape (EV 1) +# 0 : Square +# 1 : Sinusoid +# 2 : Custom (1 entry per volume) +# 3 : Custom (3 column format) +# 4 : Interaction +# 10 : Empty (all zeros) +set fmri(shape1) 2 + +# Convolution (EV 1) +# 0 : None +# 1 : Gaussian +# 2 : Gamma +# 3 : Double-Gamma HRF +# 4 : Gamma basis functions +# 5 : Sine basis functions +# 6 : FIR basis 
functions +set fmri(convolve1) 0 + +# Convolve phase (EV 1) +set fmri(convolve_phase1) 0 + +# Apply temporal filtering (EV 1) +set fmri(tempfilt_yn1) 0 + +# Add temporal derivative (EV 1) +set fmri(deriv_yn1) 0 + +# Custom EV file (EV 1) +set fmri(custom1) "dummy" + +# Orthogonalise EV 1 wrt EV 0 +set fmri(ortho1.0) 0 + +# Orthogonalise EV 1 wrt EV 1 +set fmri(ortho1.1) 0 + + + diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_featdirs.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_featdirs.tcl new file mode 100644 index 0000000000..d2d72f78ed --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_featdirs.tcl @@ -0,0 +1,3 @@ +# 4D AVW data or FEAT directory ($runno) +set feat_files($runno) "${rundir}" + diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_footer.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_footer.tcl new file mode 100644 index 0000000000..322cca519f --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_footer.tcl @@ -0,0 +1,41 @@ +# Contrast & F-tests mode +# real : control real EVs +# orig : control original EVs +set fmri(con_mode_old) real +set fmri(con_mode) real + +# Display images for contrast_real 1 +set fmri(conpic_real.1) 1 + +# Title for contrast_real 1 +set fmri(conname_real.1) "group mean" + +# Real contrast_real vector 1 element 1 +set fmri(con_real1.1) 1 + +# Contrast masking - use >0 instead of thresholding? +set fmri(conmask_zerothresh_yn) 0 + +# Do contrast masking at all? +set fmri(conmask1_1) 0 + +########################################################## +# Now options that don't appear in the GUI + +# Alternative example_func image (not derived from input 4D dataset) +set fmri(alternative_example_func) "" + +# Alternative (to BETting) mask image +set fmri(alternative_mask) "" + +# Initial structural space registration initialisation transform +set fmri(init_initial_highres) "" + +# Structural space registration initialisation transform +set fmri(init_highres) "" + +# Standard space registration initialisation transform +set fmri(init_standard) "" + +# For full FEAT analysis: overwrite existing .feat output dir? +set fmri(overwrite_yn) $overwrite diff --git a/nipype/interfaces/fsl/model_templates/feat_fe_header.tcl b/nipype/interfaces/fsl/model_templates/feat_fe_header.tcl new file mode 100644 index 0000000000..4d4d1939fa --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_fe_header.tcl @@ -0,0 +1,269 @@ +# FEAT version number +set fmri(version) 5.98 + +# Are we in MELODIC? 
+set fmri(inmelodic) 0 + +# Analysis level +# 1 : First-level analysis +# 2 : Higher-level analysis +set fmri(level) 2 + +# Which stages to run +# 0 : No first-level analysis (registration and/or group stats only) +# 7 : Full first-level analysis +# 1 : Pre-Stats +# 3 : Pre-Stats + Stats +# 2 : Stats +# 6 : Stats + Post-stats +# 4 : Post-stats +set fmri(analysis) 6 + +# Use relative filenames +set fmri(relative_yn) 0 + +# Balloon help +set fmri(help_yn) 1 + +# Run Featwatcher +set fmri(featwatcher_yn) 1 + +# Cleanup first-level standard-space images +set fmri(sscleanup_yn) 0 + +# Output directory +set fmri(outputdir) "./output" + +# TR(s) +set fmri(tr) 3 + +# Total volumes +set fmri(npts) ${num_runs} + +# Delete volumes +set fmri(ndelete) 0 + +# Perfusion tag/control order +set fmri(tagfirst) 1 + +# Number of first-level analyses +set fmri(multiple) ${num_runs} + +# Higher-level input type +# 1 : Inputs are lower-level FEAT directories +# 2 : Inputs are cope images from FEAT directories +set fmri(inputtype) 1 + +# Carry out pre-stats processing? +set fmri(filtering_yn) 0 + +# Brain/background threshold, % +set fmri(brain_thresh) 10 + +# Critical z for design efficiency calculation +set fmri(critical_z) 5.3 + +# Noise level +set fmri(noise) 0.66 + +# Noise AR(1) +set fmri(noisear) 0.34 + +# Post-stats-only directory copying +# 0 : Overwrite original post-stats results +# 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering +set fmri(newdir_yn) 0 + +# Motion correction +# 0 : None +# 1 : MCFLIRT +set fmri(mc) 1 + +# Spin-history (currently obsolete) +set fmri(sh_yn) 0 + +# B0 fieldmap unwarping? +set fmri(regunwarp_yn) 0 + +# EPI dwell time (ms) +set fmri(dwell) 0.7 + +# EPI TE (ms) +set fmri(te) 35 + +# % Signal loss threshold +set fmri(signallossthresh) 10 + +# Unwarp direction +set fmri(unwarp_dir) y- + +# Slice timing correction +# 0 : None +# 1 : Regular up (0, 1, 2, 3, ...) +# 2 : Regular down +# 3 : Use slice order file +# 4 : Use slice timings file +# 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) +set fmri(st) 0 + +# Slice timings file +set fmri(st_file) "" + +# BET brain extraction +set fmri(bet_yn) 1 + +# Spatial smoothing FWHM (mm) +set fmri(smooth) 5 + +# Intensity normalization +set fmri(norm_yn) 0 + +# Perfusion subtraction +set fmri(perfsub_yn) 0 + +# Highpass temporal filtering +set fmri(temphp_yn) 1 + +# Lowpass temporal filtering +set fmri(templp_yn) 0 + +# MELODIC ICA data exploration +set fmri(melodic_yn) 0 + +# Carry out main stats? +set fmri(stats_yn) 1 + +# Carry out prewhitening? +set fmri(prewhiten_yn) 1 + +# Add motion parameters to model +# 0 : No +# 1 : Yes +set fmri(motionevs) 0 + +# Robust outlier detection in FLAME? +set fmri(robust_yn) 0 + +# Higher-level modelling +# 3 : Fixed effects +# 0 : Mixed Effects: Simple OLS +# 2 : Mixed Effects: FLAME 1 +# 1 : Mixed Effects: FLAME 1+2 +set fmri(mixed_yn) 3 + +# Number of EVs +set fmri(evs_orig) 1 +set fmri(evs_real) 1 +set fmri(evs_vox) 0 + +# Number of contrasts +set fmri(ncon_orig) 1 +set fmri(ncon_real) 1 + +# Number of F-tests +set fmri(nftests_orig) 0 +set fmri(nftests_real) 0 + +# Add constant column to design matrix? (obsolete) +set fmri(constcol) 0 + +# Carry out post-stats steps? +set fmri(poststats_yn) 1 + +# Pre-threshold masking? 
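
A note on the "Which stages to run" encoding that recurs in these headers: the listed values combine as bit flags (1 = Pre-Stats, 2 = Stats, 4 = Post-stats), which is why 3 selects Pre-Stats + Stats, 6 selects Stats + Post-stats, and 7 is the full first-level analysis. A minimal sketch decoding the field; the flag reading is inferred from the combinations enumerated in the template comments, and the helper name is illustrative, not nipype API.

    # Decode FEAT's fmri(analysis) value as a bitmask of processing stages.
    STAGES = {1: 'Pre-Stats', 2: 'Stats', 4: 'Post-stats'}

    def decode_analysis(value):
        return [name for bit, name in STAGES.items() if value & bit]

    assert decode_analysis(6) == ['Stats', 'Post-stats']
    assert decode_analysis(7) == ['Pre-Stats', 'Stats', 'Post-stats']
    assert decode_analysis(0) == []  # registration and/or group stats only
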
+set fmri(threshmask) "" + +# Thresholding +# 0 : None +# 1 : Uncorrected +# 2 : Voxel +# 3 : Cluster +set fmri(thresh) 3 + +# P threshold +set fmri(prob_thresh) 0.05 + +# Z threshold +set fmri(z_thresh) 2.3 + +# Z min/max for colour rendering +# 0 : Use actual Z min/max +# 1 : Use preset Z min/max +set fmri(zdisplay) 0 + +# Z min in colour rendering +set fmri(zmin) 2 + +# Z max in colour rendering +set fmri(zmax) 8 + +# Colour rendering type +# 0 : Solid blobs +# 1 : Transparent blobs +set fmri(rendertype) 1 + +# Background image for higher-level stats overlays +# 1 : Mean highres +# 2 : First highres +# 3 : Mean functional +# 4 : First functional +# 5 : Standard space template +set fmri(bgimage) 1 + +# Create time series plots +set fmri(tsplot_yn) 1 + +# Registration? +set fmri(reg_yn) 0 + +# Registration to initial structural +set fmri(reginitial_highres_yn) 0 + +# Search space for registration to initial structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reginitial_highres_search) 90 + +# Degrees of Freedom for registration to initial structural +set fmri(reginitial_highres_dof) 3 + +# Registration to main structural +set fmri(reghighres_yn) 0 + +# Search space for registration to main structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reghighres_search) 90 + +# Degrees of Freedom for registration to main structural +set fmri(reghighres_dof) 6 + +# Registration to standard image? +set fmri(regstandard_yn) 0 + +# Standard image +set fmri(regstandard) "regimage" + +# Search space for registration to standard space +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(regstandard_search) 90 + +# Degrees of Freedom for registration to standard space +set fmri(regstandard_dof) 12 + +# Do nonlinear registration from structural to standard space? +set fmri(regstandard_nonlinear_yn) 0 + +# Control nonlinear warp field resolution +set fmri(regstandard_nonlinear_warpres) 10 + +# High pass filter cutoff +set fmri(paradigm_hp) 100 + +# Number of lower-level copes feeding into higher-level analysis +set fmri(ncopeinputs) ${num_copes} + diff --git a/nipype/interfaces/fsl/model_templates/feat_header.tcl b/nipype/interfaces/fsl/model_templates/feat_header.tcl new file mode 100644 index 0000000000..806d50c517 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_header.tcl @@ -0,0 +1,271 @@ +# FEAT version number +set fmri(version) 5.98 + +# Are we in MELODIC? +set fmri(inmelodic) 0 + +# Analysis level +# 1 : First-level analysis +# 2 : Higher-level analysis +set fmri(level) 1 + +# Which stages to run +# 0 : No first-level analysis (registration and/or group stats only) +# 7 : Full first-level analysis +# 1 : Pre-Stats +# 3 : Pre-Stats + Stats +# 2 : Stats +# 6 : Stats + Post-stats +# 4 : Post-stats +set fmri(analysis) $analysis_stages + +# Use relative filenames +set fmri(relative_yn) 0 + +# Balloon help +set fmri(help_yn) 1 + +# Run Featwatcher +set fmri(featwatcher_yn) 0 + +# Cleanup first-level standard-space images +set fmri(sscleanup_yn) 0 + +# Output directory +set fmri(outputdir) "scan$scan_num" + +# TR(s) +set fmri(tr) 2.0 + +# Total volumes +set fmri(npts) $num_vols + +# Delete volumes +set fmri(ndelete) 0 + +# Perfusion tag/control order +set fmri(tagfirst) 1 + +# Number of first-level analyses +set fmri(multiple) 1 + +# Higher-level input type +# 1 : Inputs are lower-level FEAT directories +# 2 : Inputs are cope images from FEAT directories +set fmri(inputtype) 1 + +# Carry out pre-stats processing? 
+set fmri(filtering_yn) 0 + +# Brain/background threshold, +set fmri(brain_thresh) 10 + +# Critical z for design efficiency calculation +set fmri(critical_z) 5.3 + +# Noise level +set fmri(noise) 0.66 + +# Noise AR(1) +set fmri(noisear) 0.34 + +# Post-stats-only directory copying +# 0 : Overwrite original post-stats results +# 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering +set fmri(newdir_yn) 0 + +# Motion correction +# 0 : None +# 1 : MCFLIRT +set fmri(mc) 0 + +# Spin-history (currently obsolete) +set fmri(sh_yn) 0 + +# B0 fieldmap unwarping? +set fmri(regunwarp_yn) 0 + +# EPI dwell time (ms) +set fmri(dwell) 0.7 + +# EPI TE (ms) +set fmri(te) 35 + +# Signal loss threshold +set fmri(signallossthresh) 10 + +# Unwarp direction +set fmri(unwarp_dir) y- + +# Slice timing correction +# 0 : None +# 1 : Regular up (0, 1, 2, 3, ...) +# 2 : Regular down +# 3 : Use slice order file +# 4 : Use slice timings file +# 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) +set fmri(st) 0 + +# Slice timings file +set fmri(st_file) "" + +# BET brain extraction +set fmri(bet_yn) 0 + +# Spatial smoothing FWHM (mm) +set fmri(smooth) 5 + +# Intensity normalization +set fmri(norm_yn) 0 + +# Perfusion subtraction +set fmri(perfsub_yn) 0 + +# Highpass temporal filtering +set fmri(temphp_yn) 1 + +# Lowpass temporal filtering +set fmri(templp_yn) 0 + +# MELODIC ICA data exploration +set fmri(melodic_yn) 0 + +# Carry out main stats? +set fmri(stats_yn) 1 + +# Carry out prewhitening? +set fmri(prewhiten_yn) 1 + +# Add motion parameters to model +# 0 : No +# 1 : Yes +set fmri(motionevs) 0 + +# Robust outlier detection in FLAME? +set fmri(robust_yn) 0 + +# Higher-level modelling +# 3 : Fixed effects +# 0 : Mixed Effects: Simple OLS +# 2 : Mixed Effects: FLAME 1 +# 1 : Mixed Effects: FLAME 1+2 +set fmri(mixed_yn) 2 + +# Number of EVs +set fmri(evs_orig) $num_evs +set fmri(evs_real) $num_evs +set fmri(evs_vox) 0 + +# Number of contrasts +set fmri(ncon_orig) $num_contrasts +set fmri(ncon_real) $num_contrasts + +# Number of F-tests +set fmri(nftests_orig) 0 +set fmri(nftests_real) 0 + +# Add constant column to design matrix? (obsolete) +set fmri(constcol) 0 + +# Carry out post-stats steps? +set fmri(poststats_yn) $do_contrasts + +# Pre-threshold masking? +set fmri(threshmask) "" + +# Thresholding +# 0 : None +# 1 : Uncorrected +# 2 : Voxel +# 3 : Cluster +set fmri(thresh) 3 + +# P threshold +set fmri(prob_thresh) 0.05 + +# Z threshold +set fmri(z_thresh) 2.3 + +# Z min/max for colour rendering +# 0 : Use actual Z min/max +# 1 : Use preset Z min/max +set fmri(zdisplay) 0 + +# Z min in colour rendering +set fmri(zmin) 2 + +# Z max in colour rendering +set fmri(zmax) 8 + +# Colour rendering type +# 0 : Solid blobs +# 1 : Transparent blobs +set fmri(rendertype) 1 + +# Background image for higher-level stats overlays +# 1 : Mean highres +# 2 : First highres +# 3 : Mean functional +# 4 : First functional +# 5 : Standard space template +set fmri(bgimage) 1 + +# Create time series plots +set fmri(tsplot_yn) 1 + +#Registration? 
+set fmri(reg_yn) 0 + +# Registration to initial structural +set fmri(reginitial_highres_yn) 0 + +# Search space for registration to initial structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reginitial_highres_search) 90 + +# Degrees of Freedom for registration to initial structural +set fmri(reginitial_highres_dof) 3 + +# Registration to main structural +set fmri(reghighres_yn) 0 + +# Search space for registration to main structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reghighres_search) 90 + +# Degrees of Freedom for registration to main structural +set fmri(reghighres_dof) 6 + +# Registration to standard image? +set fmri(regstandard_yn) 0 + +# Standard image +set fmri(regstandard) "standard_image" + +# Search space for registration to standard space +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(regstandard_search) 90 + +# Degrees of Freedom for registration to standard space +set fmri(regstandard_dof) 12 + +# Do nonlinear registration from structural to standard space? +set fmri(regstandard_nonlinear_yn) 0 + +# Control nonlinear warp field resolution +set fmri(regstandard_nonlinear_warpres) 10 + +# High pass filter cutoff +set fmri(paradigm_hp) 100 + +# 4D AVW data or FEAT directory (1) +set feat_files(1) "$func_file" + +# Subject's structural for analysis 1 +set highres_files(1) "$struct_file" diff --git a/nipype/interfaces/fsl/model_templates/feat_header_l1.tcl b/nipype/interfaces/fsl/model_templates/feat_header_l1.tcl new file mode 100644 index 0000000000..fc63166cd5 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_header_l1.tcl @@ -0,0 +1,271 @@ +# FEAT version number +set fmri(version) 5.98 + +# Are we in MELODIC? +set fmri(inmelodic) 0 + +# Analysis level +# 1 : First-level analysis +# 2 : Higher-level analysis +set fmri(level) 1 + +# Which stages to run +# 0 : No first-level analysis (registration and/or group stats only) +# 7 : Full first-level analysis +# 1 : Pre-Stats +# 3 : Pre-Stats + Stats +# 2 : Stats +# 6 : Stats + Post-stats +# 4 : Post-stats +set fmri(analysis) 6 + +# Use relative filenames +set fmri(relative_yn) 0 + +# Balloon help +set fmri(help_yn) 1 + +# Run Featwatcher +set fmri(featwatcher_yn) 0 + +# Cleanup first-level standard-space images +set fmri(sscleanup_yn) 0 + +# Output directory +set fmri(outputdir) "run$run_num" + +# TR(s) +set fmri(tr) $interscan_interval + +# Total volumes +set fmri(npts) $num_vols + +# Delete volumes +set fmri(ndelete) 0 + +# Perfusion tag/control order +set fmri(tagfirst) 1 + +# Number of first-level analyses +set fmri(multiple) 1 + +# Higher-level input type +# 1 : Inputs are lower-level FEAT directories +# 2 : Inputs are cope images from FEAT directories +set fmri(inputtype) 2 + +# Carry out pre-stats processing? +set fmri(filtering_yn) 0 + +# Brain/background threshold, +set fmri(brain_thresh) 10 + +# Critical z for design efficiency calculation +set fmri(critical_z) 5.3 + +# Noise level +set fmri(noise) 0.66 + +# Noise AR(1) +set fmri(noisear) 0.34 + +# Post-stats-only directory copying +# 0 : Overwrite original post-stats results +# 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering +set fmri(newdir_yn) 0 + +# Motion correction +# 0 : None +# 1 : MCFLIRT +set fmri(mc) 0 + +# Spin-history (currently obsolete) +set fmri(sh_yn) 0 + +# B0 fieldmap unwarping? 
+set fmri(regunwarp_yn) 0 + +# EPI dwell time (ms) +set fmri(dwell) 0.7 + +# EPI TE (ms) +set fmri(te) 35 + +# Signal loss threshold +set fmri(signallossthresh) 10 + +# Unwarp direction +set fmri(unwarp_dir) y- + +# Slice timing correction +# 0 : None +# 1 : Regular up (0, 1, 2, 3, ...) +# 2 : Regular down +# 3 : Use slice order file +# 4 : Use slice timings file +# 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) +set fmri(st) 0 + +# Slice timings file +set fmri(st_file) "" + +# BET brain extraction +set fmri(bet_yn) 0 + +# Spatial smoothing FWHM (mm) +set fmri(smooth) 0 + +# Intensity normalization +set fmri(norm_yn) 0 + +# Perfusion subtraction +set fmri(perfsub_yn) 0 + +# Highpass temporal filtering +set fmri(temphp_yn) $temphp_yn + +# Lowpass temporal filtering +set fmri(templp_yn) 0 + +# MELODIC ICA data exploration +set fmri(melodic_yn) 0 + +# Carry out main stats? +set fmri(stats_yn) 1 + +# Carry out prewhitening? +set fmri(prewhiten_yn) $prewhiten + +# Add motion parameters to model +# 0 : No +# 1 : Yes +set fmri(motionevs) 0 + +# Robust outlier detection in FLAME? +set fmri(robust_yn) 0 + +# Higher-level modelling +# 3 : Fixed effects +# 0 : Mixed Effects: Simple OLS +# 2 : Mixed Effects: FLAME 1 +# 1 : Mixed Effects: FLAME 1+2 +set fmri(mixed_yn) 2 + +# Number of EVs +set fmri(evs_orig) $num_evs +set fmri(evs_real) $num_evs_real +set fmri(evs_vox) 0 + +# Number of contrasts +set fmri(ncon_orig) $num_tcon +set fmri(ncon_real) $num_tcon + +# Number of F-tests +set fmri(nftests_orig) $num_fcon +set fmri(nftests_real) $num_fcon + +# Add constant column to design matrix? (obsolete) +set fmri(constcol) 0 + +# Carry out post-stats steps? +set fmri(poststats_yn) 1 + +# Pre-threshold masking? +set fmri(threshmask) "" + +# Thresholding +# 0 : None +# 1 : Uncorrected +# 2 : Voxel +# 3 : Cluster +set fmri(thresh) 3 + +# P threshold +set fmri(prob_thresh) 0.05 + +# Z threshold +set fmri(z_thresh) 2.3 + +# Z min/max for colour rendering +# 0 : Use actual Z min/max +# 1 : Use preset Z min/max +set fmri(zdisplay) 0 + +# Z min in colour rendering +set fmri(zmin) 2 + +# Z max in colour rendering +set fmri(zmax) 8 + +# Colour rendering type +# 0 : Solid blobs +# 1 : Transparent blobs +set fmri(rendertype) 1 + +# Background image for higher-level stats overlays +# 1 : Mean highres +# 2 : First highres +# 3 : Mean functional +# 4 : First functional +# 5 : Standard space template +set fmri(bgimage) 1 + +# Create time series plots +set fmri(tsplot_yn) 1 + +#Registration? +set fmri(reg_yn) 0 + +# Registration to initial structural +set fmri(reginitial_highres_yn) 0 + +# Search space for registration to initial structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reginitial_highres_search) 90 + +# Degrees of Freedom for registration to initial structural +set fmri(reginitial_highres_dof) 3 + +# Registration to main structural +set fmri(reghighres_yn) 0 + +# Search space for registration to main structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reghighres_search) 90 + +# Degrees of Freedom for registration to main structural +set fmri(reghighres_dof) 6 + +# Registration to standard image? 
+set fmri(regstandard_yn) 0 + +# Standard image +set fmri(regstandard) "MNI152" + +# Search space for registration to standard space +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(regstandard_search) 90 + +# Degrees of Freedom for registration to standard space +set fmri(regstandard_dof) 0 + +# Do nonlinear registration from structural to standard space? +set fmri(regstandard_nonlinear_yn) 0 + +# Control nonlinear warp field resolution +set fmri(regstandard_nonlinear_warpres) 10 + +# High pass filter cutoff +set fmri(paradigm_hp) $high_pass_filter_cutoff + +# 4D AVW data or FEAT directory (1) +set feat_files(1) "$func_file" + +# Subject's structural for analysis 1 +set highres_files(1) "" diff --git a/nipype/interfaces/fsl/model_templates/feat_nongui.tcl b/nipype/interfaces/fsl/model_templates/feat_nongui.tcl new file mode 100644 index 0000000000..accbee1906 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/feat_nongui.tcl @@ -0,0 +1,20 @@ +########################################################## +# Now options that don't appear in the GUI + +# Alternative example_func image (not derived from input 4D dataset) +set fmri(alternative_example_func) "" + +# Alternative (to BETting) mask image +set fmri(alternative_mask) "" + +# Initial structural space registration initialisation transform +set fmri(init_initial_highres) "" + +# Structural space registration initialisation transform +set fmri(init_highres) "" + +# Standard space registration initialisation transform +set fmri(init_standard) "" + +# For full FEAT analysis: overwrite existing .feat output dir? +set fmri(overwrite_yn) $overwrite diff --git a/nipype/interfaces/fsl/model_templates/featreg_header.tcl b/nipype/interfaces/fsl/model_templates/featreg_header.tcl new file mode 100644 index 0000000000..a73b17bb44 --- /dev/null +++ b/nipype/interfaces/fsl/model_templates/featreg_header.tcl @@ -0,0 +1,269 @@ +# FEAT version number +set fmri(version) 5.98 + +# Are we in MELODIC? +set fmri(inmelodic) 0 + +# Analysis level +# 1 : First-level analysis +# 2 : Higher-level analysis +set fmri(level) 2 + +# Which stages to run +# 0 : No first-level analysis (registration and/or group stats only) +# 7 : Full first-level analysis +# 1 : Pre-Stats +# 3 : Pre-Stats + Stats +# 2 : Stats +# 6 : Stats + Post-stats +# 4 : Post-stats +set fmri(analysis) 0 + +# Use relative filenames +set fmri(relative_yn) 0 + +# Balloon help +set fmri(help_yn) 1 + +# Run Featwatcher +set fmri(featwatcher_yn) 1 + +# Cleanup first-level standard-space images +set fmri(sscleanup_yn) 0 + +# Output directory +set fmri(outputdir) "" + +# TR(s) +set fmri(tr) 3 + +# Total volumes +set fmri(npts) 2 + +# Delete volumes +set fmri(ndelete) 0 + +# Perfusion tag/control order +set fmri(tagfirst) 1 + +# Number of first-level analyses +set fmri(multiple) ${num_runs} + +# Higher-level input type +# 1 : Inputs are lower-level FEAT directories +# 2 : Inputs are cope images from FEAT directories +set fmri(inputtype) 1 + +# Carry out pre-stats processing? 
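
Placeholders such as $overwrite, ${num_runs}, and ${num_copes} in these .tcl files follow Python string.Template syntax, which nipype's FEAT model interfaces use to fill them in. A minimal sketch, with the two template lines excerpted from feat_nongui.tcl above and an illustrative value for `overwrite`:

    # Substitute a $-placeholder in a FEAT fsf template fragment.
    from string import Template

    tcl = Template(
        '# For full FEAT analysis: overwrite existing .feat output dir?\n'
        'set fmri(overwrite_yn) $overwrite\n')
    print(tcl.substitute(overwrite=1))
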
+set fmri(filtering_yn) 0 + +# Brain/background threshold, % +set fmri(brain_thresh) 10 + +# Critical z for design efficiency calculation +set fmri(critical_z) 5.3 + +# Noise level +set fmri(noise) 0.66 + +# Noise AR(1) +set fmri(noisear) 0.34 + +# Post-stats-only directory copying +# 0 : Overwrite original post-stats results +# 1 : Copy original FEAT directory for new Contrasts, Thresholding, Rendering +set fmri(newdir_yn) 0 + +# Motion correction +# 0 : None +# 1 : MCFLIRT +set fmri(mc) 1 + +# Spin-history (currently obsolete) +set fmri(sh_yn) 0 + +# B0 fieldmap unwarping? +set fmri(regunwarp_yn) 0 + +# EPI dwell time (ms) +set fmri(dwell) 0.7 + +# EPI TE (ms) +set fmri(te) 35 + +# % Signal loss threshold +set fmri(signallossthresh) 10 + +# Unwarp direction +set fmri(unwarp_dir) y- + +# Slice timing correction +# 0 : None +# 1 : Regular up (0, 1, 2, 3, ...) +# 2 : Regular down +# 3 : Use slice order file +# 4 : Use slice timings file +# 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... ) +set fmri(st) 0 + +# Slice timings file +set fmri(st_file) "" + +# BET brain extraction +set fmri(bet_yn) 1 + +# Spatial smoothing FWHM (mm) +set fmri(smooth) 5 + +# Intensity normalization +set fmri(norm_yn) 0 + +# Perfusion subtraction +set fmri(perfsub_yn) 0 + +# Highpass temporal filtering +set fmri(temphp_yn) 1 + +# Lowpass temporal filtering +set fmri(templp_yn) 0 + +# MELODIC ICA data exploration +set fmri(melodic_yn) 0 + +# Carry out main stats? +set fmri(stats_yn) 1 + +# Carry out prewhitening? +set fmri(prewhiten_yn) 1 + +# Add motion parameters to model +# 0 : No +# 1 : Yes +set fmri(motionevs) 0 + +# Robust outlier detection in FLAME? +set fmri(robust_yn) 0 + +# Higher-level modelling +# 3 : Fixed effects +# 0 : Mixed Effects: Simple OLS +# 2 : Mixed Effects: FLAME 1 +# 1 : Mixed Effects: FLAME 1+2 +set fmri(mixed_yn) 3 + +# Number of EVs +set fmri(evs_orig) 0 +set fmri(evs_real) 0 +set fmri(evs_vox) 0 + +# Number of contrasts +set fmri(ncon_orig) 0 +set fmri(ncon_real) 0 + +# Number of F-tests +set fmri(nftests_orig) 0 +set fmri(nftests_real) 0 + +# Add constant column to design matrix? (obsolete) +set fmri(constcol) 0 + +# Carry out post-stats steps? +set fmri(poststats_yn) 1 + +# Pre-threshold masking? +set fmri(threshmask) "" + +# Thresholding +# 0 : None +# 1 : Uncorrected +# 2 : Voxel +# 3 : Cluster +set fmri(thresh) 3 + +# P threshold +set fmri(prob_thresh) 0.05 + +# Z threshold +set fmri(z_thresh) 2.3 + +# Z min/max for colour rendering +# 0 : Use actual Z min/max +# 1 : Use preset Z min/max +set fmri(zdisplay) 0 + +# Z min in colour rendering +set fmri(zmin) 2 + +# Z max in colour rendering +set fmri(zmax) 8 + +# Colour rendering type +# 0 : Solid blobs +# 1 : Transparent blobs +set fmri(rendertype) 1 + +# Background image for higher-level stats overlays +# 1 : Mean highres +# 2 : First highres +# 3 : Mean functional +# 4 : First functional +# 5 : Standard space template +set fmri(bgimage) 1 + +# Create time series plots +set fmri(tsplot_yn) 1 + +# Registration? 
+set fmri(reg_yn) 0 + +# Registration to initial structural +set fmri(reginitial_highres_yn) 0 + +# Search space for registration to initial structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reginitial_highres_search) 90 + +# Degrees of Freedom for registration to initial structural +set fmri(reginitial_highres_dof) 3 + +# Registration to main structural +set fmri(reghighres_yn) 0 + +# Search space for registration to main structural +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(reghighres_search) 90 + +# Degrees of Freedom for registration to main structural +set fmri(reghighres_dof) 6 + +# Registration to standard image? +set fmri(regstandard_yn) 1 + +# Standard image +set fmri(regstandard) "$regimage" + +# Search space for registration to standard space +# 0 : No search +# 90 : Normal search +# 180 : Full search +set fmri(regstandard_search) 90 + +# Degrees of Freedom for registration to standard space +set fmri(regstandard_dof) $regdof + +# Do nonlinear registration from structural to standard space? +set fmri(regstandard_nonlinear_yn) 0 + +# Control nonlinear warp field resolution +set fmri(regstandard_nonlinear_warpres) 10 + +# High pass filter cutoff +set fmri(paradigm_hp) 100 + +# Number of lower-level copes feeding into higher-level analysis +set fmri(ncopeinputs) ${num_runs} + diff --git a/nipype/interfaces/fsl/possum.py b/nipype/interfaces/fsl/possum.py new file mode 100644 index 0000000000..50b88db185 --- /dev/null +++ b/nipype/interfaces/fsl/possum.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The possum module provides classes for interfacing with `POSSUM +`_ command line tools. +Please, check out the link for pertinent citations using POSSUM. + + .. Note:: This was written to work with FSL version 5.0.6. 
+""" + +from .base import FSLCommand, FSLCommandInputSpec +from ..base import TraitedSpec, File, traits + + +class B0CalcInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr='-i %s', + position=0, + desc='filename of input image (usually a tissue/air segmentation)') + out_file = File( + argstr='-o %s', + position=1, + name_source=['in_file'], + name_template='%s_b0field', + output_name='out_file', + desc='filename of B0 output volume') + + x_grad = traits.Float( + 0.0, usedefault=True, + argstr='--gx=%0.4f', + desc='Value for zeroth-order x-gradient field (per mm)') + y_grad = traits.Float( + 0.0, usedefault=True, + argstr='--gy=%0.4f', + desc='Value for zeroth-order y-gradient field (per mm)') + z_grad = traits.Float( + 0.0, usedefault=True, + argstr='--gz=%0.4f', + desc='Value for zeroth-order z-gradient field (per mm)') + + x_b0 = traits.Float( + 0.0, usedefault=True, + argstr='--b0x=%0.2f', + xor=['xyz_b0'], + desc='Value for zeroth-order b0 field (x-component), in Tesla') + y_b0 = traits.Float( + 0.0, usedefault=True, + argstr='--b0y=%0.2f', + xor=['xyz_b0'], + desc='Value for zeroth-order b0 field (y-component), in Tesla') + z_b0 = traits.Float( + 1.0, usedefault=True, + argstr='--b0=%0.2f', + xor=['xyz_b0'], + desc='Value for zeroth-order b0 field (z-component), in Tesla') + + xyz_b0 = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', + xor=['x_b0', 'y_b0', 'z_b0'], + desc='Zeroth-order B0 field in Tesla') + + delta = traits.Float( + -9.45e-6, usedefault=True, + argstr='-d %e', desc='Delta value (chi_tissue - chi_air)') + chi_air = traits.Float( + 4.0e-7, usedefault=True, + argstr='--chi0=%e', desc='susceptibility of air') + compute_xyz = traits.Bool( + False, usedefault=True, + argstr='--xyz', + desc='calculate and save all 3 field components (i.e. x,y,z)') + extendboundary = traits.Float( + 1.0, usedefault=True, + argstr='--extendboundary=%0.2f', + desc='Relative proportion to extend voxels at boundary') + directconv = traits.Bool( + False, usedefault=True, + argstr='--directconv', + desc='use direct (image space) convolution, not FFT') + + +class B0CalcOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='filename of B0 output volume') + + +class B0Calc(FSLCommand): + """ + B0 inhomogeneities occur at interfaces of materials with different magnetic susceptibilities, + such as tissue-air interfaces. These differences lead to distortion in the local magnetic field, + as Maxwell’s equations need to be satisfied. An example of B0 inhomogneity is the first volume + of the 4D volume ```$FSLDIR/data/possum/b0_ppm.nii.gz```. 
+ + Examples + -------- + + >>> from nipype.interfaces.fsl import B0Calc + >>> b0calc = B0Calc() + >>> b0calc.inputs.in_file = 'tissue+air_map.nii' + >>> b0calc.inputs.z_b0 = 3.0 + >>> b0calc.inputs.output_type = "NIFTI_GZ" + >>> b0calc.cmdline + 'b0calc -i tissue+air_map.nii -o tissue+air_map_b0field.nii.gz --chi0=4.000000e-07 \ +-d -9.450000e-06 --extendboundary=1.00 --b0x=0.00 --gx=0.0000 --b0y=0.00 --gy=0.0000 \ +--b0=3.00 --gz=0.0000' + + """ + + _cmd = 'b0calc' + input_spec = B0CalcInputSpec + output_spec = B0CalcOutputSpec diff --git a/nipype/interfaces/fsl/preprocess.py b/nipype/interfaces/fsl/preprocess.py new file mode 100644 index 0000000000..dd2c969945 --- /dev/null +++ b/nipype/interfaces/fsl/preprocess.py @@ -0,0 +1,2056 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 4.1.4. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +import os +import os.path as op +from warnings import warn + +import numpy as np +from nibabel import load + +from ...utils.filemanip import split_filename +from ..base import (TraitedSpec, File, InputMultiPath, OutputMultiPath, + Undefined, traits, isdefined) +from .base import FSLCommand, FSLCommandInputSpec, Info + + +class BETInputSpec(FSLCommandInputSpec): + # We use position args here as list indices - so a negative number + # will put something on the end + in_file = File( + exists=True, + desc='input file to skull strip', + argstr='%s', + position=0, + mandatory=True) + out_file = File( + desc='name of output skull stripped image', + argstr='%s', + position=1, + genfile=True, + hash_files=False) + outline = traits.Bool(desc='create surface outline image', argstr='-o') + mask = traits.Bool(desc='create binary mask image', argstr='-m') + skull = traits.Bool(desc='create skull image', argstr='-s') + no_output = traits.Bool( + argstr='-n', desc="Don't generate segmented output") + frac = traits.Float( + desc='fractional intensity threshold', argstr='-f %.2f') + vertical_gradient = traits.Float( + argstr='-g %.2f', + desc='vertical gradient in fractional intensity threshold (-1, 1)') + radius = traits.Int(argstr='-r %d', units='mm', desc="head radius") + center = traits.List( + traits.Int, + desc='center of gravity in voxels', + argstr='-c %s', + minlen=0, + maxlen=3, + units='voxels') + threshold = traits.Bool( + argstr='-t', + desc="apply thresholding to segmented brain image and mask") + mesh = traits.Bool(argstr='-e', desc="generate a vtk mesh brain surface") + # the remaining 'options' are more like modes (mutually exclusive) that + # FSL actually implements in a shell script wrapper around the bet binary. 
+ # for some combinations of them in specific order a call would not fail, + # but in general using more than one of the following is clearly not + # supported + _xor_inputs = ('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided') + robust = traits.Bool( + desc='robust brain centre estimation (iterates BET several times)', + argstr='-R', + xor=_xor_inputs) + padding = traits.Bool( + desc=('improve BET if FOV is very small in Z (by temporarily padding ' + 'end slices)'), + argstr='-Z', + xor=_xor_inputs) + remove_eyes = traits.Bool( + desc='eye & optic nerve cleanup (can be useful in SIENA)', + argstr='-S', + xor=_xor_inputs) + surfaces = traits.Bool( + desc=('run bet2 and then betsurf to get additional skull and scalp ' + 'surfaces (includes registrations)'), + argstr='-A', + xor=_xor_inputs) + t2_guided = File( + desc='as with creating surfaces, when also feeding in ' + 'non-brain-extracted T2 (includes registrations)', + argstr='-A2 %s', + xor=_xor_inputs) + functional = traits.Bool( + argstr='-F', xor=_xor_inputs, desc="apply to 4D fMRI data") + reduce_bias = traits.Bool( + argstr='-B', xor=_xor_inputs, desc="bias field and neck cleanup") + + +class BETOutputSpec(TraitedSpec): + out_file = File(desc="path/name of skullstripped file (if generated)") + mask_file = File(desc="path/name of binary brain mask (if generated)") + outline_file = File(desc="path/name of outline file (if generated)") + meshfile = File(desc="path/name of vtk mesh file (if generated)") + inskull_mask_file = File(desc="path/name of inskull mask (if generated)") + inskull_mesh_file = File( + desc="path/name of inskull mesh outline (if generated)") + outskull_mask_file = File(desc="path/name of outskull mask (if generated)") + outskull_mesh_file = File( + desc="path/name of outskull mesh outline (if generated)") + outskin_mask_file = File(desc="path/name of outskin mask (if generated)") + outskin_mesh_file = File( + desc="path/name of outskin mesh outline (if generated)") + skull_mask_file = File(desc="path/name of skull mask (if generated)") + + +class BET(FSLCommand): + """FSL BET wrapper for skull stripping + + For complete details, see the `BET Documentation. + `_ + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> btr = fsl.BET() + >>> btr.inputs.in_file = 'structural.nii' + >>> btr.inputs.frac = 0.7 + >>> btr.inputs.out_file = 'brain_anat.nii' + >>> btr.cmdline + 'bet structural.nii brain_anat.nii -f 0.70' + >>> res = btr.run() # doctest: +SKIP + + """ + + _cmd = 'bet' + input_spec = BETInputSpec + output_spec = BETOutputSpec + + def _run_interface(self, runtime): + # The returncode is meaningless in BET. So check the output + # in stderr and if it's set, then update the returncode + # accordingly. 
+        runtime = super(BET, self)._run_interface(runtime)
+        if runtime.stderr:
+            self.raise_exception(runtime)
+        return runtime
+
+    def _gen_outfilename(self):
+        out_file = self.inputs.out_file
+        if not isdefined(out_file) and isdefined(self.inputs.in_file):
+            out_file = self._gen_fname(self.inputs.in_file, suffix='_brain')
+        return os.path.abspath(out_file)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = self._gen_outfilename()
+        if ((isdefined(self.inputs.mesh) and self.inputs.mesh)
+                or (isdefined(self.inputs.surfaces) and self.inputs.surfaces)):
+            outputs['meshfile'] = self._gen_fname(
+                outputs['out_file'], suffix='_mesh.vtk', change_ext=False)
+        if (isdefined(self.inputs.mask) and self.inputs.mask) or \
+                (isdefined(self.inputs.reduce_bias) and
+                 self.inputs.reduce_bias):
+            outputs['mask_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_mask')
+        if isdefined(self.inputs.outline) and self.inputs.outline:
+            outputs['outline_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_overlay')
+        if isdefined(self.inputs.surfaces) and self.inputs.surfaces:
+            outputs['inskull_mask_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_inskull_mask')
+            outputs['inskull_mesh_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_inskull_mesh')
+            outputs['outskull_mask_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_outskull_mask')
+            outputs['outskull_mesh_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_outskull_mesh')
+            outputs['outskin_mask_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_outskin_mask')
+            outputs['outskin_mesh_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_outskin_mesh')
+            outputs['skull_mask_file'] = self._gen_fname(
+                outputs['out_file'], suffix='_skull_mask')
+        if isdefined(self.inputs.no_output) and self.inputs.no_output:
+            outputs['out_file'] = Undefined
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_file':
+            return self._gen_outfilename()
+        return None
+
+
+class FASTInputSpec(FSLCommandInputSpec):
+    """ Defines inputs (trait classes) for FAST """
+    in_files = InputMultiPath(
+        File(exists=True),
+        copyfile=False,
+        desc='image, or multi-channel set of images, '
+        'to be segmented',
+        argstr='%s',
+        position=-1,
+        mandatory=True)
+    out_basename = File(desc='base name of output files', argstr='-o %s')
+    # ^^ uses in_file name as basename if none given
+    number_classes = traits.Range(
+        low=1, high=10, argstr='-n %d', desc='number of tissue-type classes')
+    output_biasfield = traits.Bool(
+        desc='output estimated bias field', argstr='-b')
+    output_biascorrected = traits.Bool(
+        desc='output restored image (bias-corrected image)', argstr='-B')
+    img_type = traits.Enum(
+        (1, 2, 3),
+        desc='int specifying type of image: (1 = T1, 2 = T2, 3 = PD)',
+        argstr='-t %d')
+    bias_iters = traits.Range(
+        low=1,
+        high=10,
+        argstr='-I %d',
+        desc='number of main-loop iterations during '
+        'bias-field removal')
+    bias_lowpass = traits.Range(
+        low=4,
+        high=40,
+        desc='bias field smoothing extent (FWHM) '
+        'in mm',
+        argstr='-l %d',
+        units='mm')
+    init_seg_smooth = traits.Range(
+        low=0.0001,
+        high=0.1,
+        desc='initial segmentation spatial '
+        'smoothness (during bias field '
+        'estimation)',
+        argstr='-f %.3f')
+    segments = traits.Bool(
+        desc='outputs a separate binary image for each '
+        'tissue type',
+        argstr='-g')
+    init_transform = File(
+        exists=True,
+        desc='<standard2input matrix> initialise'
+        ' using priors',
+        argstr='-a %s')
+    other_priors = InputMultiPath(
+        File(exists=True),
desc='alternative prior images', + argstr='-A %s', + minlen=3, + maxlen=3) + no_pve = traits.Bool( + desc='turn off PVE (partial volume estimation)', argstr='--nopve') + no_bias = traits.Bool(desc='do not remove bias field', argstr='-N') + use_priors = traits.Bool(desc='use priors throughout', argstr='-P') + # ^^ Must also set -a!, mutually inclusive?? No, conditional mandatory... need to figure out how to handle with traits. + segment_iters = traits.Range( + low=1, + high=50, + desc='number of segmentation-initialisation' + ' iterations', + argstr='-W %d') + mixel_smooth = traits.Range( + low=0.0, + high=1.0, + desc='spatial smoothness for mixeltype', + argstr='-R %.2f') + iters_afterbias = traits.Range( + low=1, + high=20, + desc='number of main-loop iterations ' + 'after bias-field removal', + argstr='-O %d') + hyper = traits.Range( + low=0.0, + high=1.0, + desc='segmentation spatial smoothness', + argstr='-H %.2f') + verbose = traits.Bool(desc='switch on diagnostic messages', argstr='-v') + manual_seg = File( + exists=True, desc='Filename containing intensities', argstr='-s %s') + probability_maps = traits.Bool( + desc='outputs individual probability maps', argstr='-p') + + +class FASTOutputSpec(TraitedSpec): + """Specify possible outputs from FAST""" + tissue_class_map = File( + exists=True, + desc='path/name of binary segmented volume file' + ' one val for each class _seg') + tissue_class_files = OutputMultiPath( + File( + desc=( + 'path/name of binary segmented volumes one file for each class ' + '_seg_x'))) + restored_image = OutputMultiPath( + File( + desc=( + 'restored images (one for each input image) named according to ' + 'the input images _restore'))) + + mixeltype = File(desc="path/name of mixeltype volume file _mixeltype") + + partial_volume_map = File(desc='path/name of partial volume file _pveseg') + partial_volume_files = OutputMultiPath( + File( + desc='path/name of partial volumes files one for each class, _pve_x' + )) + + bias_field = OutputMultiPath(File(desc='Estimated bias field _bias')) + probability_maps = OutputMultiPath( + File(desc='filenames, one for each class, for each input, prob_x')) + + +class FAST(FSLCommand): + """FSL FAST wrapper for segmentation and bias correction + + For complete details, see the `FAST Documentation. 
+ `_ + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> fastr = fsl.FAST() + >>> fastr.inputs.in_files = 'structural.nii' + >>> fastr.inputs.out_basename = 'fast_' + >>> fastr.cmdline + 'fast -o fast_ -S 1 structural.nii' + >>> out = fastr.run() # doctest: +SKIP + + """ + _cmd = 'fast' + input_spec = FASTInputSpec + output_spec = FASTOutputSpec + + def _format_arg(self, name, spec, value): + # first do what should be done in general + formatted = super(FAST, self)._format_arg(name, spec, value) + if name == 'in_files': + # FAST needs the -S parameter value to correspond to the number + # of input images, otherwise it will ignore all but the first + formatted = "-S %d %s" % (len(value), formatted) + return formatted + + def _list_outputs(self): + outputs = self.output_spec().get() + if not isdefined(self.inputs.number_classes): + nclasses = 3 + else: + nclasses = self.inputs.number_classes + # when using multichannel, results basename is based on last + # input filename + _gen_fname_opts = {} + if isdefined(self.inputs.out_basename): + _gen_fname_opts['basename'] = self.inputs.out_basename + _gen_fname_opts['cwd'] = os.getcwd() + else: + _gen_fname_opts['basename'] = self.inputs.in_files[-1] + _gen_fname_opts['cwd'], _, _ = split_filename( + _gen_fname_opts['basename']) + + outputs['tissue_class_map'] = self._gen_fname( + suffix='_seg', **_gen_fname_opts) + if self.inputs.segments: + outputs['tissue_class_files'] = [] + for i in range(nclasses): + outputs['tissue_class_files'].append( + self._gen_fname(suffix='_seg_%d' % i, **_gen_fname_opts)) + if isdefined(self.inputs.output_biascorrected): + outputs['restored_image'] = [] + if len(self.inputs.in_files) > 1: + # for multi-image segmentation there is one corrected image + # per input + for val, f in enumerate(self.inputs.in_files): + # image numbering is 1-based + outputs['restored_image'].append( + self._gen_fname( + suffix='_restore_%d' % (val + 1), + **_gen_fname_opts)) + else: + # single image segmentation has unnumbered output image + outputs['restored_image'].append( + self._gen_fname(suffix='_restore', **_gen_fname_opts)) + + outputs['mixeltype'] = self._gen_fname( + suffix='_mixeltype', **_gen_fname_opts) + if not self.inputs.no_pve: + outputs['partial_volume_map'] = self._gen_fname( + suffix='_pveseg', **_gen_fname_opts) + outputs['partial_volume_files'] = [] + for i in range(nclasses): + outputs['partial_volume_files'].append( + self._gen_fname(suffix='_pve_%d' % i, **_gen_fname_opts)) + if self.inputs.output_biasfield: + outputs['bias_field'] = [] + if len(self.inputs.in_files) > 1: + # for multi-image segmentation there is one bias field image + # per input + for val, f in enumerate(self.inputs.in_files): + # image numbering is 1-based + outputs['bias_field'].append( + self._gen_fname( + suffix='_bias_%d' % (val + 1), **_gen_fname_opts)) + else: + # single image segmentation has unnumbered output image + outputs['bias_field'].append( + self._gen_fname(suffix='_bias', **_gen_fname_opts)) + + if self.inputs.probability_maps: + outputs['probability_maps'] = [] + for i in range(nclasses): + outputs['probability_maps'].append( + self._gen_fname(suffix='_prob_%d' % i, **_gen_fname_opts)) + return outputs + + +class FLIRTInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='-in %s', + mandatory=True, + position=0, + desc='input file') + reference = File( + exists=True, + argstr='-ref %s', + mandatory=True, + position=1, + desc='reference file') + out_file = File( + argstr='-out %s', + 
desc='registered output file', + name_source=['in_file'], + name_template='%s_flirt', + position=2, + hash_files=False) + out_matrix_file = File( + argstr='-omat %s', + name_source=['in_file'], + keep_extension=True, + name_template='%s_flirt.mat', + desc='output affine matrix in 4x4 asciii format', + position=3, + hash_files=False) + out_log = File( + name_source=['in_file'], + keep_extension=True, + requires=['save_log'], + name_template='%s_flirt.log', + desc='output log') + in_matrix_file = File(argstr='-init %s', desc='input 4x4 affine matrix') + apply_xfm = traits.Bool( + argstr='-applyxfm', + desc=( + 'apply transformation supplied by in_matrix_file or uses_qform to' + ' use the affine matrix stored in the reference header')) + apply_isoxfm = traits.Float( + argstr='-applyisoxfm %f', + xor=['apply_xfm'], + desc='as applyxfm but forces isotropic resampling') + datatype = traits.Enum( + 'char', + 'short', + 'int', + 'float', + 'double', + argstr='-datatype %s', + desc='force output data type') + cost = traits.Enum( + 'mutualinfo', + 'corratio', + 'normcorr', + 'normmi', + 'leastsq', + 'labeldiff', + 'bbr', + argstr='-cost %s', + desc='cost function') + # XXX What is the difference between 'cost' and 'searchcost'? Are + # these both necessary or do they map to the same variable. + cost_func = traits.Enum( + 'mutualinfo', + 'corratio', + 'normcorr', + 'normmi', + 'leastsq', + 'labeldiff', + 'bbr', + argstr='-searchcost %s', + desc='cost function') + uses_qform = traits.Bool( + argstr='-usesqform', desc='initialize using sform or qform') + display_init = traits.Bool( + argstr='-displayinit', desc='display initial matrix') + angle_rep = traits.Enum( + 'quaternion', + 'euler', + argstr='-anglerep %s', + desc='representation of rotation angles') + interp = traits.Enum( + 'trilinear', + 'nearestneighbour', + 'sinc', + 'spline', + argstr='-interp %s', + desc='final interpolation method used in reslicing') + sinc_width = traits.Int( + argstr='-sincwidth %d', units='voxels', desc='full-width in voxels') + sinc_window = traits.Enum( + 'rectangular', + 'hanning', + 'blackman', + argstr='-sincwindow %s', + desc='sinc window') # XXX better doc + bins = traits.Int(argstr='-bins %d', desc='number of histogram bins') + dof = traits.Int( + argstr='-dof %d', desc='number of transform degrees of freedom') + no_resample = traits.Bool( + argstr='-noresample', desc='do not change input sampling') + force_scaling = traits.Bool( + argstr='-forcescaling', desc='force rescaling even for low-res images') + min_sampling = traits.Float( + argstr='-minsampling %f', + units='mm', + desc='set minimum voxel dimension for sampling') + padding_size = traits.Int( + argstr='-paddingsize %d', + units='voxels', + desc='for applyxfm: interpolates outside image ' + 'by size') + searchr_x = traits.List( + traits.Int, + minlen=2, + maxlen=2, + units='degrees', + argstr='-searchrx %s', + desc='search angles along x-axis, in degrees') + searchr_y = traits.List( + traits.Int, + minlen=2, + maxlen=2, + units='degrees', + argstr='-searchry %s', + desc='search angles along y-axis, in degrees') + searchr_z = traits.List( + traits.Int, + minlen=2, + maxlen=2, + units='degrees', + argstr='-searchrz %s', + desc='search angles along z-axis, in degrees') + no_search = traits.Bool( + argstr='-nosearch', desc='set all angular searches to ranges 0 to 0') + coarse_search = traits.Int( + argstr='-coarsesearch %d', + units='degrees', + desc='coarse search delta angle') + fine_search = traits.Int( + argstr='-finesearch %d', + units='degrees', + 
desc='fine search delta angle') + schedule = File( + exists=True, argstr='-schedule %s', desc='replaces default schedule') + ref_weight = File( + exists=True, + argstr='-refweight %s', + desc='File for reference weighting volume') + in_weight = File( + exists=True, + argstr='-inweight %s', + desc='File for input weighting volume') + no_clamp = traits.Bool( + argstr='-noclamp', desc='do not use intensity clamping') + no_resample_blur = traits.Bool( + argstr='-noresampblur', desc='do not use blurring on downsampling') + rigid2D = traits.Bool( + argstr='-2D', desc='use 2D rigid body mode - ignores dof') + save_log = traits.Bool(desc='save to log file') + verbose = traits.Int(argstr='-verbose %d', desc='verbose mode, 0 is least') + bgvalue = traits.Float( + 0, + argstr='-setbackground %f', + desc=('use specified background value for points ' + 'outside FOV')) + + # BBR options + wm_seg = File( + argstr='-wmseg %s', + min_ver='5.0.0', + desc='white matter segmentation volume needed by BBR cost function') + wmcoords = File( + argstr='-wmcoords %s', + min_ver='5.0.0', + desc='white matter boundary coordinates for BBR cost function') + wmnorms = File( + argstr='-wmnorms %s', + min_ver='5.0.0', + desc='white matter boundary normals for BBR cost function') + fieldmap = File( + argstr='-fieldmap %s', + min_ver='5.0.0', + desc=('fieldmap image in rads/s - must be already registered to the ' + 'reference image')) + fieldmapmask = File( + argstr='-fieldmapmask %s', + min_ver='5.0.0', + desc='mask for fieldmap image') + pedir = traits.Int( + argstr='-pedir %d', + min_ver='5.0.0', + desc='phase encode direction of EPI - 1/2/3=x/y/z & -1/-2/-3=-x/-y/-z') + echospacing = traits.Float( + argstr='-echospacing %f', + min_ver='5.0.0', + desc='value of EPI echo spacing - units of seconds') + bbrtype = traits.Enum( + 'signed', + 'global_abs', + 'local_abs', + argstr='-bbrtype %s', + min_ver='5.0.0', + desc=('type of bbr cost function: signed [default], global_abs, ' + 'local_abs')) + bbrslope = traits.Float( + argstr='-bbrslope %f', min_ver='5.0.0', desc='value of bbr slope') + + +class FLIRTOutputSpec(TraitedSpec): + out_file = File( + exists=True, desc='path/name of registered file (if generated)') + out_matrix_file = File( + exists=True, + desc='path/name of calculated affine transform ' + '(if generated)') + out_log = File(desc='path/name of output log (if generated)') + + +class FLIRT(FSLCommand): + """FSL FLIRT wrapper for coregistration + + For complete details, see the `FLIRT Documentation. 
+ `_ + + To print out the command line help, use: + fsl.FLIRT().inputs_help() + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> from nipype.testing import example_data + >>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo') + >>> flt.inputs.in_file = 'structural.nii' + >>> flt.inputs.reference = 'mni.nii' + >>> flt.inputs.output_type = "NIFTI_GZ" + >>> flt.cmdline # doctest: +ELLIPSIS + 'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' + >>> res = flt.run() #doctest: +SKIP + + """ + _cmd = 'flirt' + input_spec = FLIRTInputSpec + output_spec = FLIRTOutputSpec + _log_written = False + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = super(FLIRT, self).aggregate_outputs( + runtime=runtime, needed_outputs=needed_outputs) + if self.inputs.save_log and not self._log_written: + with open(outputs.out_log, "a") as text_file: + text_file.write(runtime.stdout + '\n') + self._log_written = True + return outputs + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + if self.inputs.save_log and not self.inputs.verbose: + self.inputs.verbose = 1 + if self.inputs.apply_xfm and not (self.inputs.in_matrix_file + or self.inputs.uses_qform): + raise RuntimeError('Argument apply_xfm requires in_matrix_file or ' + 'uses_qform arguments to run') + skip.append('save_log') + return super(FLIRT, self)._parse_inputs(skip=skip) + + +class ApplyXFMInputSpec(FLIRTInputSpec): + apply_xfm = traits.Bool( + True, + argstr='-applyxfm', + desc=( + 'apply transformation supplied by in_matrix_file or uses_qform to' + ' use the affine matrix stored in the reference header'), + usedefault=True) + + +class ApplyXFM(FLIRT): + """Currently just a light wrapper around FLIRT, + with no modifications + + ApplyXFM is used to apply an existing tranform to an image + + + Examples + -------- + + >>> import nipype.interfaces.fsl as fsl + >>> from nipype.testing import example_data + >>> applyxfm = fsl.preprocess.ApplyXFM() + >>> applyxfm.inputs.in_file = example_data('structural.nii') + >>> applyxfm.inputs.in_matrix_file = example_data('trans.mat') + >>> applyxfm.inputs.out_file = 'newfile.nii' + >>> applyxfm.inputs.reference = example_data('mni.nii') + >>> applyxfm.inputs.apply_xfm = True + >>> result = applyxfm.run() # doctest: +SKIP + + """ + input_spec = ApplyXFMInputSpec + + +class MCFLIRTInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + position=0, + argstr="-in %s", + mandatory=True, + desc="timeseries to motion-correct") + out_file = File( + argstr='-out %s', genfile=True, desc="file to write", hash_files=False) + cost = traits.Enum( + 'mutualinfo', + 'woods', + 'corratio', + 'normcorr', + 'normmi', + 'leastsquares', + argstr='-cost %s', + desc="cost function to optimize") + bins = traits.Int(argstr='-bins %d', desc="number of histogram bins") + dof = traits.Int( + argstr='-dof %d', desc="degrees of freedom for the transformation") + ref_vol = traits.Int(argstr='-refvol %d', desc="volume to align frames to") + scaling = traits.Float( + argstr='-scaling %.2f', desc="scaling factor to use") + smooth = traits.Float( + argstr='-smooth %.2f', desc="smoothing factor for the cost function") + rotation = traits.Int( + argstr='-rotation %d', desc="scaling factor for rotation tolerances") + stages = traits.Int( + argstr='-stages %d', + desc="stages (if 4, perform final search with sinc interpolation") + init = File( + exists=True, argstr='-init %s', desc="inital transformation 
matrix") + interpolation = traits.Enum( + "spline", + "nn", + "sinc", + argstr="-%s_final", + desc="interpolation method for transformation") + use_gradient = traits.Bool( + argstr='-gdt', desc="run search on gradient images") + use_contour = traits.Bool( + argstr='-edge', desc="run search on contour images") + mean_vol = traits.Bool(argstr='-meanvol', desc="register to mean volume") + stats_imgs = traits.Bool( + argstr='-stats', desc="produce variance and std. dev. images") + save_mats = traits.Bool( + argstr='-mats', desc="save transformation matrices") + save_plots = traits.Bool( + argstr='-plots', desc="save transformation parameters") + save_rms = traits.Bool( + argstr='-rmsabs -rmsrel', desc="save rms displacement parameters") + ref_file = File( + exists=True, + argstr='-reffile %s', + desc="target image for motion correction") + + +class MCFLIRTOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="motion-corrected timeseries") + variance_img = File(exists=True, desc="variance image") + std_img = File(exists=True, desc="standard deviation image") + mean_img = File( + exists=True, desc="mean timeseries image (if mean_vol=True)") + par_file = File(exists=True, desc="text-file with motion parameters") + mat_file = OutputMultiPath( + File(exists=True), desc="transformation matrices") + rms_files = OutputMultiPath( + File(exists=True), + desc="absolute and relative displacement parameters") + + +class MCFLIRT(FSLCommand): + """FSL MCFLIRT wrapper for within-modality motion correction + + For complete details, see the `MCFLIRT Documentation. + `_ + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> mcflt = fsl.MCFLIRT() + >>> mcflt.inputs.in_file = 'functional.nii' + >>> mcflt.inputs.cost = 'mutualinfo' + >>> mcflt.inputs.out_file = 'moco.nii' + >>> mcflt.cmdline + 'mcflirt -in functional.nii -cost mutualinfo -out moco.nii' + >>> res = mcflt.run() # doctest: +SKIP + + """ + _cmd = 'mcflirt' + input_spec = MCFLIRTInputSpec + output_spec = MCFLIRTOutputSpec + + def _format_arg(self, name, spec, value): + if name == "interpolation": + if value == "trilinear": + return "" + else: + return spec.argstr % value + return super(MCFLIRT, self)._format_arg(name, spec, value) + + def _list_outputs(self): + cwd = os.getcwd() + outputs = self._outputs().get() + + outputs['out_file'] = self._gen_outfilename() + + if isdefined(self.inputs.stats_imgs) and self.inputs.stats_imgs: + outputs['variance_img'] = self._gen_fname( + outputs['out_file'] + '_variance.ext', cwd=cwd) + outputs['std_img'] = self._gen_fname( + outputs['out_file'] + '_sigma.ext', cwd=cwd) + + # The mean image created if -stats option is specified ('meanvol') + # is missing the top and bottom slices. Therefore we only expose the + # mean image created by -meanvol option ('mean_reg') which isn't + # corrupted. + # Note that the same problem holds for the std and variance image. + + if isdefined(self.inputs.mean_vol) and self.inputs.mean_vol: + outputs['mean_img'] = self._gen_fname( + outputs['out_file'] + '_mean_reg.ext', cwd=cwd) + + if isdefined(self.inputs.save_mats) and self.inputs.save_mats: + _, filename = os.path.split(outputs['out_file']) + matpathname = os.path.join(cwd, filename + '.mat') + _, _, _, timepoints = load(self.inputs.in_file).shape + outputs['mat_file'] = [] + for t in range(timepoints): + outputs['mat_file'].append( + os.path.join(matpathname, 'MAT_%04d' % t)) + if isdefined(self.inputs.save_plots) and self.inputs.save_plots: + # Note - if e.g. 
out_file has .nii.gz, you get .nii.gz.par, + # which is what mcflirt does! + outputs['par_file'] = outputs['out_file'] + '.par' + if isdefined(self.inputs.save_rms) and self.inputs.save_rms: + outfile = outputs['out_file'] + outputs['rms_files'] = [outfile + '_abs.rms', outfile + '_rel.rms'] + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + return None + + def _gen_outfilename(self): + out_file = self.inputs.out_file + if isdefined(out_file): + out_file = os.path.realpath(out_file) + if not isdefined(out_file) and isdefined(self.inputs.in_file): + out_file = self._gen_fname(self.inputs.in_file, suffix='_mcf') + return os.path.abspath(out_file) + + +class FNIRTInputSpec(FSLCommandInputSpec): + ref_file = File( + exists=True, + argstr='--ref=%s', + mandatory=True, + desc='name of reference image') + in_file = File( + exists=True, + argstr='--in=%s', + mandatory=True, + desc='name of input image') + affine_file = File( + exists=True, + argstr='--aff=%s', + desc='name of file containing affine transform') + inwarp_file = File( + exists=True, + argstr='--inwarp=%s', + desc='name of file containing initial non-linear warps') + in_intensitymap_file = traits.List( + File(exists=True), + argstr='--intin=%s', + copyfile=False, + minlen=1, + maxlen=2, + desc=('name of file/files containing ' + 'initial intensity mapping ' + 'usually generated by previous ' + 'fnirt run')) + fieldcoeff_file = traits.Either( + traits.Bool, + File, + argstr='--cout=%s', + desc='name of output file with field coefficients or true') + warped_file = File( + argstr='--iout=%s', + desc='name of output image', + genfile=True, + hash_files=False) + field_file = traits.Either( + traits.Bool, + File, + argstr='--fout=%s', + desc='name of output file with field or true', + hash_files=False) + jacobian_file = traits.Either( + traits.Bool, + File, + argstr='--jout=%s', + desc=('name of file for writing out the ' + 'Jacobian of the field (for ' + 'diagnostic or VBM purposes)'), + hash_files=False) + modulatedref_file = traits.Either( + traits.Bool, + File, + argstr='--refout=%s', + desc=('name of file for writing out ' + 'intensity modulated --ref (for ' + 'diagnostic purposes)'), + hash_files=False) + out_intensitymap_file = traits.Either( + traits.Bool, + File, + argstr='--intout=%s', + desc=('name of files for writing ' + 'information pertaining to ' + 'intensity mapping'), + hash_files=False) + log_file = File( + argstr='--logout=%s', + desc='Name of log-file', + genfile=True, + hash_files=False) + config_file = traits.Either( + traits.Enum("T1_2_MNI152_2mm", "FA_2_FMRIB58_1mm"), + File(exists=True), + argstr='--config=%s', + desc='Name of config file specifying command line arguments') + refmask_file = File( + exists=True, + argstr='--refmask=%s', + desc='name of file with mask in reference space') + inmask_file = File( + exists=True, + argstr='--inmask=%s', + desc='name of file with mask in input image space') + skip_refmask = traits.Bool( + argstr='--applyrefmask=0', + xor=['apply_refmask'], + desc='Skip specified refmask if set, default false') + skip_inmask = traits.Bool( + argstr='--applyinmask=0', + xor=['apply_inmask'], + desc='skip specified inmask if set, default false') + apply_refmask = traits.List( + traits.Enum(0, 1), + argstr='--applyrefmask=%s', + xor=['skip_refmask'], + desc=('list of iterations to use reference mask on (1 to use, 0 to ' + 'skip)'), + sep=",") + apply_inmask = traits.List( + traits.Enum(0, 1), + argstr='--applyinmask=%s', + 
+        xor=['skip_inmask'],
+        desc='list of iterations to use input mask on (1 to use, 0 to skip)',
+        sep=",")
+    skip_implicit_ref_masking = traits.Bool(
+        argstr='--imprefm=0',
+        desc=('skip implicit masking based on value in --ref image. '
+              'Default = 0'))
+    skip_implicit_in_masking = traits.Bool(
+        argstr='--impinm=0',
+        desc=('skip implicit masking based on value in --in image. '
+              'Default = 0'))
+    refmask_val = traits.Float(
+        argstr='--imprefval=%f',
+        desc='Value to mask out in --ref image. Default =0.0')
+    inmask_val = traits.Float(
+        argstr='--impinval=%f',
+        desc='Value to mask out in --in image. Default =0.0')
+    max_nonlin_iter = traits.List(
+        traits.Int,
+        argstr='--miter=%s',
+        desc='Max # of non-linear iterations list, default [5, 5, 5, 5]',
+        sep=",")
+    subsampling_scheme = traits.List(
+        traits.Int,
+        argstr='--subsamp=%s',
+        desc='sub-sampling scheme, list, default [4, 2, 1, 1]',
+        sep=",")
+    warp_resolution = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        traits.Int,
+        argstr='--warpres=%d,%d,%d',
+        desc=('(approximate) resolution (in mm) of warp basis in x-, y- and '
+              'z-direction, default 10, 10, 10'))
+    spline_order = traits.Int(
+        argstr='--splineorder=%d',
+        desc='Order of spline, 2->Quadratic spline, 3->Cubic spline. Default=3')
+    in_fwhm = traits.List(
+        traits.Int,
+        argstr='--infwhm=%s',
+        desc=('FWHM (in mm) of gaussian smoothing kernel for input volume, '
+              'default [6, 4, 2, 2]'),
+        sep=",")
+    ref_fwhm = traits.List(
+        traits.Int,
+        argstr='--reffwhm=%s',
+        desc=('FWHM (in mm) of gaussian smoothing kernel for ref volume, '
+              'default [4, 2, 0, 0]'),
+        sep=",")
+    regularization_model = traits.Enum(
+        'membrane_energy',
+        'bending_energy',
+        argstr='--regmod=%s',
+        desc=('Model for regularisation of warp-field [membrane_energy '
+              'bending_energy], default bending_energy'))
+    regularization_lambda = traits.List(
+        traits.Float,
+        argstr='--lambda=%s',
+        desc=('Weight of regularisation, default depending on --ssqlambda and '
+              '--regmod switches. See user documentation.'),
+        sep=",")
+    skip_lambda_ssq = traits.Bool(
+        argstr='--ssqlambda=0',
+        desc='If true, lambda is not weighted by current ssq, default false')
+    jacobian_range = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        argstr='--jacrange=%f,%f',
+        desc='Allowed range of Jacobian determinants, default 0.01, 100.0')
+    derive_from_ref = traits.Bool(
+        argstr='--refderiv',
+        desc=('If true, ref image is used to calculate derivatives. '
+              'Default false'))
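+    # Editor's note (added comment): the options below configure FNIRT's
+    # intensity mapping, i.e. how intensities in --in are modelled against
+    # --ref; they are independent of the geometric warp options above.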
+    intensity_mapping_model = traits.Enum(
+        'none',
+        'global_linear',
+        'global_non_linear',
+        'local_linear',
+        'global_non_linear_with_bias',
+        'local_non_linear',
+        argstr='--intmod=%s',
+        desc='Model for intensity-mapping')
+    intensity_mapping_order = traits.Int(
+        argstr='--intorder=%d',
+        desc='Order of polynomial for mapping intensities, default 5')
+    biasfield_resolution = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        traits.Int,
+        argstr='--biasres=%d,%d,%d',
+        desc=('Resolution (in mm) of bias-field modelling local intensities, '
+              'default 50, 50, 50'))
+    bias_regularization_lambda = traits.Float(
+        argstr='--biaslambda=%f',
+        desc='Weight of regularisation for bias-field, default 10000')
+    skip_intensity_mapping = traits.Bool(
+        argstr='--estint=0',
+        xor=['apply_intensity_mapping'],
+        desc='Skip estimate intensity-mapping default false')
+    apply_intensity_mapping = traits.List(
+        traits.Enum(0, 1),
+        argstr='--estint=%s',
+        xor=['skip_intensity_mapping'],
+        desc=('List of subsampling levels to apply intensity mapping for '
+              '(0 to skip, 1 to apply)'),
+        sep=",")
+    hessian_precision = traits.Enum(
+        'double',
+        'float',
+        argstr='--numprec=%s',
+        desc=('Precision for representing Hessian, double or float. '
+              'Default double'))
+
+
+class FNIRTOutputSpec(TraitedSpec):
+    fieldcoeff_file = File(exists=True, desc='file with field coefficients')
+    warped_file = File(exists=True, desc='warped image')
+    field_file = File(desc='file with warp field')
+    jacobian_file = File(desc='file containing Jacobian of the field')
+    modulatedref_file = File(desc='file containing intensity modulated --ref')
+    out_intensitymap_file = traits.List(
+        File,
+        minlen=2,
+        maxlen=2,
+        desc='files containing info pertaining to intensity mapping')
+    log_file = File(desc='Name of log-file')
+
+
+class FNIRT(FSLCommand):
+    """FSL FNIRT wrapper for non-linear registration
+
+    For complete details, see the `FNIRT Documentation.
+    `_
+
+    Examples
+    --------
+    >>> from nipype.interfaces import fsl
+    >>> from nipype.testing import example_data
+    >>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat'))
+    >>> res = fnt.run(ref_file=example_data('mni.nii'), in_file=example_data('structural.nii')) #doctest: +SKIP
+
+    T1 -> MNI152
+
+    >>> from nipype.interfaces import fsl
+    >>> fnirt_mprage = fsl.FNIRT()
+    >>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2]
+    >>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1]
+
+    Specify the resolution of the warps
+
+    >>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6)
+    >>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii') #doctest: +SKIP
+
+    We can check the command line and confirm that it's what we expect.
+ + >>> fnirt_mprage.cmdline #doctest: +SKIP + 'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii' + + """ + + _cmd = 'fnirt' + input_spec = FNIRTInputSpec + output_spec = FNIRTOutputSpec + + filemap = { + 'warped_file': 'warped', + 'field_file': 'field', + 'jacobian_file': 'field_jacobian', + 'modulatedref_file': 'modulated', + 'out_intensitymap_file': 'intmap', + 'log_file': 'log.txt', + 'fieldcoeff_file': 'fieldwarp' + } + + def _list_outputs(self): + outputs = self.output_spec().get() + for key, suffix in list(self.filemap.items()): + inval = getattr(self.inputs, key) + change_ext = True + if key in ['warped_file', 'log_file']: + if suffix.endswith('.txt'): + change_ext = False + if isdefined(inval): + outputs[key] = inval + else: + outputs[key] = self._gen_fname( + self.inputs.in_file, + suffix='_' + suffix, + change_ext=change_ext) + elif isdefined(inval): + if isinstance(inval, bool): + if inval: + outputs[key] = self._gen_fname( + self.inputs.in_file, + suffix='_' + suffix, + change_ext=change_ext) + else: + outputs[key] = os.path.abspath(inval) + + if key == 'out_intensitymap_file' and isdefined(outputs[key]): + basename = FNIRT.intensitymap_file_basename(outputs[key]) + outputs[key] = [ + outputs[key], + '%s.txt' % basename, + ] + return outputs + + def _format_arg(self, name, spec, value): + if name in ('in_intensitymap_file', 'out_intensitymap_file'): + if name == 'out_intensitymap_file': + value = self._list_outputs()[name] + value = [FNIRT.intensitymap_file_basename(v) for v in value] + assert len(set(value)) == 1, ( + 'Found different basenames for {}: {}'.format(name, value)) + return spec.argstr % value[0] + if name in list(self.filemap.keys()): + return spec.argstr % self._list_outputs()[name] + return super(FNIRT, self)._format_arg(name, spec, value) + + def _gen_filename(self, name): + if name in ['warped_file', 'log_file']: + return self._list_outputs()[name] + return None + + def write_config(self, configfile): + """Writes out currently set options to specified config file + + XX TODO : need to figure out how the config file is written + + Parameters + ---------- + configfile : /path/to/configfile + """ + try: + fid = open(configfile, 'w+') + except IOError: + print('unable to create config_file %s' % (configfile)) + + for item in list(self.inputs.get().items()): + fid.write('%s\n' % (item)) + fid.close() + + @classmethod + def intensitymap_file_basename(cls, f): + """Removes valid intensitymap extensions from `f`, returning a basename + that can refer to both intensitymap files. 
+ """ + for ext in list(Info.ftypes.values()) + ['.txt']: + if f.endswith(ext): + return f[:-len(ext)] + # TODO consider warning for this case + return f + + +class ApplyWarpInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='--in=%s', + mandatory=True, + position=0, + desc='image to be warped') + out_file = File( + argstr='--out=%s', + genfile=True, + position=2, + desc='output filename', + hash_files=False) + ref_file = File( + exists=True, + argstr='--ref=%s', + mandatory=True, + position=1, + desc='reference image') + field_file = File( + exists=True, argstr='--warp=%s', desc='file containing warp field') + abswarp = traits.Bool( + argstr='--abs', + xor=['relwarp'], + desc="treat warp field as absolute: x' = w(x)") + relwarp = traits.Bool( + argstr='--rel', + xor=['abswarp'], + position=-1, + desc="treat warp field as relative: x' = x + w(x)") + datatype = traits.Enum( + 'char', + 'short', + 'int', + 'float', + 'double', + argstr='--datatype=%s', + desc='Force output data type [char short int float double].') + supersample = traits.Bool( + argstr='--super', + desc='intermediary supersampling of output, default is off') + superlevel = traits.Either( + traits.Enum('a'), + traits.Int, + argstr='--superlevel=%s', + desc=("level of intermediary supersampling, a for 'automatic' or " + "integer level. Default = 2")) + premat = File( + exists=True, + argstr='--premat=%s', + desc='filename for pre-transform (affine matrix)') + postmat = File( + exists=True, + argstr='--postmat=%s', + desc='filename for post-transform (affine matrix)') + mask_file = File( + exists=True, + argstr='--mask=%s', + desc='filename for mask image (in reference space)') + interp = traits.Enum( + 'nn', + 'trilinear', + 'sinc', + 'spline', + argstr='--interp=%s', + position=-2, + desc='interpolation method') + + +class ApplyWarpOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Warped output file') + + +class ApplyWarp(FSLCommand): + """FSL's applywarp wrapper to apply the results of a FNIRT registration + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> from nipype.testing import example_data + >>> aw = fsl.ApplyWarp() + >>> aw.inputs.in_file = example_data('structural.nii') + >>> aw.inputs.ref_file = example_data('mni.nii') + >>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP + >>> res = aw.run() #doctest: +SKIP + + + """ + + _cmd = 'applywarp' + input_spec = ApplyWarpInputSpec + output_spec = ApplyWarpOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'superlevel': + return spec.argstr % str(value) + return super(ApplyWarp, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + if not isdefined(self.inputs.out_file): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_warp') + else: + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + +class SliceTimerInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='--in=%s', + mandatory=True, + position=0, + desc='filename of input timeseries') + out_file = File( + argstr='--out=%s', + genfile=True, + desc='filename of output timeseries', + hash_files=False) + index_dir = traits.Bool( + argstr='--down', desc='slice indexing from top to bottom') + time_repetition = traits.Float( + argstr='--repeat=%f', desc='Specify TR of data - default is 3s') + slice_direction = 
traits.Enum( + 1, + 2, + 3, + argstr='--direction=%d', + desc='direction of slice acquisition (x=1, y=2, z=3) - default is z') + interleaved = traits.Bool( + argstr='--odd', desc='use interleaved acquisition') + custom_timings = File( + exists=True, + argstr='--tcustom=%s', + desc=('slice timings, in fractions of TR, range 0:1 (default is 0.5 = ' + 'no shift)')) + global_shift = traits.Float( + argstr='--tglobal', + desc='shift in fraction of TR, range 0:1 (default is 0.5 = no shift)') + custom_order = File( + exists=True, + argstr='--ocustom=%s', + desc=('filename of single-column custom interleave order file (first ' + 'slice is referred to as 1 not 0)')) + + +class SliceTimerOutputSpec(TraitedSpec): + slice_time_corrected_file = File( + exists=True, desc='slice time corrected file') + + +class SliceTimer(FSLCommand): + """FSL slicetimer wrapper to perform slice timing correction + + Examples + -------- + >>> from nipype.interfaces import fsl + >>> from nipype.testing import example_data + >>> st = fsl.SliceTimer() + >>> st.inputs.in_file = example_data('functional.nii') + >>> st.inputs.interleaved = True + >>> result = st.run() #doctest: +SKIP + + """ + + _cmd = 'slicetimer' + input_spec = SliceTimerInputSpec + output_spec = SliceTimerOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + out_file = self._gen_fname(self.inputs.in_file, suffix='_st') + outputs['slice_time_corrected_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['slice_time_corrected_file'] + return None + + +class SUSANInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=1, + desc='filename of input timeseries') + brightness_threshold = traits.Float( + argstr='%.10f', + position=2, + mandatory=True, + desc=('brightness threshold and should be greater than noise level ' + 'and less than contrast of edges to be preserved.')) + fwhm = traits.Float( + argstr='%.10f', + position=3, + mandatory=True, + desc='fwhm of smoothing, in mm, gets converted using sqrt(8*log(2))') + dimension = traits.Enum( + 3, + 2, + argstr='%d', + position=4, + usedefault=True, + desc='within-plane (2) or fully 3D (3)') + use_median = traits.Enum( + 1, + 0, + argstr='%d', + position=5, + usedefault=True, + desc=('whether to use a local median filter in the cases where ' + 'single-point noise is detected')) + usans = traits.List( + traits.Tuple(File(exists=True), traits.Float), + maxlen=2, + argstr='', + position=6, + usedefault=True, + desc='determines whether the smoothing area (USAN) is to be ' + 'found from secondary images (0, 1 or 2). A negative ' + 'value for any brightness threshold will auto-set the ' + 'threshold at 10% of the robust range') + out_file = File( + argstr='%s', + position=-1, + genfile=True, + desc='output file name', + hash_files=False) + + +class SUSANOutputSpec(TraitedSpec): + smoothed_file = File(exists=True, desc='smoothed output file') + + +class SUSAN(FSLCommand): + """FSL SUSAN wrapper to perform smoothing + + For complete details, see the `SUSAN Documentation. 
+ `_ + + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> from nipype.testing import example_data + >>> anatfile # doctest: +SKIP + anatomical.nii # doctest: +SKIP + >>> sus = fsl.SUSAN() + >>> sus.inputs.in_file = example_data('structural.nii') + >>> sus.inputs.brightness_threshold = 2000.0 + >>> sus.inputs.fwhm = 8.0 + >>> result = sus.run() # doctest: +SKIP + """ + + _cmd = 'susan' + input_spec = SUSANInputSpec + output_spec = SUSANOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'fwhm': + return spec.argstr % (float(value) / np.sqrt(8 * np.log(2))) + if name == 'usans': + if not value: + return '0' + arglist = [str(len(value))] + for filename, thresh in value: + arglist.extend([filename, '%.10f' % thresh]) + return ' '.join(arglist) + return super(SUSAN, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + out_file = self._gen_fname(self.inputs.in_file, suffix='_smooth') + outputs['smoothed_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['smoothed_file'] + return None + + +class FUGUEInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, argstr='--in=%s', desc='filename of input volume') + shift_in_file = File( + exists=True, + argstr='--loadshift=%s', + desc='filename for reading pixel shift volume') + phasemap_in_file = File( + exists=True, + argstr='--phasemap=%s', + desc='filename for input phase image') + fmap_in_file = File( + exists=True, + argstr='--loadfmap=%s', + desc='filename for loading fieldmap (rad/s)') + unwarped_file = File( + argstr='--unwarp=%s', + desc='apply unwarping and save as filename', + xor=['warped_file'], + requires=['in_file']) + warped_file = File( + argstr='--warp=%s', + desc='apply forward warping and save as filename', + xor=['unwarped_file'], + requires=['in_file']) + forward_warping = traits.Bool( + False, + usedefault=True, + desc='apply forward warping instead of unwarping') + dwell_to_asym_ratio = traits.Float( + argstr='--dwelltoasym=%.10f', desc='set the dwell to asym time ratio') + dwell_time = traits.Float( + argstr='--dwell=%.10f', + desc=('set the EPI dwell time per phase-encode line - same as echo ' + 'spacing - (sec)')) + asym_se_time = traits.Float( + argstr='--asym=%.10f', + desc='set the fieldmap asymmetric spin echo time (sec)') + median_2dfilter = traits.Bool( + argstr='--median', desc='apply 2D median filtering') + despike_2dfilter = traits.Bool( + argstr='--despike', desc='apply a 2D de-spiking filter') + no_gap_fill = traits.Bool( + argstr='--nofill', + desc='do not apply gap-filling measure to the fieldmap') + no_extend = traits.Bool( + argstr='--noextend', + desc='do not apply rigid-body extrapolation to the fieldmap') + smooth2d = traits.Float( + argstr='--smooth2=%.2f', + desc='apply 2D Gaussian smoothing of sigma N (in mm)') + smooth3d = traits.Float( + argstr='--smooth3=%.2f', + desc='apply 3D Gaussian smoothing of sigma N (in mm)') + poly_order = traits.Int( + argstr='--poly=%d', desc='apply polynomial fitting of order N') + fourier_order = traits.Int( + argstr='--fourier=%d', + desc='apply Fourier (sinusoidal) fitting of order N') + pava = traits.Bool( + argstr='--pava', desc='apply monotonic enforcement via PAVA') + despike_threshold = traits.Float( + argstr='--despikethreshold=%s', + desc='specify the threshold for de-spiking (default=3.0)') + unwarp_direction = 
traits.Enum( + 'x', + 'y', + 'z', + 'x-', + 'y-', + 'z-', + argstr='--unwarpdir=%s', + desc='specifies direction of warping (default y)') + phase_conjugate = traits.Bool( + argstr='--phaseconj', desc='apply phase conjugate method of unwarping') + icorr = traits.Bool( + argstr='--icorr', + requires=['shift_in_file'], + desc=('apply intensity correction to unwarping (pixel shift method ' + 'only)')) + icorr_only = traits.Bool( + argstr='--icorronly', + requires=['unwarped_file'], + desc='apply intensity correction only') + mask_file = File( + exists=True, + argstr='--mask=%s', + desc='filename for loading valid mask') + nokspace = traits.Bool( + False, argstr='--nokspace', desc='do not use k-space forward warping') + + # Special outputs: shift (voxel shift map, vsm) + save_shift = traits.Bool( + False, xor=['save_unmasked_shift'], desc='write pixel shift volume') + shift_out_file = File( + argstr='--saveshift=%s', desc='filename for saving pixel shift volume') + save_unmasked_shift = traits.Bool( + argstr='--unmaskshift', + xor=['save_shift'], + desc='saves the unmasked shiftmap when using --saveshift') + + # Special outputs: fieldmap (fmap) + save_fmap = traits.Bool( + False, xor=['save_unmasked_fmap'], desc='write field map volume') + fmap_out_file = File( + argstr='--savefmap=%s', desc='filename for saving fieldmap (rad/s)') + save_unmasked_fmap = traits.Bool( + False, + argstr='--unmaskfmap', + xor=['save_fmap'], + desc='saves the unmasked fieldmap when using --savefmap') + + +class FUGUEOutputSpec(TraitedSpec): + unwarped_file = File(desc='unwarped file') + warped_file = File(desc='forward warped file') + shift_out_file = File(desc='voxel shift map file') + fmap_out_file = File(desc='fieldmap file') + + +class FUGUE(FSLCommand): + """FSL FUGUE set of tools for EPI distortion correction + + `FUGUE `_ is, most generally, + a set of tools for EPI distortion correction. + + Distortions may be corrected for + 1. improving registration with non-distorted images (e.g. structurals), + or + 2. dealing with motion-dependent changes. + + FUGUE is designed to deal only with the first case - + improving registration. 
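+    (Editor's note, stated as a rule of thumb rather than from the FUGUE
+    docs: for a fieldmap given in rad/s, the expected voxel shift along the
+    phase-encode direction is approximately
+    fieldmap * dwell_time / (2 * pi) voxels.)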
+ + + Examples + -------- + + + Unwarping an input image (shift map is known): + + >>> from nipype.interfaces.fsl.preprocess import FUGUE + >>> fugue = FUGUE() + >>> fugue.inputs.in_file = 'epi.nii' + >>> fugue.inputs.mask_file = 'epi_mask.nii' + >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well + >>> fugue.inputs.unwarp_direction = 'y' + >>> fugue.inputs.output_type = "NIFTI_GZ" + >>> fugue.cmdline # doctest: +ELLIPSIS + 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz' + >>> fugue.run() #doctest: +SKIP + + + Warping an input image (shift map is known): + + >>> from nipype.interfaces.fsl.preprocess import FUGUE + >>> fugue = FUGUE() + >>> fugue.inputs.in_file = 'epi.nii' + >>> fugue.inputs.forward_warping = True + >>> fugue.inputs.mask_file = 'epi_mask.nii' + >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well + >>> fugue.inputs.unwarp_direction = 'y' + >>> fugue.inputs.output_type = "NIFTI_GZ" + >>> fugue.cmdline # doctest: +ELLIPSIS + 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz' + >>> fugue.run() #doctest: +SKIP + + + Computing the vsm (unwrapped phase map is known): + + >>> from nipype.interfaces.fsl.preprocess import FUGUE + >>> fugue = FUGUE() + >>> fugue.inputs.phasemap_in_file = 'epi_phasediff.nii' + >>> fugue.inputs.mask_file = 'epi_mask.nii' + >>> fugue.inputs.dwell_to_asym_ratio = (0.77e-3 * 3) / 2.46e-3 + >>> fugue.inputs.unwarp_direction = 'y' + >>> fugue.inputs.save_shift = True + >>> fugue.inputs.output_type = "NIFTI_GZ" + >>> fugue.cmdline # doctest: +ELLIPSIS + 'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y' + >>> fugue.run() #doctest: +SKIP + + + """ + + _cmd = 'fugue' + input_spec = FUGUEInputSpec + output_spec = FUGUEOutputSpec + + def _parse_inputs(self, skip=None): + if skip is None: + skip = [] + + input_phase = isdefined(self.inputs.phasemap_in_file) + input_vsm = isdefined(self.inputs.shift_in_file) + input_fmap = isdefined(self.inputs.fmap_in_file) + + if not input_phase and not input_vsm and not input_fmap: + raise RuntimeError( + ('Either phasemap_in_file, shift_in_file or fmap_in_file must ' + 'be set.')) + + if not isdefined(self.inputs.in_file): + skip += ['unwarped_file', 'warped_file'] + else: + if self.inputs.forward_warping: + skip += ['unwarped_file'] + trait_spec = self.inputs.trait('warped_file') + trait_spec.name_template = "%s_warped" + trait_spec.name_source = 'in_file' + trait_spec.output_name = 'warped_file' + else: + skip += ['warped_file'] + trait_spec = self.inputs.trait('unwarped_file') + trait_spec.name_template = "%s_unwarped" + trait_spec.name_source = 'in_file' + trait_spec.output_name = 'unwarped_file' + + # Handle shift output + if not isdefined(self.inputs.shift_out_file): + vsm_save_masked = (isdefined(self.inputs.save_shift) + and self.inputs.save_shift) + vsm_save_unmasked = (isdefined(self.inputs.save_unmasked_shift) + and self.inputs.save_unmasked_shift) + + if (vsm_save_masked or vsm_save_unmasked): + trait_spec = self.inputs.trait('shift_out_file') + trait_spec.output_name = 'shift_out_file' + + if input_fmap: + trait_spec.name_source = 'fmap_in_file' + elif input_phase: + trait_spec.name_source = 'phasemap_in_file' + elif input_vsm: + trait_spec.name_source = 'shift_in_file' + else: + raise RuntimeError( + ('Either phasemap_in_file, shift_in_file or ' + 'fmap_in_file must 
be set.'))
+
+                if vsm_save_unmasked:
+                    trait_spec.name_template = '%s_vsm_unmasked'
+                else:
+                    trait_spec.name_template = '%s_vsm'
+        else:
+            skip += ['save_shift', 'save_unmasked_shift', 'shift_out_file']
+
+        # Handle fieldmap output
+        if not isdefined(self.inputs.fmap_out_file):
+            fmap_save_masked = (isdefined(self.inputs.save_fmap)
+                                and self.inputs.save_fmap)
+            fmap_save_unmasked = (isdefined(self.inputs.save_unmasked_fmap)
+                                  and self.inputs.save_unmasked_fmap)
+
+            if (fmap_save_masked or fmap_save_unmasked):
+                trait_spec = self.inputs.trait('fmap_out_file')
+                trait_spec.output_name = 'fmap_out_file'
+
+                if input_vsm:
+                    trait_spec.name_source = 'shift_in_file'
+                elif input_phase:
+                    trait_spec.name_source = 'phasemap_in_file'
+                elif input_fmap:
+                    trait_spec.name_source = 'fmap_in_file'
+                else:
+                    raise RuntimeError(
+                        ('Either phasemap_in_file, shift_in_file or '
+                         'fmap_in_file must be set.'))
+
+                if fmap_save_unmasked:
+                    trait_spec.name_template = '%s_fieldmap_unmasked'
+                else:
+                    trait_spec.name_template = '%s_fieldmap'
+        else:
+            skip += ['save_fmap', 'save_unmasked_fmap', 'fmap_out_file']
+
+        return super(FUGUE, self)._parse_inputs(skip=skip)
+
+
+class PRELUDEInputSpec(FSLCommandInputSpec):
+    complex_phase_file = File(
+        exists=True,
+        argstr='--complex=%s',
+        mandatory=True,
+        xor=['magnitude_file', 'phase_file'],
+        desc='complex phase input volume')
+    magnitude_file = File(
+        exists=True,
+        argstr='--abs=%s',
+        mandatory=True,
+        xor=['complex_phase_file'],
+        desc='file containing magnitude image')
+    phase_file = File(
+        exists=True,
+        argstr='--phase=%s',
+        mandatory=True,
+        xor=['complex_phase_file'],
+        desc='raw phase file')
+    unwrapped_phase_file = File(
+        genfile=True,
+        argstr='--unwrap=%s',
+        desc='file containing unwrapped phase',
+        hash_files=False)
+    num_partitions = traits.Int(
+        argstr='--numphasesplit=%d', desc='number of phase partitions to use')
+    labelprocess2d = traits.Bool(
+        argstr='--labelslices',
+        desc='does label processing in 2D (slice at a time)')
+    process2d = traits.Bool(
+        argstr='--slices',
+        xor=['labelprocess2d'],
+        desc='does all processing in 2D (slice at a time)')
+    process3d = traits.Bool(
+        argstr='--force3D',
+        xor=['labelprocess2d', 'process2d'],
+        desc='forces all processing to be full 3D')
+    threshold = traits.Float(
+        argstr='--thresh=%.10f', desc='intensity threshold for masking')
+    mask_file = File(
+        exists=True, argstr='--mask=%s', desc='filename of mask input volume')
+    start = traits.Int(
+        argstr='--start=%d', desc='first image number to process (default 0)')
+    end = traits.Int(
+        argstr='--end=%d', desc='final image number to process (default Inf)')
+    savemask_file = File(
+        argstr='--savemask=%s',
+        desc='saving the mask volume',
+        hash_files=False)
+    rawphase_file = File(
+        argstr='--rawphase=%s',
+        desc='saving the raw phase output',
+        hash_files=False)
+    label_file = File(
+        argstr='--labels=%s',
+        desc='saving the area labels output',
+        hash_files=False)
+    removeramps = traits.Bool(
+        argstr='--removeramps', desc='remove phase ramps during unwrapping')
+
+
+class PRELUDEOutputSpec(TraitedSpec):
+    unwrapped_phase_file = File(exists=True, desc='unwrapped phase file')
+
+
+class PRELUDE(FSLCommand):
+    """FSL prelude wrapper for phase unwrapping
+
+    Examples
+    --------
+
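+    A minimal usage sketch (filenames are illustrative; this interface has
+    not been fully tested, so treat the example as a starting point):
+
+    >>> from nipype.interfaces.fsl.preprocess import PRELUDE
+    >>> unwrap = PRELUDE()
+    >>> unwrap.inputs.phase_file = 'phase.nii'
+    >>> unwrap.inputs.magnitude_file = 'magnitude.nii'
+    >>> res = unwrap.run()  # doctest: +SKIP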
+
+    """
+    input_spec = PRELUDEInputSpec
+    output_spec = PRELUDEOutputSpec
+    _cmd = 'prelude'
+
+    def __init__(self, **kwargs):
+        super(PRELUDE, self).__init__(**kwargs)
+        warn('This has not been fully tested. Please report any failures.')
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        out_file = self.inputs.unwrapped_phase_file
+        if not isdefined(out_file):
+            if isdefined(self.inputs.phase_file):
+                out_file = self._gen_fname(
+                    self.inputs.phase_file, suffix='_unwrapped')
+            elif isdefined(self.inputs.complex_phase_file):
+                out_file = self._gen_fname(
+                    self.inputs.complex_phase_file, suffix='_phase_unwrapped')
+        outputs['unwrapped_phase_file'] = os.path.abspath(out_file)
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'unwrapped_phase_file':
+            return self._list_outputs()['unwrapped_phase_file']
+        return None
+
+
+class FIRSTInputSpec(FSLCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        position=-2,
+        copyfile=False,
+        argstr='-i %s',
+        desc='input data file')
+    out_file = File(
+        'segmented',
+        usedefault=True,
+        mandatory=True,
+        position=-1,
+        argstr='-o %s',
+        desc='output data file',
+        hash_files=False)
+    verbose = traits.Bool(argstr='-v', position=1, desc="Use verbose logging.")
+    brain_extracted = traits.Bool(
+        argstr='-b',
+        position=2,
+        desc="Input structural image is already brain-extracted")
+    no_cleanup = traits.Bool(
+        argstr='-d',
+        position=3,
+        desc="Do not remove intermediate image output files (useful for "
+        "debugging)")
+    method = traits.Enum(
+        'auto',
+        'fast',
+        'none',
+        xor=['method_as_numerical_threshold'],
+        argstr='-m %s',
+        position=4,
+        usedefault=True,
+        desc=("Method must be one of auto, fast, none, or it can be entered "
+              "using the 'method_as_numerical_threshold' input"))
+    method_as_numerical_threshold = traits.Float(
+        argstr='-m %.4f',
+        position=4,
+        desc=("Specify a numerical threshold value or use the 'method' input "
+              "to choose auto, fast, or none"))
+    list_of_specific_structures = traits.List(
+        traits.Str,
+        argstr='-s %s',
+        sep=',',
+        position=5,
+        minlen=1,
+        desc='Runs only on the specified structures (e.g. L_Hipp, R_Hipp, '
+        'L_Accu, R_Accu, L_Amyg, R_Amyg, '
+        'L_Caud, R_Caud, L_Pall, R_Pall, '
+        'L_Puta, R_Puta, L_Thal, R_Thal, BrStem)')
+    affine_file = File(
+        exists=True,
+        position=6,
+        argstr='-a %s',
+        desc=('Affine matrix to use (e.g. img2std.mat) (does not '
+              're-run registration)'))
+
+
+class FIRSTOutputSpec(TraitedSpec):
+    vtk_surfaces = OutputMultiPath(
+        File(exists=True),
+        desc='VTK format meshes for each subcortical region')
+    bvars = OutputMultiPath(
+        File(exists=True), desc='bvars for each subcortical region')
+    original_segmentations = File(
+        exists=True,
+        desc=('3D image file containing the segmented regions '
+              'as integer values. 
Uses CMA labelling')) + segmentation_file = File( + exists=True, + desc=('4D image file containing a single volume per ' + 'segmented region')) + + +class FIRST(FSLCommand): + """FSL run_first_all wrapper for segmentation of subcortical volumes + + http://www.fmrib.ox.ac.uk/fsl/first/index.html + + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> first = fsl.FIRST() + >>> first.inputs.in_file = 'structural.nii' + >>> first.inputs.out_file = 'segmented.nii' + >>> res = first.run() #doctest: +SKIP + + """ + + _cmd = 'run_first_all' + input_spec = FIRSTInputSpec + output_spec = FIRSTOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + if isdefined(self.inputs.list_of_specific_structures): + structures = self.inputs.list_of_specific_structures + else: + structures = [ + 'L_Hipp', 'R_Hipp', 'L_Accu', 'R_Accu', 'L_Amyg', 'R_Amyg', + 'L_Caud', 'R_Caud', 'L_Pall', 'R_Pall', 'L_Puta', 'R_Puta', + 'L_Thal', 'R_Thal', 'BrStem' + ] + outputs['original_segmentations'] = \ + self._gen_fname('original_segmentations') + outputs['segmentation_file'] = self._gen_fname('segmentation_file') + outputs['vtk_surfaces'] = self._gen_mesh_names('vtk_surfaces', + structures) + outputs['bvars'] = self._gen_mesh_names('bvars', structures) + return outputs + + def _gen_fname(self, basename): + path, outname, ext = split_filename(self.inputs.out_file) + + method = 'none' + if isdefined(self.inputs.method) and self.inputs.method != 'none': + method = 'fast' + if (self.inputs.list_of_specific_structures + and self.inputs.method == 'auto'): + method = 'none' + + if isdefined(self.inputs.method_as_numerical_threshold): + thres = '%.4f' % self.inputs.method_as_numerical_threshold + method = thres.replace('.', '') + + if basename == 'original_segmentations': + return op.abspath('%s_all_%s_origsegs.nii.gz' % (outname, method)) + if basename == 'segmentation_file': + return op.abspath('%s_all_%s_firstseg.nii.gz' % (outname, method)) + + return None + + def _gen_mesh_names(self, name, structures): + path, prefix, ext = split_filename(self.inputs.out_file) + if name == 'vtk_surfaces': + vtks = list() + for struct in structures: + vtk = prefix + '-' + struct + '_first.vtk' + vtks.append(op.abspath(vtk)) + return vtks + if name == 'bvars': + bvars = list() + for struct in structures: + bvar = prefix + '-' + struct + '_first.bvars' + bvars.append(op.abspath(bvar)) + return bvars + return None diff --git a/nipype/interfaces/fsl/tests/__init__.py b/nipype/interfaces/fsl/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/fsl/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/fsl/tests/test_FILMGLS.py b/nipype/interfaces/fsl/tests/test_FILMGLS.py new file mode 100644 index 0000000000..a37fc1b116 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_FILMGLS.py @@ -0,0 +1,129 @@ +# -*- coding: utf-8 -*- +from nipype.interfaces.fsl.model import FILMGLS, FILMGLSInputSpec + + +def test_filmgls(): + input_map = dict( + args=dict(argstr='%s', ), + autocorr_estimate_only=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='-ac', + ), + autocorr_noestimate=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='-noest', + ), + brightness_threshold=dict(argstr='-epith %d', ), + design_file=dict(argstr='%s', ), + 
environ=dict(usedefault=True, ), + fit_armodel=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='-ar', + ), + full_data=dict(argstr='-v', ), + in_file=dict( + mandatory=True, + argstr='%s', + ), + mask_size=dict(argstr='-ms %d', ), + multitaper_product=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='-mt %d', + ), + output_pwdata=dict(argstr='-output_pwdata', ), + output_type=dict(), + results_dir=dict( + usedefault=True, + argstr='-rn %s', + ), + smooth_autocorr=dict(argstr='-sa', ), + threshold=dict(argstr='%f', ), + tukey_window=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='-tukey %d', + ), + use_pava=dict(argstr='-pava', ), + ) + input_map2 = dict( + args=dict(argstr='%s', ), + autocorr_estimate_only=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='--ac', + ), + autocorr_noestimate=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='--noest', + ), + brightness_threshold=dict(argstr='--epith=%d', ), + design_file=dict(argstr='--pd=%s', ), + environ=dict(usedefault=True, ), + fit_armodel=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='--ar', + ), + full_data=dict(argstr='-v', ), + in_file=dict( + mandatory=True, + argstr='--in=%s', + ), + mask_size=dict(argstr='--ms=%d', ), + multitaper_product=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='--mt=%d', + ), + output_pwdata=dict(argstr='--outputPWdata', ), + output_type=dict(), + results_dir=dict( + argstr='--rn=%s', + usedefault=True, + ), + smooth_autocorr=dict(argstr='--sa', ), + threshold=dict( + usedefault=True, + argstr='--thr=%f', + ), + tukey_window=dict( + xor=[ + 'autocorr_estimate_only', 'fit_armodel', 'tukey_window', + 'multitaper_product', 'use_pava', 'autocorr_noestimate' + ], + argstr='--tukey=%d', + ), + use_pava=dict(argstr='--pava', ), + ) + instance = FILMGLS() + if isinstance(instance.inputs, FILMGLSInputSpec): + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(instance.inputs.traits()[key], metakey) == value + else: + for key, metadata in list(input_map2.items()): + for metakey, value in list(metadata.items()): + assert getattr(instance.inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_Level1Design_functions.py b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py new file mode 100644 index 0000000000..44c04d41d5 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_Level1Design_functions.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +import os +from ...base import Undefined +from ..model import Level1Design + + +def test_level1design(tmpdir): + old = tmpdir.chdir() + l = Level1Design() + runinfo = dict( + cond=[{ + 'name': 'test_condition', + 'onset': [0, 10], + 'duration': [10, 10] + }], + regress=[]) + runidx = 0 + contrasts = Undefined + do_tempfilter = False + orthogonalization = {} + basic_ev_parameters = {'temporalderiv': False} + 
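# Each tuple is (basis-function key, expected FEAT fmri(convolveN) code,
+    # EV parameters); editor's note: the codes used below (custom=7, hrf=3,
+    # dgamma=3, gamma=2, none=0) mirror FEAT's convolution menu and are what
+    # _create_ev_files should write into the generated design.
+    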
convolution_variants = [('custom', 7, { + 'temporalderiv': False, + 'bfcustompath': '/some/path' + }), ('hrf', 3, basic_ev_parameters), ('dgamma', 3, basic_ev_parameters), + ('gamma', 2, basic_ev_parameters), + ('none', 0, basic_ev_parameters)] + for key, val, ev_parameters in convolution_variants: + output_num, output_txt = Level1Design._create_ev_files( + l, os.getcwd(), runinfo, runidx, ev_parameters, orthogonalization, + contrasts, do_tempfilter, key) + assert "set fmri(convolve1) {0}".format(val) in output_txt diff --git a/nipype/interfaces/fsl/tests/test_auto_AR1Image.py b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py new file mode 100644 index 0000000000..32efb1826e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_AR1Image.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import AR1Image + + +def test_AR1Image_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%sar1', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = AR1Image.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AR1Image_outputs(): + output_map = dict(out_file=dict(), ) + outputs = AR1Image.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py new file mode 100644 index 0000000000..062d6367f7 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_AccuracyTester.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import AccuracyTester + + +def test_AccuracyTester_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_icas=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=3, + ), + output_directory=dict( + argstr='%s', + mandatory=True, + position=2, + ), + trained_wts_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + ) + inputs = AccuracyTester.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AccuracyTester_outputs(): + output_map = dict( + output_directory=dict( + argstr='%s', + position=1, + ), ) + outputs = AccuracyTester.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py new file mode 100644 index 0000000000..8a39956c18 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyMask.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import 
unicode_literals +from ..maths import ApplyMask + + +def test_ApplyMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + mask_file=dict( + argstr='-mas %s', + mandatory=True, + position=4, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = ApplyMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyMask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ApplyMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py new file mode 100644 index 0000000000..7a803f9695 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import ApplyTOPUP + + +def test_ApplyTOPUP_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + datatype=dict(argstr='-d=%s', ), + encoding_file=dict( + argstr='--datain=%s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='--imain=%s', + mandatory=True, + sep=',', + ), + in_index=dict( + argstr='--inindex=%s', + sep=',', + ), + in_topup_fieldcoef=dict( + argstr='--topup=%s', + copyfile=False, + requires=['in_topup_movpar'], + ), + in_topup_movpar=dict( + copyfile=False, + requires=['in_topup_fieldcoef'], + ), + interp=dict(argstr='--interp=%s', ), + method=dict(argstr='--method=%s', ), + out_corrected=dict( + argstr='--out=%s', + name_source=['in_files'], + name_template='%s_corrected', + ), + output_type=dict(), + ) + inputs = ApplyTOPUP.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyTOPUP_outputs(): + output_map = dict(out_corrected=dict(), ) + outputs = ApplyTOPUP.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py new file mode 100644 index 0000000000..71977cb873 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyWarp.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ApplyWarp + + +def test_ApplyWarp_inputs(): + input_map = dict( + abswarp=dict( + argstr='--abs', + xor=['relwarp'], + ), + args=dict(argstr='%s', ), + datatype=dict(argstr='--datatype=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict(argstr='--warp=%s', ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + position=0, + ), + interp=dict( + argstr='--interp=%s', + position=-2, + ), + mask_file=dict(argstr='--mask=%s', ), + out_file=dict( + 
argstr='--out=%s', + genfile=True, + hash_files=False, + position=2, + ), + output_type=dict(), + postmat=dict(argstr='--postmat=%s', ), + premat=dict(argstr='--premat=%s', ), + ref_file=dict( + argstr='--ref=%s', + mandatory=True, + position=1, + ), + relwarp=dict( + argstr='--rel', + position=-1, + xor=['abswarp'], + ), + superlevel=dict(argstr='--superlevel=%s', ), + supersample=dict(argstr='--super', ), + ) + inputs = ApplyWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyWarp_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ApplyWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py new file mode 100644 index 0000000000..f515be1f04 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ApplyXFM.py @@ -0,0 +1,160 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ApplyXFM + + +def test_ApplyXFM_inputs(): + input_map = dict( + angle_rep=dict(argstr='-anglerep %s', ), + apply_isoxfm=dict( + argstr='-applyisoxfm %f', + xor=['apply_xfm'], + ), + apply_xfm=dict( + argstr='-applyxfm', + usedefault=True, + ), + args=dict(argstr='%s', ), + bbrslope=dict( + argstr='-bbrslope %f', + min_ver='5.0.0', + ), + bbrtype=dict( + argstr='-bbrtype %s', + min_ver='5.0.0', + ), + bgvalue=dict(argstr='-setbackground %f', ), + bins=dict(argstr='-bins %d', ), + coarse_search=dict( + argstr='-coarsesearch %d', + units='degrees', + ), + cost=dict(argstr='-cost %s', ), + cost_func=dict(argstr='-searchcost %s', ), + datatype=dict(argstr='-datatype %s', ), + display_init=dict(argstr='-displayinit', ), + dof=dict(argstr='-dof %d', ), + echospacing=dict( + argstr='-echospacing %f', + min_ver='5.0.0', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr='-fieldmap %s', + min_ver='5.0.0', + ), + fieldmapmask=dict( + argstr='-fieldmapmask %s', + min_ver='5.0.0', + ), + fine_search=dict( + argstr='-finesearch %d', + units='degrees', + ), + force_scaling=dict(argstr='-forcescaling', ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=0, + ), + in_matrix_file=dict(argstr='-init %s', ), + in_weight=dict(argstr='-inweight %s', ), + interp=dict(argstr='-interp %s', ), + min_sampling=dict( + argstr='-minsampling %f', + units='mm', + ), + no_clamp=dict(argstr='-noclamp', ), + no_resample=dict(argstr='-noresample', ), + no_resample_blur=dict(argstr='-noresampblur', ), + no_search=dict(argstr='-nosearch', ), + out_file=dict( + argstr='-out %s', + hash_files=False, + name_source=['in_file'], + name_template='%s_flirt', + position=2, + ), + out_log=dict( + keep_extension=True, + name_source=['in_file'], + name_template='%s_flirt.log', + requires=['save_log'], + ), + out_matrix_file=dict( + argstr='-omat %s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_flirt.mat', + position=3, + ), + output_type=dict(), + padding_size=dict( + argstr='-paddingsize %d', + units='voxels', + ), + pedir=dict( + argstr='-pedir %d', + min_ver='5.0.0', + ), + ref_weight=dict(argstr='-refweight %s', ), + reference=dict( + argstr='-ref %s', + mandatory=True, + position=1, + ), + rigid2D=dict(argstr='-2D', ), + 
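+        # save_log carries no argstr: it maps to no FLIRT flag and only
+        # gates out_log above (note requires=['save_log']).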
save_log=dict(), + schedule=dict(argstr='-schedule %s', ), + searchr_x=dict( + argstr='-searchrx %s', + units='degrees', + ), + searchr_y=dict( + argstr='-searchry %s', + units='degrees', + ), + searchr_z=dict( + argstr='-searchrz %s', + units='degrees', + ), + sinc_width=dict( + argstr='-sincwidth %d', + units='voxels', + ), + sinc_window=dict(argstr='-sincwindow %s', ), + uses_qform=dict(argstr='-usesqform', ), + verbose=dict(argstr='-verbose %d', ), + wm_seg=dict( + argstr='-wmseg %s', + min_ver='5.0.0', + ), + wmcoords=dict( + argstr='-wmcoords %s', + min_ver='5.0.0', + ), + wmnorms=dict( + argstr='-wmnorms %s', + min_ver='5.0.0', + ), + ) + inputs = ApplyXFM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyXFM_outputs(): + output_map = dict( + out_file=dict(), + out_log=dict(), + out_matrix_file=dict(), + ) + outputs = ApplyXFM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_AvScale.py b/nipype/interfaces/fsl/tests/test_auto_AvScale.py new file mode 100644 index 0000000000..762ada916d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_AvScale.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import AvScale + + +def test_AvScale_inputs(): + input_map = dict( + all_param=dict(argstr='--allparams', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + mat_file=dict( + argstr='%s', + position=-2, + ), + ref_file=dict( + argstr='%s', + position=-1, + ), + ) + inputs = AvScale.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AvScale_outputs(): + output_map = dict( + average_scaling=dict(), + backward_half_transform=dict(), + determinant=dict(), + forward_half_transform=dict(), + left_right_orientation_preserved=dict(), + rot_angles=dict(), + rotation_translation_matrix=dict(), + scales=dict(), + skews=dict(), + translations=dict(), + ) + outputs = AvScale.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_B0Calc.py b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py new file mode 100644 index 0000000000..96ec6b949c --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_B0Calc.py @@ -0,0 +1,89 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..possum import B0Calc + + +def test_B0Calc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + chi_air=dict( + argstr='--chi0=%e', + usedefault=True, + ), + compute_xyz=dict( + argstr='--xyz', + usedefault=True, + ), + delta=dict( + argstr='-d %e', + usedefault=True, + ), + directconv=dict( + argstr='--directconv', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + extendboundary=dict( + argstr='--extendboundary=%0.2f', + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=0, + ), + out_file=dict( + argstr='-o %s', + name_source=['in_file'], + name_template='%s_b0field', + output_name='out_file', + position=1, + 
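+            # derived from in_file via name_template, so callers rarely
+            # need to set out_file explicitly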
), + output_type=dict(), + x_b0=dict( + argstr='--b0x=%0.2f', + usedefault=True, + xor=['xyz_b0'], + ), + x_grad=dict( + argstr='--gx=%0.4f', + usedefault=True, + ), + xyz_b0=dict( + argstr='--b0x=%0.2f --b0y=%0.2f --b0=%0.2f', + xor=['x_b0', 'y_b0', 'z_b0'], + ), + y_b0=dict( + argstr='--b0y=%0.2f', + usedefault=True, + xor=['xyz_b0'], + ), + y_grad=dict( + argstr='--gy=%0.4f', + usedefault=True, + ), + z_b0=dict( + argstr='--b0=%0.2f', + usedefault=True, + xor=['xyz_b0'], + ), + z_grad=dict( + argstr='--gz=%0.4f', + usedefault=True, + ), + ) + inputs = B0Calc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_B0Calc_outputs(): + output_map = dict(out_file=dict(), ) + outputs = B0Calc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py new file mode 100644 index 0000000000..1ac5db111b --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_BEDPOSTX5.py @@ -0,0 +1,112 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import BEDPOSTX5 + + +def test_BEDPOSTX5_inputs(): + input_map = dict( + all_ard=dict( + argstr='--allard', + xor=('no_ard', 'all_ard'), + ), + args=dict(argstr='%s', ), + burn_in=dict( + argstr='-b %d', + usedefault=True, + ), + burn_in_no_ard=dict( + argstr='--burnin_noard=%d', + usedefault=True, + ), + bvals=dict(mandatory=True, ), + bvecs=dict(mandatory=True, ), + cnlinear=dict( + argstr='--cnonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + dwi=dict(mandatory=True, ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr='--f0 --ardf0', + xor=['f0_noard', 'f0_ard', 'all_ard'], + ), + f0_noard=dict( + argstr='--f0', + xor=['f0_noard', 'f0_ard'], + ), + force_dir=dict( + argstr='--forcedir', + usedefault=True, + ), + fudge=dict(argstr='-w %d', ), + grad_dev=dict(), + gradnonlin=dict(argstr='-g', ), + logdir=dict(argstr='--logdir=%s', ), + mask=dict(mandatory=True, ), + model=dict(argstr='-model %d', ), + n_fibres=dict( + argstr='-n %d', + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr='-j %d', + usedefault=True, + ), + no_ard=dict( + argstr='--noard', + xor=('no_ard', 'all_ard'), + ), + no_spat=dict( + argstr='--nospat', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + non_linear=dict( + argstr='--nonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + out_dir=dict( + argstr='%s', + mandatory=True, + position=1, + usedefault=True, + ), + output_type=dict(), + rician=dict(argstr='--rician', ), + sample_every=dict( + argstr='-s %d', + usedefault=True, + ), + seed=dict(argstr='--seed=%d', ), + update_proposal_every=dict( + argstr='--updateproposalevery=%d', + usedefault=True, + ), + use_gpu=dict(), + ) + inputs = BEDPOSTX5.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BEDPOSTX5_outputs(): + output_map = dict( + dyads=dict(), + dyads_dispersion=dict(), + mean_S0samples=dict(), + mean_dsamples=dict(), + mean_fsamples=dict(), + mean_phsamples=dict(), + mean_thsamples=dict(), + merged_fsamples=dict(), + merged_phsamples=dict(), + merged_thsamples=dict(), + ) + outputs 
= BEDPOSTX5.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_BET.py b/nipype/interfaces/fsl/tests/test_auto_BET.py new file mode 100644 index 0000000000..eccf348b3e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_BET.py @@ -0,0 +1,100 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import BET + + +def test_BET_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + center=dict( + argstr='-c %s', + units='voxels', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + frac=dict(argstr='-f %.2f', ), + functional=dict( + argstr='-F', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + mask=dict(argstr='-m', ), + mesh=dict(argstr='-e', ), + no_output=dict(argstr='-n', ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=1, + ), + outline=dict(argstr='-o', ), + output_type=dict(), + padding=dict( + argstr='-Z', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + radius=dict( + argstr='-r %d', + units='mm', + ), + reduce_bias=dict( + argstr='-B', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + remove_eyes=dict( + argstr='-S', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + robust=dict( + argstr='-R', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + skull=dict(argstr='-s', ), + surfaces=dict( + argstr='-A', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + t2_guided=dict( + argstr='-A2 %s', + xor=('functional', 'reduce_bias', 'robust', 'padding', + 'remove_eyes', 'surfaces', 't2_guided'), + ), + threshold=dict(argstr='-t', ), + vertical_gradient=dict(argstr='-g %.2f', ), + ) + inputs = BET.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BET_outputs(): + output_map = dict( + inskull_mask_file=dict(), + inskull_mesh_file=dict(), + mask_file=dict(), + meshfile=dict(), + out_file=dict(), + outline_file=dict(), + outskin_mask_file=dict(), + outskin_mesh_file=dict(), + outskull_mask_file=dict(), + outskull_mesh_file=dict(), + skull_mask_file=dict(), + ) + outputs = BET.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py new file mode 100644 index 0000000000..2548ae68e5 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_BinaryMaths.py @@ -0,0 +1,66 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import BinaryMaths + + +def test_BinaryMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), 
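+        # The position values encode the fslmaths call order:
+        #   fslmaths -dt <dtype> <in_file> -nan -<op> <operand> <out> -odt <dtype>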
+ nan2zeros=dict( + argstr='-nan', + position=3, + ), + operand_file=dict( + argstr='%s', + mandatory=True, + position=5, + xor=['operand_value'], + ), + operand_value=dict( + argstr='%.8f', + mandatory=True, + position=5, + xor=['operand_file'], + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = BinaryMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinaryMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BinaryMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py new file mode 100644 index 0000000000..aab508f2bf --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ChangeDataType.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import ChangeDataType + + +def test_ChangeDataType_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + mandatory=True, + position=-1, + ), + output_type=dict(), + ) + inputs = ChangeDataType.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ChangeDataType_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ChangeDataType.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Classifier.py b/nipype/interfaces/fsl/tests/test_auto_Classifier.py new file mode 100644 index 0000000000..a8db888acb --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Classifier.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Classifier + + +def test_Classifier_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + artifacts_list_file=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr='%s', + copyfile=False, + position=1, + ), + thresh=dict( + argstr='%d', + mandatory=True, + position=-1, + ), + trained_wts_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=2, + ), + ) + inputs = Classifier.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Classifier_outputs(): + output_map = dict(artifacts_list_file=dict(), ) + outputs = Classifier.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + 
assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Cleaner.py b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py new file mode 100644 index 0000000000..5fc505a174 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Cleaner.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Cleaner + + +def test_Cleaner_inputs(): + input_map = dict( + aggressive=dict( + argstr='-A', + position=3, + ), + args=dict(argstr='%s', ), + artifacts_list_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + cleanup_motion=dict( + argstr='-m', + position=2, + ), + confound_file=dict( + argstr='-x %s', + position=4, + ), + confound_file_1=dict( + argstr='-x %s', + position=5, + ), + confound_file_2=dict( + argstr='-x %s', + position=6, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass=dict( + argstr='-m -h %f', + position=2, + usedefault=True, + ), + ) + inputs = Cleaner.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Cleaner_outputs(): + output_map = dict(cleaned_functional_file=dict(), ) + outputs = Cleaner.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Cluster.py b/nipype/interfaces/fsl/tests/test_auto_Cluster.py new file mode 100644 index 0000000000..f876f85e64 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Cluster.py @@ -0,0 +1,108 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Cluster + + +def test_Cluster_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + connectivity=dict(argstr='--connectivity=%d', ), + cope_file=dict(argstr='--cope=%s', ), + dlh=dict(argstr='--dlh=%.10f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + find_min=dict( + argstr='--min', + usedefault=True, + ), + fractional=dict( + argstr='--fractional', + usedefault=True, + ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + ), + minclustersize=dict( + argstr='--minclustersize', + usedefault=True, + ), + no_table=dict( + argstr='--no_table', + usedefault=True, + ), + num_maxima=dict(argstr='--num=%d', ), + out_index_file=dict( + argstr='--oindex=%s', + hash_files=False, + ), + out_localmax_txt_file=dict( + argstr='--olmax=%s', + hash_files=False, + ), + out_localmax_vol_file=dict( + argstr='--olmaxim=%s', + hash_files=False, + ), + out_max_file=dict( + argstr='--omax=%s', + hash_files=False, + ), + out_mean_file=dict( + argstr='--omean=%s', + hash_files=False, + ), + out_pval_file=dict( + argstr='--opvals=%s', + hash_files=False, + ), + out_size_file=dict( + argstr='--osize=%s', + hash_files=False, + ), + out_threshold_file=dict( + argstr='--othresh=%s', + hash_files=False, + ), + output_type=dict(), + peak_distance=dict(argstr='--peakdist=%.10f', ), + pthreshold=dict( + argstr='--pthresh=%.10f', + requires=['dlh', 'volume'], + ), + std_space_file=dict(argstr='--stdvol=%s', ), + threshold=dict( + argstr='--thresh=%.10f', + mandatory=True, + ), + use_mm=dict( + argstr='--mm', + usedefault=True, + ), + volume=dict(argstr='--volume=%d', ), + warpfield_file=dict(argstr='--warpvol=%s', ), + xfm_file=dict(argstr='--xfm=%s', ), + ) + inputs = Cluster.input_spec() + + 
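+    # pthreshold is only meaningful together with dlh and volume
+    # (requires=['dlh', 'volume']); the loop below checks that metadata too.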
for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Cluster_outputs(): + output_map = dict( + index_file=dict(), + localmax_txt_file=dict(), + localmax_vol_file=dict(), + max_file=dict(), + mean_file=dict(), + pval_file=dict(), + size_file=dict(), + threshold_file=dict(), + ) + outputs = Cluster.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Complex.py b/nipype/interfaces/fsl/tests/test_auto_Complex.py new file mode 100644 index 0000000000..7e49bcdfa8 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Complex.py @@ -0,0 +1,163 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Complex + + +def test_Complex_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + complex_cartesian=dict( + argstr='-complex', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge' + ], + ), + complex_in_file=dict( + argstr='%s', + position=2, + ), + complex_in_file2=dict( + argstr='%s', + position=3, + ), + complex_merge=dict( + argstr='-complexmerge', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge', 'start_vol', + 'end_vol' + ], + ), + complex_out_file=dict( + argstr='%s', + genfile=True, + position=-3, + xor=[ + 'complex_out_file', 'magnitude_out_file', 'phase_out_file', + 'real_out_file', 'imaginary_out_file', 'real_polar', + 'real_cartesian' + ], + ), + complex_polar=dict( + argstr='-complexpolar', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge' + ], + ), + complex_split=dict( + argstr='-complexsplit', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge' + ], + ), + end_vol=dict( + argstr='%d', + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + imaginary_in_file=dict( + argstr='%s', + position=3, + ), + imaginary_out_file=dict( + argstr='%s', + genfile=True, + position=-3, + xor=[ + 'complex_out_file', 'magnitude_out_file', 'phase_out_file', + 'real_polar', 'complex_cartesian', 'complex_polar', + 'complex_split', 'complex_merge' + ], + ), + magnitude_in_file=dict( + argstr='%s', + position=2, + ), + magnitude_out_file=dict( + argstr='%s', + genfile=True, + position=-4, + xor=[ + 'complex_out_file', 'real_out_file', 'imaginary_out_file', + 'real_cartesian', 'complex_cartesian', 'complex_polar', + 'complex_split', 'complex_merge' + ], + ), + output_type=dict(), + phase_in_file=dict( + argstr='%s', + position=3, + ), + phase_out_file=dict( + argstr='%s', + genfile=True, + position=-3, + xor=[ + 'complex_out_file', 'real_out_file', 'imaginary_out_file', + 'real_cartesian', 'complex_cartesian', 'complex_polar', + 'complex_split', 'complex_merge' + ], + ), + real_cartesian=dict( + argstr='-realcartesian', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge' + ], + ), + real_in_file=dict( + argstr='%s', + position=2, + ), + real_out_file=dict( + argstr='%s', + genfile=True, + position=-4, + xor=[ + 'complex_out_file', 'magnitude_out_file', 
'phase_out_file', + 'real_polar', 'complex_cartesian', 'complex_polar', + 'complex_split', 'complex_merge' + ], + ), + real_polar=dict( + argstr='-realpolar', + position=1, + xor=[ + 'real_polar', 'real_cartesian', 'complex_cartesian', + 'complex_polar', 'complex_split', 'complex_merge' + ], + ), + start_vol=dict( + argstr='%d', + position=-2, + ), + ) + inputs = Complex.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Complex_outputs(): + output_map = dict( + complex_out_file=dict(), + imaginary_out_file=dict(), + magnitude_out_file=dict(), + phase_out_file=dict(), + real_out_file=dict(), + ) + outputs = Complex.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py new file mode 100644 index 0000000000..a82ea8fe43 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ContrastMgr.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import ContrastMgr + + +def test_ContrastMgr_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + contrast_num=dict(argstr='-cope', ), + corrections=dict( + copyfile=False, + mandatory=True, + ), + dof_file=dict( + argstr='', + copyfile=False, + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fcon_file=dict(argstr='-f %s', ), + output_type=dict(), + param_estimates=dict( + argstr='', + copyfile=False, + mandatory=True, + ), + sigmasquareds=dict( + argstr='', + copyfile=False, + mandatory=True, + position=-2, + ), + suffix=dict(argstr='-suffix %s', ), + tcon_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + ) + inputs = ContrastMgr.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ContrastMgr_outputs(): + output_map = dict( + copes=dict(), + fstats=dict(), + neffs=dict(), + tstats=dict(), + varcopes=dict(), + zfstats=dict(), + zstats=dict(), + ) + outputs = ContrastMgr.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py new file mode 100644 index 0000000000..2ed14aaad2 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertWarp.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ConvertWarp + + +def test_ConvertWarp_inputs(): + input_map = dict( + abswarp=dict( + argstr='--abs', + xor=['relwarp'], + ), + args=dict(argstr='%s', ), + cons_jacobian=dict(argstr='--constrainj', ), + environ=dict( + nohash=True, + usedefault=True, + ), + jacobian_max=dict(argstr='--jmax=%f', ), + jacobian_min=dict(argstr='--jmin=%f', ), + midmat=dict(argstr='--midmat=%s', ), + out_abswarp=dict( + argstr='--absout', + xor=['out_relwarp'], + ), + out_file=dict( + argstr='--out=%s', + name_source=['reference'], + name_template='%s_concatwarp', + output_name='out_file', + position=-1, + ), + out_relwarp=dict( + argstr='--relout', + 
xor=['out_abswarp'], + ), + output_type=dict(), + postmat=dict(argstr='--postmat=%s', ), + premat=dict(argstr='--premat=%s', ), + reference=dict( + argstr='--ref=%s', + mandatory=True, + position=1, + ), + relwarp=dict( + argstr='--rel', + xor=['abswarp'], + ), + shift_direction=dict( + argstr='--shiftdir=%s', + requires=['shift_in_file'], + ), + shift_in_file=dict(argstr='--shiftmap=%s', ), + warp1=dict(argstr='--warp1=%s', ), + warp2=dict(argstr='--warp2=%s', ), + ) + inputs = ConvertWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConvertWarp_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ConvertWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py new file mode 100644 index 0000000000..c017a39a52 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ConvertXFM.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ConvertXFM + + +def test_ConvertXFM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + concat_xfm=dict( + argstr='-concat', + position=-3, + requires=['in_file2'], + xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fix_scale_skew=dict( + argstr='-fixscaleskew', + position=-3, + requires=['in_file2'], + xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + in_file2=dict( + argstr='%s', + position=-2, + ), + invert_xfm=dict( + argstr='-inverse', + position=-3, + xor=['invert_xfm', 'concat_xfm', 'fix_scale_skew'], + ), + out_file=dict( + argstr='-omat %s', + genfile=True, + hash_files=False, + position=1, + ), + output_type=dict(), + ) + inputs = ConvertXFM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConvertXFM_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ConvertXFM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py new file mode 100644 index 0000000000..c66feff211 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_CopyGeom.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CopyGeom + + +def test_CopyGeom_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dest_file=dict( + argstr='%s', + copyfile=True, + mandatory=True, + name_source='dest_file', + name_template='%s', + output_name='out_file', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_dims=dict( + argstr='-d', + position='-1', + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + output_type=dict(), + ) + inputs = CopyGeom.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], 
metakey) == value +def test_CopyGeom_outputs(): + output_map = dict(out_file=dict(), ) + outputs = CopyGeom.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_DTIFit.py b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py new file mode 100644 index 0000000000..8bfdf5f36e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_DTIFit.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DTIFit + + +def test_DTIFit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base_name=dict( + argstr='-o %s', + position=1, + usedefault=True, + ), + bvals=dict( + argstr='-b %s', + mandatory=True, + position=4, + ), + bvecs=dict( + argstr='-r %s', + mandatory=True, + position=3, + ), + cni=dict(argstr='--cni=%s', ), + dwi=dict( + argstr='-k %s', + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradnonlin=dict(argstr='--gradnonlin=%s', ), + little_bit=dict(argstr='--littlebit', ), + mask=dict( + argstr='-m %s', + mandatory=True, + position=2, + ), + max_x=dict(argstr='-X %d', ), + max_y=dict(argstr='-Y %d', ), + max_z=dict(argstr='-Z %d', ), + min_x=dict(argstr='-x %d', ), + min_y=dict(argstr='-y %d', ), + min_z=dict(argstr='-z %d', ), + output_type=dict(), + save_tensor=dict(argstr='--save_tensor', ), + sse=dict(argstr='--sse', ), + ) + inputs = DTIFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTIFit_outputs(): + output_map = dict( + FA=dict(), + L1=dict(), + L2=dict(), + L3=dict(), + MD=dict(), + MO=dict(), + S0=dict(), + V1=dict(), + V2=dict(), + V3=dict(), + tensor=dict(), + ) + outputs = DTIFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_DilateImage.py b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py new file mode 100644 index 0000000000..3ed43fbd22 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_DilateImage.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import DilateImage + + +def test_DilateImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + kernel_file=dict( + argstr='%s', + position=5, + xor=['kernel_size'], + ), + kernel_shape=dict( + argstr='-kernel %s', + position=4, + ), + kernel_size=dict( + argstr='%.4f', + position=5, + xor=['kernel_file'], + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + operation=dict( + argstr='-dil%s', + mandatory=True, + position=6, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = DilateImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DilateImage_outputs(): + output_map = 
dict(out_file=dict(), ) + outputs = DilateImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py new file mode 100644 index 0000000000..5f3321de78 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_DistanceMap.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import DistanceMap + + +def test_DistanceMap_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + distance_map=dict( + argstr='--out=%s', + genfile=True, + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + ), + invert_input=dict(argstr='--invert', ), + local_max_file=dict( + argstr='--localmax=%s', + hash_files=False, + ), + mask_file=dict(argstr='--mask=%s', ), + output_type=dict(), + ) + inputs = DistanceMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DistanceMap_outputs(): + output_map = dict( + distance_map=dict(), + local_max_file=dict(), + ) + outputs = DistanceMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_DualRegression.py b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py new file mode 100644 index 0000000000..4c2fdd3ceb --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_DualRegression.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import DualRegression + + +def test_DualRegression_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + con_file=dict( + argstr='%s', + position=4, + ), + des_norm=dict( + argstr='%i', + position=2, + usedefault=True, + ), + design_file=dict( + argstr='%s', + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + group_IC_maps_4D=dict( + argstr='%s', + mandatory=True, + position=1, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-1, + sep=' ', + ), + n_perm=dict( + argstr='%i', + mandatory=True, + position=5, + ), + one_sample_group_mean=dict( + argstr='-1', + position=3, + ), + out_dir=dict( + argstr='%s', + genfile=True, + position=6, + usedefault=True, + ), + output_type=dict(), + ) + inputs = DualRegression.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DualRegression_outputs(): + output_map = dict(out_dir=dict(), ) + outputs = DualRegression.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py new file mode 100644 index 0000000000..2a5f0c86b6 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_EPIDeWarp.py @@ -0,0 +1,75 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import EPIDeWarp + + +def 
test_EPIDeWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cleanup=dict(argstr='--cleanup', ), + dph_file=dict( + argstr='--dph %s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi_file=dict(argstr='--epi %s', ), + epidw=dict( + argstr='--epidw %s', + genfile=False, + ), + esp=dict( + argstr='--esp %s', + usedefault=True, + ), + exf_file=dict(argstr='--exf %s', ), + exfdw=dict( + argstr='--exfdw %s', + genfile=True, + ), + mag_file=dict( + argstr='--mag %s', + mandatory=True, + position=0, + ), + nocleanup=dict( + argstr='--nocleanup', + usedefault=True, + ), + output_type=dict(), + sigma=dict( + argstr='--sigma %s', + usedefault=True, + ), + tediff=dict( + argstr='--tediff %s', + usedefault=True, + ), + tmpdir=dict( + argstr='--tmpdir %s', + genfile=True, + ), + vsm=dict( + argstr='--vsm %s', + genfile=True, + ), + ) + inputs = EPIDeWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EPIDeWarp_outputs(): + output_map = dict( + exf_mask=dict(), + exfdw=dict(), + unwarped_file=dict(), + vsm_file=dict(), + ) + outputs = EPIDeWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Eddy.py b/nipype/interfaces/fsl/tests/test_auto_Eddy.py new file mode 100644 index 0000000000..1e3cdf644d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Eddy.py @@ -0,0 +1,97 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import Eddy + + +def test_Eddy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dont_peas=dict(argstr='--dont_peas', ), + dont_sep_offs_move=dict(argstr='--dont_sep_offs_move', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fep=dict(argstr='--fep', ), + field=dict(argstr='--field=%s', ), + field_mat=dict(argstr='--field_mat=%s', ), + flm=dict(argstr='--flm=%s', ), + fudge_factor=dict( + argstr='--ff=%s', + usedefault=True, + ), + fwhm=dict(argstr='--fwhm=%s', ), + in_acqp=dict( + argstr='--acqp=%s', + mandatory=True, + ), + in_bval=dict( + argstr='--bvals=%s', + mandatory=True, + ), + in_bvec=dict( + argstr='--bvecs=%s', + mandatory=True, + ), + in_file=dict( + argstr='--imain=%s', + mandatory=True, + ), + in_index=dict( + argstr='--index=%s', + mandatory=True, + ), + in_mask=dict( + argstr='--mask=%s', + mandatory=True, + ), + in_topup_fieldcoef=dict( + argstr='--topup=%s', + requires=['in_topup_movpar'], + ), + in_topup_movpar=dict(requires=['in_topup_fieldcoef'], ), + interp=dict(argstr='--interp=%s', ), + is_shelled=dict(argstr='--data_is_shelled', ), + method=dict(argstr='--resamp=%s', ), + niter=dict( + argstr='--niter=%s', + usedefault=True, + ), + num_threads=dict( + nohash=True, + usedefault=True, + ), + nvoxhp=dict( + argstr='--nvoxhp=%s', + usedefault=True, + ), + out_base=dict( + argstr='--out=%s', + usedefault=True, + ), + output_type=dict(), + repol=dict(argstr='--repol', ), + session=dict(argstr='--session=%s', ), + slm=dict(argstr='--slm=%s', ), + use_cuda=dict(), + ) + inputs = Eddy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Eddy_outputs(): + output_map = dict( + out_corrected=dict(), + 
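+        # these output paths are presumably derived from out_base once
+        # eddy finishes, which is why they carry no metadata here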
out_movement_rms=dict(), + out_outlier_report=dict(), + out_parameter=dict(), + out_restricted_movement_rms=dict(), + out_rotated_bvecs=dict(), + out_shell_alignment_parameters=dict(), + ) + outputs = Eddy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py new file mode 100644 index 0000000000..e88219aa04 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_EddyCorrect.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import EddyCorrect + + +def test_EddyCorrect_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s_edc', + output_name='eddy_corrected', + position=1, + ), + output_type=dict(), + ref_num=dict( + argstr='%d', + mandatory=True, + position=2, + usedefault=True, + ), + ) + inputs = EddyCorrect.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EddyCorrect_outputs(): + output_map = dict(eddy_corrected=dict(), ) + outputs = EddyCorrect.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py new file mode 100644 index 0000000000..29a935333e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -0,0 +1,72 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import EpiReg + + +def test_EpiReg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + echospacing=dict(argstr='--echospacing=%f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epi=dict( + argstr='--epi=%s', + mandatory=True, + position=-4, + ), + fmap=dict(argstr='--fmap=%s', ), + fmapmag=dict(argstr='--fmapmag=%s', ), + fmapmagbrain=dict(argstr='--fmapmagbrain=%s', ), + no_clean=dict( + argstr='--noclean', + usedefault=True, + ), + no_fmapreg=dict(argstr='--nofmapreg', ), + out_base=dict( + argstr='--out=%s', + position=-1, + usedefault=True, + ), + output_type=dict(), + pedir=dict(argstr='--pedir=%s', ), + t1_brain=dict( + argstr='--t1brain=%s', + mandatory=True, + position=-2, + ), + t1_head=dict( + argstr='--t1=%s', + mandatory=True, + position=-3, + ), + weight_image=dict(argstr='--weight=%s', ), + wmseg=dict(argstr='--wmseg=%s', ), + ) + inputs = EpiReg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EpiReg_outputs(): + output_map = dict( + epi2str_inv=dict(), + epi2str_mat=dict(), + fmap2epi_mat=dict(), + fmap2str_mat=dict(), + fmap_epi=dict(), + fmap_str=dict(), + fmapmag_str=dict(), + fullwarp=dict(), + out_1vol=dict(), + out_file=dict(), + shiftmap=dict(), + wmedge=dict(), + wmseg=dict(), + ) + outputs = EpiReg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py new file mode 100644 index 0000000000..797a403d45 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ErodeImage.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import ErodeImage + + +def test_ErodeImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + kernel_file=dict( + argstr='%s', + position=5, + xor=['kernel_size'], + ), + kernel_shape=dict( + argstr='-kernel %s', + position=4, + ), + kernel_size=dict( + argstr='%.4f', + position=5, + xor=['kernel_file'], + ), + minimum_filter=dict( + argstr='%s', + position=6, + usedefault=True, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = ErodeImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ErodeImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ErodeImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py new file mode 100644 index 0000000000..df8de64144 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ExtractROI.py @@ -0,0 +1,77 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ExtractROI + + +def test_ExtractROI_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + crop_list=dict( + argstr='%s', + position=2, + xor=[ + 'x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', + 't_min', 't_size' + ], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + output_type=dict(), + roi_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=1, + ), + t_min=dict( + argstr='%d', + position=8, + ), + t_size=dict( + argstr='%d', + position=9, + ), + x_min=dict( + argstr='%d', + position=2, + ), + x_size=dict( + argstr='%d', + position=3, + ), + y_min=dict( + argstr='%d', + position=4, + ), + y_size=dict( + argstr='%d', + position=5, + ), + z_min=dict( + argstr='%d', + position=6, + ), + z_size=dict( + argstr='%d', + position=7, + ), + ) + inputs = ExtractROI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ExtractROI_outputs(): + output_map = dict(roi_file=dict(), ) + outputs = ExtractROI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FAST.py b/nipype/interfaces/fsl/tests/test_auto_FAST.py new file mode 100644 index 0000000000..0b983181af --- /dev/null +++ 
b/nipype/interfaces/fsl/tests/test_auto_FAST.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FAST + + +def test_FAST_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bias_iters=dict(argstr='-I %d', ), + bias_lowpass=dict( + argstr='-l %d', + units='mm', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hyper=dict(argstr='-H %.2f', ), + img_type=dict(argstr='-t %d', ), + in_files=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + ), + init_seg_smooth=dict(argstr='-f %.3f', ), + init_transform=dict(argstr='-a %s', ), + iters_afterbias=dict(argstr='-O %d', ), + manual_seg=dict(argstr='-s %s', ), + mixel_smooth=dict(argstr='-R %.2f', ), + no_bias=dict(argstr='-N', ), + no_pve=dict(argstr='--nopve', ), + number_classes=dict(argstr='-n %d', ), + other_priors=dict(argstr='-A %s', ), + out_basename=dict(argstr='-o %s', ), + output_biascorrected=dict(argstr='-B', ), + output_biasfield=dict(argstr='-b', ), + output_type=dict(), + probability_maps=dict(argstr='-p', ), + segment_iters=dict(argstr='-W %d', ), + segments=dict(argstr='-g', ), + use_priors=dict(argstr='-P', ), + verbose=dict(argstr='-v', ), + ) + inputs = FAST.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FAST_outputs(): + output_map = dict( + bias_field=dict(), + mixeltype=dict(), + partial_volume_files=dict(), + partial_volume_map=dict(), + probability_maps=dict(), + restored_image=dict(), + tissue_class_files=dict(), + tissue_class_map=dict(), + ) + outputs = FAST.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FEAT.py b/nipype/interfaces/fsl/tests/test_auto_FEAT.py new file mode 100644 index 0000000000..01fc72506f --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FEAT.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FEAT + + +def test_FEAT_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsf_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + output_type=dict(), + ) + inputs = FEAT.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FEAT_outputs(): + output_map = dict(feat_dir=dict(), ) + outputs = FEAT.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATModel.py b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py new file mode 100644 index 0000000000..34ea37d47f --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FEATModel.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FEATModel + + +def test_FEATModel_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ev_files=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=1, + ), + fsf_file=dict( + argstr='%s', + 
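+            # the fsf design file is read in place rather than copied into
+            # the working directory (copyfile=False below)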
copyfile=False, + mandatory=True, + position=0, + ), + output_type=dict(), + ) + inputs = FEATModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FEATModel_outputs(): + output_map = dict( + con_file=dict(), + design_cov=dict(), + design_file=dict(), + design_image=dict(), + fcon_file=dict(), + ) + outputs = FEATModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py new file mode 100644 index 0000000000..a8f59a0ec3 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FEATRegister.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FEATRegister + + +def test_FEATRegister_inputs(): + input_map = dict( + feat_dirs=dict(mandatory=True, ), + reg_dof=dict(usedefault=True, ), + reg_image=dict(mandatory=True, ), + ) + inputs = FEATRegister.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FEATRegister_outputs(): + output_map = dict(fsf_file=dict(), ) + outputs = FEATRegister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FIRST.py b/nipype/interfaces/fsl/tests/test_auto_FIRST.py new file mode 100644 index 0000000000..964ee4922c --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FIRST.py @@ -0,0 +1,75 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FIRST + + +def test_FIRST_inputs(): + input_map = dict( + affine_file=dict( + argstr='-a %s', + position=6, + ), + args=dict(argstr='%s', ), + brain_extracted=dict( + argstr='-b', + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + copyfile=False, + mandatory=True, + position=-2, + ), + list_of_specific_structures=dict( + argstr='-s %s', + position=5, + sep=',', + ), + method=dict( + argstr='-m %s', + position=4, + usedefault=True, + xor=['method_as_numerical_threshold'], + ), + method_as_numerical_threshold=dict( + argstr='-m %.4f', + position=4, + ), + no_cleanup=dict( + argstr='-d', + position=3, + ), + out_file=dict( + argstr='-o %s', + hash_files=False, + mandatory=True, + position=-1, + usedefault=True, + ), + output_type=dict(), + verbose=dict( + argstr='-v', + position=1, + ), + ) + inputs = FIRST.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FIRST_outputs(): + output_map = dict( + bvars=dict(), + original_segmentations=dict(), + segmentation_file=dict(), + vtk_surfaces=dict(), + ) + outputs = FIRST.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py new file mode 100644 index 0000000000..bd335282e3 
--- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FLAMEO.py @@ -0,0 +1,78 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FLAMEO + + +def test_FLAMEO_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + burnin=dict(argstr='--burnin=%d', ), + cope_file=dict( + argstr='--copefile=%s', + mandatory=True, + ), + cov_split_file=dict( + argstr='--covsplitfile=%s', + mandatory=True, + ), + design_file=dict( + argstr='--designfile=%s', + mandatory=True, + ), + dof_var_cope_file=dict(argstr='--dofvarcopefile=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_con_file=dict(argstr='--fcontrastsfile=%s', ), + fix_mean=dict(argstr='--fixmean', ), + infer_outliers=dict(argstr='--inferoutliers', ), + log_dir=dict( + argstr='--ld=%s', + usedefault=True, + ), + mask_file=dict( + argstr='--maskfile=%s', + mandatory=True, + ), + n_jumps=dict(argstr='--njumps=%d', ), + no_pe_outputs=dict(argstr='--nopeoutput', ), + outlier_iter=dict(argstr='--ioni=%d', ), + output_type=dict(), + run_mode=dict( + argstr='--runmode=%s', + mandatory=True, + ), + sample_every=dict(argstr='--sampleevery=%d', ), + sigma_dofs=dict(argstr='--sigma_dofs=%d', ), + t_con_file=dict( + argstr='--tcontrastsfile=%s', + mandatory=True, + ), + var_cope_file=dict(argstr='--varcopefile=%s', ), + ) + inputs = FLAMEO.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FLAMEO_outputs(): + output_map = dict( + copes=dict(), + fstats=dict(), + mrefvars=dict(), + pes=dict(), + res4d=dict(), + stats_dir=dict(), + tdof=dict(), + tstats=dict(), + var_copes=dict(), + weights=dict(), + zfstats=dict(), + zstats=dict(), + ) + outputs = FLAMEO.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FLIRT.py b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py new file mode 100644 index 0000000000..0b59550e5a --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FLIRT.py @@ -0,0 +1,157 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FLIRT + + +def test_FLIRT_inputs(): + input_map = dict( + angle_rep=dict(argstr='-anglerep %s', ), + apply_isoxfm=dict( + argstr='-applyisoxfm %f', + xor=['apply_xfm'], + ), + apply_xfm=dict(argstr='-applyxfm', ), + args=dict(argstr='%s', ), + bbrslope=dict( + argstr='-bbrslope %f', + min_ver='5.0.0', + ), + bbrtype=dict( + argstr='-bbrtype %s', + min_ver='5.0.0', + ), + bgvalue=dict(argstr='-setbackground %f', ), + bins=dict(argstr='-bins %d', ), + coarse_search=dict( + argstr='-coarsesearch %d', + units='degrees', + ), + cost=dict(argstr='-cost %s', ), + cost_func=dict(argstr='-searchcost %s', ), + datatype=dict(argstr='-datatype %s', ), + display_init=dict(argstr='-displayinit', ), + dof=dict(argstr='-dof %d', ), + echospacing=dict( + argstr='-echospacing %f', + min_ver='5.0.0', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fieldmap=dict( + argstr='-fieldmap %s', + min_ver='5.0.0', + ), + fieldmapmask=dict( + argstr='-fieldmapmask %s', + min_ver='5.0.0', + ), + fine_search=dict( + argstr='-finesearch %d', + units='degrees', + ), + force_scaling=dict(argstr='-forcescaling', ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=0, + ), + 
in_matrix_file=dict(argstr='-init %s', ), + in_weight=dict(argstr='-inweight %s', ), + interp=dict(argstr='-interp %s', ), + min_sampling=dict( + argstr='-minsampling %f', + units='mm', + ), + no_clamp=dict(argstr='-noclamp', ), + no_resample=dict(argstr='-noresample', ), + no_resample_blur=dict(argstr='-noresampblur', ), + no_search=dict(argstr='-nosearch', ), + out_file=dict( + argstr='-out %s', + hash_files=False, + name_source=['in_file'], + name_template='%s_flirt', + position=2, + ), + out_log=dict( + keep_extension=True, + name_source=['in_file'], + name_template='%s_flirt.log', + requires=['save_log'], + ), + out_matrix_file=dict( + argstr='-omat %s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_flirt.mat', + position=3, + ), + output_type=dict(), + padding_size=dict( + argstr='-paddingsize %d', + units='voxels', + ), + pedir=dict( + argstr='-pedir %d', + min_ver='5.0.0', + ), + ref_weight=dict(argstr='-refweight %s', ), + reference=dict( + argstr='-ref %s', + mandatory=True, + position=1, + ), + rigid2D=dict(argstr='-2D', ), + save_log=dict(), + schedule=dict(argstr='-schedule %s', ), + searchr_x=dict( + argstr='-searchrx %s', + units='degrees', + ), + searchr_y=dict( + argstr='-searchry %s', + units='degrees', + ), + searchr_z=dict( + argstr='-searchrz %s', + units='degrees', + ), + sinc_width=dict( + argstr='-sincwidth %d', + units='voxels', + ), + sinc_window=dict(argstr='-sincwindow %s', ), + uses_qform=dict(argstr='-usesqform', ), + verbose=dict(argstr='-verbose %d', ), + wm_seg=dict( + argstr='-wmseg %s', + min_ver='5.0.0', + ), + wmcoords=dict( + argstr='-wmcoords %s', + min_ver='5.0.0', + ), + wmnorms=dict( + argstr='-wmnorms %s', + min_ver='5.0.0', + ), + ) + inputs = FLIRT.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FLIRT_outputs(): + output_map = dict( + out_file=dict(), + out_log=dict(), + out_matrix_file=dict(), + ) + outputs = FLIRT.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FNIRT.py b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py new file mode 100644 index 0000000000..2b7d0b1b00 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FNIRT.py @@ -0,0 +1,140 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FNIRT + + +def test_FNIRT_inputs(): + input_map = dict( + affine_file=dict(argstr='--aff=%s', ), + apply_inmask=dict( + argstr='--applyinmask=%s', + sep=',', + xor=['skip_inmask'], + ), + apply_intensity_mapping=dict( + argstr='--estint=%s', + sep=',', + xor=['skip_intensity_mapping'], + ), + apply_refmask=dict( + argstr='--applyrefmask=%s', + sep=',', + xor=['skip_refmask'], + ), + args=dict(argstr='%s', ), + bias_regularization_lambda=dict(argstr='--biaslambda=%f', ), + biasfield_resolution=dict(argstr='--biasres=%d,%d,%d', ), + config_file=dict(argstr='--config=%s', ), + derive_from_ref=dict(argstr='--refderiv', ), + environ=dict( + nohash=True, + usedefault=True, + ), + field_file=dict( + argstr='--fout=%s', + hash_files=False, + ), + fieldcoeff_file=dict(argstr='--cout=%s', ), + hessian_precision=dict(argstr='--numprec=%s', ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + ), + in_fwhm=dict( + argstr='--infwhm=%s', + 
sep=',', + ), + in_intensitymap_file=dict( + argstr='--intin=%s', + copyfile=False, + ), + inmask_file=dict(argstr='--inmask=%s', ), + inmask_val=dict(argstr='--impinval=%f', ), + intensity_mapping_model=dict(argstr='--intmod=%s', ), + intensity_mapping_order=dict(argstr='--intorder=%d', ), + inwarp_file=dict(argstr='--inwarp=%s', ), + jacobian_file=dict( + argstr='--jout=%s', + hash_files=False, + ), + jacobian_range=dict(argstr='--jacrange=%f,%f', ), + log_file=dict( + argstr='--logout=%s', + genfile=True, + hash_files=False, + ), + max_nonlin_iter=dict( + argstr='--miter=%s', + sep=',', + ), + modulatedref_file=dict( + argstr='--refout=%s', + hash_files=False, + ), + out_intensitymap_file=dict( + argstr='--intout=%s', + hash_files=False, + ), + output_type=dict(), + ref_file=dict( + argstr='--ref=%s', + mandatory=True, + ), + ref_fwhm=dict( + argstr='--reffwhm=%s', + sep=',', + ), + refmask_file=dict(argstr='--refmask=%s', ), + refmask_val=dict(argstr='--imprefval=%f', ), + regularization_lambda=dict( + argstr='--lambda=%s', + sep=',', + ), + regularization_model=dict(argstr='--regmod=%s', ), + skip_implicit_in_masking=dict(argstr='--impinm=0', ), + skip_implicit_ref_masking=dict(argstr='--imprefm=0', ), + skip_inmask=dict( + argstr='--applyinmask=0', + xor=['apply_inmask'], + ), + skip_intensity_mapping=dict( + argstr='--estint=0', + xor=['apply_intensity_mapping'], + ), + skip_lambda_ssq=dict(argstr='--ssqlambda=0', ), + skip_refmask=dict( + argstr='--applyrefmask=0', + xor=['apply_refmask'], + ), + spline_order=dict(argstr='--splineorder=%d', ), + subsampling_scheme=dict( + argstr='--subsamp=%s', + sep=',', + ), + warp_resolution=dict(argstr='--warpres=%d,%d,%d', ), + warped_file=dict( + argstr='--iout=%s', + genfile=True, + hash_files=False, + ), + ) + inputs = FNIRT.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FNIRT_outputs(): + output_map = dict( + field_file=dict(), + fieldcoeff_file=dict(), + jacobian_file=dict(), + log_file=dict(), + modulatedref_file=dict(), + out_intensitymap_file=dict(), + warped_file=dict(), + ) + outputs = FNIRT.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py new file mode 100644 index 0000000000..1d95b77df5 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FSLCommand.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import FSLCommand + + +def test_FSLCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_type=dict(), + ) + inputs = FSLCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py new file mode 100644 index 0000000000..fd85eee3bf --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FSLXCommand.py @@ -0,0 +1,116 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import FSLXCommand + + +def test_FSLXCommand_inputs(): + 
input_map = dict( + all_ard=dict( + argstr='--allard', + xor=('no_ard', 'all_ard'), + ), + args=dict(argstr='%s', ), + burn_in=dict( + argstr='--burnin=%d', + usedefault=True, + ), + burn_in_no_ard=dict( + argstr='--burnin_noard=%d', + usedefault=True, + ), + bvals=dict( + argstr='--bvals=%s', + mandatory=True, + ), + bvecs=dict( + argstr='--bvecs=%s', + mandatory=True, + ), + cnlinear=dict( + argstr='--cnonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + dwi=dict( + argstr='--data=%s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr='--f0 --ardf0', + xor=['f0_noard', 'f0_ard', 'all_ard'], + ), + f0_noard=dict( + argstr='--f0', + xor=['f0_noard', 'f0_ard'], + ), + force_dir=dict( + argstr='--forcedir', + usedefault=True, + ), + fudge=dict(argstr='--fudge=%d', ), + logdir=dict( + argstr='--logdir=%s', + usedefault=True, + ), + mask=dict( + argstr='--mask=%s', + mandatory=True, + ), + model=dict(argstr='--model=%d', ), + n_fibres=dict( + argstr='--nfibres=%d', + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr='--njumps=%d', + usedefault=True, + ), + no_ard=dict( + argstr='--noard', + xor=('no_ard', 'all_ard'), + ), + no_spat=dict( + argstr='--nospat', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + non_linear=dict( + argstr='--nonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + output_type=dict(), + rician=dict(argstr='--rician', ), + sample_every=dict( + argstr='--sampleevery=%d', + usedefault=True, + ), + seed=dict(argstr='--seed=%d', ), + update_proposal_every=dict( + argstr='--updateproposalevery=%d', + usedefault=True, + ), + ) + inputs = FSLXCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FSLXCommand_outputs(): + output_map = dict( + dyads=dict(), + fsamples=dict(), + mean_S0samples=dict(), + mean_dsamples=dict(), + mean_fsamples=dict(), + mean_tausamples=dict(), + phsamples=dict(), + thsamples=dict(), + ) + outputs = FSLXCommand.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FUGUE.py b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py new file mode 100644 index 0000000000..0e96f1e867 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FUGUE.py @@ -0,0 +1,83 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import FUGUE + + +def test_FUGUE_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + asym_se_time=dict(argstr='--asym=%.10f', ), + despike_2dfilter=dict(argstr='--despike', ), + despike_threshold=dict(argstr='--despikethreshold=%s', ), + dwell_time=dict(argstr='--dwell=%.10f', ), + dwell_to_asym_ratio=dict(argstr='--dwelltoasym=%.10f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fmap_in_file=dict(argstr='--loadfmap=%s', ), + fmap_out_file=dict(argstr='--savefmap=%s', ), + forward_warping=dict(usedefault=True, ), + fourier_order=dict(argstr='--fourier=%d', ), + icorr=dict( + argstr='--icorr', + requires=['shift_in_file'], + ), + icorr_only=dict( + argstr='--icorronly', + requires=['unwarped_file'], + ), + in_file=dict(argstr='--in=%s', ), + mask_file=dict(argstr='--mask=%s', ), + median_2dfilter=dict(argstr='--median', ), + no_extend=dict(argstr='--noextend', ), + 
no_gap_fill=dict(argstr='--nofill', ), + nokspace=dict(argstr='--nokspace', ), + output_type=dict(), + pava=dict(argstr='--pava', ), + phase_conjugate=dict(argstr='--phaseconj', ), + phasemap_in_file=dict(argstr='--phasemap=%s', ), + poly_order=dict(argstr='--poly=%d', ), + save_fmap=dict(xor=['save_unmasked_fmap'], ), + save_shift=dict(xor=['save_unmasked_shift'], ), + save_unmasked_fmap=dict( + argstr='--unmaskfmap', + xor=['save_fmap'], + ), + save_unmasked_shift=dict( + argstr='--unmaskshift', + xor=['save_shift'], + ), + shift_in_file=dict(argstr='--loadshift=%s', ), + shift_out_file=dict(argstr='--saveshift=%s', ), + smooth2d=dict(argstr='--smooth2=%.2f', ), + smooth3d=dict(argstr='--smooth3=%.2f', ), + unwarp_direction=dict(argstr='--unwarpdir=%s', ), + unwarped_file=dict( + argstr='--unwarp=%s', + requires=['in_file'], + xor=['warped_file'], + ), + warped_file=dict( + argstr='--warp=%s', + requires=['in_file'], + xor=['unwarped_file'], + ), + ) + inputs = FUGUE.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FUGUE_outputs(): + output_map = dict( + fmap_out_file=dict(), + shift_out_file=dict(), + unwarped_file=dict(), + warped_file=dict(), + ) + outputs = FUGUE.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py new file mode 100644 index 0000000000..3945c40a87 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FeatureExtractor.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import FeatureExtractor + + +def test_FeatureExtractor_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + mel_ica=dict( + argstr='%s', + copyfile=False, + position=-1, + ), + ) + inputs = FeatureExtractor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FeatureExtractor_outputs(): + output_map = dict( + mel_ica=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) + outputs = FeatureExtractor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py new file mode 100644 index 0000000000..36cbb979d7 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FilterRegressor.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import FilterRegressor + + +def test_FilterRegressor_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + design_file=dict( + argstr='-d %s', + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter_all=dict( + argstr="-f '%s'", + mandatory=True, + position=4, + xor=['filter_columns'], + ), + filter_columns=dict( + argstr="-f '%s'", + mandatory=True, + position=4, + xor=['filter_all'], + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + 
mask=dict(argstr='-m %s', ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + position=2, + ), + out_vnscales=dict(argstr='--out_vnscales', ), + output_type=dict(), + var_norm=dict(argstr='--vn', ), + ) + inputs = FilterRegressor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FilterRegressor_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FilterRegressor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py new file mode 100644 index 0000000000..3731c842e7 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_FindTheBiggest.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import FindTheBiggest + + +def test_FindTheBiggest_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=0, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=2, + ), + output_type=dict(), + ) + inputs = FindTheBiggest.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FindTheBiggest_outputs(): + output_map = dict(out_file=dict(argstr='%s', ), ) + outputs = FindTheBiggest.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_GLM.py b/nipype/interfaces/fsl/tests/test_auto_GLM.py new file mode 100644 index 0000000000..1dbf7eba19 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_GLM.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import GLM + + +def test_GLM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + contrasts=dict(argstr='-c %s', ), + dat_norm=dict(argstr='--dat_norm', ), + demean=dict(argstr='--demean', ), + des_norm=dict(argstr='--des_norm', ), + design=dict( + argstr='-d %s', + mandatory=True, + position=2, + ), + dof=dict(argstr='--dof=%d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + mask=dict(argstr='-m %s', ), + out_cope=dict(argstr='--out_cope=%s', ), + out_data_name=dict(argstr='--out_data=%s', ), + out_f_name=dict(argstr='--out_f=%s', ), + out_file=dict( + argstr='-o %s', + keep_extension=True, + name_source='in_file', + name_template='%s_glm', + position=3, + ), + out_p_name=dict(argstr='--out_p=%s', ), + out_pf_name=dict(argstr='--out_pf=%s', ), + out_res_name=dict(argstr='--out_res=%s', ), + out_sigsq_name=dict(argstr='--out_sigsq=%s', ), + out_t_name=dict(argstr='--out_t=%s', ), + out_varcb_name=dict(argstr='--out_varcb=%s', ), + out_vnscales_name=dict(argstr='--out_vnscales=%s', ), + out_z_name=dict(argstr='--out_z=%s', ), + output_type=dict(), + var_norm=dict(argstr='--vn', ), + ) + inputs = GLM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GLM_outputs(): + output_map = dict( + out_cope=dict(), + out_data=dict(), + out_f=dict(), + out_file=dict(), + out_p=dict(), + out_pf=dict(), + out_res=dict(), + out_sigsq=dict(), + out_t=dict(), + out_varcb=dict(), + out_vnscales=dict(), + out_z=dict(), + ) + outputs = GLM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py new file mode 100644 index 0000000000..05bbb5b106 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ICA_AROMA.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..aroma import ICA_AROMA + + +def test_ICA_AROMA_inputs(): + input_map = dict( + TR=dict(argstr='-tr %.3f', ), + args=dict(argstr='%s', ), + denoise_type=dict( + argstr='-den %s', + mandatory=True, + usedefault=True, + ), + dim=dict(argstr='-dim %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + feat_dir=dict( + argstr='-feat %s', + mandatory=True, + xor=[ + 'in_file', 'mat_file', 'fnirt_warp_file', 'motion_parameters' + ], + ), + fnirt_warp_file=dict( + argstr='-warp %s', + xor=['feat_dir'], + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + xor=['feat_dir'], + ), + mask=dict( + argstr='-m %s', + xor=['feat_dir'], + ), + mat_file=dict( + argstr='-affmat %s', + xor=['feat_dir'], + ), + melodic_dir=dict(argstr='-meldir %s', ), + motion_parameters=dict( + argstr='-mc %s', + mandatory=True, + xor=['feat_dir'], + ), + out_dir=dict( + argstr='-o %s', + mandatory=True, + usedefault=True, + ), + ) + inputs = ICA_AROMA.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ICA_AROMA_outputs(): + output_map = dict( + aggr_denoised_file=dict(), + nonaggr_denoised_file=dict(), + out_dir=dict(), + ) + outputs = ICA_AROMA.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py new file mode 100644 index 0000000000..6d4e06827e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMaths.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ImageMaths + + +def test_ImageMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + in_file2=dict( + argstr='%s', + position=3, + ), + mask_file=dict(argstr='-mas %s', ), + op_string=dict( + argstr='%s', + position=2, + ), + out_data_type=dict( + argstr='-odt %s', + position=-1, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_type=dict(), + suffix=dict(), + ) + inputs = ImageMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = 
ImageMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py new file mode 100644 index 0000000000..042f93112b --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ImageMeants.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ImageMeants + + +def test_ImageMeants_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + eig=dict(argstr='--eig', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=0, + ), + mask=dict(argstr='-m %s', ), + nobin=dict(argstr='--no_bin', ), + order=dict( + argstr='--order=%d', + usedefault=True, + ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + show_all=dict(argstr='--showall', ), + spatial_coord=dict(argstr='-c %s', ), + transpose=dict(argstr='--transpose', ), + use_mm=dict(argstr='--usemm', ), + ) + inputs = ImageMeants.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageMeants_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ImageMeants.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ImageStats.py b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py new file mode 100644 index 0000000000..91a6059798 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ImageStats.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ImageStats + + +def test_ImageStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + mask_file=dict(argstr='', ), + op_string=dict( + argstr='%s', + mandatory=True, + position=3, + ), + output_type=dict(), + split_4d=dict( + argstr='-t', + position=1, + ), + ) + inputs = ImageStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageStats_outputs(): + output_map = dict(out_stat=dict(), ) + outputs = ImageStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_InvWarp.py b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py new file mode 100644 index 0000000000..973f42ea34 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_InvWarp.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import InvWarp + + +def test_InvWarp_inputs(): + input_map = dict( + absolute=dict( + argstr='--abs', + xor=['relative'], + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inverse_warp=dict( + argstr='--out=%s', + hash_files=False, + name_source=['warp'], + 
name_template='%s_inverse', + ), + jacobian_max=dict(argstr='--jmax=%f', ), + jacobian_min=dict(argstr='--jmin=%f', ), + niter=dict(argstr='--niter=%d', ), + noconstraint=dict(argstr='--noconstraint', ), + output_type=dict(), + reference=dict( + argstr='--ref=%s', + mandatory=True, + ), + regularise=dict(argstr='--regularise=%f', ), + relative=dict( + argstr='--rel', + xor=['absolute'], + ), + warp=dict( + argstr='--warp=%s', + mandatory=True, + ), + ) + inputs = InvWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_InvWarp_outputs(): + output_map = dict(inverse_warp=dict(), ) + outputs = InvWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py new file mode 100644 index 0000000000..4b6192c98d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_IsotropicSmooth.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import IsotropicSmooth + + +def test_IsotropicSmooth_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr='-s %.5f', + mandatory=True, + position=4, + xor=['sigma'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + sigma=dict( + argstr='-s %.5f', + mandatory=True, + position=4, + xor=['fwhm'], + ), + ) + inputs = IsotropicSmooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_IsotropicSmooth_outputs(): + output_map = dict(out_file=dict(), ) + outputs = IsotropicSmooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_L2Model.py b/nipype/interfaces/fsl/tests/test_auto_L2Model.py new file mode 100644 index 0000000000..9d3588666f --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_L2Model.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import L2Model + + +def test_L2Model_inputs(): + input_map = dict(num_copes=dict(mandatory=True, ), ) + inputs = L2Model.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_L2Model_outputs(): + output_map = dict( + design_con=dict(), + design_grp=dict(), + design_mat=dict(), + ) + outputs = L2Model.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Level1Design.py b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py new file 
mode 100644 index 0000000000..45451f8eff --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Level1Design.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Level1Design + + +def test_Level1Design_inputs(): + input_map = dict( + bases=dict(mandatory=True, ), + contrasts=dict(), + interscan_interval=dict(mandatory=True, ), + model_serial_correlations=dict(mandatory=True, ), + orthogonalization=dict(usedefault=True, ), + session_info=dict(mandatory=True, ), + ) + inputs = Level1Design.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Level1Design_outputs(): + output_map = dict( + ev_files=dict(), + fsf_files=dict(), + ) + outputs = Level1Design.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py new file mode 100644 index 0000000000..daeca07b10 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MCFLIRT.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MCFLIRT + + +def test_MCFLIRT_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bins=dict(argstr='-bins %d', ), + cost=dict(argstr='-cost %s', ), + dof=dict(argstr='-dof %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=0, + ), + init=dict(argstr='-init %s', ), + interpolation=dict(argstr='-%s_final', ), + mean_vol=dict(argstr='-meanvol', ), + out_file=dict( + argstr='-out %s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + ref_file=dict(argstr='-reffile %s', ), + ref_vol=dict(argstr='-refvol %d', ), + rotation=dict(argstr='-rotation %d', ), + save_mats=dict(argstr='-mats', ), + save_plots=dict(argstr='-plots', ), + save_rms=dict(argstr='-rmsabs -rmsrel', ), + scaling=dict(argstr='-scaling %.2f', ), + smooth=dict(argstr='-smooth %.2f', ), + stages=dict(argstr='-stages %d', ), + stats_imgs=dict(argstr='-stats', ), + use_contour=dict(argstr='-edge', ), + use_gradient=dict(argstr='-gdt', ), + ) + inputs = MCFLIRT.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MCFLIRT_outputs(): + output_map = dict( + mat_file=dict(), + mean_img=dict(), + out_file=dict(), + par_file=dict(), + rms_files=dict(), + std_img=dict(), + variance_img=dict(), + ) + outputs = MCFLIRT.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MELODIC.py b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py new file mode 100644 index 0000000000..b22078c450 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MELODIC.py @@ -0,0 +1,85 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MELODIC + + +def test_MELODIC_inputs(): + input_map = dict( + ICs=dict(argstr='--ICs=%s', ), + approach=dict(argstr='-a %s', ), + args=dict(argstr='%s', ), + bg_image=dict(argstr='--bgimage=%s', 
), + bg_threshold=dict(argstr='--bgthreshold=%f', ), + cov_weight=dict(argstr='--covarweight=%f', ), + dim=dict(argstr='-d %d', ), + dim_est=dict(argstr='--dimest=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon=dict(argstr='--eps=%f', ), + epsilonS=dict(argstr='--epsS=%f', ), + in_files=dict( + argstr='-i %s', + mandatory=True, + position=0, + sep=',', + ), + log_power=dict(argstr='--logPower', ), + mask=dict(argstr='-m %s', ), + max_restart=dict(argstr='--maxrestart=%d', ), + maxit=dict(argstr='--maxit=%d', ), + migp=dict(argstr='--migp', ), + migpN=dict(argstr='--migpN %d', ), + migp_factor=dict(argstr='--migp_factor %d', ), + migp_shuffle=dict(argstr='--migp_shuffle', ), + mix=dict(argstr='--mix=%s', ), + mm_thresh=dict(argstr='--mmthresh=%f', ), + no_bet=dict(argstr='--nobet', ), + no_mask=dict(argstr='--nomask', ), + no_mm=dict(argstr='--no_mm', ), + non_linearity=dict(argstr='--nl=%s', ), + num_ICs=dict(argstr='-n %d', ), + out_all=dict(argstr='--Oall', ), + out_dir=dict( + argstr='-o %s', + genfile=True, + ), + out_mean=dict(argstr='--Omean', ), + out_orig=dict(argstr='--Oorig', ), + out_pca=dict(argstr='--Opca', ), + out_stats=dict(argstr='--Ostats', ), + out_unmix=dict(argstr='--Ounmix', ), + out_white=dict(argstr='--Owhite', ), + output_type=dict(), + pbsc=dict(argstr='--pbsc', ), + rem_cmp=dict(argstr='-f %d', ), + remove_deriv=dict(argstr='--remove_deriv', ), + report=dict(argstr='--report', ), + report_maps=dict(argstr='--report_maps=%s', ), + s_con=dict(argstr='--Scon=%s', ), + s_des=dict(argstr='--Sdes=%s', ), + sep_vn=dict(argstr='--sep_vn', ), + sep_whiten=dict(argstr='--sep_whiten', ), + smode=dict(argstr='--smode=%s', ), + t_con=dict(argstr='--Tcon=%s', ), + t_des=dict(argstr='--Tdes=%s', ), + tr_sec=dict(argstr='--tr=%f', ), + update_mask=dict(argstr='--update_mask', ), + var_norm=dict(argstr='--vn', ), + ) + inputs = MELODIC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MELODIC_outputs(): + output_map = dict( + out_dir=dict(), + report_dir=dict(), + ) + outputs = MELODIC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py new file mode 100644 index 0000000000..c7a5a3e465 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MakeDyadicVectors.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import MakeDyadicVectors + + +def test_MakeDyadicVectors_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask=dict( + argstr='%s', + position=2, + ), + output=dict( + argstr='%s', + hash_files=False, + position=3, + usedefault=True, + ), + output_type=dict(), + perc=dict( + argstr='%f', + position=4, + ), + phi_vol=dict( + argstr='%s', + mandatory=True, + position=1, + ), + theta_vol=dict( + argstr='%s', + mandatory=True, + position=0, + ), + ) + inputs = MakeDyadicVectors.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MakeDyadicVectors_outputs(): + output_map = dict( + dispersion=dict(), + dyads=dict(), + 
) + outputs = MakeDyadicVectors.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py new file mode 100644 index 0000000000..64b0f8b089 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MathsCommand.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MathsCommand + + +def test_MathsCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MathsCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MathsCommand_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MathsCommand.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py new file mode 100644 index 0000000000..9910f9c4e8 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MaxImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MaxImage + + +def test_MaxImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%smax', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MaxImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MaxImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MaxImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py new file mode 100644 index 0000000000..f49c5f462c --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MaxnImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MaxnImage + + +def test_MaxnImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%smaxn', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, 
+ ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MaxnImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MaxnImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MaxnImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MeanImage.py b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py new file mode 100644 index 0000000000..2172dcfa9e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MeanImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MeanImage + + +def test_MeanImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%smean', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MeanImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MeanImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MeanImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MedianImage.py b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py new file mode 100644 index 0000000000..c14bf8d839 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MedianImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MedianImage + + +def test_MedianImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%smedian', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MedianImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedianImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MedianImage.output_spec() + + for key, metadata in list(output_map.items()): 
+ for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Merge.py b/nipype/interfaces/fsl/tests/test_auto_Merge.py new file mode 100644 index 0000000000..826270239e --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Merge.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Merge + + +def test_Merge_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%s', + mandatory=True, + position=0, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=2, + ), + merged_file=dict( + argstr='%s', + hash_files=False, + name_source='in_files', + name_template='%s_merged', + position=1, + ), + output_type=dict(), + tr=dict( + argstr='%.2f', + position=-1, + ), + ) + inputs = Merge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Merge_outputs(): + output_map = dict(merged_file=dict(), ) + outputs = Merge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MinImage.py b/nipype/interfaces/fsl/tests/test_auto_MinImage.py new file mode 100644 index 0000000000..4e9002c259 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MinImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MinImage + + +def test_MinImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%smin', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MinImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MinImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MinImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py new file mode 100644 index 0000000000..97d2426b53 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MotionOutliers.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MotionOutliers + + +def test_MotionOutliers_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dummy=dict(argstr='--dummy=%d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + ), + mask=dict(argstr='-m %s', ), + metric=dict(argstr='--%s', ), + no_motion_correction=dict(argstr='--nomoco', ), + 
out_file=dict( + argstr='-o %s', + hash_files=False, + keep_extension=True, + name_source='in_file', + name_template='%s_outliers.txt', + ), + out_metric_plot=dict( + argstr='-p %s', + hash_files=False, + keep_extension=True, + name_source='in_file', + name_template='%s_metrics.png', + ), + out_metric_values=dict( + argstr='-s %s', + hash_files=False, + keep_extension=True, + name_source='in_file', + name_template='%s_metrics.txt', + ), + output_type=dict(), + threshold=dict(argstr='--thresh=%g', ), + ) + inputs = MotionOutliers.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MotionOutliers_outputs(): + output_map = dict( + out_file=dict(), + out_metric_plot=dict(), + out_metric_values=dict(), + ) + outputs = MotionOutliers.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py new file mode 100644 index 0000000000..50a2977a4c --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MultiImageMaths.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MultiImageMaths + + +def test_MultiImageMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + op_string=dict( + argstr='%s', + mandatory=True, + position=4, + ), + operand_files=dict(mandatory=True, ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = MultiImageMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultiImageMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MultiImageMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py new file mode 100644 index 0000000000..78bd97ff85 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_MultipleRegressDesign.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MultipleRegressDesign + + +def test_MultipleRegressDesign_inputs(): + input_map = dict( + contrasts=dict(mandatory=True, ), + groups=dict(), + regressors=dict(mandatory=True, ), + ) + inputs = MultipleRegressDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultipleRegressDesign_outputs(): + output_map = dict( + design_con=dict(), + design_fts=dict(), + design_grp=dict(), + design_mat=dict(), + ) + outputs = MultipleRegressDesign.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Overlay.py b/nipype/interfaces/fsl/tests/test_auto_Overlay.py new file mode 100644 index 0000000000..9818ae1eda --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Overlay.py @@ -0,0 +1,93 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Overlay + + +def test_Overlay_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + auto_thresh_bg=dict( + argstr='-a', + mandatory=True, + position=5, + xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), + ), + background_image=dict( + argstr='%s', + mandatory=True, + position=4, + ), + bg_thresh=dict( + argstr='%.3f %.3f', + mandatory=True, + position=5, + xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + full_bg_range=dict( + argstr='-A', + mandatory=True, + position=5, + xor=('auto_thresh_bg', 'full_bg_range', 'bg_thresh'), + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-1, + ), + out_type=dict( + argstr='%s', + position=2, + usedefault=True, + ), + output_type=dict(), + show_negative_stats=dict( + argstr='%s', + position=8, + xor=['stat_image2'], + ), + stat_image=dict( + argstr='%s', + mandatory=True, + position=6, + ), + stat_image2=dict( + argstr='%s', + position=9, + xor=['show_negative_stats'], + ), + stat_thresh=dict( + argstr='%.2f %.2f', + mandatory=True, + position=7, + ), + stat_thresh2=dict( + argstr='%.2f %.2f', + position=10, + ), + transparency=dict( + argstr='%s', + position=1, + usedefault=True, + ), + use_checkerboard=dict( + argstr='-c', + position=3, + ), + ) + inputs = Overlay.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Overlay_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Overlay.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py new file mode 100644 index 0000000000..328a8e3272 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PRELUDE.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import PRELUDE + + +def test_PRELUDE_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + complex_phase_file=dict( + argstr='--complex=%s', + mandatory=True, + xor=['magnitude_file', 'phase_file'], + ), + end=dict(argstr='--end=%d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + label_file=dict( + argstr='--labels=%s', + hash_files=False, + ), + labelprocess2d=dict(argstr='--labelslices', ), + magnitude_file=dict( + argstr='--abs=%s', + mandatory=True, + xor=['complex_phase_file'], + ), + mask_file=dict(argstr='--mask=%s', ), + num_partitions=dict(argstr='--numphasesplit=%d', ), + output_type=dict(), + phase_file=dict( + argstr='--phase=%s', + mandatory=True, + xor=['complex_phase_file'], + ), + process2d=dict( + argstr='--slices', + xor=['labelprocess2d'], + ), + process3d=dict( + argstr='--force3D', + xor=['labelprocess2d', 'process2d'], + ), + rawphase_file=dict( + argstr='--rawphase=%s', + 
hash_files=False, + ), + removeramps=dict(argstr='--removeramps', ), + savemask_file=dict( + argstr='--savemask=%s', + hash_files=False, + ), + start=dict(argstr='--start=%d', ), + threshold=dict(argstr='--thresh=%.10f', ), + unwrapped_phase_file=dict( + argstr='--unwrap=%s', + genfile=True, + hash_files=False, + ), + ) + inputs = PRELUDE.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PRELUDE_outputs(): + output_map = dict(unwrapped_phase_file=dict(), ) + outputs = PRELUDE.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py new file mode 100644 index 0000000000..4e08c18db0 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PercentileImage.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import PercentileImage + + +def test_PercentileImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%sperc', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + perc=dict( + argstr='%f', + position=5, + ), + ) + inputs = PercentileImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PercentileImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PercentileImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py new file mode 100644 index 0000000000..45a5b43945 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PlotMotionParams.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import PlotMotionParams + + +def test_PlotMotionParams_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + in_source=dict(mandatory=True, ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + plot_size=dict(argstr='%s', ), + plot_type=dict( + argstr='%s', + mandatory=True, + ), + ) + inputs = PlotMotionParams.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PlotMotionParams_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PlotMotionParams.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + 
assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py new file mode 100644 index 0000000000..62ced498e1 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PlotTimeSeries.py @@ -0,0 +1,70 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import PlotTimeSeries + + +def test_PlotTimeSeries_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + labels=dict(argstr='%s', ), + legend_file=dict(argstr='--legend=%s', ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + plot_finish=dict( + argstr='--finish=%d', + xor=('plot_range', ), + ), + plot_range=dict( + argstr='%s', + xor=('plot_start', 'plot_finish'), + ), + plot_size=dict(argstr='%s', ), + plot_start=dict( + argstr='--start=%d', + xor=('plot_range', ), + ), + sci_notation=dict(argstr='--sci', ), + title=dict(argstr='%s', ), + x_precision=dict(argstr='--precision=%d', ), + x_units=dict( + argstr='-u %d', + usedefault=True, + ), + y_max=dict( + argstr='--ymax=%.2f', + xor=('y_range', ), + ), + y_min=dict( + argstr='--ymin=%.2f', + xor=('y_range', ), + ), + y_range=dict( + argstr='%s', + xor=('y_min', 'y_max'), + ), + ) + inputs = PlotTimeSeries.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PlotTimeSeries_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PlotTimeSeries.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py new file mode 100644 index 0000000000..18a812c00b --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PowerSpectrum.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import PowerSpectrum + + +def test_PowerSpectrum_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=1, + ), + output_type=dict(), + ) + inputs = PowerSpectrum.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PowerSpectrum_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PowerSpectrum.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py new file mode 100644 index 0000000000..66bfd51fab --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_PrepareFieldmap.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import PrepareFieldmap + + +def test_PrepareFieldmap_inputs(): 
+ input_map = dict( + args=dict(argstr='%s', ), + delta_TE=dict( + argstr='%f', + mandatory=True, + position=-2, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_magnitude=dict( + argstr='%s', + mandatory=True, + position=3, + ), + in_phase=dict( + argstr='%s', + mandatory=True, + position=2, + ), + nocheck=dict( + argstr='--nocheck', + position=-1, + usedefault=True, + ), + out_fieldmap=dict( + argstr='%s', + position=4, + ), + output_type=dict(), + scanner=dict( + argstr='%s', + position=1, + usedefault=True, + ), + ) + inputs = PrepareFieldmap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PrepareFieldmap_outputs(): + output_map = dict(out_fieldmap=dict(), ) + outputs = PrepareFieldmap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py new file mode 100644 index 0000000000..5368d0dd37 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX.py @@ -0,0 +1,92 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ProbTrackX + + +def test_ProbTrackX_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avoid_mp=dict(argstr='--avoid=%s', ), + c_thresh=dict(argstr='--cthr=%.3f', ), + correct_path_distribution=dict(argstr='--pd', ), + dist_thresh=dict(argstr='--distthresh=%.3f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fibst=dict(argstr='--fibst=%d', ), + force_dir=dict( + argstr='--forcedir', + usedefault=True, + ), + fsamples=dict(mandatory=True, ), + inv_xfm=dict(argstr='--invxfm=%s', ), + loop_check=dict(argstr='--loopcheck', ), + mask=dict( + argstr='-m %s', + mandatory=True, + ), + mask2=dict(argstr='--mask2=%s', ), + mesh=dict(argstr='--mesh=%s', ), + mod_euler=dict(argstr='--modeuler', ), + mode=dict( + argstr='--mode=%s', + genfile=True, + ), + n_samples=dict( + argstr='--nsamples=%d', + usedefault=True, + ), + n_steps=dict(argstr='--nsteps=%d', ), + network=dict(argstr='--network', ), + opd=dict( + argstr='--opd', + usedefault=True, + ), + os2t=dict(argstr='--os2t', ), + out_dir=dict( + argstr='--dir=%s', + genfile=True, + ), + output_type=dict(), + phsamples=dict(mandatory=True, ), + rand_fib=dict(argstr='--randfib=%d', ), + random_seed=dict(argstr='--rseed', ), + s2tastext=dict(argstr='--s2tastext', ), + sample_random_points=dict(argstr='--sampvox', ), + samples_base_name=dict( + argstr='--samples=%s', + usedefault=True, + ), + seed=dict( + argstr='--seed=%s', + mandatory=True, + ), + seed_ref=dict(argstr='--seedref=%s', ), + step_length=dict(argstr='--steplength=%.3f', ), + stop_mask=dict(argstr='--stop=%s', ), + target_masks=dict(argstr='--targetmasks=%s', ), + thsamples=dict(mandatory=True, ), + use_anisotropy=dict(argstr='--usef', ), + verbose=dict(argstr='--verbose=%d', ), + waypoints=dict(argstr='--waypoints=%s', ), + xfm=dict(argstr='--xfm=%s', ), + ) + inputs = ProbTrackX.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProbTrackX_outputs(): + output_map = dict( + fdt_paths=dict(), + log=dict(), + particle_files=dict(), + targets=dict(), + 
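For orientation, the ProbTrackX spec asserted above maps straight onto a probtrackx command line through its argstr entries. A minimal usage sketch, assuming FSL and bedpostX outputs are available (every file name here is a hypothetical placeholder):

    from nipype.interfaces import fsl

    pbx = fsl.ProbTrackX()
    pbx.inputs.seed = 'seed_mask.nii.gz'                 # --seed=...
    pbx.inputs.mask = 'nodif_brain_mask.nii.gz'          # -m ...
    pbx.inputs.thsamples = ['merged_th1samples.nii.gz']  # mandatory, no argstr of its own
    pbx.inputs.phsamples = ['merged_ph1samples.nii.gz']
    pbx.inputs.fsamples = ['merged_f1samples.nii.gz']
    pbx.inputs.n_samples = 5000                          # --nsamples=5000
    print(pbx.cmdline)                                   # inspect the generated call

Note that thsamples/phsamples/fsamples are mandatory yet carry no argstr in the map above; the interface handles those files through its own internal logic rather than a plain flag.
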
way_total=dict(), + ) + outputs = ProbTrackX.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py new file mode 100644 index 0000000000..8592b5ae1a --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ProbTrackX2.py @@ -0,0 +1,114 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ProbTrackX2 + + +def test_ProbTrackX2_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avoid_mp=dict(argstr='--avoid=%s', ), + c_thresh=dict(argstr='--cthr=%.3f', ), + colmask4=dict(argstr='--colmask4=%s', ), + correct_path_distribution=dict(argstr='--pd', ), + dist_thresh=dict(argstr='--distthresh=%.3f', ), + distthresh1=dict(argstr='--distthresh1=%.3f', ), + distthresh3=dict(argstr='--distthresh3=%.3f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fibst=dict(argstr='--fibst=%d', ), + fopd=dict(argstr='--fopd=%s', ), + force_dir=dict( + argstr='--forcedir', + usedefault=True, + ), + fsamples=dict(mandatory=True, ), + inv_xfm=dict(argstr='--invxfm=%s', ), + loop_check=dict(argstr='--loopcheck', ), + lrtarget3=dict(argstr='--lrtarget3=%s', ), + mask=dict( + argstr='-m %s', + mandatory=True, + ), + meshspace=dict(argstr='--meshspace=%s', ), + mod_euler=dict(argstr='--modeuler', ), + n_samples=dict( + argstr='--nsamples=%d', + usedefault=True, + ), + n_steps=dict(argstr='--nsteps=%d', ), + network=dict(argstr='--network', ), + omatrix1=dict(argstr='--omatrix1', ), + omatrix2=dict( + argstr='--omatrix2', + requires=['target2'], + ), + omatrix3=dict( + argstr='--omatrix3', + requires=['target3', 'lrtarget3'], + ), + omatrix4=dict(argstr='--omatrix4', ), + onewaycondition=dict(argstr='--onewaycondition', ), + opd=dict( + argstr='--opd', + usedefault=True, + ), + os2t=dict(argstr='--os2t', ), + out_dir=dict( + argstr='--dir=%s', + genfile=True, + ), + output_type=dict(), + phsamples=dict(mandatory=True, ), + rand_fib=dict(argstr='--randfib=%d', ), + random_seed=dict(argstr='--rseed', ), + s2tastext=dict(argstr='--s2tastext', ), + sample_random_points=dict(argstr='--sampvox', ), + samples_base_name=dict( + argstr='--samples=%s', + usedefault=True, + ), + seed=dict( + argstr='--seed=%s', + mandatory=True, + ), + seed_ref=dict(argstr='--seedref=%s', ), + simple=dict(argstr='--simple', ), + step_length=dict(argstr='--steplength=%.3f', ), + stop_mask=dict(argstr='--stop=%s', ), + target2=dict(argstr='--target2=%s', ), + target3=dict(argstr='--target3=%s', ), + target4=dict(argstr='--target4=%s', ), + target_masks=dict(argstr='--targetmasks=%s', ), + thsamples=dict(mandatory=True, ), + use_anisotropy=dict(argstr='--usef', ), + verbose=dict(argstr='--verbose=%d', ), + waycond=dict(argstr='--waycond=%s', ), + wayorder=dict(argstr='--wayorder', ), + waypoints=dict(argstr='--waypoints=%s', ), + xfm=dict(argstr='--xfm=%s', ), + ) + inputs = ProbTrackX2.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProbTrackX2_outputs(): + output_map = dict( + fdt_paths=dict(), + log=dict(), + lookup_tractspace=dict(), + matrix1_dot=dict(), + matrix2_dot=dict(), + matrix3_dot=dict(), + network_matrix=dict(), + particle_files=dict(), + targets=dict(), + way_total=dict(), + ) + outputs = 
ProbTrackX2.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py new file mode 100644 index 0000000000..dc3878e2c0 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_ProjThresh.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import ProjThresh + + +def test_ProjThresh_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=0, + ), + output_type=dict(), + threshold=dict( + argstr='%d', + mandatory=True, + position=1, + ), + ) + inputs = ProjThresh.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProjThresh_outputs(): + output_map = dict(out_files=dict(), ) + outputs = ProjThresh.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Randomise.py b/nipype/interfaces/fsl/tests/test_auto_Randomise.py new file mode 100644 index 0000000000..3d52347265 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Randomise.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Randomise + + +def test_Randomise_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + base_name=dict( + argstr='-o "%s"', + position=1, + usedefault=True, + ), + c_thresh=dict(argstr='-c %.1f', ), + cm_thresh=dict(argstr='-C %.1f', ), + demean=dict(argstr='-D', ), + design_mat=dict( + argstr='-d %s', + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f_c_thresh=dict(argstr='-F %.2f', ), + f_cm_thresh=dict(argstr='-S %.2f', ), + f_only=dict(argstr='--f_only', ), + fcon=dict(argstr='-f %s', ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=0, + ), + mask=dict(argstr='-m %s', ), + num_perm=dict(argstr='-n %d', ), + one_sample_group_mean=dict(argstr='-1', ), + output_type=dict(), + p_vec_n_dist_files=dict(argstr='-P', ), + raw_stats_imgs=dict(argstr='-R', ), + seed=dict(argstr='--seed=%d', ), + show_info_parallel_mode=dict(argstr='-Q', ), + show_total_perms=dict(argstr='-q', ), + tcon=dict( + argstr='-t %s', + position=3, + ), + tfce=dict(argstr='-T', ), + tfce2D=dict(argstr='--T2', ), + tfce_C=dict(argstr='--tfce_C=%.2f', ), + tfce_E=dict(argstr='--tfce_E=%.2f', ), + tfce_H=dict(argstr='--tfce_H=%.2f', ), + var_smooth=dict(argstr='-v %d', ), + vox_p_values=dict(argstr='-x', ), + x_block_labels=dict(argstr='-e %s', ), + ) + inputs = Randomise.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Randomise_outputs(): + output_map = dict( + f_corrected_p_files=dict(), + f_p_files=dict(), + fstat_files=dict(), + t_corrected_p_files=dict(), + t_p_files=dict(), + tstat_files=dict(), + ) + outputs = Randomise.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], 
metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py new file mode 100644 index 0000000000..76647d82d8 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Reorient2Std.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Reorient2Std + + +def test_Reorient2Std_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + ) + inputs = Reorient2Std.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Reorient2Std_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Reorient2Std.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py new file mode 100644 index 0000000000..e1cbac6fc9 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_RobustFOV.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import RobustFOV + + +def test_RobustFOV_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brainsize=dict(argstr='-b %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=0, + ), + out_roi=dict( + argstr='-r %s', + hash_files=False, + name_source=['in_file'], + name_template='%s_ROI', + ), + out_transform=dict( + argstr='-m %s', + hash_files=False, + name_source=['in_file'], + name_template='%s_to_ROI', + ), + output_type=dict(), + ) + inputs = RobustFOV.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RobustFOV_outputs(): + output_map = dict( + out_roi=dict(), + out_transform=dict(), + ) + outputs = RobustFOV.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SMM.py b/nipype/interfaces/fsl/tests/test_auto_SMM.py new file mode 100644 index 0000000000..1e1cc308cc --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SMM.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import SMM + + +def test_SMM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask=dict( + argstr='--mask="%s"', + copyfile=False, + mandatory=True, + position=1, + ), + no_deactivation_class=dict( + argstr='--zfstatmode', + position=2, + ), + output_type=dict(), + spatial_data_file=dict( + argstr='--sdf="%s"', + copyfile=False, + mandatory=True, + position=0, + ), + ) + inputs = SMM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SMM_outputs(): + output_map = dict( + 
activation_p_map=dict(), + deactivation_p_map=dict(), + null_p_map=dict(), + ) + outputs = SMM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SUSAN.py b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py new file mode 100644 index 0000000000..ebbef0d427 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SUSAN.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SUSAN + + +def test_SUSAN_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brightness_threshold=dict( + argstr='%.10f', + mandatory=True, + position=2, + ), + dimension=dict( + argstr='%d', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr='%.10f', + mandatory=True, + position=3, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-1, + ), + output_type=dict(), + usans=dict( + argstr='', + position=6, + usedefault=True, + ), + use_median=dict( + argstr='%d', + position=5, + usedefault=True, + ), + ) + inputs = SUSAN.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SUSAN_outputs(): + output_map = dict(smoothed_file=dict(), ) + outputs = SUSAN.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SigLoss.py b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py new file mode 100644 index 0000000000..f3be97b350 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SigLoss.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SigLoss + + +def test_SigLoss_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + echo_time=dict(argstr='--te=%f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + ), + mask_file=dict(argstr='-m %s', ), + out_file=dict( + argstr='-s %s', + genfile=True, + ), + output_type=dict(), + slice_direction=dict(argstr='-d %s', ), + ) + inputs = SigLoss.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SigLoss_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SigLoss.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Slice.py b/nipype/interfaces/fsl/tests/test_auto_Slice.py new file mode 100644 index 0000000000..d1e9093e37 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Slice.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Slice + + +def test_Slice_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=0, + ), + 
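As a usage illustration for the SUSAN spec above: in_file, brightness_threshold and fwhm are the mandatory positional arguments, and the remaining traits fall back to their usedefault values. A small sketch (the input file name is a hypothetical placeholder):

    from nipype.interfaces import fsl

    su = fsl.SUSAN()
    su.inputs.in_file = 'func.nii.gz'        # positional arg 1
    su.inputs.brightness_threshold = 500.0   # positional arg 2, rendered as %.10f
    su.inputs.fwhm = 6.0                     # positional arg 3, smoothing kernel as FWHM in mm
    print(su.cmdline)                        # dimension, use_median and usans use their defaults
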
out_base_name=dict( + argstr='%s', + position=1, + ), + output_type=dict(), + ) + inputs = Slice.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Slice_outputs(): + output_map = dict(out_files=dict(), ) + outputs = Slice.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py new file mode 100644 index 0000000000..75b9918ad9 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SliceTimer.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SliceTimer + + +def test_SliceTimer_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + custom_order=dict(argstr='--ocustom=%s', ), + custom_timings=dict(argstr='--tcustom=%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + global_shift=dict(argstr='--tglobal', ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + position=0, + ), + index_dir=dict(argstr='--down', ), + interleaved=dict(argstr='--odd', ), + out_file=dict( + argstr='--out=%s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + slice_direction=dict(argstr='--direction=%d', ), + time_repetition=dict(argstr='--repeat=%f', ), + ) + inputs = SliceTimer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SliceTimer_outputs(): + output_map = dict(slice_time_corrected_file=dict(), ) + outputs = SliceTimer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Slicer.py b/nipype/interfaces/fsl/tests/test_auto_Slicer.py new file mode 100644 index 0000000000..d313cda474 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Slicer.py @@ -0,0 +1,106 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Slicer + + +def test_Slicer_inputs(): + input_map = dict( + all_axial=dict( + argstr='-A', + position=10, + requires=['image_width'], + xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), + ), + args=dict(argstr='%s', ), + colour_map=dict( + argstr='-l %s', + position=4, + ), + dither_edges=dict( + argstr='-t', + position=7, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + image_edges=dict( + argstr='%s', + position=2, + ), + image_width=dict( + argstr='%d', + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + intensity_range=dict( + argstr='-i %.3f %.3f', + position=5, + ), + label_slices=dict( + argstr='-L', + position=3, + usedefault=True, + ), + middle_slices=dict( + argstr='-a', + position=10, + xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), + ), + nearest_neighbour=dict( + argstr='-n', + position=8, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-1, + ), + output_type=dict(), + sample_axial=dict( + argstr='-S %d', + position=10, + requires=['image_width'], + xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), + ), + 
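The xor tuple repeated through the Slicer entries above makes the four slice-selection modes (single_slice, middle_slices, all_axial, sample_axial) mutually exclusive: assigning a second member of the group is rejected by trait validation before any command line is built. A hedged sketch of that behaviour (file name hypothetical; nipype of this era raises an IOError for xor conflicts, caught generically here):

    import pytest
    from nipype.interfaces import fsl

    sl = fsl.Slicer(in_file='anat.nii.gz')
    sl.inputs.middle_slices = True
    with pytest.raises(Exception):   # conflicting member of the xor group is refused
        sl.inputs.all_axial = True
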
scaling=dict( + argstr='-s %f', + position=0, + ), + show_orientation=dict( + argstr='%s', + position=9, + usedefault=True, + ), + single_slice=dict( + argstr='-%s', + position=10, + requires=['slice_number'], + xor=('single_slice', 'middle_slices', 'all_axial', 'sample_axial'), + ), + slice_number=dict( + argstr='-%d', + position=11, + ), + threshold_edges=dict( + argstr='-e %.3f', + position=6, + ), + ) + inputs = Slicer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Slicer_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Slicer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Smooth.py b/nipype/interfaces/fsl/tests/test_auto_Smooth.py new file mode 100644 index 0000000000..e8d8e06117 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Smooth.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Smooth + + +def test_Smooth_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr='-kernel gauss %.03f -fmean', + mandatory=True, + position=1, + xor=['sigma'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + output_type=dict(), + sigma=dict( + argstr='-kernel gauss %.03f -fmean', + mandatory=True, + position=1, + xor=['fwhm'], + ), + smoothed_file=dict( + argstr='%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_smooth', + position=2, + ), + ) + inputs = Smooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Smooth_outputs(): + output_map = dict(smoothed_file=dict(), ) + outputs = Smooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py new file mode 100644 index 0000000000..df7544f931 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SmoothEstimate.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import SmoothEstimate + + +def test_SmoothEstimate_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dof=dict( + argstr='--dof=%d', + mandatory=True, + xor=['zstat_file'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + mask_file=dict( + argstr='--mask=%s', + mandatory=True, + ), + output_type=dict(), + residual_fit_file=dict( + argstr='--res=%s', + requires=['dof'], + ), + zstat_file=dict( + argstr='--zstat=%s', + xor=['dof'], + ), + ) + inputs = SmoothEstimate.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SmoothEstimate_outputs(): + output_map = dict( + dlh=dict(), + resels=dict(), + volume=dict(), + ) + outputs = SmoothEstimate.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py new file mode 100644 index 0000000000..d62f904655 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SpatialFilter.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import SpatialFilter + + +def test_SpatialFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + kernel_file=dict( + argstr='%s', + position=5, + xor=['kernel_size'], + ), + kernel_shape=dict( + argstr='-kernel %s', + position=4, + ), + kernel_size=dict( + argstr='%.4f', + position=5, + xor=['kernel_file'], + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + operation=dict( + argstr='-f%s', + mandatory=True, + position=6, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = SpatialFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SpatialFilter_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SpatialFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Split.py b/nipype/interfaces/fsl/tests/test_auto_Split.py new file mode 100644 index 0000000000..cec2ab462d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Split.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Split + + +def test_Split_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%s', + mandatory=True, + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=0, + ), + out_base_name=dict( + argstr='%s', + position=1, + ), + output_type=dict(), + ) + inputs = Split.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Split_outputs(): + output_map = dict(out_files=dict(), ) + outputs = Split.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_StdImage.py b/nipype/interfaces/fsl/tests/test_auto_StdImage.py new file mode 100644 index 0000000000..302ab9c4c2 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_StdImage.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import StdImage + + +def test_StdImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict( + argstr='-%sstd', + position=4, + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + 
argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = StdImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_StdImage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = StdImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py new file mode 100644 index 0000000000..7cbb57491c --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_SwapDimensions.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import SwapDimensions + + +def test_SwapDimensions_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position='1', + ), + new_dims=dict( + argstr='%s %s %s', + mandatory=True, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + ) + inputs = SwapDimensions.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SwapDimensions_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SwapDimensions.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_TOPUP.py b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py new file mode 100644 index 0000000000..0e7e89e4e3 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_TOPUP.py @@ -0,0 +1,132 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..epi import TOPUP + + +def test_TOPUP_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + config=dict( + argstr='--config=%s', + usedefault=True, + ), + encoding_direction=dict( + argstr='--datain=%s', + mandatory=True, + requires=['readout_times'], + xor=['encoding_file'], + ), + encoding_file=dict( + argstr='--datain=%s', + mandatory=True, + xor=['encoding_direction'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + estmov=dict(argstr='--estmov=%d', ), + fwhm=dict( + argstr='--fwhm=%f', + usedefault=True, + ), + in_file=dict( + argstr='--imain=%s', + mandatory=True, + ), + interp=dict(argstr='--interp=%s', ), + max_iter=dict( + argstr='--miter=%d', + usedefault=True, + ), + minmet=dict(argstr='--minmet=%d', ), + numprec=dict(argstr='--numprec=%s', ), + out_base=dict( + argstr='--out=%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_base', + ), + out_corrected=dict( + argstr='--iout=%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_corrected', + ), + out_field=dict( + argstr='--fout=%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_field', + ), + out_jac_prefix=dict( + argstr='--jacout=%s', + hash_files=False, + usedefault=True, + ), + 
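The encoding_direction / readout_times / encoding_file triple in the TOPUP map above encodes the two ways of supplying the acquisition table: pass a ready-made --datain file, or let nipype build one from directions and readout times, with the requires/xor metadata enforcing that exactly one route is used. A minimal sketch with hypothetical file names:

    from nipype.interfaces import fsl

    topup = fsl.TOPUP()
    topup.inputs.in_file = 'b0_appa.nii.gz'        # --imain: blip-up/blip-down b0 pair
    topup.inputs.encoding_direction = ['y', 'y-']  # one entry per volume; xor with encoding_file
    topup.inputs.readout_times = [0.05, 0.05]      # required alongside encoding_direction
    print(topup.cmdline)                           # nipype writes the --datain text file itself
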
out_logfile=dict( + argstr='--logout=%s', + hash_files=False, + keep_extension=True, + name_source=['in_file'], + name_template='%s_topup.log', + ), + out_mat_prefix=dict( + argstr='--rbmout=%s', + hash_files=False, + usedefault=True, + ), + out_warp_prefix=dict( + argstr='--dfout=%s', + hash_files=False, + usedefault=True, + ), + output_type=dict(), + readout_times=dict( + mandatory=True, + requires=['encoding_direction'], + xor=['encoding_file'], + ), + reg_lambda=dict( + argstr='--miter=%0.f', + usedefault=True, + ), + regmod=dict(argstr='--regmod=%s', ), + regrid=dict(argstr='--regrid=%d', ), + scale=dict(argstr='--scale=%d', ), + splineorder=dict( + argstr='--splineorder=%d', + usedefault=True, + ), + ssqlambda=dict(argstr='--ssqlambda=%d', ), + subsamp=dict( + argstr='--subsamp=%d', + usedefault=True, + ), + warp_res=dict( + argstr='--warpres=%f', + usedefault=True, + ), + ) + inputs = TOPUP.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TOPUP_outputs(): + output_map = dict( + out_corrected=dict(), + out_enc_file=dict(), + out_field=dict(), + out_fieldcoef=dict(), + out_jacs=dict(), + out_logfile=dict(), + out_mats=dict(), + out_movpar=dict(), + out_warps=dict(), + ) + outputs = TOPUP.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py new file mode 100644 index 0000000000..c762b99d31 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_TemporalFilter.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import TemporalFilter + + +def test_TemporalFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + highpass_sigma=dict( + argstr='-bptf %.6f', + position=4, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + lowpass_sigma=dict( + argstr='%.6f', + position=5, + usedefault=True, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = TemporalFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TemporalFilter_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TemporalFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Threshold.py b/nipype/interfaces/fsl/tests/test_auto_Threshold.py new file mode 100644 index 0000000000..5c2bb46cc8 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Threshold.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import Threshold + + +def test_Threshold_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + direction=dict(usedefault=True, ), + 
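In the Threshold spec, thresh is the only mandatory value, and direction (defaulting to 'below') decides whether fslmaths zeroes voxels below or above the cutoff; use_robust_range switches the cutoff to a percentage of the robust range instead of an absolute intensity. A short sketch with a hypothetical input:

    from nipype.interfaces import fsl

    thr = fsl.Threshold()
    thr.inputs.in_file = 'zstat1.nii.gz'   # hypothetical statistics image
    thr.inputs.thresh = 2.3                # mandatory cutoff
    thr.inputs.direction = 'below'         # the default: zero everything below 2.3
    print(thr.cmdline)                     # renders an 'fslmaths ... -thr ...' call
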
environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + thresh=dict( + argstr='%s', + mandatory=True, + position=4, + ), + use_nonzero_voxels=dict(requires=['use_robust_range'], ), + use_robust_range=dict(), + ) + inputs = Threshold.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Threshold_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Threshold.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py new file mode 100644 index 0000000000..4308ee8153 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_TractSkeleton.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import TractSkeleton + + +def test_TractSkeleton_inputs(): + input_map = dict( + alt_data_file=dict(argstr='-a %s', ), + alt_skeleton=dict(argstr='-s %s', ), + args=dict(argstr='%s', ), + data_file=dict(), + distance_map=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + ), + output_type=dict(), + project_data=dict( + argstr='-p %.3f %s %s %s %s', + requires=['threshold', 'distance_map', 'data_file'], + ), + projected_data=dict(), + search_mask_file=dict(xor=['use_cingulum_mask'], ), + skeleton_file=dict(argstr='-o %s', ), + threshold=dict(), + use_cingulum_mask=dict( + usedefault=True, + xor=['search_mask_file'], + ), + ) + inputs = TractSkeleton.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TractSkeleton_outputs(): + output_map = dict( + projected_data=dict(), + skeleton_file=dict(), + ) + outputs = TractSkeleton.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_Training.py b/nipype/interfaces/fsl/tests/test_auto_Training.py new file mode 100644 index 0000000000..91d48e4c01 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_Training.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import Training + + +def test_Training_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + loo=dict( + argstr='-l', + position=2, + ), + mel_icas=dict( + argstr='%s', + copyfile=False, + position=-1, + ), + trained_wts_filestem=dict( + argstr='%s', + position=1, + ), + ) + inputs = Training.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Training_outputs(): + output_map = dict(trained_wts_file=dict(), ) 
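Every file in this series follows the same checkspecs template: build the expected metadata dictionary, instantiate input_spec()/output_spec(), and compare each (trait, metakey) pair via getattr on traits(). The same idiom works interactively for spot-checking a single trait; a minimal sketch using the Smooth spec shown earlier:

    from nipype.interfaces.fsl.utils import Smooth

    inputs = Smooth.input_spec()
    trait = inputs.traits()['smoothed_file']
    assert trait.name_template == '%s_smooth'   # auto-naming template tied to name_source
    assert trait.hash_files is False            # file contents excluded from input hashing
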
+ outputs = Training.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py new file mode 100644 index 0000000000..18ef078a79 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_TrainingSetCreator.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fix import TrainingSetCreator + + +def test_TrainingSetCreator_inputs(): + input_map = dict( + mel_icas_in=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) + inputs = TrainingSetCreator.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TrainingSetCreator_outputs(): + output_map = dict( + mel_icas_out=dict( + argstr='%s', + copyfile=False, + position=-1, + ), ) + outputs = TrainingSetCreator.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py new file mode 100644 index 0000000000..a0dcf2610d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_UnaryMaths.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import UnaryMaths + + +def test_UnaryMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + internal_datatype=dict( + argstr='-dt %s', + position=1, + ), + nan2zeros=dict( + argstr='-nan', + position=3, + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-1, + ), + output_type=dict(), + ) + inputs = UnaryMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_UnaryMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = UnaryMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_VecReg.py b/nipype/interfaces/fsl/tests/test_auto_VecReg.py new file mode 100644 index 0000000000..61a28f7369 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_VecReg.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dti import VecReg + + +def test_VecReg_inputs(): + input_map = dict( + affine_mat=dict(argstr='-t %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + ), + interpolation=dict(argstr='--interp=%s', ), + mask=dict(argstr='-m %s', ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + output_type=dict(), + ref_mask=dict(argstr='--refmask=%s', ), + ref_vol=dict( + argstr='-r %s', + 
mandatory=True, + ), + rotation_mat=dict(argstr='--rotmat=%s', ), + rotation_warp=dict(argstr='--rotwarp=%s', ), + warp_field=dict(argstr='-w %s', ), + ) + inputs = VecReg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VecReg_outputs(): + output_map = dict(out_file=dict(), ) + outputs = VecReg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py new file mode 100644 index 0000000000..30924223cf --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPoints.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import WarpPoints + + +def test_WarpPoints_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + coord_mm=dict( + argstr='-mm', + xor=['coord_vox'], + ), + coord_vox=dict( + argstr='-vox', + xor=['coord_mm'], + ), + dest_file=dict( + argstr='-dest %s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_coords=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + out_file=dict( + name_source='in_coords', + name_template='%s_warped', + output_name='out_file', + ), + src_file=dict( + argstr='-src %s', + mandatory=True, + ), + warp_file=dict( + argstr='-warp %s', + xor=['xfm_file'], + ), + xfm_file=dict( + argstr='-xfm %s', + xor=['warp_file'], + ), + ) + inputs = WarpPoints.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpPoints_outputs(): + output_map = dict(out_file=dict(), ) + outputs = WarpPoints.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py new file mode 100644 index 0000000000..627ef60ad6 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsFromStd.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import WarpPointsFromStd + + +def test_WarpPointsFromStd_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + coord_mm=dict( + argstr='-mm', + xor=['coord_vox'], + ), + coord_vox=dict( + argstr='-vox', + xor=['coord_mm'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + img_file=dict( + argstr='-img %s', + mandatory=True, + ), + in_coords=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + std_file=dict( + argstr='-std %s', + mandatory=True, + ), + warp_file=dict( + argstr='-warp %s', + xor=['xfm_file'], + ), + xfm_file=dict( + argstr='-xfm %s', + xor=['warp_file'], + ), + ) + inputs = WarpPointsFromStd.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpPointsFromStd_outputs(): + output_map = dict(out_file=dict(), ) + outputs = WarpPointsFromStd.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py new file mode 100644 index 0000000000..3d37ad1486 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_WarpPointsToStd.py @@ -0,0 +1,60 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import WarpPointsToStd + + +def test_WarpPointsToStd_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + coord_mm=dict( + argstr='-mm', + xor=['coord_vox'], + ), + coord_vox=dict( + argstr='-vox', + xor=['coord_mm'], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + img_file=dict( + argstr='-img %s', + mandatory=True, + ), + in_coords=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + out_file=dict( + name_source='in_coords', + name_template='%s_warped', + output_name='out_file', + ), + premat_file=dict(argstr='-premat %s', ), + std_file=dict( + argstr='-std %s', + mandatory=True, + ), + warp_file=dict( + argstr='-warp %s', + xor=['xfm_file'], + ), + xfm_file=dict( + argstr='-xfm %s', + xor=['warp_file'], + ), + ) + inputs = WarpPointsToStd.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpPointsToStd_outputs(): + output_map = dict(out_file=dict(), ) + outputs = WarpPointsToStd.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py new file mode 100644 index 0000000000..04d3a05f14 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_WarpUtils.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import WarpUtils + + +def test_WarpUtils_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='--in=%s', + mandatory=True, + ), + knot_space=dict(argstr='--knotspace=%d,%d,%d', ), + out_file=dict( + argstr='--out=%s', + name_source=['in_file'], + output_name='out_file', + position=-1, + ), + out_format=dict(argstr='--outformat=%s', ), + out_jacobian=dict(argstr='--jac=%s', ), + output_type=dict(), + reference=dict( + argstr='--ref=%s', + mandatory=True, + ), + warp_resolution=dict(argstr='--warpres=%0.4f,%0.4f,%0.4f', ), + with_affine=dict(argstr='--withaff', ), + write_jacobian=dict( + mandatory=True, + usedefault=True, + ), + ) + inputs = WarpUtils.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WarpUtils_outputs(): + output_map = dict( + out_file=dict(), + out_jacobian=dict(), + ) + outputs = WarpUtils.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_auto_XFibres5.py b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py new file mode 100644 index 0000000000..d72bb3bb4b --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_auto_XFibres5.py @@ -0,0 +1,117 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ 
import unicode_literals +from ..dti import XFibres5 + + +def test_XFibres5_inputs(): + input_map = dict( + all_ard=dict( + argstr='--allard', + xor=('no_ard', 'all_ard'), + ), + args=dict(argstr='%s', ), + burn_in=dict( + argstr='--burnin=%d', + usedefault=True, + ), + burn_in_no_ard=dict( + argstr='--burnin_noard=%d', + usedefault=True, + ), + bvals=dict( + argstr='--bvals=%s', + mandatory=True, + ), + bvecs=dict( + argstr='--bvecs=%s', + mandatory=True, + ), + cnlinear=dict( + argstr='--cnonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + dwi=dict( + argstr='--data=%s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + f0_ard=dict( + argstr='--f0 --ardf0', + xor=['f0_noard', 'f0_ard', 'all_ard'], + ), + f0_noard=dict( + argstr='--f0', + xor=['f0_noard', 'f0_ard'], + ), + force_dir=dict( + argstr='--forcedir', + usedefault=True, + ), + fudge=dict(argstr='--fudge=%d', ), + gradnonlin=dict(argstr='--gradnonlin=%s', ), + logdir=dict( + argstr='--logdir=%s', + usedefault=True, + ), + mask=dict( + argstr='--mask=%s', + mandatory=True, + ), + model=dict(argstr='--model=%d', ), + n_fibres=dict( + argstr='--nfibres=%d', + mandatory=True, + usedefault=True, + ), + n_jumps=dict( + argstr='--njumps=%d', + usedefault=True, + ), + no_ard=dict( + argstr='--noard', + xor=('no_ard', 'all_ard'), + ), + no_spat=dict( + argstr='--nospat', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + non_linear=dict( + argstr='--nonlinear', + xor=('no_spat', 'non_linear', 'cnlinear'), + ), + output_type=dict(), + rician=dict(argstr='--rician', ), + sample_every=dict( + argstr='--sampleevery=%d', + usedefault=True, + ), + seed=dict(argstr='--seed=%d', ), + update_proposal_every=dict( + argstr='--updateproposalevery=%d', + usedefault=True, + ), + ) + inputs = XFibres5.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_XFibres5_outputs(): + output_map = dict( + dyads=dict(), + fsamples=dict(), + mean_S0samples=dict(), + mean_dsamples=dict(), + mean_fsamples=dict(), + mean_tausamples=dict(), + phsamples=dict(), + thsamples=dict(), + ) + outputs = XFibres5.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/fsl/tests/test_base.py b/nipype/interfaces/fsl/tests/test_base.py new file mode 100644 index 0000000000..71022997b6 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_base.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import nipype.interfaces.fsl as fsl +from nipype.interfaces.base import InterfaceResult +from nipype.interfaces.fsl import check_fsl, no_fsl + +import pytest + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fslversion(): + ver = fsl.Info.version() + ver = ver.split('.') + assert ver[0] in ['4', '5'] + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fsloutputtype(): + types = list(fsl.Info.ftypes.keys()) + orig_out_type = fsl.Info.output_type() + assert orig_out_type in types + + +def test_outputtype_to_ext(): + for ftype, ext in fsl.Info.ftypes.items(): + res = fsl.Info.output_type_to_ext(ftype) + assert res == ext + + with pytest.raises(KeyError): + fsl.Info.output_type_to_ext('JUNK') + + 
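The ftypes mapping exercised just above pairs FSLOUTPUTTYPE names with file extensions, and the same lookup is handy outside the test suite. For instance (the extension shown is the standard FSL one for gzipped NIfTI):

    from nipype.interfaces import fsl

    ext = fsl.Info.output_type_to_ext('NIFTI_GZ')
    print(ext)   # '.nii.gz'; unknown type names raise KeyError, as the test asserts
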
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_FSLCommand():
+    # Most methods in FSLCommand are tested in the subclasses. Only
+    # testing the one item that is not.
+    cmd = fsl.FSLCommand(command='ls')
+    res = cmd.run()
+    assert type(res) == InterfaceResult
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_FSLCommand2():
+    # Check default output type and environ
+    cmd = fsl.FSLCommand(command='junk')
+    assert cmd._output_type == fsl.Info.output_type()
+    assert cmd.inputs.environ['FSLOUTPUTTYPE'] == cmd._output_type
+    assert cmd._output_type in fsl.Info.ftypes
+
+    cmd = fsl.FSLCommand
+    cmdinst = fsl.FSLCommand(command='junk')
+    for out_type in fsl.Info.ftypes:
+        cmd.set_default_output_type(out_type)
+        assert cmd._output_type == out_type
+        if out_type != fsl.Info.output_type():
+            # Setting class outputtype should not affect existing instances
+            assert cmdinst.inputs.output_type != out_type
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+@pytest.mark.parametrize(
+    "args, desired_name",
+    [
+        ({}, {
+            "file": 'foo.nii.gz'
+        }),  # just the filename
+        # filename with suffix
+        ({
+            "suffix": '_brain'
+        }, {
+            "file": 'foo_brain.nii.gz'
+        }),
+        (
+            {
+                "suffix": '_brain',
+                "cwd": '/data'
+            },
+            # filename with suffix and working directory
+            {
+                "dir": '/data',
+                "file": 'foo_brain.nii.gz'
+            }),
+        # filename with suffix and no file extension change
+        ({
+            "suffix": '_brain.mat',
+            "change_ext": False
+        }, {
+            "file": 'foo_brain.mat'
+        })
+    ])
+def test_gen_fname(args, desired_name):
+    # Test _gen_fname method of FSLCommand
+    cmd = fsl.FSLCommand(command='junk', output_type='NIFTI_GZ')
+    pth = os.getcwd()
+    fname = cmd._gen_fname('foo.nii.gz', **args)
+    if "dir" in desired_name.keys():
+        desired = os.path.join(desired_name["dir"], desired_name["file"])
+    else:
+        desired = os.path.join(pth, desired_name["file"])
+    assert fname == desired
diff --git a/nipype/interfaces/fsl/tests/test_dti.py b/nipype/interfaces/fsl/tests/test_dti.py
new file mode 100644
index 0000000000..cd76766dd8
--- /dev/null
+++ b/nipype/interfaces/fsl/tests/test_dti.py
@@ -0,0 +1,424 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from builtins import open, range
+import os
+
+import nipype.interfaces.fsl.dti as fsl
+from nipype.interfaces.fsl import Info, no_fsl
+from nipype.interfaces.base import Undefined
+
+import pytest
+from nipype.testing.fixtures import create_files_in_directory
+
+
+# test dtifit
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_dtifit2(create_files_in_directory):
+    filelist, outdir = create_files_in_directory
+    dti = fsl.DTIFit()
+    # make sure command gets called
+    assert dti.cmd == 'dtifit'
+
+    # test raising error with mandatory args absent
+    with pytest.raises(ValueError):
+        dti.run()
+
+    # .inputs based parameters setting
+    dti.inputs.dwi = filelist[0]
+    dti.inputs.base_name = 'foo.dti.nii'
+    dti.inputs.mask = filelist[1]
+    dti.inputs.bvecs = filelist[0]
+    dti.inputs.bvals = filelist[1]
+    dti.inputs.min_z = 10
+    dti.inputs.max_z = 50
+
+    assert dti.cmdline == \
+        'dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10' % (filelist[0],
+                                                                       filelist[1],
+                                                                       filelist[0],
+                                                                       filelist[1])
+
+
+@pytest.mark.xfail(
+    reason="These tests are skipped until we clean up some of this code")
+def test_randomise2():
+
+    rand = fsl.Randomise()
+
+    # make sure command gets called
+    assert
rand.cmd == 'randomise' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + rand.run() + + # .inputs based parameters setting + rand.inputs.input_4D = 'infile.nii' + rand.inputs.output_rootname = 'outfile' + rand.inputs.design_matrix = 'design.mat' + rand.inputs.t_contrast = 'infile.con' + + actualCmdline = sorted(rand.cmdline.split()) + cmd = 'randomise -i infile.nii -o outfile -d design.mat -t infile.con' + desiredCmdline = sorted(cmd.split()) + assert actualCmdline == desiredCmdline + + # .run based parameter setting + rand2 = fsl.Randomise( + input_4D='infile2', + output_rootname='outfile2', + f_contrast='infile.f', + one_sample_gmean=True, + int_seed=4) + + actualCmdline = sorted(rand2.cmdline.split()) + cmd = 'randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4' + desiredCmdline = sorted(cmd.split()) + assert actualCmdline == desiredCmdline + + rand3 = fsl.Randomise() + results = rand3.run(input_4D='infile3', output_rootname='outfile3') + assert results.runtime.cmdline == \ + 'randomise -i infile3 -o outfile3' + + # test arguments for opt_map + opt_map = { + 'demean_data': ('-D', True), + 'one_sample_gmean': ('-1', True), + 'mask_image': ('-m inp_mask', 'inp_mask'), + 'design_matrix': ('-d design.mat', 'design.mat'), + 't_contrast': ('-t input.con', 'input.con'), + 'f_contrast': ('-f input.fts', 'input.fts'), + 'xchange_block_labels': ('-e design.grp', 'design.grp'), + 'print_unique_perm': ('-q', True), + 'print_info_parallelMode': ('-Q', True), + 'num_permutations': ('-n 10', 10), + 'vox_pvalus': ('-x', True), + 'fstats_only': ('--fonly', True), + 'thresh_free_cluster': ('-T', True), + 'thresh_free_cluster_2Dopt': ('--T2', True), + 'cluster_thresholding': ('-c 0.20', 0.20), + 'cluster_mass_thresholding': ('-C 0.40', 0.40), + 'fcluster_thresholding': ('-F 0.10', 0.10), + 'fcluster_mass_thresholding': ('-S 0.30', 0.30), + 'variance_smoothing': ('-v 0.20', 0.20), + 'diagnostics_off': ('--quiet', True), + 'output_raw': ('-R', True), + 'output_perm_vect': ('-P', True), + 'int_seed': ('--seed=20', 20), + 'TFCE_height_param': ('--tfce_H=0.11', 0.11), + 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), + 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), + 'list_num_voxel_EVs_pos': ('--vxl=1,2,3,4', '1,2,3,4'), + 'list_img_voxel_EVs': ('--vxf=6,7,8,9,3', '6,7,8,9,3') + } + + for name, settings in list(opt_map.items()): + rand4 = fsl.Randomise( + input_4D='infile', output_rootname='root', **{ + name: settings[1] + }) + assert rand4.cmdline == rand4.cmd + ' -i infile -o root ' + settings[0] + + +@pytest.mark.xfail( + reason="These tests are skipped until we clean up some of this code") +def test_Randomise_parallel(): + rand = fsl.Randomise_parallel() + + # make sure command gets called + assert rand.cmd == 'randomise_parallel' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + rand.run() + + # .inputs based parameters setting + rand.inputs.input_4D = 'infile.nii' + rand.inputs.output_rootname = 'outfile' + rand.inputs.design_matrix = 'design.mat' + rand.inputs.t_contrast = 'infile.con' + + actualCmdline = sorted(rand.cmdline.split()) + cmd = ('randomise_parallel -i infile.nii -o outfile -d design.mat -t ' + 'infile.con') + desiredCmdline = sorted(cmd.split()) + assert actualCmdline == desiredCmdline + + # .run based parameter setting + rand2 = fsl.Randomise_parallel( + input_4D='infile2', + output_rootname='outfile2', + f_contrast='infile.f', + one_sample_gmean=True, + int_seed=4) + + actualCmdline = 
sorted(rand2.cmdline.split()) + cmd = 'randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4' + desiredCmdline = sorted(cmd.split()) + assert actualCmdline == desiredCmdline + + rand3 = fsl.Randomise_parallel() + results = rand3.run(input_4D='infile3', output_rootname='outfile3') + assert results.runtime.cmdline == \ + 'randomise_parallel -i infile3 -o outfile3' + + # test arguments for opt_map + opt_map = { + 'demean_data': ('-D', True), + 'one_sample_gmean': ('-1', True), + 'mask_image': ('-m inp_mask', 'inp_mask'), + 'design_matrix': ('-d design.mat', 'design.mat'), + 't_contrast': ('-t input.con', 'input.con'), + 'f_contrast': ('-f input.fts', 'input.fts'), + 'xchange_block_labels': ('-e design.grp', 'design.grp'), + 'print_unique_perm': ('-q', True), + 'print_info_parallelMode': ('-Q', True), + 'num_permutations': ('-n 10', 10), + 'vox_pvalus': ('-x', True), + 'fstats_only': ('--fonly', True), + 'thresh_free_cluster': ('-T', True), + 'thresh_free_cluster_2Dopt': ('--T2', True), + 'cluster_thresholding': ('-c 0.20', 0.20), + 'cluster_mass_thresholding': ('-C 0.40', 0.40), + 'fcluster_thresholding': ('-F 0.10', 0.10), + 'fcluster_mass_thresholding': ('-S 0.30', 0.30), + 'variance_smoothing': ('-v 0.20', 0.20), + 'diagnostics_off': ('--quiet', True), + 'output_raw': ('-R', True), + 'output_perm_vect': ('-P', True), + 'int_seed': ('--seed=20', 20), + 'TFCE_height_param': ('--tfce_H=0.11', 0.11), + 'TFCE_extent_param': ('--tfce_E=0.50', 0.50), + 'TFCE_connectivity': ('--tfce_C=0.30', 0.30), + 'list_num_voxel_EVs_pos': ('--vxl=' + repr([1, 2, 3, 4]), + repr([1, 2, 3, 4])), + 'list_img_voxel_EVs': ('--vxf=' + repr([6, 7, 8, 9, 3]), + repr([6, 7, 8, 9, 3])) + } + + for name, settings in list(opt_map.items()): + rand4 = fsl.Randomise_parallel( + input_4D='infile', output_rootname='root', **{ + name: settings[1] + }) + assert rand4.cmdline == rand4.cmd + ' -i infile -o root ' + settings[0] + + +# test proj_thresh +@pytest.mark.xfail( + reason="These tests are skipped until we clean up some of this code") +def test_Proj_thresh(): + proj = fsl.ProjThresh() + + # make sure command gets called + assert proj.cmd == 'proj_thresh' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + proj.run() + + # .inputs based parameters setting + proj.inputs.volumes = ['vol1', 'vol2', 'vol3'] + proj.inputs.threshold = 3 + assert proj.cmdline == 'proj_thresh vol1 vol2 vol3 3' + + proj2 = fsl.ProjThresh(threshold=10, volumes=['vola', 'volb']) + assert proj2.cmdline == 'proj_thresh vola volb 10' + + # .run based parameters setting + proj3 = fsl.ProjThresh() + results = proj3.run(volumes=['inp1', 'inp3', 'inp2'], threshold=2) + assert results.runtime.cmdline == 'proj_thresh inp1 inp3 inp2 2' + assert results.runtime.returncode != 0 + assert isinstance(results.interface.inputs.volumes, list) + assert results.interface.inputs.threshold == 2 + + # test arguments for opt_map + # Proj_thresh doesn't have an opt_map{} + + +# test vec_reg +@pytest.mark.xfail( + reason="These tests are skipped until we clean up some of this code") +def test_Vec_reg(): + + vrg = fsl.VecReg() + + # make sure command gets called + assert vrg.cmd == 'vecreg' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + vrg.run() + + # .inputs based parameters setting + vrg.inputs.infile = 'infile' + vrg.inputs.outfile = 'outfile' + vrg.inputs.refVolName = 'MNI152' + vrg.inputs.affineTmat = 'tmat.mat' + assert vrg.cmdline == 'vecreg -i infile -o outfile -r MNI152 -t tmat.mat' 
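+    # The flags above are emitted in a fixed order, so the literal cmdline
+    # can be asserted directly; where flag order may vary (as in the
+    # Randomise tests above), an order-insensitive sketch of the same
+    # check is:
+    #     assert sorted(vrg.cmdline.split()) == sorted(expected.split())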
+ + # .run based parameter setting + vrg2 = fsl.VecReg( + infile='infile2', + outfile='outfile2', + refVolName='MNI152', + affineTmat='tmat2.mat', + brainMask='nodif_brain_mask') + + actualCmdline = sorted(vrg2.cmdline.split()) + cmd = 'vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask' + desiredCmdline = sorted(cmd.split()) + assert actualCmdline == desiredCmdline + + vrg3 = fsl.VecReg() + results = vrg3.run( + infile='infile3', + outfile='outfile3', + refVolName='MNI152', + affineTmat='tmat3.mat', + ) + + assert results.runtime.cmdline == \ + 'vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat' + assert results.runtime.returncode != 0 + assert results.interface.inputs.infile == 'infile3' + assert results.interface.inputs.outfile == 'outfile3' + assert results.interface.inputs.refVolName == 'MNI152' + assert results.interface.inputs.affineTmat == 'tmat3.mat' + + # test arguments for opt_map + opt_map = { + 'verbose': ('-v', True), + 'helpDoc': ('-h', True), + 'tensor': ('--tensor', True), + 'affineTmat': ('-t Tmat', 'Tmat'), + 'warpFile': ('-w wrpFile', 'wrpFile'), + 'interpolation': ('--interp=sinc', 'sinc'), + 'brainMask': ('-m mask', 'mask') + } + + for name, settings in list(opt_map.items()): + vrg4 = fsl.VecReg( + infile='infile', + outfile='outfile', + refVolName='MNI152', + **{ + name: settings[1] + }) + assert vrg4.cmdline == vrg4.cmd + \ + ' -i infile -o outfile -r MNI152 ' + settings[0] + + +# test find_the_biggest +@pytest.mark.xfail( + reason="These tests are skipped until we clean up some of this code") +def test_Find_the_biggest(): + fbg = fsl.FindTheBiggest() + + # make sure command gets called + assert fbg.cmd == 'find_the_biggest' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + fbg.run() + + # .inputs based parameters setting + fbg.inputs.infiles = 'seed*' + fbg.inputs.outfile = 'fbgfile' + assert fbg.cmdline == 'find_the_biggest seed* fbgfile' + + fbg2 = fsl.FindTheBiggest(infiles='seed2*', outfile='fbgfile2') + assert fbg2.cmdline == 'find_the_biggest seed2* fbgfile2' + + # .run based parameters setting + fbg3 = fsl.FindTheBiggest() + results = fbg3.run(infiles='seed3', outfile='out3') + assert results.runtime.cmdline == 'find_the_biggest seed3 out3' + + # test arguments for opt_map + # Find_the_biggest doesn't have an opt_map{} + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_tbss_skeleton(create_files_in_directory): + skeletor = fsl.TractSkeleton() + + files, newdir = create_files_in_directory + + # Test the underlying command + assert skeletor.cmd == "tbss_skeleton" + + # It shouldn't run yet + with pytest.raises(ValueError): + skeletor.run() + + # Test the most basic way to use it + skeletor.inputs.in_file = files[0] + + # First by implicit argument + skeletor.inputs.skeleton_file = True + assert skeletor.cmdline == \ + "tbss_skeleton -i a.nii -o %s" % os.path.join(newdir, "a_skeleton.nii") + + # Now with a specific name + skeletor.inputs.skeleton_file = "old_boney.nii" + assert skeletor.cmdline == "tbss_skeleton -i a.nii -o old_boney.nii" + + # Now test the more complicated usage + bones = fsl.TractSkeleton(in_file="a.nii", project_data=True) + + # This should error + with pytest.raises(ValueError): + bones.run() + + # But we can set what we need + bones.inputs.threshold = 0.2 + bones.inputs.distance_map = "b.nii" + bones.inputs.data_file = "b.nii" # Even though that's silly + + # Now we get a command line + assert bones.cmdline == \ + "tbss_skeleton -i a.nii -p 0.200 b.nii 
%s b.nii %s" % (Info.standard_image("LowerCingulum_1mm.nii.gz"), + os.path.join(newdir, "b_skeletonised.nii")) + + # Can we specify a mask? + bones.inputs.use_cingulum_mask = Undefined + bones.inputs.search_mask_file = "a.nii" + assert bones.cmdline == \ + "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s" % os.path.join(newdir, "b_skeletonised.nii") + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_distancemap(create_files_in_directory): + mapper = fsl.DistanceMap() + + files, newdir = create_files_in_directory + + # Test the underlying command + assert mapper.cmd == "distancemap" + + # It shouldn't run yet + with pytest.raises(ValueError): + mapper.run() + + # But if we do this... + mapper.inputs.in_file = "a.nii" + + # It should + assert mapper.cmdline == "distancemap --out=%s --in=a.nii" % os.path.join( + newdir, "a_dstmap.nii") + + # And we should be able to write out a maxima map + mapper.inputs.local_max_file = True + assert mapper.cmdline == \ + "distancemap --out=%s --in=a.nii --localmax=%s" % (os.path.join(newdir, "a_dstmap.nii"), + os.path.join(newdir, "a_lclmax.nii")) + + # And call it whatever we want + mapper.inputs.local_max_file = "max.nii" + assert mapper.cmdline == \ + "distancemap --out=%s --in=a.nii --localmax=max.nii" % os.path.join(newdir, "a_dstmap.nii") diff --git a/nipype/interfaces/fsl/tests/test_epi.py b/nipype/interfaces/fsl/tests/test_epi.py new file mode 100644 index 0000000000..bf025e991d --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_epi.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import pytest +from nipype.testing.fixtures import create_files_in_directory + +import nipype.interfaces.fsl.epi as fsl +from nipype.interfaces.fsl import no_fsl + + +# test eddy_correct +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_eddy_correct2(create_files_in_directory): + filelist, outdir = create_files_in_directory + eddy = fsl.EddyCorrect() + + # make sure command gets called + assert eddy.cmd == 'eddy_correct' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + eddy.run() + + # .inputs based parameters setting + eddy.inputs.in_file = filelist[0] + eddy.inputs.out_file = 'foo_eddc.nii' + eddy.inputs.ref_num = 100 + assert eddy.cmdline == 'eddy_correct %s foo_eddc.nii 100' % filelist[0] + + # .run based parameter setting + eddy2 = fsl.EddyCorrect( + in_file=filelist[0], out_file='foo_ec.nii', ref_num=20) + assert eddy2.cmdline == 'eddy_correct %s foo_ec.nii 20' % filelist[0] + + # test arguments for opt_map + # eddy_correct class doesn't have opt_map{} diff --git a/nipype/interfaces/fsl/tests/test_maths.py b/nipype/interfaces/fsl/tests/test_maths.py new file mode 100644 index 0000000000..b8aa41dc6b --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_maths.py @@ -0,0 +1,470 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import division +from __future__ import unicode_literals +from builtins import open +import os +import numpy as np + +from nipype.interfaces.base import Undefined +import nipype.interfaces.fsl.maths as fsl +from nipype.interfaces.fsl import no_fsl + +import pytest +from nipype.testing.fixtures import create_files_in_directory_plus_output_type + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def 
test_maths_base(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get some fslmaths
+    maths = fsl.MathsCommand()
+
+    # Test that we got what we wanted
+    assert maths.cmd == "fslmaths"
+
+    # Test that it needs a mandatory argument
+    with pytest.raises(ValueError):
+        maths.run()
+
+    # Set an in file
+    maths.inputs.in_file = "a.nii"
+    out_file = "a_maths{}".format(out_ext)
+
+    # Now test the most basic command line
+    assert maths.cmdline == "fslmaths a.nii {}".format(
+        os.path.join(testdir, out_file))
+
+    # Now test that we can set the various data types
+    dtypes = ["float", "char", "int", "short", "double", "input"]
+    int_cmdline = "fslmaths -dt {} a.nii " + os.path.join(testdir, out_file)
+    out_cmdline = "fslmaths a.nii " + os.path.join(testdir,
+                                                   out_file) + " -odt {}"
+    duo_cmdline = "fslmaths -dt {} a.nii " + os.path.join(
+        testdir, out_file) + " -odt {}"
+    for dtype in dtypes:
+        foo = fsl.MathsCommand(in_file="a.nii", internal_datatype=dtype)
+        assert foo.cmdline == int_cmdline.format(dtype)
+        bar = fsl.MathsCommand(in_file="a.nii", output_datatype=dtype)
+        assert bar.cmdline == out_cmdline.format(dtype)
+        foobar = fsl.MathsCommand(
+            in_file="a.nii", internal_datatype=dtype, output_datatype=dtype)
+        assert foobar.cmdline == duo_cmdline.format(dtype, dtype)
+
+    # Test that we can ask for an outfile name
+    maths.inputs.out_file = "b.nii"
+    assert maths.cmdline == "fslmaths a.nii b.nii"
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_changedt(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get some fslmaths
+    cdt = fsl.ChangeDataType()
+
+    # Test that we got what we wanted
+    assert cdt.cmd == "fslmaths"
+
+    # Test that it needs a mandatory argument
+    with pytest.raises(ValueError):
+        cdt.run()
+
+    # Set an in file and out file
+    cdt.inputs.in_file = "a.nii"
+    cdt.inputs.out_file = "b.nii"
+
+    # But it still shouldn't work
+    with pytest.raises(ValueError):
+        cdt.run()
+
+    # Now test that we can set the various data types
+    dtypes = ["float", "char", "int", "short", "double", "input"]
+    cmdline = "fslmaths a.nii b.nii -odt {}"
+    for dtype in dtypes:
+        foo = fsl.MathsCommand(
+            in_file="a.nii", out_file="b.nii", output_datatype=dtype)
+        assert foo.cmdline == cmdline.format(dtype)
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_threshold(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    thresh = fsl.Threshold(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert thresh.cmd == "fslmaths"
+
+    # Test mandatory args
+    with pytest.raises(ValueError):
+        thresh.run()
+
+    # Test the various opstrings
+    cmdline = "fslmaths a.nii {} b.nii"
+    for val in [0, 0., -1, -1.5, -0.5, 0.5, 3, 400, 400.5]:
+        thresh.inputs.thresh = val
+        assert thresh.cmdline == cmdline.format("-thr {:.10f}".format(val))
+
+    val = "{:.10f}".format(42)
+    thresh = fsl.Threshold(
+        in_file="a.nii", out_file="b.nii", thresh=42, use_robust_range=True)
+    assert thresh.cmdline == cmdline.format("-thrp " + val)
+    thresh.inputs.use_nonzero_voxels = True
+    assert thresh.cmdline == cmdline.format("-thrP " + val)
+    thresh = fsl.Threshold(
+        in_file="a.nii", out_file="b.nii", thresh=42, direction="above")
+    assert thresh.cmdline == cmdline.format("-uthr " + val)
+    thresh.inputs.use_robust_range = True
+    assert thresh.cmdline == cmdline.format("-uthrp " + val)
+    thresh.inputs.use_nonzero_voxels = True
+    assert thresh.cmdline == cmdline.format("-uthrP " + val)
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_meanimage(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    meaner = fsl.MeanImage(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert meaner.cmd == "fslmaths"
+
+    # Test the default opstring
+    assert meaner.cmdline == "fslmaths a.nii -Tmean b.nii"
+
+    # Test the other dimensions
+    cmdline = "fslmaths a.nii -{}mean b.nii"
+    for dim in ["X", "Y", "Z", "T"]:
+        meaner.inputs.dimension = dim
+        assert meaner.cmdline == cmdline.format(dim)
+
+    # Test the auto naming
+    meaner = fsl.MeanImage(in_file="a.nii")
+    assert meaner.cmdline == "fslmaths a.nii -Tmean {}".format(
+        os.path.join(testdir, "a_mean{}".format(out_ext)))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_stdimage(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    stder = fsl.StdImage(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert stder.cmd == "fslmaths"
+
+    # Test the default opstring
+    assert stder.cmdline == "fslmaths a.nii -Tstd b.nii"
+
+    # Test the other dimensions
+    cmdline = "fslmaths a.nii -{}std b.nii"
+    for dim in ["X", "Y", "Z", "T"]:
+        stder.inputs.dimension = dim
+        assert stder.cmdline == cmdline.format(dim)
+
+    # Test the auto naming
+    stder = fsl.StdImage(in_file="a.nii", output_type='NIFTI')
+    assert stder.cmdline == "fslmaths a.nii -Tstd {}".format(
+        os.path.join(testdir, "a_std.nii"))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_maximage(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    maxer = fsl.MaxImage(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert maxer.cmd == "fslmaths"
+
+    # Test the default opstring
+    assert maxer.cmdline == "fslmaths a.nii -Tmax b.nii"
+
+    # Test the other dimensions
+    cmdline = "fslmaths a.nii -{}max b.nii"
+    for dim in ["X", "Y", "Z", "T"]:
+        maxer.inputs.dimension = dim
+        assert maxer.cmdline == cmdline.format(dim)
+
+    # Test the auto naming
+    maxer = fsl.MaxImage(in_file="a.nii")
+    assert maxer.cmdline == "fslmaths a.nii -Tmax {}".format(
+        os.path.join(testdir, "a_max{}".format(out_ext)))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_smooth(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    smoother = fsl.IsotropicSmooth(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert smoother.cmd == "fslmaths"
+
+    # Test that smoothing kernel is mandatory
+    with pytest.raises(ValueError):
+        smoother.run()
+
+    # Test smoothing kernels
+    cmdline = "fslmaths a.nii -s {:.5f} b.nii"
+    for val in [0, 1., 1, 25, 0.5, 8 / 3.]:
+        smoother = fsl.IsotropicSmooth(
+            in_file="a.nii", out_file="b.nii", sigma=val)
+        assert smoother.cmdline == cmdline.format(val)
+        smoother = fsl.IsotropicSmooth(
+            in_file="a.nii", out_file="b.nii", fwhm=val)
+        val = float(val) / np.sqrt(8 * np.log(2))
+        assert smoother.cmdline == cmdline.format(val)
+
+    # Test automatic naming
+    smoother = fsl.IsotropicSmooth(in_file="a.nii", sigma=5)
+    assert smoother.cmdline ==
"fslmaths a.nii -s {:.5f} {}".format( + 5, os.path.join(testdir, "a_smooth{}".format(out_ext))) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_mask(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + + # Get the command + masker = fsl.ApplyMask(in_file="a.nii", out_file="c.nii") + + # Test the underlying command + assert masker.cmd == "fslmaths" + + # Test that the mask image is mandatory + with pytest.raises(ValueError): + masker.run() + + # Test setting the mask image + masker.inputs.mask_file = "b.nii" + assert masker.cmdline == "fslmaths a.nii -mas b.nii c.nii" + + # Test auto name generation + masker = fsl.ApplyMask(in_file="a.nii", mask_file="b.nii") + assert masker.cmdline == "fslmaths a.nii -mas b.nii " + os.path.join( + testdir, "a_masked{}".format(out_ext)) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_dilation(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + + # Get the command + diller = fsl.DilateImage(in_file="a.nii", out_file="b.nii") + + # Test the underlying command + assert diller.cmd == "fslmaths" + + # Test that the dilation operation is mandatory + with pytest.raises(ValueError): + diller.run() + + # Test the different dilation operations + for op in ["mean", "modal", "max"]: + cv = dict(mean="M", modal="D", max="F") + diller.inputs.operation = op + assert diller.cmdline == "fslmaths a.nii -dil{} b.nii".format(cv[op]) + + # Now test the different kernel options + for k in ["3D", "2D", "box", "boxv", "gauss", "sphere"]: + for size in [1, 1.5, 5]: + diller.inputs.kernel_shape = k + diller.inputs.kernel_size = size + assert diller.cmdline == "fslmaths a.nii -kernel {} {:.4f} -dilF b.nii".format( + k, size) + + # Test that we can use a file kernel + f = open("kernel.txt", "w").close() + del f # Shut pyflakes up + diller.inputs.kernel_shape = "file" + diller.inputs.kernel_size = Undefined + diller.inputs.kernel_file = "kernel.txt" + assert diller.cmdline == "fslmaths a.nii -kernel file kernel.txt -dilF b.nii" + + # Test that we don't need to request an out name + dil = fsl.DilateImage(in_file="a.nii", operation="max") + assert dil.cmdline == "fslmaths a.nii -dilF {}".format( + os.path.join(testdir, "a_dil{}".format(out_ext))) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_erosion(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + + # Get the command + erode = fsl.ErodeImage(in_file="a.nii", out_file="b.nii") + + # Test the underlying command + assert erode.cmd == "fslmaths" + + # Test the basic command line + assert erode.cmdline == "fslmaths a.nii -ero b.nii" + + # Test that something else happens when you minimum filter + erode.inputs.minimum_filter = True + assert erode.cmdline == "fslmaths a.nii -eroF b.nii" + + # Test that we don't need to request an out name + erode = fsl.ErodeImage(in_file="a.nii") + assert erode.cmdline == "fslmaths a.nii -ero {}".format( + os.path.join(testdir, "a_ero{}".format(out_ext))) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_spatial_filter(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + + # Get the command + filter = fsl.SpatialFilter(in_file="a.nii", out_file="b.nii") + + # Test the underlying command + assert filter.cmd == "fslmaths" + + # Test that it 
fails without an operation
+    with pytest.raises(ValueError):
+        filter.run()
+
+    # Test the different operations
+    for op in ["mean", "meanu", "median"]:
+        filter.inputs.operation = op
+        assert filter.cmdline == "fslmaths a.nii -f{} b.nii".format(op)
+
+    # Test that we don't need to ask for an out name
+    filter = fsl.SpatialFilter(in_file="a.nii", operation="mean")
+    assert filter.cmdline == "fslmaths a.nii -fmean {}".format(
+        os.path.join(testdir, "a_filt{}".format(out_ext)))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_unarymaths(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    maths = fsl.UnaryMaths(in_file="a.nii", out_file="b.nii")
+
+    # Test the underlying command
+    assert maths.cmd == "fslmaths"
+
+    # Test that it fails without an operation
+    with pytest.raises(ValueError):
+        maths.run()
+
+    # Test the different operations
+    ops = [
+        "exp", "log", "sin", "cos", "sqr", "sqrt", "recip", "abs", "bin",
+        "index"
+    ]
+    for op in ops:
+        maths.inputs.operation = op
+        assert maths.cmdline == "fslmaths a.nii -{} b.nii".format(op)
+
+    # Test that we don't need to ask for an out file
+    for op in ops:
+        maths = fsl.UnaryMaths(in_file="a.nii", operation=op)
+        assert maths.cmdline == "fslmaths a.nii -{} {}".format(
+            op, os.path.join(testdir, "a_{}{}".format(op, out_ext)))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_binarymaths(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    maths = fsl.BinaryMaths(in_file="a.nii", out_file="c.nii")
+
+    # Test the underlying command
+    assert maths.cmd == "fslmaths"
+
+    # Test that it fails without an operation
+    with pytest.raises(ValueError):
+        maths.run()
+
+    # Test the different operations
+    ops = ["add", "sub", "mul", "div", "rem", "min", "max"]
+    operands = ["b.nii", -2, -0.5, 0, .123456, np.pi, 500]
+    for op in ops:
+        for ent in operands:
+            maths = fsl.BinaryMaths(
+                in_file="a.nii", out_file="c.nii", operation=op)
+            if ent == "b.nii":
+                maths.inputs.operand_file = ent
+                assert maths.cmdline == "fslmaths a.nii -{} b.nii c.nii".format(
+                    op)
+            else:
+                maths.inputs.operand_value = ent
+                assert maths.cmdline == "fslmaths a.nii -{} {:.8f} c.nii".format(
+                    op, ent)
+
+    # Test that we don't need to ask for an out file
+    for op in ops:
+        maths = fsl.BinaryMaths(
+            in_file="a.nii", operation=op, operand_file="b.nii")
+        assert maths.cmdline == "fslmaths a.nii -{} b.nii {}".format(
+            op, os.path.join(testdir, "a_maths{}".format(out_ext)))
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_multimaths(create_files_in_directory_plus_output_type):
+    files, testdir, out_ext = create_files_in_directory_plus_output_type
+
+    # Get the command
+    maths = fsl.MultiImageMaths(in_file="a.nii", out_file="c.nii")
+
+    # Test the underlying command
+    assert maths.cmd == "fslmaths"
+
+    # Test that it fails without an operation
+    with pytest.raises(ValueError):
+        maths.run()
+
+    # Test a few operations
+    maths.inputs.operand_files = ["a.nii", "b.nii"]
+    opstrings = [
+        "-add %s -div %s", "-max 1 -sub %s -min %s", "-mas %s -add %s"
+    ]
+    for ostr in opstrings:
+        maths.inputs.op_string = ostr
+        assert maths.cmdline == "fslmaths a.nii %s c.nii" % ostr % ("a.nii",
+                                                                    "b.nii")
+
+    # Test that we don't need to ask for an out file
+    maths = fsl.MultiImageMaths(
+        in_file="a.nii", op_string="-add %s -mul 5",
operand_files=["b.nii"]) + assert maths.cmdline == \ + "fslmaths a.nii -add b.nii -mul 5 %s" % os.path.join(testdir, "a_maths%s" % out_ext) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_tempfilt(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + + # Get the command + filt = fsl.TemporalFilter(in_file="a.nii", out_file="b.nii") + + # Test the underlying command + assert filt.cmd == "fslmaths" + + # Test that both filters are initialized off + assert filt.cmdline == "fslmaths a.nii -bptf -1.000000 -1.000000 b.nii" + + # Test some filters + windows = [(-1, -1), (0.1, 0.1), (-1, 20), (20, -1), (128, 248)] + for win in windows: + filt.inputs.highpass_sigma = win[0] + filt.inputs.lowpass_sigma = win[1] + assert filt.cmdline == "fslmaths a.nii -bptf {:.6f} {:.6f} b.nii".format( + win[0], win[1]) + + # Test that we don't need to ask for an out file + filt = fsl.TemporalFilter(in_file="a.nii", highpass_sigma=64) + assert filt.cmdline == \ + "fslmaths a.nii -bptf 64.000000 -1.000000 {}".format(os.path.join(testdir, "a_filt{}".format(out_ext))) diff --git a/nipype/interfaces/fsl/tests/test_model.py b/nipype/interfaces/fsl/tests/test_model.py new file mode 100644 index 0000000000..8b8d0b7b40 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_model.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from builtins import open + +import os + +import pytest +import nipype.interfaces.fsl.model as fsl +from nipype.interfaces.fsl import no_fsl + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_MultipleRegressDesign(tmpdir): + tmpdir.chdir() + foo = fsl.MultipleRegressDesign() + foo.inputs.regressors = dict( + voice_stenght=[1, 1, 1], age=[0.2, 0.4, 0.5], BMI=[1, -1, 2]) + con1 = ['voice_and_age', 'T', ['age', 'voice_stenght'], [0.5, 0.5]] + con2 = ['just_BMI', 'T', ['BMI'], [1]] + foo.inputs.contrasts = [con1, con2, ['con3', 'F', [con1, con2]]] + res = foo.run() + + for ii in ["mat", "con", "fts", "grp"]: + assert getattr(res.outputs, + "design_" + ii) == tmpdir.join('design.' + ii).strpath + + design_mat_expected_content = """/NumWaves 3 +/NumPoints 3 +/PPheights 3.000000e+00 5.000000e-01 1.000000e+00 + +/Matrix +1.000000e+00 2.000000e-01 1.000000e+00 +-1.000000e+00 4.000000e-01 1.000000e+00 +2.000000e+00 5.000000e-01 1.000000e+00 +""" + + design_con_expected_content = """/ContrastName1 voice_and_age +/ContrastName2 just_BMI +/NumWaves 3 +/NumContrasts 2 +/PPheights 1.000000e+00 1.000000e+00 +/RequiredEffect 100.000 100.000 + +/Matrix +0.000000e+00 5.000000e-01 5.000000e-01 +1.000000e+00 0.000000e+00 0.000000e+00 +""" + + design_fts_expected_content = """/NumWaves 2 +/NumContrasts 1 + +/Matrix +1 1 +""" + + design_grp_expected_content = """/NumWaves 1 +/NumPoints 3 + +/Matrix +1 +1 +1 +""" + for ii in ["mat", "con", "fts", "grp"]: + assert tmpdir.join('design.' 
+ ii).read() == eval(
+            "design_" + ii + "_expected_content")
diff --git a/nipype/interfaces/fsl/tests/test_preprocess.py b/nipype/interfaces/fsl/tests/test_preprocess.py
new file mode 100644
index 0000000000..4b387201cf
--- /dev/null
+++ b/nipype/interfaces/fsl/tests/test_preprocess.py
@@ -0,0 +1,630 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from builtins import str
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from builtins import open
+
+import os
+from copy import deepcopy
+
+import pytest
+import pdb
+from nipype.utils.filemanip import split_filename, ensure_list
+from .. import preprocess as fsl
+from nipype.interfaces.fsl import Info
+from nipype.interfaces.base import File, TraitError, Undefined, isdefined
+from nipype.interfaces.fsl import no_fsl
+
+
+def fsl_name(obj, fname):
+    """Create valid fsl name, including file extension for output type.
+    """
+    ext = Info.output_type_to_ext(obj.inputs.output_type)
+    return fname + ext
+
+
+@pytest.fixture()
+def setup_infile(tmpdir):
+    ext = Info.output_type_to_ext(Info.output_type())
+    tmp_infile = tmpdir.join('foo' + ext)
+    tmp_infile.open("w")
+    return (tmp_infile.strpath, tmpdir.strpath)
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_bet(setup_infile):
+    tmp_infile, tp_dir = setup_infile
+    better = fsl.BET()
+    assert better.cmd == 'bet'
+
+    # Test raising error with mandatory args absent
+    with pytest.raises(ValueError):
+        better.run()
+
+    # Test generated outfile name
+    better.inputs.in_file = tmp_infile
+    outfile = fsl_name(better, 'foo_brain')
+    outpath = os.path.join(os.getcwd(), outfile)
+    realcmd = 'bet %s %s' % (tmp_infile, outpath)
+    assert better.cmdline == realcmd
+    # Test specified outfile name
+    outfile = fsl_name(better, '/newdata/bar')
+    better.inputs.out_file = outfile
+    realcmd = 'bet %s %s' % (tmp_infile, outfile)
+    assert better.cmdline == realcmd
+
+    # infile foo2.nii doesn't exist
+    def func():
+        better.run(in_file='foo2.nii', out_file='bar.nii')
+
+    with pytest.raises(TraitError):
+        func()
+
+    # Our options and some test values for them
+    # Should parallel the opt_map structure in the class for clarity
+    opt_map = {
+        'outline': ('-o', True),
+        'mask': ('-m', True),
+        'skull': ('-s', True),
+        'no_output': ('-n', True),
+        'frac': ('-f 0.40', 0.4),
+        'vertical_gradient': ('-g 0.75', 0.75),
+        'radius': ('-r 20', 20),
+        'center': ('-c 54 75 80', [54, 75, 80]),
+        'threshold': ('-t', True),
+        'mesh': ('-e', True),
+        'surfaces': ('-A', True)
+        # 'verbose': ('-v', True),
+        # 'flags': ('--i-made-this-up', '--i-made-this-up'),
+    }
+    # Currently we don't test -R, -S, -B, -Z, -F, -A or -A2
+
+    # test each of our arguments
+    better = fsl.BET()
+    outfile = fsl_name(better, 'foo_brain')
+    outpath = os.path.join(os.getcwd(), outfile)
+    for name, settings in list(opt_map.items()):
+        better = fsl.BET(**{name: settings[1]})
+        # Add mandatory input
+        better.inputs.in_file = tmp_infile
+        realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]])
+        assert better.cmdline == realcmd
+
+
+# test fast
+
+
+@pytest.mark.skipif(no_fsl(), reason="fsl is not installed")
+def test_fast(setup_infile):
+    tmp_infile, tp_dir = setup_infile
+    faster = fsl.FAST()
+    faster.inputs.verbose = True
+    fasted = fsl.FAST(in_files=tmp_infile, verbose=True)
+    fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose=True)
+
+    assert faster.cmd == 'fast'
+    assert faster.inputs.verbose
+    assert faster.inputs.manual_seg ==
Undefined + assert faster.inputs != fasted.inputs + assert fasted.cmdline == 'fast -v -S 1 %s' % (tmp_infile) + assert fasted2.cmdline == 'fast -v -S 2 %s %s' % (tmp_infile, tmp_infile) + + faster = fsl.FAST() + faster.inputs.in_files = tmp_infile + assert faster.cmdline == 'fast -S 1 %s' % (tmp_infile) + faster.inputs.in_files = [tmp_infile, tmp_infile] + assert faster.cmdline == 'fast -S 2 %s %s' % (tmp_infile, tmp_infile) + + # Our options and some test values for them + # Should parallel the opt_map structure in the class for clarity + opt_map = { + 'number_classes': ('-n 4', 4), + 'bias_iters': ('-I 5', 5), + 'bias_lowpass': ('-l 15', 15), + 'img_type': ('-t 2', 2), + 'init_seg_smooth': ('-f 0.035', 0.035), + 'segments': ('-g', True), + 'init_transform': ('-a %s' % (tmp_infile), '%s' % (tmp_infile)), + 'other_priors': + ('-A %s %s %s' % (tmp_infile, tmp_infile, tmp_infile), + (['%s' % (tmp_infile), + '%s' % (tmp_infile), + '%s' % (tmp_infile)])), + 'no_pve': ('--nopve', True), + 'output_biasfield': ('-b', True), + 'output_biascorrected': ('-B', True), + 'no_bias': ('-N', True), + 'out_basename': ('-o fasted', 'fasted'), + 'use_priors': ('-P', True), + 'segment_iters': ('-W 14', 14), + 'mixel_smooth': ('-R 0.25', 0.25), + 'iters_afterbias': ('-O 3', 3), + 'hyper': ('-H 0.15', 0.15), + 'verbose': ('-v', True), + 'manual_seg': ('-s %s' % (tmp_infile), '%s' % (tmp_infile)), + 'probability_maps': ('-p', True), + } + + # test each of our arguments + for name, settings in list(opt_map.items()): + faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]}) + assert faster.cmdline == ' '.join( + [faster.cmd, settings[0], + "-S 1 %s" % tmp_infile]) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fast_list_outputs(setup_infile, tmpdir): + ''' By default (no -o), FSL's fast command outputs files into the same + directory as the input files. 
If the flag -o is set, it outputs files into + the cwd ''' + + def _run_and_test(opts, output_base): + outputs = fsl.FAST(**opts)._list_outputs() + for output in outputs.values(): + if output: + for filename in ensure_list(output): + assert os.path.realpath(filename).startswith( + os.path.realpath(output_base)) + + # set up + tmp_infile, indir = setup_infile + cwd = tmpdir.mkdir("new") + cwd.chdir() + assert indir != cwd.strpath + out_basename = 'a_basename' + + # run and test + opts = {'in_files': tmp_infile} + input_path, input_filename, input_ext = split_filename(tmp_infile) + _run_and_test(opts, os.path.join(input_path, input_filename)) + + opts['out_basename'] = out_basename + _run_and_test(opts, os.path.join(cwd.strpath, out_basename)) + + +@pytest.fixture() +def setup_flirt(tmpdir): + ext = Info.output_type_to_ext(Info.output_type()) + infile = tmpdir.join("infile" + ext) + infile.open("w") + reffile = tmpdir.join("reffile" + ext) + reffile.open("w") + return (tmpdir, infile.strpath, reffile.strpath) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_flirt(setup_flirt): + # setup + tmpdir, infile, reffile = setup_flirt + + flirter = fsl.FLIRT() + assert flirter.cmd == 'flirt' + + flirter.inputs.bins = 256 + flirter.inputs.cost = 'mutualinfo' + + flirted = fsl.FLIRT( + in_file=infile, + reference=reffile, + out_file='outfile', + out_matrix_file='outmat.mat', + bins=256, + cost='mutualinfo') + + flirt_est = fsl.FLIRT( + in_file=infile, + reference=reffile, + out_matrix_file='outmat.mat', + bins=256, + cost='mutualinfo') + assert flirter.inputs != flirted.inputs + assert flirted.inputs != flirt_est.inputs + + assert flirter.inputs.bins == flirted.inputs.bins + assert flirter.inputs.cost == flirt_est.inputs.cost + realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \ + '-bins 256 -cost mutualinfo' % (infile, reffile) + assert flirted.cmdline == realcmd + + flirter = fsl.FLIRT() + # infile not specified + with pytest.raises(ValueError): + flirter.cmdline + flirter.inputs.in_file = infile + # reference not specified + with pytest.raises(ValueError): + flirter.cmdline + flirter.inputs.reference = reffile + + # Generate outfile and outmatrix + pth, fname, ext = split_filename(infile) + outfile = fsl_name(flirter, '%s_flirt' % fname) + outmat = '%s_flirt.mat' % fname + realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile, + outfile, outmat) + assert flirter.cmdline == realcmd + + # test apply_xfm option + axfm = deepcopy(flirter) + axfm.inputs.apply_xfm = True + # in_matrix_file or uses_qform must be defined + with pytest.raises(RuntimeError): + axfm.cmdline + axfm2 = deepcopy(axfm) + # test uses_qform + axfm.inputs.uses_qform = True + assert axfm.cmdline == (realcmd + ' -applyxfm -usesqform') + # test in_matrix_file + axfm2.inputs.in_matrix_file = reffile + assert axfm2.cmdline == (realcmd + ' -applyxfm -init %s' % reffile) + + tmpfile = tmpdir.join("file4test.nii") + tmpfile.open("w") + # Loop over all inputs, set a reasonable value and make sure the + # cmdline is updated correctly. 
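+    # A sketch of the value chosen per trait kind in the loop below:
+    #   'args'                    -> the literal flag '-v'
+    #   File traits               -> the temp file created above
+    #   booleans (default False)  -> True, so only the bare argstr appears
+    #   searchr_x / _y / _z       -> [-45, 45], joined into the argstr
+    #   everything else           -> the trait's declared default value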
+ for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()): + # Skip mandatory inputs and the trait methods + if key in ('trait_added', 'trait_modified', 'in_file', 'reference', + 'environ', 'output_type', 'out_file', 'out_matrix_file', + 'in_matrix_file', 'apply_xfm', + 'resource_monitor', 'out_log', + 'save_log'): + continue + param = None + value = None + if key == 'args': + param = '-v' + value = '-v' + elif isinstance(trait_spec.trait_type, File): + value = tmpfile.strpath + param = trait_spec.argstr % value + elif trait_spec.default is False: + param = trait_spec.argstr + value = True + elif key in ('searchr_x', 'searchr_y', 'searchr_z'): + value = [-45, 45] + param = trait_spec.argstr % ' '.join(str(elt) for elt in value) + else: + value = trait_spec.default + param = trait_spec.argstr % value + cmdline = 'flirt -in %s -ref %s' % (infile, reffile) + # Handle autogeneration of outfile + pth, fname, ext = split_filename(infile) + outfile = fsl_name(fsl.FLIRT(), '%s_flirt' % fname) + outfile = ' '.join(['-out', outfile]) + # Handle autogeneration of outmatrix + outmatrix = '%s_flirt.mat' % fname + outmatrix = ' '.join(['-omat', outmatrix]) + # Build command line + cmdline = ' '.join([cmdline, outfile, outmatrix, param]) + flirter = fsl.FLIRT(in_file=infile, reference=reffile) + setattr(flirter.inputs, key, value) + assert flirter.cmdline == cmdline + + # Test OutputSpec + flirter = fsl.FLIRT(in_file=infile, reference=reffile) + pth, fname, ext = split_filename(infile) + flirter.inputs.out_file = ''.join(['foo', ext]) + flirter.inputs.out_matrix_file = ''.join(['bar', ext]) + outs = flirter._list_outputs() + assert outs['out_file'] == \ + os.path.join(os.getcwd(), flirter.inputs.out_file) + assert outs['out_matrix_file'] == \ + os.path.join(os.getcwd(), flirter.inputs.out_matrix_file) + assert not isdefined(flirter.inputs.out_log) + + +# Mcflirt +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_mcflirt(setup_flirt): + tmpdir, infile, reffile = setup_flirt + + frt = fsl.MCFLIRT() + assert frt.cmd == 'mcflirt' + # Test generated outfile name + + frt.inputs.in_file = infile + _, nme = os.path.split(infile) + outfile = os.path.join(os.getcwd(), nme) + outfile = frt._gen_fname(outfile, suffix='_mcf') + realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile + assert frt.cmdline == realcmd + # Test specified outfile name + outfile2 = '/newdata/bar.nii' + frt.inputs.out_file = outfile2 + realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2 + assert frt.cmdline == realcmd + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_mcflirt_opt(setup_flirt): + tmpdir, infile, reffile = setup_flirt + _, nme = os.path.split(infile) + + opt_map = { + 'cost': ('-cost mutualinfo', 'mutualinfo'), + 'bins': ('-bins 256', 256), + 'dof': ('-dof 6', 6), + 'ref_vol': ('-refvol 2', 2), + 'scaling': ('-scaling 6.00', 6.00), + 'smooth': ('-smooth 1.00', 1.00), + 'rotation': ('-rotation 2', 2), + 'stages': ('-stages 3', 3), + 'init': ('-init %s' % (infile), infile), + 'use_gradient': ('-gdt', True), + 'use_contour': ('-edge', True), + 'mean_vol': ('-meanvol', True), + 'stats_imgs': ('-stats', True), + 'save_mats': ('-mats', True), + 'save_plots': ('-plots', True), + } + + for name, settings in list(opt_map.items()): + fnt = fsl.MCFLIRT(in_file=infile, **{name: settings[1]}) + outfile = os.path.join(os.getcwd(), nme) + outfile = fnt._gen_fname(outfile, suffix='_mcf') + + instr = '-in %s' % (infile) + outstr = '-out %s' % (outfile) + if name in ('init', 
'cost', 'dof', 'mean_vol', 'bins'): + assert fnt.cmdline == ' '.join( + [fnt.cmd, instr, settings[0], outstr]) + else: + assert fnt.cmdline == ' '.join( + [fnt.cmd, instr, outstr, settings[0]]) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_mcflirt_noinput(): + # Test error is raised when missing required args + fnt = fsl.MCFLIRT() + with pytest.raises(ValueError) as excinfo: + fnt.run() + assert str(excinfo.value).startswith( + "MCFLIRT requires a value for input 'in_file'") + + +# test fnirt + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fnirt(setup_flirt): + + tmpdir, infile, reffile = setup_flirt + tmpdir.chdir() + fnirt = fsl.FNIRT() + assert fnirt.cmd == 'fnirt' + + # Test list parameters + params = [('subsampling_scheme', '--subsamp', [4, 2, 2, 1], + '4,2,2,1'), ('max_nonlin_iter', '--miter', [4, 4, 4, 2], + '4,4,4,2'), ('ref_fwhm', '--reffwhm', [4, 2, 2, 0], + '4,2,2,0'), ('in_fwhm', '--infwhm', + [4, 2, 2, 0], '4,2,2,0'), + ('apply_refmask', '--applyrefmask', [0, 0, 1, 1], + '0,0,1,1'), ('apply_inmask', '--applyinmask', [0, 0, 0, 1], + '0,0,0,1'), ('regularization_lambda', '--lambda', + [0.5, 0.75], '0.5,0.75'), + ('intensity_mapping_model', '--intmod', 'global_non_linear', + 'global_non_linear')] + for item, flag, val, strval in params: + fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{item: val}) + log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False) + iout = fnirt._gen_fname(infile, suffix='_warped') + if item in ('max_nonlin_iter'): + cmd = 'fnirt --in=%s '\ + '--logout=%s'\ + ' %s=%s --ref=%s'\ + ' --iout=%s' % (infile, log, + flag, strval, reffile, iout) + elif item in ('in_fwhm', 'intensity_mapping_model'): + cmd = 'fnirt --in=%s %s=%s --logout=%s '\ + '--ref=%s --iout=%s' % (infile, flag, + strval, log, reffile, iout) + elif item.startswith('apply'): + cmd = 'fnirt %s=%s '\ + '--in=%s '\ + '--logout=%s '\ + '--ref=%s --iout=%s' % (flag, strval, + infile, log, + reffile, + iout) + + else: + cmd = 'fnirt '\ + '--in=%s --logout=%s '\ + '--ref=%s %s=%s --iout=%s' % (infile, log, + reffile, + flag, strval, + iout) + assert fnirt.cmdline == cmd + + # Test ValueError is raised when missing mandatory args + fnirt = fsl.FNIRT() + with pytest.raises(ValueError): + fnirt.run() + fnirt.inputs.in_file = infile + fnirt.inputs.ref_file = reffile + intmap_basename = '%s_intmap' % fsl.FNIRT.intensitymap_file_basename( + infile) + intmap_image = fsl_name(fnirt, intmap_basename) + intmap_txt = '%s.txt' % intmap_basename + # doing this to create the file to pass tests for file existence + with open(intmap_image, 'w'): + pass + with open(intmap_txt, 'w'): + pass + + # test files + opt_map = [('affine_file', '--aff=%s' % infile, + infile), ('inwarp_file', '--inwarp=%s' % infile, infile), + ('in_intensitymap_file', '--intin=%s' % intmap_basename, + [intmap_image]), ('in_intensitymap_file', + '--intin=%s' % intmap_basename, + [intmap_image, intmap_txt]), + ('config_file', '--config=%s' % infile, + infile), ('refmask_file', '--refmask=%s' % infile, + infile), ('inmask_file', '--inmask=%s' % infile, + infile), ('field_file', + '--fout=%s' % infile, infile), + ('jacobian_file', '--jout=%s' % infile, + infile), ('modulatedref_file', '--refout=%s' % infile, + infile), ('out_intensitymap_file', + '--intout=%s' % intmap_basename, True), + ('out_intensitymap_file', '--intout=%s' % intmap_basename, + intmap_image), ('fieldcoeff_file', '--cout=%s' % infile, + infile), ('log_file', '--logout=%s' % infile, + infile)] + + for 
(name, settings, arg) in opt_map: + fnirt = fsl.FNIRT(in_file=infile, ref_file=reffile, **{name: arg}) + + if name in ('config_file', 'affine_file', 'field_file', + 'fieldcoeff_file'): + cmd = 'fnirt %s --in=%s '\ + '--logout=%s '\ + '--ref=%s --iout=%s' % (settings, infile, log, + reffile, iout) + elif name in ('refmask_file'): + cmd = 'fnirt --in=%s '\ + '--logout=%s --ref=%s '\ + '%s '\ + '--iout=%s' % (infile, log, + reffile, + settings, + iout) + elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', + 'jacobian_file'): + cmd = 'fnirt --in=%s '\ + '%s '\ + '--logout=%s --ref=%s '\ + '--iout=%s' % (infile, + settings, + log, + reffile, + iout) + elif name in ('log_file'): + cmd = 'fnirt --in=%s '\ + '%s --ref=%s '\ + '--iout=%s' % (infile, + settings, + reffile, + iout) + else: + cmd = 'fnirt --in=%s '\ + '--logout=%s %s '\ + '--ref=%s --iout=%s' % (infile, log, + settings, + reffile, iout) + + assert fnirt.cmdline == cmd + + if name == 'out_intensitymap_file': + assert fnirt._list_outputs()['out_intensitymap_file'] == [ + intmap_image, intmap_txt + ] + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_applywarp(setup_flirt): + tmpdir, infile, reffile = setup_flirt + opt_map = { + 'out_file': ('--out=bar.nii', 'bar.nii'), + 'premat': ('--premat=%s' % (reffile), reffile), + 'postmat': ('--postmat=%s' % (reffile), reffile), + } + + # in_file, ref_file, field_file mandatory + for name, settings in list(opt_map.items()): + awarp = fsl.ApplyWarp( + in_file=infile, + ref_file=reffile, + field_file=reffile, + **{ + name: settings[1] + }) + if name == 'out_file': + realcmd = 'applywarp --in=%s '\ + '--ref=%s --out=%s '\ + '--warp=%s' % (infile, reffile, + settings[1], reffile) + else: + outfile = awarp._gen_fname(infile, suffix='_warp') + realcmd = 'applywarp --in=%s '\ + '--ref=%s --out=%s '\ + '--warp=%s %s' % (infile, reffile, + outfile, reffile, + settings[0]) + assert awarp.cmdline == realcmd + + +@pytest.fixture() +def setup_fugue(tmpdir): + import nibabel as nb + import numpy as np + import os.path as op + + d = np.ones((80, 80, 80)) + infile = tmpdir.join('dumbfile.nii.gz').strpath + nb.Nifti1Image(d, None, None).to_filename(infile) + + return (tmpdir, infile) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +@pytest.mark.parametrize("attr, out_file", [({ + "save_unmasked_fmap": True, + "fmap_in_file": "infile", + "mask_file": "infile", + "output_type": "NIFTI_GZ" +}, 'fmap_out_file'), ({ + "save_unmasked_shift": True, + "fmap_in_file": "infile", + "dwell_time": 1.e-3, + "mask_file": "infile", + "output_type": "NIFTI_GZ" +}, "shift_out_file"), ({ + "in_file": "infile", + "mask_file": "infile", + "shift_in_file": "infile", + "output_type": "NIFTI_GZ" +}, 'unwarped_file')]) +def test_fugue(setup_fugue, attr, out_file): + import os.path as op + tmpdir, infile = setup_fugue + + fugue = fsl.FUGUE() + for key, value in attr.items(): + if value == "infile": + setattr(fugue.inputs, key, infile) + else: + setattr(fugue.inputs, key, value) + res = fugue.run() + + assert isdefined(getattr(res.outputs, out_file)) + trait_spec = fugue.inputs.trait(out_file) + out_name = trait_spec.name_template % 'dumbfile' + out_name += '.nii.gz' + assert op.basename(getattr(res.outputs, out_file)) == out_name + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_first_genfname(): + first = fsl.FIRST() + first.inputs.out_file = 'segment.nii' + first.inputs.output_type = "NIFTI_GZ" + + value = 
first._gen_fname(basename='original_segmentations') + expected_value = os.path.abspath('segment_all_fast_origsegs.nii.gz') + assert value == expected_value + first.inputs.method = 'none' + value = first._gen_fname(basename='original_segmentations') + expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz') + assert value == expected_value + first.inputs.method = 'auto' + first.inputs.list_of_specific_structures = ['L_Hipp', 'R_Hipp'] + value = first._gen_fname(basename='original_segmentations') + expected_value = os.path.abspath('segment_all_none_origsegs.nii.gz') + assert value == expected_value diff --git a/nipype/interfaces/fsl/tests/test_utils.py b/nipype/interfaces/fsl/tests/test_utils.py new file mode 100644 index 0000000000..5df6d88a49 --- /dev/null +++ b/nipype/interfaces/fsl/tests/test_utils.py @@ -0,0 +1,327 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os + +import numpy as np + +import nibabel as nb +import pytest +import nipype.interfaces.fsl.utils as fsl +from nipype.interfaces.fsl import no_fsl, Info + +from nipype.testing.fixtures import create_files_in_directory_plus_output_type + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fslroi(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + + roi = fsl.ExtractROI() + + # make sure command gets called + assert roi.cmd == 'fslroi' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + roi.run() + + # .inputs based parameters setting + roi.inputs.in_file = filelist[0] + roi.inputs.roi_file = 'foo_roi.nii' + roi.inputs.t_min = 10 + roi.inputs.t_size = 20 + assert roi.cmdline == 'fslroi %s foo_roi.nii 10 20' % filelist[0] + + # .run based parameter setting + roi2 = fsl.ExtractROI( + in_file=filelist[0], + roi_file='foo2_roi.nii', + t_min=20, + t_size=40, + x_min=3, + x_size=30, + y_min=40, + y_size=10, + z_min=5, + z_size=20) + assert roi2.cmdline == \ + 'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40' % filelist[0] + + # test arguments for opt_map + # Fslroi class doesn't have a filled opt_map{} + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_fslmerge(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + + merger = fsl.Merge() + + # make sure command gets called + assert merger.cmd == 'fslmerge' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + merger.run() + + # .inputs based parameters setting + merger.inputs.in_files = filelist + merger.inputs.merged_file = 'foo_merged.nii' + merger.inputs.dimension = 't' + merger.inputs.output_type = 'NIFTI' + assert merger.cmdline == 'fslmerge -t foo_merged.nii %s' % ' '.join( + filelist) + + # verify that providing a tr value updates the dimension to tr + merger.inputs.tr = 2.25 + assert merger.cmdline == 'fslmerge -tr foo_merged.nii %s %.2f' % ( + ' '.join(filelist), 2.25) + + # .run based parameter setting + merger2 = fsl.Merge( + in_files=filelist, + merged_file='foo_merged.nii', + dimension='t', + output_type='NIFTI', + tr=2.25) + + assert merger2.cmdline == \ + 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25) + + # test arguments for opt_map + # Fslmerge class doesn't have a filled opt_map{} + + +# test fslmath + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def 
test_fslmaths(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + math = fsl.ImageMaths() + + # make sure command gets called + assert math.cmd == 'fslmaths' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + math.run() + + # .inputs based parameters setting + math.inputs.in_file = filelist[0] + math.inputs.op_string = '-add 2.5 -mul input_volume2' + math.inputs.out_file = 'foo_math.nii' + assert math.cmdline == \ + 'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii' % filelist[0] + + # .run based parameter setting + math2 = fsl.ImageMaths( + in_file=filelist[0], op_string='-add 2.5', out_file='foo2_math.nii') + assert math2.cmdline == 'fslmaths %s -add 2.5 foo2_math.nii' % filelist[0] + + # test arguments for opt_map + # Fslmath class doesn't have opt_map{} + + +# test overlay + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_overlay(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + overlay = fsl.Overlay() + + # make sure command gets called + assert overlay.cmd == 'overlay' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + overlay.run() + + # .inputs based parameters setting + overlay.inputs.stat_image = filelist[0] + overlay.inputs.stat_thresh = (2.5, 10) + overlay.inputs.background_image = filelist[1] + overlay.inputs.auto_thresh_bg = True + overlay.inputs.show_negative_stats = True + overlay.inputs.out_file = 'foo_overlay.nii' + assert overlay.cmdline == \ + 'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii' % ( + filelist[1], filelist[0], filelist[0]) + + # .run based parameter setting + overlay2 = fsl.Overlay( + stat_image=filelist[0], + stat_thresh=(2.5, 10), + background_image=filelist[1], + auto_thresh_bg=True, + out_file='foo2_overlay.nii') + assert overlay2.cmdline == 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii' % ( + filelist[1], filelist[0]) + + +# test slicer + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_slicer(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + slicer = fsl.Slicer() + + # make sure command gets called + assert slicer.cmd == 'slicer' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + slicer.run() + + # .inputs based parameters setting + slicer.inputs.in_file = filelist[0] + slicer.inputs.image_edges = filelist[1] + slicer.inputs.intensity_range = (10., 20.) 
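+    # all_axial (-A) requires image_width as well (see SlicerInputSpec)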
+ slicer.inputs.all_axial = True + slicer.inputs.image_width = 750 + slicer.inputs.out_file = 'foo_bar.png' + assert slicer.cmdline == \ + 'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png' % ( + filelist[0], filelist[1]) + + # .run based parameter setting + slicer2 = fsl.Slicer( + in_file=filelist[0], + middle_slices=True, + label_slices=False, + out_file='foo_bar2.png') + assert slicer2.cmdline == 'slicer %s -a foo_bar2.png' % (filelist[0]) + + +def create_parfiles(): + np.savetxt('a.par', np.random.rand(6, 3)) + np.savetxt('b.par', np.random.rand(6, 3)) + return ['a.par', 'b.par'] + + +# test fsl_tsplot + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_plottimeseries(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + parfiles = create_parfiles() + plotter = fsl.PlotTimeSeries() + + # make sure command gets called + assert plotter.cmd == 'fsl_tsplot' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + plotter.run() + + # .inputs based parameters setting + plotter.inputs.in_file = parfiles[0] + plotter.inputs.labels = ['x', 'y', 'z'] + plotter.inputs.y_range = (0, 1) + plotter.inputs.title = 'test plot' + plotter.inputs.out_file = 'foo.png' + assert plotter.cmdline == \ + ('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1' + % parfiles[0]) + + # .run based parameter setting + plotter2 = fsl.PlotTimeSeries( + in_file=parfiles, + title='test2 plot', + plot_range=(2, 5), + out_file='bar.png') + assert plotter2.cmdline == \ + 'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1' % tuple( + parfiles) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_plotmotionparams(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + parfiles = create_parfiles() + plotter = fsl.PlotMotionParams() + + # make sure command gets called + assert plotter.cmd == 'fsl_tsplot' + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + plotter.run() + + # .inputs based parameters setting + plotter.inputs.in_file = parfiles[0] + plotter.inputs.in_source = 'fsl' + plotter.inputs.plot_type = 'rotations' + plotter.inputs.out_file = 'foo.png' + assert plotter.cmdline == \ + ('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' ' + '--start=1 --finish=3 -a x,y,z' % parfiles[0]) + + # .run based parameter setting + plotter2 = fsl.PlotMotionParams( + in_file=parfiles[1], + in_source='spm', + plot_type='translations', + out_file='bar.png') + assert plotter2.cmdline == \ + ('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' ' + '--start=1 --finish=3 -a x,y,z' % parfiles[1]) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_convertxfm(create_files_in_directory_plus_output_type): + filelist, outdir, _ = create_files_in_directory_plus_output_type + cvt = fsl.ConvertXFM() + + # make sure command gets called + assert cvt.cmd == "convert_xfm" + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + cvt.run() + + # .inputs based parameters setting + cvt.inputs.in_file = filelist[0] + cvt.inputs.invert_xfm = True + cvt.inputs.out_file = "foo.mat" + assert cvt.cmdline == 'convert_xfm -omat foo.mat -inverse %s' % filelist[0] + + # constructor based parameter setting + cvt2 = fsl.ConvertXFM( + in_file=filelist[0], + in_file2=filelist[1], + 
concat_xfm=True, + out_file="bar.mat") + assert cvt2.cmdline == \ + "convert_xfm -omat bar.mat -concat %s %s" % (filelist[1], filelist[0]) + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +def test_swapdims(create_files_in_directory_plus_output_type): + files, testdir, out_ext = create_files_in_directory_plus_output_type + swap = fsl.SwapDimensions() + + # Test the underlying command + assert swap.cmd == "fslswapdim" + + # Test mandatory args + args = [dict(in_file=files[0]), dict(new_dims=("x", "y", "z"))] + for arg in args: + wontrun = fsl.SwapDimensions(**arg) + with pytest.raises(ValueError): + wontrun.run() + + # Now test a basic command line + swap.inputs.in_file = files[0] + swap.inputs.new_dims = ("x", "y", "z") + assert swap.cmdline == "fslswapdim a.nii x y z %s" % os.path.realpath( + os.path.join(testdir, "a_newdims%s" % out_ext)) + + # Test that we can set an output name + swap.inputs.out_file = "b.nii" + assert swap.cmdline == "fslswapdim a.nii x y z b.nii" diff --git a/nipype/interfaces/fsl/utils.py b/nipype/interfaces/fsl/utils.py new file mode 100644 index 0000000000..e237124bb3 --- /dev/null +++ b/nipype/interfaces/fsl/utils.py @@ -0,0 +1,2705 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The fsl module provides classes for interfacing with the `FSL +`_ command line tools. This +was written to work with FSL version 4.1.4. + +Examples +-------- +See the docstrings of the individual classes for examples. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import map, range + +import os +import os.path as op +import re +from glob import glob +import tempfile + +import numpy as np + +from ...utils.filemanip import (load_json, save_json, split_filename, + fname_presuffix) +from ..base import (traits, TraitedSpec, OutputMultiPath, File, CommandLine, + CommandLineInputSpec, isdefined) +from .base import FSLCommand, FSLCommandInputSpec, Info + + +class CopyGeomInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr="%s", + position=0, + desc="source image") + dest_file = File( + exists=True, + mandatory=True, + argstr="%s", + position=1, + desc="destination image", + copyfile=True, + output_name='out_file', + name_source='dest_file', + name_template='%s') + ignore_dims = traits.Bool( + desc='Do not copy image dimensions', argstr='-d', position="-1") + + +class CopyGeomOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="image with new geometry header") + + +class CopyGeom(FSLCommand): + """Use fslcpgeom to copy the header geometry information to another image. + Copy certain parts of the header information (image dimensions, voxel + dimensions, voxel dimensions units string, image orientation/origin or + qform/sform info) from one image to another. Note that only copies from + Analyze to Analyze or Nifti to Nifti will work properly. Copying from + different files will result in loss of information or potentially incorrect + settings. 
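+
+    Example (a minimal sketch; the file names are illustrative):
+
+    >>> from nipype.interfaces.fsl.utils import CopyGeom
+    >>> copygeom = CopyGeom(in_file="functional.nii",
+    ...                     dest_file="anatomical.nii")  # doctest: +SKIP
+    >>> res = copygeom.run()  # doctest: +SKIP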
+ """ + _cmd = "fslcpgeom" + input_spec = CopyGeomInputSpec + output_spec = CopyGeomOutputSpec + + +class RobustFOVInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + desc='input filename', + argstr='-i %s', + position=0, + mandatory=True) + out_roi = File( + desc="ROI volume output name", + argstr="-r %s", + name_source=['in_file'], + hash_files=False, + name_template='%s_ROI') + brainsize = traits.Int( + desc=('size of brain in z-dimension (default ' + '170mm/150mm)'), + argstr='-b %d') + out_transform = File( + desc=("Transformation matrix in_file to out_roi " + "output name"), + argstr="-m %s", + name_source=['in_file'], + hash_files=False, + name_template='%s_to_ROI') + + +class RobustFOVOutputSpec(TraitedSpec): + out_roi = File(exists=True, desc="ROI volume output name") + out_transform = File( + exists=True, + desc=("Transformation matrix in_file to out_roi " + "output name")) + + +class RobustFOV(FSLCommand): + """Automatically crops an image removing lower head and neck. + + Interface is stable 5.0.0 to 5.0.9, but default brainsize changed from + 150mm to 170mm. + """ + + _cmd = 'robustfov' + input_spec = RobustFOVInputSpec + output_spec = RobustFOVOutputSpec + + +class ImageMeantsInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + desc='input file for computing the average timeseries', + argstr='-i %s', + position=0, + mandatory=True) + out_file = File( + desc='name of output text matrix', + argstr='-o %s', + genfile=True, + hash_files=False) + mask = File(exists=True, desc='input 3D mask', argstr='-m %s') + spatial_coord = traits.List( + traits.Int, + desc=(' requested spatial coordinate ' + '(instead of mask)'), + argstr='-c %s') + use_mm = traits.Bool( + desc=('use mm instead of voxel coordinates (for -c ' + 'option)'), + argstr='--usemm') + show_all = traits.Bool( + desc=('show all voxel time series (within mask) ' + 'instead of averaging'), + argstr='--showall') + eig = traits.Bool( + desc=('calculate Eigenvariate(s) instead of mean (output will have 0 ' + 'mean)'), + argstr='--eig') + order = traits.Int( + 1, + desc='select number of Eigenvariates', + argstr='--order=%d', + usedefault=True) + nobin = traits.Bool( + desc=('do not binarise the mask for calculation of ' + 'Eigenvariates'), + argstr='--no_bin') + transpose = traits.Bool( + desc=('output results in transpose format (one row per voxel/mean)'), + argstr='--transpose') + + +class ImageMeantsOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="path/name of output text matrix") + + +class ImageMeants(FSLCommand): + """ Use fslmeants for printing the average timeseries (intensities) to + the screen (or saves to a file). 
The average is taken over all voxels + in the mask (or all voxels in the image if no mask is specified) + + """ + _cmd = 'fslmeants' + input_spec = ImageMeantsInputSpec + output_spec = ImageMeantsOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_ts', ext='.txt', change_ext=True) + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + +class SmoothInputSpec(FSLCommandInputSpec): + in_file = File(exists=True, argstr="%s", position=0, mandatory=True) + sigma = traits.Float( + argstr="-kernel gauss %.03f -fmean", + position=1, + xor=['fwhm'], + mandatory=True, + desc='gaussian kernel sigma in mm (not voxels)') + fwhm = traits.Float( + argstr="-kernel gauss %.03f -fmean", + position=1, + xor=['sigma'], + mandatory=True, + desc=('gaussian kernel fwhm, will be converted to sigma in mm ' + '(not voxels)')) + smoothed_file = File( + argstr="%s", + position=2, + name_source=['in_file'], + name_template='%s_smooth', + hash_files=False) + + +class SmoothOutputSpec(TraitedSpec): + smoothed_file = File(exists=True) + + +class Smooth(FSLCommand): + """ + Use fslmaths to smooth the image + + Examples + -------- + + Setting the kernel width using sigma: + + >>> sm = Smooth() + >>> sm.inputs.output_type = 'NIFTI_GZ' + >>> sm.inputs.in_file = 'functional2.nii' + >>> sm.inputs.sigma = 8.0 + >>> sm.cmdline # doctest: +ELLIPSIS + 'fslmaths functional2.nii -kernel gauss 8.000 -fmean functional2_smooth.nii.gz' + + Setting the kernel width using fwhm: + + >>> sm = Smooth() + >>> sm.inputs.output_type = 'NIFTI_GZ' + >>> sm.inputs.in_file = 'functional2.nii' + >>> sm.inputs.fwhm = 8.0 + >>> sm.cmdline # doctest: +ELLIPSIS + 'fslmaths functional2.nii -kernel gauss 3.397 -fmean functional2_smooth.nii.gz' + + One of sigma or fwhm must be set: + + >>> from nipype.interfaces.fsl import Smooth + >>> sm = Smooth() + >>> sm.inputs.output_type = 'NIFTI_GZ' + >>> sm.inputs.in_file = 'functional2.nii' + >>> sm.cmdline #doctest: +ELLIPSIS + Traceback (most recent call last): + ... + ValueError: Smooth requires a value for one of the inputs ... + + """ + + input_spec = SmoothInputSpec + output_spec = SmoothOutputSpec + _cmd = 'fslmaths' + + def _format_arg(self, name, trait_spec, value): + if name == 'fwhm': + sigma = float(value) / np.sqrt(8 * np.log(2)) + return super(Smooth, self)._format_arg(name, trait_spec, sigma) + return super(Smooth, self)._format_arg(name, trait_spec, value) + + +class SliceInputSpec(FSLCommandInputSpec): + in_file = File(exists=True, argstr="%s", position=0, mandatory=True, + desc="input filename", copyfile=False) + out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix") + + +class SliceOutputSpec(TraitedSpec): + out_files = OutputMultiPath(File(exists=True)) + + +class Slice(FSLCommand): + """Use fslslice to split a 3D file into lots of 2D files (along z-axis). 
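+    Outputs are collected by globbing ``<prefix>_slice_*``, where the
+    prefix is ``out_base_name`` if set, or otherwise derived from
+    ``in_file``.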
+ + + Examples + -------- + + >>> from nipype.interfaces.fsl import Slice + >>> slice = Slice() + >>> slice.inputs.in_file = 'functional.nii' + >>> slice.inputs.out_base_name = 'sl' + >>> slice.cmdline + 'fslslice functional.nii sl' + + + """ + + _cmd = 'fslslice' + input_spec = SliceInputSpec + output_spec = SliceOutputSpec + + def _list_outputs(self): + """Create a Bunch which contains all possible files generated + by running the interface. Some files are always generated, others + depending on which ``inputs`` options are set. + + Returns + ------- + + outputs : Bunch object + Bunch object containing all possible files generated by + interface object. + + If None, file was not generated + Else, contains path, filename of generated outputfile + + """ + outputs = self._outputs().get() + ext = Info.output_type_to_ext(self.inputs.output_type) + suffix = '_slice_*' + ext + if isdefined(self.inputs.out_base_name): + fname_template = os.path.abspath( + self.inputs.out_base_name + suffix) + else: + fname_template = fname_presuffix(self.inputs.in_file, + suffix=suffix, use_ext=False) + + outputs['out_files'] = sorted(glob(fname_template)) + + return outputs + + +class MergeInputSpec(FSLCommandInputSpec): + in_files = traits.List( + File(exists=True), argstr="%s", position=2, mandatory=True) + dimension = traits.Enum( + 't', + 'x', + 'y', + 'z', + 'a', + argstr="-%s", + position=0, + desc=("dimension along which to merge, optionally " + "set tr input when dimension is t"), + mandatory=True) + tr = traits.Float( + position=-1, + argstr='%.2f', + desc=('use to specify TR in seconds (default is 1.00 ' + 'sec), overrides dimension and sets it to tr')) + merged_file = File( + argstr="%s", + position=1, + name_source='in_files', + name_template='%s_merged', + hash_files=False) + + +class MergeOutputSpec(TraitedSpec): + merged_file = File(exists=True) + + +class Merge(FSLCommand): + """Use fslmerge to concatenate images + + Images can be concatenated across time, x, y, or z dimensions. Across the + time (t) dimension the TR is set by default to 1 sec. + + Note: to set the TR to a different value, specify 't' for dimension and + specify the TR value in seconds for the tr input. The dimension will be + automatically updated to 'tr'. 
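+    The 'a' dimension asks fslmerge to choose automatically: single
+    slices are merged into a volume, volumes into a 4D time series.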
+ + Examples + -------- + + >>> from nipype.interfaces.fsl import Merge + >>> merger = Merge() + >>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii'] + >>> merger.inputs.dimension = 't' + >>> merger.inputs.output_type = 'NIFTI_GZ' + >>> merger.cmdline + 'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii' + >>> merger.inputs.tr = 2.25 + >>> merger.cmdline + 'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25' + + + """ + + _cmd = 'fslmerge' + input_spec = MergeInputSpec + output_spec = MergeOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'tr': + if self.inputs.dimension != 't': + raise ValueError('When TR is specified, dimension must be t') + return spec.argstr % value + if name == 'dimension': + if isdefined(self.inputs.tr): + return '-tr' + return spec.argstr % value + return super(Merge, self)._format_arg(name, spec, value) + + +class ExtractROIInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr="%s", + position=0, + desc="input file", + mandatory=True) + roi_file = File( + argstr="%s", + position=1, + desc="output file", + genfile=True, + hash_files=False) + x_min = traits.Int(argstr="%d", position=2) + x_size = traits.Int(argstr="%d", position=3) + y_min = traits.Int(argstr="%d", position=4) + y_size = traits.Int(argstr="%d", position=5) + z_min = traits.Int(argstr="%d", position=6) + z_size = traits.Int(argstr="%d", position=7) + t_min = traits.Int(argstr="%d", position=8) + t_size = traits.Int(argstr="%d", position=9) + _crop_xor = [ + 'x_min', 'x_size', 'y_min', 'y_size', 'z_min', 'z_size', 't_min', + 't_size' + ] + crop_list = traits.List( + traits.Tuple(traits.Int, traits.Int), + argstr="%s", + position=2, + xor=_crop_xor, + desc="list of two tuples specifying crop options") + + +class ExtractROIOutputSpec(TraitedSpec): + roi_file = File(exists=True) + + +class ExtractROI(FSLCommand): + """Uses FSL Fslroi command to extract region of interest (ROI) + from an image. + + You can a) take a 3D ROI from a 3D data set (or if it is 4D, the + same ROI is taken from each time point and a new 4D data set is + created), b) extract just some time points from a 4D data set, or + c) control time and space limits to the ROI. Note that the + arguments are minimum index and size (not maximum index). So to + extract voxels 10 to 12 inclusive you would specify 10 and 3 (not + 10 and 12). + + + Examples + -------- + + >>> from nipype.interfaces.fsl import ExtractROI + >>> from nipype.testing import anatfile + >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0, + ... t_size=1) + >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile + True + + + """ + + _cmd = 'fslroi' + input_spec = ExtractROIInputSpec + output_spec = ExtractROIOutputSpec + + def _format_arg(self, name, spec, value): + + if name == "crop_list": + return " ".join(map(str, sum(list(map(list, value)), []))) + return super(ExtractROI, self)._format_arg(name, spec, value) + + def _list_outputs(self): + """Create a Bunch which contains all possible files generated + by running the interface. Some files are always generated, others + depending on which ``inputs`` options are set. + + + Returns + ------- + + outputs : Bunch object + Bunch object containing all possible files generated by + interface object. 
+ + If None, file was not generated + Else, contains path, filename of generated outputfile + + """ + outputs = self._outputs().get() + outputs['roi_file'] = self.inputs.roi_file + if not isdefined(outputs['roi_file']): + outputs['roi_file'] = self._gen_fname( + self.inputs.in_file, suffix='_roi') + outputs['roi_file'] = os.path.abspath(outputs['roi_file']) + return outputs + + def _gen_filename(self, name): + if name == 'roi_file': + return self._list_outputs()[name] + return None + + +class SplitInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr="%s", + position=0, + mandatory=True, + desc="input filename") + out_base_name = traits.Str(argstr="%s", position=1, desc="outputs prefix") + dimension = traits.Enum( + 't', + 'x', + 'y', + 'z', + argstr="-%s", + position=2, + mandatory=True, + desc="dimension along which the file will be split") + + +class SplitOutputSpec(TraitedSpec): + out_files = OutputMultiPath(File(exists=True)) + + +class Split(FSLCommand): + """Uses FSL Fslsplit command to separate a volume into images in + time, x, y or z dimension. + """ + _cmd = 'fslsplit' + input_spec = SplitInputSpec + output_spec = SplitOutputSpec + + def _list_outputs(self): + """Create a Bunch which contains all possible files generated + by running the interface. Some files are always generated, others + depending on which ``inputs`` options are set. + + Returns + ------- + + outputs : Bunch object + Bunch object containing all possible files generated by + interface object. + + If None, file was not generated + Else, contains path, filename of generated outputfile + + """ + outputs = self._outputs().get() + ext = Info.output_type_to_ext(self.inputs.output_type) + outbase = 'vol*' + if isdefined(self.inputs.out_base_name): + outbase = '%s*' % self.inputs.out_base_name + outputs['out_files'] = sorted( + glob(os.path.join(os.getcwd(), outbase + ext))) + return outputs + + +class ImageMathsInputSpec(FSLCommandInputSpec): + in_file = File(exists=True, argstr="%s", mandatory=True, position=1) + in_file2 = File(exists=True, argstr="%s", position=3) + mask_file = File(exists=True, argstr='-mas %s', + desc='use (following image>0) to mask current image') + out_file = File(argstr="%s", position=-2, genfile=True, hash_files=False) + op_string = traits.Str( + argstr="%s", + position=2, + desc="string defining the operation, i. e. -add") + suffix = traits.Str(desc="out_file suffix") + out_data_type = traits.Enum( + 'char', + 'short', + 'int', + 'float', + 'double', + 'input', + argstr="-odt %s", + position=-1, + desc=("output datatype, one of (char, short, " + "int, float, double, input)")) + + +class ImageMathsOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class ImageMaths(FSLCommand): + """Use FSL fslmaths command to allow mathematical manipulation of images + `FSL info `_ + + + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> from nipype.testing import anatfile + >>> maths = fsl.ImageMaths(in_file=anatfile, op_string= '-add 5', + ... 
out_file='foo_maths.nii') + >>> maths.cmdline == 'fslmaths %s -add 5 foo_maths.nii' % anatfile + True + + + """ + input_spec = ImageMathsInputSpec + output_spec = ImageMathsOutputSpec + + _cmd = 'fslmaths' + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + def _parse_inputs(self, skip=None): + return super(ImageMaths, self)._parse_inputs(skip=['suffix']) + + def _list_outputs(self): + suffix = '_maths' # ohinds: build suffix + if isdefined(self.inputs.suffix): + suffix = self.inputs.suffix + outputs = self._outputs().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix=suffix) + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs + + +class FilterRegressorInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + argstr="-i %s", + desc="input file name (4D image)", + mandatory=True, + position=1) + out_file = File( + argstr="-o %s", + desc="output file name for the filtered data", + genfile=True, + position=2, + hash_files=False) + design_file = File( + exists=True, + argstr="-d %s", + position=3, + mandatory=True, + desc=("name of the matrix with time courses (e.g. GLM " + "design or MELODIC mixing matrix)")) + filter_columns = traits.List( + traits.Int, + argstr="-f '%s'", + xor=["filter_all"], + mandatory=True, + position=4, + desc=("(1-based) column indices to filter out of the data")) + filter_all = traits.Bool( + mandatory=True, + argstr="-f '%s'", + xor=["filter_columns"], + position=4, + desc=("use all columns in the design file in " + "denoising")) + mask = File(exists=True, argstr="-m %s", desc="mask image file name") + var_norm = traits.Bool( + argstr="--vn", desc="perform variance-normalization on data") + out_vnscales = traits.Bool( + argstr="--out_vnscales", + desc=("output scaling factors for variance " + "normalization")) + + +class FilterRegressorOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="output file name for the filtered data") + + +class FilterRegressor(FSLCommand): + """Data de-noising by regressing out part of a design matrix + + Uses simple OLS regression on 4D images + """ + input_spec = FilterRegressorInputSpec + output_spec = FilterRegressorOutputSpec + _cmd = 'fsl_regfilt' + + def _format_arg(self, name, trait_spec, value): + if name == 'filter_columns': + return trait_spec.argstr % ",".join(map(str, value)) + elif name == "filter_all": + design = np.loadtxt(self.inputs.design_file) + try: + n_cols = design.shape[1] + except IndexError: + n_cols = 1 + return trait_spec.argstr % ",".join( + map(str, list(range(1, n_cols + 1)))) + return super(FilterRegressor, self)._format_arg( + name, trait_spec, value) + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_regfilt') + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()[name] + return None + + +class ImageStatsInputSpec(FSLCommandInputSpec): + split_4d = traits.Bool( + argstr='-t', + position=1, + desc=('give a separate output line for each 3D ' + 'volume of a 4D timeseries')) + in_file = File( + exists=True, + argstr="%s", + mandatory=True, + position=2, + desc='input file to generate stats of') + op_string = 
traits.Str( + argstr="%s", + mandatory=True, + position=3, + desc=("string defining the operation, options are " + "applied in order, e.g. -M -l 10 -M will " + "report the non-zero mean, apply a threshold " + "and then report the new nonzero mean")) + mask_file = File( + exists=True, argstr="", desc='mask file used for option -k %s') + + +class ImageStatsOutputSpec(TraitedSpec): + out_stat = traits.Any(desc='stats output') + + +class ImageStats(FSLCommand): + """Use FSL fslstats command to calculate stats from images + `FSL info + `_ + + + Examples + -------- + + >>> from nipype.interfaces.fsl import ImageStats + >>> from nipype.testing import funcfile + >>> stats = ImageStats(in_file=funcfile, op_string= '-M') + >>> stats.cmdline == 'fslstats %s -M'%funcfile + True + + + """ + input_spec = ImageStatsInputSpec + output_spec = ImageStatsOutputSpec + + _cmd = 'fslstats' + + def _format_arg(self, name, trait_spec, value): + if name == 'mask_file': + return '' + if name == 'op_string': + if '-k %s' in self.inputs.op_string: + if isdefined(self.inputs.mask_file): + return self.inputs.op_string % self.inputs.mask_file + else: + raise ValueError( + '-k %s option in op_string requires mask_file') + return super(ImageStats, self)._format_arg(name, trait_spec, value) + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + # local caching for backward compatibility + outfile = os.path.join(os.getcwd(), 'stat_result.json') + if runtime is None: + try: + out_stat = load_json(outfile)['stat'] + except IOError: + return self.run().outputs + else: + out_stat = [] + for line in runtime.stdout.split('\n'): + if line: + values = line.split() + if len(values) > 1: + out_stat.append([float(val) for val in values]) + else: + out_stat.extend([float(val) for val in values]) + if len(out_stat) == 1: + out_stat = out_stat[0] + save_json(outfile, dict(stat=out_stat)) + outputs.out_stat = out_stat + return outputs + + +class AvScaleInputSpec(CommandLineInputSpec): + all_param = traits.Bool(False, argstr='--allparams') + mat_file = File( + exists=True, argstr='%s', desc='mat file to read', position=-2) + ref_file = File( + exists=True, + argstr='%s', + position=-1, + desc='reference file to get center of rotation') + + +class AvScaleOutputSpec(TraitedSpec): + rotation_translation_matrix = traits.List( + traits.List(traits.Float), desc='Rotation and Translation Matrix') + scales = traits.List(traits.Float, desc='Scales (x,y,z)') + skews = traits.List(traits.Float, desc='Skews') + average_scaling = traits.Float(desc='Average Scaling') + determinant = traits.Float(desc='Determinant') + forward_half_transform = traits.List( + traits.List(traits.Float), desc='Forward Half Transform') + backward_half_transform = traits.List( + traits.List(traits.Float), desc='Backwards Half Transform') + left_right_orientation_preserved = traits.Bool( + desc='True if LR orientation preserved') + rot_angles = traits.List(traits.Float, desc='rotation angles') + translations = traits.List(traits.Float, desc='translations') + + +class AvScale(CommandLine): + """Use FSL avscale command to extract info from mat file output of FLIRT + + Examples + -------- + + >>> avscale = AvScale() + >>> avscale.inputs.mat_file = 'flirt.mat' + >>> res = avscale.run() # doctest: +SKIP + + + """ + input_spec = AvScaleInputSpec + output_spec = AvScaleOutputSpec + + _cmd = 'avscale' + + def _run_interface(self, runtime): + runtime = super(AvScale, self)._run_interface(runtime) + + expr = re.compile( + 'Rotation\ &\ 
Translation\ Matrix:\n(?P[0-9\.\ \n-]+)[\s\n]*' + '(Rotation\ Angles\ \(x,y,z\)\ \[rads\]\ =\ (?P[0-9\.\ -]+))?[\s\n]*' + '(Translations\ \(x,y,z\)\ \[mm\]\ =\ (?P[0-9\.\ -]+))?[\s\n]*' + 'Scales\ \(x,y,z\)\ =\ (?P[0-9\.\ -]+)[\s\n]*' + 'Skews\ \(xy,xz,yz\)\ =\ (?P[0-9\.\ -]+)[\s\n]*' + 'Average\ scaling\ =\ (?P[0-9\.-]+)[\s\n]*' + 'Determinant\ =\ (?P[0-9\.-]+)[\s\n]*' + 'Left-Right\ orientation:\ (?P[A-Za-z]+)[\s\n]*' + 'Forward\ half\ transform\ =[\s]*\n' + '(?P[0-9\.\ \n-]+)[\s\n]*' + 'Backward\ half\ transform\ =[\s]*\n' + '(?P[0-9\.\ \n-]+)[\s\n]*') + out = expr.search(runtime.stdout).groupdict() + outputs = {} + outputs['rotation_translation_matrix'] = [[ + float(v) for v in r.strip().split(' ') + ] for r in out['rot_tran_mat'].strip().split('\n')] + outputs['scales'] = [ + float(s) for s in out['scales'].strip().split(' ') + ] + outputs['skews'] = [float(s) for s in out['skews'].strip().split(' ')] + outputs['average_scaling'] = float(out['avg_scaling'].strip()) + outputs['determinant'] = float(out['determinant'].strip()) + outputs['left_right_orientation_preserved'] = out[ + 'lr_orientation'].strip() == 'preserved' + outputs['forward_half_transform'] = [[ + float(v) for v in r.strip().split(' ') + ] for r in out['fwd_half_xfm'].strip().split('\n')] + outputs['backward_half_transform'] = [[ + float(v) for v in r.strip().split(' ') + ] for r in out['bwd_half_xfm'].strip().split('\n')] + + if self.inputs.all_param: + outputs['rot_angles'] = [ + float(r) for r in out['rot_angles'].strip().split(' ') + ] + outputs['translations'] = [ + float(r) for r in out['translations'].strip().split(' ') + ] + + setattr(self, '_results', outputs) + return runtime + + def _list_outputs(self): + return self._results + + +class OverlayInputSpec(FSLCommandInputSpec): + transparency = traits.Bool( + desc='make overlay colors semi-transparent', + position=1, + argstr='%s', + usedefault=True, + default_value=True) + out_type = traits.Enum( + 'float', + 'int', + position=2, + usedefault=True, + argstr='%s', + desc='write output with float or int') + use_checkerboard = traits.Bool( + desc='use checkerboard mask for overlay', argstr='-c', position=3) + background_image = File( + exists=True, + position=4, + mandatory=True, + argstr='%s', + desc='image to use as background') + _xor_inputs = ('auto_thresh_bg', 'full_bg_range', 'bg_thresh') + auto_thresh_bg = traits.Bool( + desc=('automatically threshold the background image'), + argstr='-a', + position=5, + xor=_xor_inputs, + mandatory=True) + full_bg_range = traits.Bool( + desc='use full range of background image', + argstr='-A', + position=5, + xor=_xor_inputs, + mandatory=True) + bg_thresh = traits.Tuple( + traits.Float, + traits.Float, + argstr='%.3f %.3f', + position=5, + desc='min and max values for background intensity', + xor=_xor_inputs, + mandatory=True) + stat_image = File( + exists=True, + position=6, + mandatory=True, + argstr='%s', + desc='statistical image to overlay in color') + stat_thresh = traits.Tuple( + traits.Float, + traits.Float, + position=7, + mandatory=True, + argstr='%.2f %.2f', + desc=('min and max values for the statistical ' + 'overlay')) + show_negative_stats = traits.Bool( + desc=('display negative statistics in ' + 'overlay'), + xor=['stat_image2'], + argstr='%s', + position=8) + stat_image2 = File( + exists=True, + position=9, + xor=['show_negative_stats'], + argstr='%s', + desc='second statistical image to overlay in color') + stat_thresh2 = traits.Tuple( + traits.Float, + traits.Float, + position=10, + desc=('min and max 
values for second ' + 'statistical overlay'), + argstr='%.2f %.2f') + out_file = File( + desc='combined image volume', + position=-1, + argstr='%s', + genfile=True, + hash_files=False) + + +class OverlayOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='combined image volume') + + +class Overlay(FSLCommand): + """ Use FSL's overlay command to combine background and statistical images + into one volume + + + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> combine = fsl.Overlay() + >>> combine.inputs.background_image = 'mean_func.nii.gz' + >>> combine.inputs.auto_thresh_bg = True + >>> combine.inputs.stat_image = 'zstat1.nii.gz' + >>> combine.inputs.stat_thresh = (3.5, 10) + >>> combine.inputs.show_negative_stats = True + >>> res = combine.run() #doctest: +SKIP + + + """ + _cmd = 'overlay' + input_spec = OverlayInputSpec + output_spec = OverlayOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'transparency': + if value: + return '1' + else: + return '0' + if name == 'out_type': + if value == 'float': + return '0' + else: + return '1' + if name == 'show_negative_stats': + return '%s %.2f %.2f' % (self.inputs.stat_image, + self.inputs.stat_thresh[0] * -1, + self.inputs.stat_thresh[1] * -1) + return super(Overlay, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + if isdefined(self.inputs.stat_image2) and ( + not isdefined(self.inputs.show_negative_stats) + or not self.inputs.show_negative_stats): + stem = "%s_and_%s" % ( + split_filename(self.inputs.stat_image)[1], + split_filename(self.inputs.stat_image2)[1]) + else: + stem = split_filename(self.inputs.stat_image)[1] + out_file = self._gen_fname(stem, suffix='_overlay') + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class SlicerInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + position=1, + argstr='%s', + mandatory=True, + desc='input volume') + image_edges = File( + exists=True, + position=2, + argstr='%s', + desc=('volume to display edge overlay for (useful for ' + 'checking registration')) + label_slices = traits.Bool( + position=3, + argstr='-L', + desc='display slice number', + usedefault=True, + default_value=True) + colour_map = File( + exists=True, + position=4, + argstr='-l %s', + desc=('use different colour map from that stored in ' + 'nifti header')) + intensity_range = traits.Tuple( + traits.Float, + traits.Float, + position=5, + argstr='-i %.3f %.3f', + desc='min and max intensities to display') + threshold_edges = traits.Float( + position=6, argstr='-e %.3f', desc='use threshold for edges') + dither_edges = traits.Bool( + position=7, + argstr='-t', + desc=('produce semi-transparent (dithered) ' + 'edges')) + nearest_neighbour = traits.Bool( + position=8, + argstr='-n', + desc=('use nearest neighbor interpolation ' + 'for output')) + show_orientation = traits.Bool( + position=9, + argstr='%s', + usedefault=True, + default_value=True, + desc='label left-right orientation') + _xor_options = ('single_slice', 'middle_slices', 'all_axial', + 'sample_axial') + single_slice = traits.Enum( + 'x', + 'y', + 'z', + position=10, + argstr='-%s', + xor=_xor_options, + requires=['slice_number'], + desc=('output picture of single slice in the x, y, or z plane')) + slice_number = traits.Int( + position=11, argstr='-%d', 
desc='slice number to save in picture') + middle_slices = traits.Bool( + position=10, + argstr='-a', + xor=_xor_options, + desc=('output picture of mid-sagittal, axial, ' + 'and coronal slices')) + all_axial = traits.Bool( + position=10, + argstr='-A', + xor=_xor_options, + requires=['image_width'], + desc='output all axial slices into one picture') + sample_axial = traits.Int( + position=10, + argstr='-S %d', + xor=_xor_options, + requires=['image_width'], + desc=('output every n axial slices into one ' + 'picture')) + image_width = traits.Int( + position=-2, argstr='%d', desc='max picture width') + out_file = File( + position=-1, + genfile=True, + argstr='%s', + desc='picture to write', + hash_files=False) + scaling = traits.Float(position=0, argstr='-s %f', desc='image scale') + + +class SlicerOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='picture to write') + + +class Slicer(FSLCommand): + """Use FSL's slicer command to output a png image from a volume. + + + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> from nipype.testing import example_data + >>> slice = fsl.Slicer() + >>> slice.inputs.in_file = example_data('functional.nii') + >>> slice.inputs.all_axial = True + >>> slice.inputs.image_width = 750 + >>> res = slice.run() #doctest: +SKIP + + + """ + _cmd = 'slicer' + input_spec = SlicerInputSpec + output_spec = SlicerOutputSpec + + def _format_arg(self, name, spec, value): + if name == 'show_orientation': + if value: + return '' + else: + return '-u' + elif name == "label_slices": + if value: + return '-L' + else: + return '' + return super(Slicer, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + out_file = self._gen_fname(self.inputs.in_file, ext='.png') + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class PlotTimeSeriesInputSpec(FSLCommandInputSpec): + + in_file = traits.Either( + File(exists=True), + traits.List(File(exists=True)), + mandatory=True, + argstr="%s", + position=1, + desc=("file or list of files with columns of " + "timecourse information")) + plot_start = traits.Int( + argstr="--start=%d", + xor=("plot_range", ), + desc="first column from in-file to plot") + plot_finish = traits.Int( + argstr="--finish=%d", + xor=("plot_range", ), + desc="final column from in-file to plot") + plot_range = traits.Tuple( + traits.Int, + traits.Int, + argstr="%s", + xor=("plot_start", "plot_finish"), + desc=("first and last columns from the in-file " + "to plot")) + title = traits.Str(argstr="%s", desc="plot title") + legend_file = File(exists=True, argstr="--legend=%s", desc="legend file") + labels = traits.Either( + traits.Str, + traits.List(traits.Str), + argstr="%s", + desc="label or list of labels") + y_min = traits.Float( + argstr="--ymin=%.2f", desc="minumum y value", xor=("y_range", )) + y_max = traits.Float( + argstr="--ymax=%.2f", desc="maximum y value", xor=("y_range", )) + y_range = traits.Tuple( + traits.Float, + traits.Float, + argstr="%s", + xor=("y_min", "y_max"), + desc="min and max y axis values") + x_units = traits.Int( + argstr="-u %d", + usedefault=True, + default_value=1, + desc=("scaling units for x-axis (between 1 and length of in file)")) + plot_size = traits.Tuple( + traits.Int, + traits.Int, + argstr="%s", + desc="plot image height and width") + x_precision = traits.Int( 
+ argstr="--precision=%d", desc="precision of x-axis labels") + sci_notation = traits.Bool( + argstr="--sci", desc="switch on scientific notation") + out_file = File( + argstr="-o %s", genfile=True, desc="image to write", hash_files=False) + + +class PlotTimeSeriesOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc='image to write') + + +class PlotTimeSeries(FSLCommand): + """Use fsl_tsplot to create images of time course plots. + + Examples + -------- + + >>> import nipype.interfaces.fsl as fsl + >>> plotter = fsl.PlotTimeSeries() + >>> plotter.inputs.in_file = 'functional.par' + >>> plotter.inputs.title = 'Functional timeseries' + >>> plotter.inputs.labels = ['run1', 'run2'] + >>> plotter.run() #doctest: +SKIP + + + """ + _cmd = "fsl_tsplot" + input_spec = PlotTimeSeriesInputSpec + output_spec = PlotTimeSeriesOutputSpec + + def _format_arg(self, name, spec, value): + if name == "in_file": + if isinstance(value, list): + args = ",".join(value) + return "-i %s" % args + else: + return "-i %s" % value + elif name == "labels": + if isinstance(value, list): + args = ",".join(value) + return "-a %s" % args + else: + return "-a %s" % value + elif name == "title": + return "-t \'%s\'" % value + elif name == "plot_range": + return "--start=%d --finish=%d" % value + elif name == "y_range": + return "--ymin=%d --ymax=%d" % value + elif name == "plot_size": + return "-h %d -w %d" % value + return super(PlotTimeSeries, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + if isinstance(self.inputs.in_file, list): + infile = self.inputs.in_file[0] + else: + infile = self.inputs.in_file + out_file = self._gen_fname(infile, ext='.png') + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class PlotMotionParamsInputSpec(FSLCommandInputSpec): + + in_file = traits.Either( + File(exists=True), + traits.List(File(exists=True)), + mandatory=True, + argstr="%s", + position=1, + desc="file with motion parameters") + in_source = traits.Enum( + "spm", + "fsl", + mandatory=True, + desc=("which program generated the motion " + "parameter file - fsl, spm")) + plot_type = traits.Enum( + "rotations", + "translations", + "displacement", + argstr="%s", + mandatory=True, + desc=("which motion type to plot - rotations, " + "translations, displacement")) + plot_size = traits.Tuple( + traits.Int, + traits.Int, + argstr="%s", + desc="plot image height and width") + out_file = File( + argstr="-o %s", genfile=True, desc="image to write", hash_files=False) + + +class PlotMotionParamsOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc='image to write') + + +class PlotMotionParams(FSLCommand): + """Use fsl_tsplot to plot the estimated motion parameters from a + realignment program. + + + Examples + -------- + + >>> import nipype.interfaces.fsl as fsl + >>> plotter = fsl.PlotMotionParams() + >>> plotter.inputs.in_file = 'functional.par' + >>> plotter.inputs.in_source = 'fsl' + >>> plotter.inputs.plot_type = 'rotations' + >>> res = plotter.run() #doctest: +SKIP + + + Notes + ----- + + The 'in_source' attribute determines the order of columns that are expected + in the source file. FSL prints motion parameters in the order rotations, + translations, while SPM prints them in the opposite order. 
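+    (Concretely, MCFLIRT ``.par`` files store rotations in radians in
+    columns 1-3 and translations in mm in columns 4-6; SPM realignment
+    parameter files are the reverse.)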
This interface + should be able to plot timecourses of motion parameters generated from + other sources as long as they fall under one of these two patterns. For + more flexibilty, see the :class:`fsl.PlotTimeSeries` interface. + + """ + _cmd = 'fsl_tsplot' + input_spec = PlotMotionParamsInputSpec + output_spec = PlotMotionParamsOutputSpec + + def _format_arg(self, name, spec, value): + + if name == "plot_type": + source = self.inputs.in_source + + if self.inputs.plot_type == 'displacement': + title = '-t \'MCFLIRT estimated mean displacement (mm)\'' + labels = '-a abs,rel' + return '%s %s' % (title, labels) + + # Get the right starting and ending position depending on source + # package + sfdict = dict( + fsl_rot=(1, 3), fsl_tra=(4, 6), spm_rot=(4, 6), spm_tra=(1, 3)) + + # Format the title properly + sfstr = "--start=%d --finish=%d" % sfdict["%s_%s" % (source, + value[:3])] + titledict = dict(fsl="MCFLIRT", spm="Realign") + unitdict = dict(rot="radians", tra="mm") + + title = "\'%s estimated %s (%s)\'" % (titledict[source], value, + unitdict[value[:3]]) + + return "-t %s %s -a x,y,z" % (title, sfstr) + elif name == "plot_size": + return "-h %d -w %d" % value + elif name == "in_file": + if isinstance(value, list): + args = ",".join(value) + return "-i %s" % args + else: + return "-i %s" % value + + return super(PlotMotionParams, self)._format_arg(name, spec, value) + + def _list_outputs(self): + outputs = self._outputs().get() + out_file = self.inputs.out_file + if not isdefined(out_file): + if isinstance(self.inputs.in_file, list): + infile = self.inputs.in_file[0] + else: + infile = self.inputs.in_file + plttype = dict( + rot="rot", tra="trans", dis="disp")[self.inputs.plot_type[:3]] + out_file = fname_presuffix( + infile, suffix="_%s.png" % plttype, use_ext=False) + outputs['out_file'] = os.path.abspath(out_file) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class ConvertXFMInputSpec(FSLCommandInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr="%s", + position=-1, + desc="input transformation matrix") + in_file2 = File( + exists=True, + argstr="%s", + position=-2, + desc="second input matrix (for use with fix_scale_skew or concat_xfm)") + _options = ["invert_xfm", "concat_xfm", "fix_scale_skew"] + invert_xfm = traits.Bool( + argstr="-inverse", + position=-3, + xor=_options, + desc="invert input transformation") + concat_xfm = traits.Bool( + argstr="-concat", + position=-3, + xor=_options, + requires=["in_file2"], + desc=("write joint transformation of two input " + "matrices")) + fix_scale_skew = traits.Bool( + argstr="-fixscaleskew", + position=-3, + xor=_options, + requires=["in_file2"], + desc=("use secondary matrix to fix scale and " + "skew")) + out_file = File( + genfile=True, + argstr="-omat %s", + position=1, + desc="final transformation matrix", + hash_files=False) + + +class ConvertXFMOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="output transformation matrix") + + +class ConvertXFM(FSLCommand): + """Use the FSL utility convert_xfm to modify FLIRT transformation matrices. 
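+    With ``concat_xfm`` the matrix passed as ``in_file2`` is applied
+    after the one passed as ``in_file``, i.e. the output transform maps
+    through ``in_file`` first.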
+ + Examples + -------- + + >>> import nipype.interfaces.fsl as fsl + >>> invt = fsl.ConvertXFM() + >>> invt.inputs.in_file = "flirt.mat" + >>> invt.inputs.invert_xfm = True + >>> invt.inputs.out_file = 'flirt_inv.mat' + >>> invt.cmdline + 'convert_xfm -omat flirt_inv.mat -inverse flirt.mat' + + + """ + + _cmd = "convert_xfm" + input_spec = ConvertXFMInputSpec + output_spec = ConvertXFMOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outfile = self.inputs.out_file + if not isdefined(outfile): + _, infile1, _ = split_filename(self.inputs.in_file) + if self.inputs.invert_xfm: + outfile = fname_presuffix( + infile1, + suffix="_inv.mat", + newpath=os.getcwd(), + use_ext=False) + else: + if self.inputs.concat_xfm: + _, infile2, _ = split_filename(self.inputs.in_file2) + outfile = fname_presuffix( + "%s_%s" % (infile1, infile2), + suffix=".mat", + newpath=os.getcwd(), + use_ext=False) + else: + outfile = fname_presuffix( + infile1, + suffix="_fix.mat", + newpath=os.getcwd(), + use_ext=False) + outputs["out_file"] = os.path.abspath(outfile) + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()["out_file"] + return None + + +class SwapDimensionsInputSpec(FSLCommandInputSpec): + + in_file = File( + exists=True, + mandatory=True, + argstr="%s", + position="1", + desc="input image") + _dims = [ + "x", "-x", "y", "-y", "z", "-z", "RL", "LR", "AP", "PA", "IS", "SI" + ] + new_dims = traits.Tuple( + traits.Enum(_dims), + traits.Enum(_dims), + traits.Enum(_dims), + argstr="%s %s %s", + mandatory=True, + desc="3-tuple of new dimension order") + out_file = File( + genfile=True, argstr="%s", desc="image to write", hash_files=False) + + +class SwapDimensionsOutputSpec(TraitedSpec): + + out_file = File(exists=True, desc="image with new dimensions") + + +class SwapDimensions(FSLCommand): + """Use fslswapdim to alter the orientation of an image. + + This interface accepts a three-tuple corresponding to the new + orientation. You may either provide dimension ids in the form of + (-)x, (-)y, or (-z), or nifti-syle dimension codes + (RL, LR, AP, PA, IS, SI). + + """ + _cmd = "fslswapdim" + input_spec = SwapDimensionsInputSpec + output_spec = SwapDimensionsOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = self.inputs.out_file + if not isdefined(self.inputs.out_file): + outputs["out_file"] = self._gen_fname( + self.inputs.in_file, suffix='_newdims') + outputs["out_file"] = os.path.abspath(outputs["out_file"]) + return outputs + + def _gen_filename(self, name): + if name == "out_file": + return self._list_outputs()["out_file"] + return None + + +class PowerSpectrumInputSpec(FSLCommandInputSpec): + # We use position args here as list indices - so a negative number + # will put something on the end + in_file = File( + exists=True, + desc="input 4D file to estimate the power spectrum", + argstr='%s', + position=0, + mandatory=True) + out_file = File( + desc='name of output 4D file for power spectrum', + argstr='%s', + position=1, + genfile=True, + hash_files=False) + + +class PowerSpectrumOutputSpec(TraitedSpec): + out_file = File( + exists=True, desc="path/name of the output 4D power spectrum file") + + +class PowerSpectrum(FSLCommand): + """Use FSL PowerSpectrum command for power spectrum estimation. 
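+    The spectrum is estimated voxel-wise along the time dimension of the
+    input, so the output is itself a 4D image.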
+ + Examples + -------- + + >>> from nipype.interfaces import fsl + >>> pspec = fsl.PowerSpectrum() + >>> pspec.inputs.in_file = 'functional.nii' + >>> res = pspec.run() # doctest: +SKIP + + + """ + + _cmd = 'fslpspec' + input_spec = PowerSpectrumInputSpec + output_spec = PowerSpectrumOutputSpec + + def _gen_outfilename(self): + out_file = self.inputs.out_file + if not isdefined(out_file) and isdefined(self.inputs.in_file): + out_file = self._gen_fname(self.inputs.in_file, suffix='_ps') + return out_file + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = os.path.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_outfilename() + return None + + +class SigLossInputSpec(FSLCommandInputSpec): + in_file = File( + mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') + out_file = File( + argstr='-s %s', desc='output signal loss estimate file', genfile=True) + + mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') + echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') + slice_direction = traits.Enum( + 'x', 'y', 'z', argstr='-d %s', desc='slicing direction') + + +class SigLossOuputSpec(TraitedSpec): + out_file = File(exists=True, desc='signal loss estimate file') + + +class SigLoss(FSLCommand): + """Estimates signal loss from a field map (in rad/s) + + Examples + -------- + + >>> sigloss = SigLoss() + >>> sigloss.inputs.in_file = "phase.nii" + >>> sigloss.inputs.echo_time = 0.03 + >>> res = sigloss.run() # doctest: +SKIP + + + """ + input_spec = SigLossInputSpec + output_spec = SigLossOuputSpec + _cmd = 'sigloss' + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']) and \ + isdefined(self.inputs.in_file): + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_sigloss') + return outputs + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None + + +class Reorient2StdInputSpec(FSLCommandInputSpec): + in_file = File(exists=True, mandatory=True, argstr="%s") + out_file = File(genfile=True, hash_files=False, argstr="%s") + + +class Reorient2StdOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class Reorient2Std(FSLCommand): + """fslreorient2std is a tool for reorienting the image to match the + approximate orientation of the standard template images (MNI152). + + + Examples + -------- + + >>> reorient = Reorient2Std() + >>> reorient.inputs.in_file = "functional.nii" + >>> res = reorient.run() # doctest: +SKIP + + + """ + _cmd = 'fslreorient2std' + input_spec = Reorient2StdInputSpec + output_spec = Reorient2StdOutputSpec + + def _gen_filename(self, name): + if name == 'out_file': + return self._gen_fname(self.inputs.in_file, suffix="_reoriented") + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + if not isdefined(self.inputs.out_file): + outputs['out_file'] = self._gen_filename('out_file') + else: + outputs['out_file'] = os.path.abspath(self.inputs.out_file) + return outputs + + +class InvWarpInputSpec(FSLCommandInputSpec): + warp = File( + exists=True, + argstr='--warp=%s', + mandatory=True, + desc=('Name of file containing warp-coefficients/fields. 
This ' + 'would typically be the output from the --cout switch of' + ' fnirt (but can also use fields, like the output from ' + '--fout).')) + reference = File( + exists=True, + argstr='--ref=%s', + mandatory=True, + desc=('Name of a file in target space. Note that the ' + 'target space is now different from the target ' + 'space that was used to create the --warp file. It ' + 'would typically be the file that was specified ' + 'with the --in argument when running fnirt.')) + inverse_warp = File( + argstr='--out=%s', + name_source=['warp'], + hash_files=False, + name_template='%s_inverse', + desc=('Name of output file, containing warps that are ' + 'the "reverse" of those in --warp. This will be ' + 'a field-file (rather than a file of spline ' + 'coefficients), and it will have any affine ' + 'component included as part of the ' + 'displacements.')) + absolute = traits.Bool( + argstr='--abs', + xor=['relative'], + desc=('If set it indicates that the warps in --warp' + ' should be interpreted as absolute, provided' + ' that it is not created by fnirt (which ' + 'always uses relative warps). If set it also ' + 'indicates that the output --out should be ' + 'absolute.')) + relative = traits.Bool( + argstr='--rel', + xor=['absolute'], + desc=('If set it indicates that the warps in --warp' + ' should be interpreted as relative. I.e. the' + ' values in --warp are displacements from the' + ' coordinates in the --ref space. If set it ' + 'also indicates that the output --out should ' + 'be relative.')) + niter = traits.Int( + argstr='--niter=%d', + desc=('Determines how many iterations of the ' + 'gradient-descent search that should be run.')) + regularise = traits.Float( + argstr='--regularise=%f', + desc='Regularization strength (deafult=1.0).') + noconstraint = traits.Bool( + argstr='--noconstraint', desc='Do not apply Jacobian constraint') + jacobian_min = traits.Float( + argstr='--jmin=%f', + desc=('Minimum acceptable Jacobian value for ' + 'constraint (default 0.01)')) + jacobian_max = traits.Float( + argstr='--jmax=%f', + desc=('Maximum acceptable Jacobian value for ' + 'constraint (default 100.0)')) + + +class InvWarpOutputSpec(TraitedSpec): + inverse_warp = File( + exists=True, + desc=('Name of output file, containing warps that are ' + 'the "reverse" of those in --warp.')) + + +class InvWarp(FSLCommand): + """ + Use FSL Invwarp to invert a FNIRT warp + + + Examples + -------- + + >>> from nipype.interfaces.fsl import InvWarp + >>> invwarp = InvWarp() + >>> invwarp.inputs.warp = "struct2mni.nii" + >>> invwarp.inputs.reference = "anatomical.nii" + >>> invwarp.inputs.output_type = "NIFTI_GZ" + >>> invwarp.cmdline + 'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii' + >>> res = invwarp.run() # doctest: +SKIP + + + """ + + input_spec = InvWarpInputSpec + output_spec = InvWarpOutputSpec + + _cmd = 'invwarp' + + +class ComplexInputSpec(FSLCommandInputSpec): + complex_in_file = File(exists=True, argstr="%s", position=2) + complex_in_file2 = File(exists=True, argstr="%s", position=3) + + real_in_file = File(exists=True, argstr="%s", position=2) + imaginary_in_file = File(exists=True, argstr="%s", position=3) + magnitude_in_file = File(exists=True, argstr="%s", position=2) + phase_in_file = File(exists=True, argstr='%s', position=3) + + _ofs = [ + 'complex_out_file', 'magnitude_out_file', 'phase_out_file', + 'real_out_file', 'imaginary_out_file' + ] + _conversion = [ + 'real_polar', + 'real_cartesian', + 'complex_cartesian', + 'complex_polar', + 'complex_split', 
+        'complex_merge',
+    ]
+
+    complex_out_file = File(
+        genfile=True, argstr="%s", position=-3, xor=_ofs + _conversion[:2])
+    magnitude_out_file = File(
+        genfile=True,
+        argstr="%s",
+        position=-4,
+        xor=_ofs[:1] + _ofs[3:] + _conversion[1:])
+    phase_out_file = File(
+        genfile=True,
+        argstr="%s",
+        position=-3,
+        xor=_ofs[:1] + _ofs[3:] + _conversion[1:])
+    real_out_file = File(
+        genfile=True,
+        argstr="%s",
+        position=-4,
+        xor=_ofs[:3] + _conversion[:1] + _conversion[2:])
+    imaginary_out_file = File(
+        genfile=True,
+        argstr="%s",
+        position=-3,
+        xor=_ofs[:3] + _conversion[:1] + _conversion[2:])
+
+    start_vol = traits.Int(position=-2, argstr='%d')
+    end_vol = traits.Int(position=-1, argstr='%d')
+
+    real_polar = traits.Bool(
+        argstr='-realpolar',
+        xor=_conversion,
+        position=1,
+    )
+    # requires=['complex_in_file','magnitude_out_file','phase_out_file'])
+    real_cartesian = traits.Bool(
+        argstr='-realcartesian',
+        xor=_conversion,
+        position=1,
+    )
+    # requires=['complex_in_file','real_out_file','imaginary_out_file'])
+    complex_cartesian = traits.Bool(
+        argstr='-complex',
+        xor=_conversion,
+        position=1,
+    )
+    # requires=['real_in_file','imaginary_in_file','complex_out_file'])
+    complex_polar = traits.Bool(
+        argstr='-complexpolar',
+        xor=_conversion,
+        position=1,
+    )
+    # requires=['magnitude_in_file','phase_in_file',
+    #           'magnitude_out_file','phase_out_file'])
+    complex_split = traits.Bool(
+        argstr='-complexsplit',
+        xor=_conversion,
+        position=1,
+    )
+    # requires=['complex_in_file','complex_out_file'])
+    complex_merge = traits.Bool(
+        argstr='-complexmerge',
+        xor=_conversion + ['start_vol', 'end_vol'],
+        position=1,
+    )
+
+
+# requires=['complex_in_file','complex_in_file2','complex_out_file'])
+
+
+class ComplexOuputSpec(TraitedSpec):
+    magnitude_out_file = File()
+    phase_out_file = File()
+    real_out_file = File()
+    imaginary_out_file = File()
+    complex_out_file = File()
+
+
+class Complex(FSLCommand):
+    """fslcomplex is a tool for converting complex data
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import Complex
+    >>> cplx = Complex()
+    >>> cplx.inputs.complex_in_file = "complex.nii"
+    >>> cplx.inputs.real_polar = True
+    >>> res = cplx.run() # doctest: +SKIP
+
+
+    """
+    _cmd = 'fslcomplex'
+    input_spec = ComplexInputSpec
+    output_spec = ComplexOuputSpec
+
+    def _parse_inputs(self, skip=None):
+        if skip is None:
+            skip = []
+        if self.inputs.real_cartesian:
+            skip += self.inputs._ofs[:3]
+        elif self.inputs.real_polar:
+            skip += self.inputs._ofs[:1] + self.inputs._ofs[3:]
+        else:
+            skip += self.inputs._ofs[1:]
+        return super(Complex, self)._parse_inputs(skip)
+
+    def _gen_filename(self, name):
+        if name == 'complex_out_file':
+            if self.inputs.complex_cartesian:
+                in_file = self.inputs.real_in_file
+            elif self.inputs.complex_polar:
+                in_file = self.inputs.magnitude_in_file
+            elif self.inputs.complex_split or self.inputs.complex_merge:
+                in_file = self.inputs.complex_in_file
+            else:
+                return None
+            return self._gen_fname(in_file, suffix="_cplx")
+        elif name == 'magnitude_out_file':
+            return self._gen_fname(self.inputs.complex_in_file, suffix="_mag")
+        elif name == 'phase_out_file':
+            return self._gen_fname(
+                self.inputs.complex_in_file, suffix="_phase")
+        elif name == 'real_out_file':
+            return self._gen_fname(self.inputs.complex_in_file, suffix="_real")
+        elif name == 'imaginary_out_file':
+            return self._gen_fname(self.inputs.complex_in_file, suffix="_imag")
+        return None
+
+    def _get_output(self, name):
+        output = getattr(self.inputs, name)
+        if not isdefined(output):
+            output = self._gen_filename(name)
+        return os.path.abspath(output)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if self.inputs.complex_cartesian or self.inputs.complex_polar or \
+                self.inputs.complex_split or self.inputs.complex_merge:
+            outputs['complex_out_file'] = self._get_output('complex_out_file')
+        elif self.inputs.real_cartesian:
+            outputs['real_out_file'] = self._get_output('real_out_file')
+            outputs['imaginary_out_file'] = self._get_output(
+                'imaginary_out_file')
+        elif self.inputs.real_polar:
+            outputs['magnitude_out_file'] = self._get_output(
+                'magnitude_out_file')
+            outputs['phase_out_file'] = self._get_output('phase_out_file')
+        return outputs
+
+
+class WarpUtilsInputSpec(FSLCommandInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='--in=%s',
+        mandatory=True,
+        desc=('Name of file containing warp-coefficients/fields. This '
+              'would typically be the output from the --cout switch of '
+              'fnirt (but can also use fields, like the output from '
+              '--fout).'))
+    reference = File(
+        exists=True,
+        argstr='--ref=%s',
+        mandatory=True,
+        desc=('Name of a file in target space. Note that the '
+              'target space is now different from the target '
+              'space that was used to create the --warp file. It '
+              'would typically be the file that was specified '
+              'with the --in argument when running fnirt.'))
+
+    out_format = traits.Enum(
+        'spline',
+        'field',
+        argstr='--outformat=%s',
+        desc=('Specifies the output format. If set to field (default) '
+              'the output will be a (4D) field-file. If set to spline '
+              'the format will be a (4D) file of spline coefficients.'))
+
+    warp_resolution = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        argstr='--warpres=%0.4f,%0.4f,%0.4f',
+        desc=('Specifies the resolution/knot-spacing of the splines pertaining'
+              ' to the coefficients in the --out file. This parameter is only '
+              'relevant if --outformat is set to spline. It should be noted '
+              'that if the --in file has a higher resolution, the resulting '
+              'coefficients will pertain to the closest (in a least-squares'
+              ' sense) file in the space of fields with the --warpres'
+              ' resolution. It should also be noted that the resolution '
+              'will always be an integer multiple of the voxel '
+              'size.'))
+
+    knot_space = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        traits.Int,
+        argstr='--knotspace=%d,%d,%d',
+        desc=('Alternative (to --warpres) specification of the resolution of '
+              'the output spline-field.'))
+
+    out_file = File(
+        argstr='--out=%s',
+        position=-1,
+        name_source=['in_file'],
+        output_name='out_file',
+        desc=('Name of output file. The format of the output depends on what '
+              'other parameters are set. The default format is a (4D) '
+              'field-file. If the --outformat is set to spline the format '
+              'will be a (4D) file of spline coefficients.'))
+
+    write_jacobian = traits.Bool(
+        False,
+        mandatory=True,
+        usedefault=True,
+        desc='Switch on --jac flag with automatically generated filename')
+
+    out_jacobian = File(
+        argstr='--jac=%s',
+        desc=('Specifies that a (3D) file of Jacobian determinants '
+              'corresponding to --in should be produced and written to '
+              'filename.'))
+
+    with_affine = traits.Bool(
+        False,
+        argstr='--withaff',
+        desc=('Specifies that the affine transform (i.e. that which was '
+              'specified for the --aff parameter in fnirt) should be '
+              'included as displacements in the --out file. That can be '
+              'useful for interfacing with software that cannot decode '
+              'FSL/fnirt coefficient-files (where the affine transform is '
+              'stored separately from the displacements).'))
+
+
+class WarpUtilsOutputSpec(TraitedSpec):
+    out_file = File(
+        desc=('Name of output file, containing the warp as field or '
+              'coefficients.'))
+    out_jacobian = File(
+        desc=('Name of output file, containing the map of the determinant of '
+              'the Jacobian'))
+
+
+class WarpUtils(FSLCommand):
+    """Use FSL fnirtfileutils to convert field->coefficients,
+    coefficients->field, coefficients->other_coefficients etc.
+
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import WarpUtils
+    >>> warputils = WarpUtils()
+    >>> warputils.inputs.in_file = "warpfield.nii"
+    >>> warputils.inputs.reference = "T1.nii"
+    >>> warputils.inputs.out_format = 'spline'
+    >>> warputils.inputs.warp_resolution = (10,10,10)
+    >>> warputils.inputs.output_type = "NIFTI_GZ"
+    >>> warputils.cmdline # doctest: +ELLIPSIS
+    'fnirtfileutils --in=warpfield.nii --outformat=spline --ref=T1.nii --warpres=10.0000,10.0000,10.0000 --out=warpfield_coeffs.nii.gz'
+    >>> res = warputils.run() # doctest: +SKIP
+
+
+    """
+
+    input_spec = WarpUtilsInputSpec
+    output_spec = WarpUtilsOutputSpec
+
+    _cmd = 'fnirtfileutils'
+
+    def _parse_inputs(self, skip=None):
+        if skip is None:
+            skip = []
+
+        suffix = 'field'
+        if (isdefined(self.inputs.out_format)
+                and self.inputs.out_format == 'spline'):
+            suffix = 'coeffs'
+
+        trait_spec = self.inputs.trait('out_file')
+        trait_spec.name_template = "%s_" + suffix
+
+        if self.inputs.write_jacobian:
+            if not isdefined(self.inputs.out_jacobian):
+                jac_spec = self.inputs.trait('out_jacobian')
+                jac_spec.name_source = ['in_file']
+                jac_spec.name_template = '%s_jac'
+                jac_spec.output_name = 'out_jacobian'
+        else:
+            skip += ['out_jacobian']
+
+        skip += ['write_jacobian']
+        return super(WarpUtils, self)._parse_inputs(skip=skip)
+
+
+class ConvertWarpInputSpec(FSLCommandInputSpec):
+    reference = File(
+        exists=True,
+        argstr='--ref=%s',
+        mandatory=True,
+        position=1,
+        desc='Name of a file in target space of the full transform.')
+
+    out_file = File(
+        argstr='--out=%s',
+        position=-1,
+        name_source=['reference'],
+        name_template='%s_concatwarp',
+        output_name='out_file',
+        desc=('Name of output file, containing warps that are the combination '
+              'of all those given as arguments. The format of this will be a '
+              'field-file (rather than spline coefficients) with any affine '
+              'components included.'))
+
+    premat = File(
+        exists=True,
+        argstr='--premat=%s',
+        desc='filename for pre-transform (affine matrix)')
+
+    warp1 = File(
+        exists=True,
+        argstr='--warp1=%s',
+        desc='Name of file containing initial '
+        'warp-fields/coefficients (follows premat). This could '
+        'e.g. be a fnirt-transform from a subjects structural '
+        'scan to an average of a group of subjects.')
+
+    midmat = File(
+        exists=True,
+        argstr="--midmat=%s",
+        desc="Name of file containing mid-warp-affine transform")
+
+    warp2 = File(
+        exists=True,
+        argstr='--warp2=%s',
+        desc='Name of file containing secondary warp-fields/coefficients '
+        '(after warp1/midmat but before postmat). This could e.g. be a '
+        'fnirt-transform from the average of a group of subjects to some '
+        'standard space (e.g. MNI152).')
+
+    postmat = File(
+        exists=True,
+        argstr='--postmat=%s',
+        desc='Name of file containing an affine transform (applied last). It '
+        'could e.g. be an affine transform that maps the MNI152-space '
+        'into a better approximation to the Talairach-space (if indeed '
+        'there is one).')
+
+    shift_in_file = File(
+        exists=True,
+        argstr='--shiftmap=%s',
+        desc='Name of file containing a "shiftmap", a non-linear transform '
+        'with displacements only in one direction (applied first, before '
+        'premat). This would typically be a fieldmap that has been '
+        'pre-processed using fugue that maps a subjects functional (EPI) '
+        'data onto an undistorted space (i.e. a space that corresponds '
+        'to his/her true anatomy).')
+
+    shift_direction = traits.Enum(
+        'y-',
+        'y',
+        'x',
+        'x-',
+        'z',
+        'z-',
+        argstr="--shiftdir=%s",
+        requires=['shift_in_file'],
+        desc='Indicates the direction that the distortions from '
+        '--shiftmap go. It depends on the direction and '
+        'polarity of the phase-encoding in the EPI sequence.')
+
+    cons_jacobian = traits.Bool(
+        False,
+        argstr='--constrainj',
+        desc='Constrain the Jacobian of the warpfield to lie within specified '
+        'min/max limits.')
+
+    jacobian_min = traits.Float(
+        argstr='--jmin=%f',
+        desc='Minimum acceptable Jacobian value for '
+        'constraint (default 0.01)')
+    jacobian_max = traits.Float(
+        argstr='--jmax=%f',
+        desc='Maximum acceptable Jacobian value for '
+        'constraint (default 100.0)')
+
+    abswarp = traits.Bool(
+        argstr='--abs',
+        xor=['relwarp'],
+        desc='If set it indicates that the warps in --warp1 and --warp2 should'
+        ' be interpreted as absolute. I.e. the values in --warp1/2 are '
+        'the coordinates in the next space, rather than displacements. '
+        'This flag is ignored if --warp1/2 was created by fnirt, which '
+        'always creates relative displacements.')
+
+    relwarp = traits.Bool(
+        argstr='--rel',
+        xor=['abswarp'],
+        desc='If set it indicates that the warps in --warp1/2 should be '
+        'interpreted as relative. I.e. the values in --warp1/2 are '
+        'displacements from the coordinates in the next space.')
+
+    out_abswarp = traits.Bool(
+        argstr='--absout',
+        xor=['out_relwarp'],
+        desc='If set it indicates that the warps in --out should be absolute, '
+        'i.e. the values in --out are displacements from the coordinates '
+        'in --ref.')
+
+    out_relwarp = traits.Bool(
+        argstr='--relout',
+        xor=['out_abswarp'],
+        desc='If set it indicates that the warps in --out should be relative, '
+        'i.e. the values in --out are displacements from the coordinates '
+        'in --ref.')
+
+
+class ConvertWarpOutputSpec(TraitedSpec):
+    out_file = File(
+        exists=True,
+        desc='Name of output file, containing the warp as field or '
+        'coefficients.')
+
+
+class ConvertWarp(FSLCommand):
+    """Use FSL convertwarp for combining multiple transforms into one.
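+
+    A sketch of a fuller chain (these file names are hypothetical, not part
+    of the packaged examples): combining a functional-to-structural affine
+    (``--premat``) with a structural-to-standard fnirt warp (``--warp1``)
+    into a single warp field::
+
+        cw = ConvertWarp()
+        cw.inputs.reference = 'mni.nii'
+        cw.inputs.premat = 'func2struct.mat'
+        cw.inputs.warp1 = 'struct2mni_warp.nii'
+        cw.inputs.relwarp = True
+        res = cw.run()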
+
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import ConvertWarp
+    >>> warputils = ConvertWarp()
+    >>> warputils.inputs.warp1 = "warpfield.nii"
+    >>> warputils.inputs.reference = "T1.nii"
+    >>> warputils.inputs.relwarp = True
+    >>> warputils.inputs.output_type = "NIFTI_GZ"
+    >>> warputils.cmdline # doctest: +ELLIPSIS
+    'convertwarp --ref=T1.nii --rel --warp1=warpfield.nii --out=T1_concatwarp.nii.gz'
+    >>> res = warputils.run() # doctest: +SKIP
+
+
+    """
+
+    input_spec = ConvertWarpInputSpec
+    output_spec = ConvertWarpOutputSpec
+    _cmd = 'convertwarp'
+
+
+class WarpPointsBaseInputSpec(CommandLineInputSpec):
+    in_coords = File(
+        exists=True,
+        position=-1,
+        argstr='%s',
+        mandatory=True,
+        desc='filename of file containing coordinates')
+    xfm_file = File(
+        exists=True,
+        argstr='-xfm %s',
+        xor=['warp_file'],
+        desc='filename of affine transform (e.g. source2dest.mat)')
+    warp_file = File(
+        exists=True,
+        argstr='-warp %s',
+        xor=['xfm_file'],
+        desc='filename of warpfield (e.g. '
+        'intermediate2dest_warp.nii.gz)')
+    coord_vox = traits.Bool(
+        True,
+        argstr='-vox',
+        xor=['coord_mm'],
+        desc='all coordinates in voxels - default')
+    coord_mm = traits.Bool(
+        False, argstr='-mm', xor=['coord_vox'], desc='all coordinates in mm')
+    out_file = File(
+        name_source='in_coords',
+        name_template='%s_warped',
+        output_name='out_file',
+        desc='output file name')
+
+
+class WarpPointsInputSpec(WarpPointsBaseInputSpec):
+    src_file = File(
+        exists=True,
+        argstr='-src %s',
+        mandatory=True,
+        desc='filename of source image')
+    dest_file = File(
+        exists=True,
+        argstr='-dest %s',
+        mandatory=True,
+        desc='filename of destination image')
+
+
+class WarpPointsOutputSpec(TraitedSpec):
+    out_file = File(
+        exists=True,
+        desc='Name of output file, containing the transformed coordinates.')
+
+
+class WarpPoints(CommandLine):
+    """Use FSL img2imgcoord to transform point sets. Accepts plain text
+    files and vtk files.
+
+    .. Note:: transformation of TrackVis trk files is not yet implemented
+
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import WarpPoints
+    >>> warppoints = WarpPoints()
+    >>> warppoints.inputs.in_coords = 'surf.txt'
+    >>> warppoints.inputs.src_file = 'epi.nii'
+    >>> warppoints.inputs.dest_file = 'T1.nii'
+    >>> warppoints.inputs.warp_file = 'warpfield.nii'
+    >>> warppoints.inputs.coord_mm = True
+    >>> warppoints.cmdline # doctest: +ELLIPSIS
+    'img2imgcoord -mm -dest T1.nii -src epi.nii -warp warpfield.nii surf.txt'
+    >>> res = warppoints.run() # doctest: +SKIP
+
+
+    """
+
+    input_spec = WarpPointsInputSpec
+    output_spec = WarpPointsOutputSpec
+    _cmd = 'img2imgcoord'
+    _terminal_output = 'stream'
+
+    def __init__(self, command=None, **inputs):
+        self._tmpfile = None
+        self._in_file = None
+        self._outformat = None
+
+        super(WarpPoints, self).__init__(command=command, **inputs)
+
+    def _format_arg(self, name, trait_spec, value):
+        if name == 'out_file':
+            return ''
+
+        return super(WarpPoints, self)._format_arg(name, trait_spec, value)
+
+    def _parse_inputs(self, skip=None):
+        fname, ext = op.splitext(self.inputs.in_coords)
+        setattr(self, '_in_file', fname)
+        setattr(self, '_outformat', ext[1:])
+        first_args = super(WarpPoints,
+                           self)._parse_inputs(skip=['in_coords', 'out_file'])
+
+        second_args = fname + '.txt'
+
+        if ext in ['.vtk', '.trk']:
+            if self._tmpfile is None:
+                self._tmpfile = tempfile.NamedTemporaryFile(
+                    suffix='.txt', dir=os.getcwd(), delete=False).name
+            second_args = self._tmpfile
+
+        return first_args + [second_args]
+
+    def _vtk_to_coords(self, in_file, out_file=None):
+        from ..vtkbase import tvtk
+        from ...interfaces import vtkbase as VTKInfo
+
+        if VTKInfo.no_tvtk():
+            raise ImportError(
+                'TVTK is required and tvtk package was not found')
+
+        reader = tvtk.PolyDataReader(file_name=in_file + '.vtk')
+        reader.update()
+        mesh = VTKInfo.vtk_output(reader)
+        points = mesh.points
+
+        if out_file is None:
+            out_file = op.splitext(in_file)[0] + '.txt'
+
+        np.savetxt(out_file, points)
+        return out_file
+
+    def _coords_to_vtk(self, points, out_file):
+        from ..vtkbase import tvtk
+        from ...interfaces import vtkbase as VTKInfo
+
+        if VTKInfo.no_tvtk():
+            raise ImportError(
+                'TVTK is required and tvtk package was not found')
+
+        reader = tvtk.PolyDataReader(file_name=self.inputs.in_coords)
+        reader.update()
+
+        mesh = VTKInfo.vtk_output(reader)
+        mesh.points = points
+
+        writer = tvtk.PolyDataWriter(file_name=out_file)
+        VTKInfo.configure_input_data(writer, mesh)
+        writer.write()
+
+    def _trk_to_coords(self, in_file, out_file=None):
+        from nibabel.trackvis import TrackvisFile
+        trkfile = TrackvisFile.from_file(in_file)
+        streamlines = trkfile.streamlines
+
+        if out_file is None:
+            out_file, _ = op.splitext(in_file)
+
+        np.savetxt(out_file + '.txt', streamlines)
+        return out_file + '.txt'
+
+    def _coords_to_trk(self, points, out_file):
+        raise NotImplementedError('trk files are not yet supported')
+
+    def _overload_extension(self, value, name):
+        if name == 'out_file':
+            return '%s.%s' % (value, getattr(self, '_outformat'))
+
+    def _run_interface(self, runtime):
+        fname = getattr(self, '_in_file')
+        outformat = getattr(self, '_outformat')
+        tmpfile = None
+
+        if outformat == 'vtk':
+            tmpfile = self._tmpfile
+            self._vtk_to_coords(fname, out_file=tmpfile)
+        elif outformat == 'trk':
+            tmpfile = self._tmpfile
+            self._trk_to_coords(fname, out_file=tmpfile)
+
+        runtime = super(WarpPoints, self)._run_interface(runtime)
+        newpoints = np.fromstring(
+            '\n'.join(runtime.stdout.split('\n')[1:]), sep=' ')
+
+        if tmpfile is not None:
+            try:
+                os.remove(tmpfile)
+            except OSError:
+                pass
+
+        out_file = self._filename_from_source('out_file')
+
+        if outformat == 'vtk':
+            self._coords_to_vtk(newpoints, out_file)
+        elif outformat == 'trk':
+            self._coords_to_trk(newpoints, out_file)
+        else:
+            np.savetxt(out_file, newpoints.reshape(-1, 3))
+
+        return runtime
+
+
+class WarpPointsToStdInputSpec(WarpPointsBaseInputSpec):
+    img_file = File(
+        exists=True,
+        argstr='-img %s',
+        mandatory=True,
+        desc=('filename of input image'))
+    std_file = File(
+        exists=True,
+        argstr='-std %s',
+        mandatory=True,
+        desc=('filename of destination image'))
+    premat_file = File(
+        exists=True,
+        argstr='-premat %s',
+        desc=('filename of pre-warp affine transform '
+              '(e.g. example_func2highres.mat)'))
+
+
+class WarpPointsToStd(WarpPoints):
+    """
+    Use FSL img2stdcoord to transform point sets to standard space
+    coordinates. Accepts plain text files and vtk files.
+
+    .. Note:: transformation of TrackVis trk files is not yet implemented
+
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import WarpPointsToStd
+    >>> warppoints = WarpPointsToStd()
+    >>> warppoints.inputs.in_coords = 'surf.txt'
+    >>> warppoints.inputs.img_file = 'T1.nii'
+    >>> warppoints.inputs.std_file = 'mni.nii'
+    >>> warppoints.inputs.warp_file = 'warpfield.nii'
+    >>> warppoints.inputs.coord_mm = True
+    >>> warppoints.cmdline # doctest: +ELLIPSIS
+    'img2stdcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt'
+    >>> res = warppoints.run() # doctest: +SKIP
+
+
+    """
+
+    input_spec = WarpPointsToStdInputSpec
+    output_spec = WarpPointsOutputSpec
+    _cmd = 'img2stdcoord'
+    _terminal_output = 'file_split'
+
+
+class WarpPointsFromStdInputSpec(CommandLineInputSpec):
+    img_file = File(
+        exists=True,
+        argstr='-img %s',
+        mandatory=True,
+        desc='filename of a destination image')
+    std_file = File(
+        exists=True,
+        argstr='-std %s',
+        mandatory=True,
+        desc='filename of the image in standard space')
+    in_coords = File(
+        exists=True,
+        position=-2,
+        argstr='%s',
+        mandatory=True,
+        desc='filename of file containing coordinates')
+    xfm_file = File(
+        exists=True,
+        argstr='-xfm %s',
+        xor=['warp_file'],
+        desc='filename of affine transform (e.g. source2dest.mat)')
+    warp_file = File(
+        exists=True,
+        argstr='-warp %s',
+        xor=['xfm_file'],
+        desc='filename of warpfield (e.g. '
+        'intermediate2dest_warp.nii.gz)')
+    coord_vox = traits.Bool(
+        True,
+        argstr='-vox',
+        xor=['coord_mm'],
+        desc='all coordinates in voxels - default')
+    coord_mm = traits.Bool(
+        False, argstr='-mm', xor=['coord_vox'], desc='all coordinates in mm')
+
+
+class WarpPointsFromStd(CommandLine):
+    """
+    Use FSL std2imgcoord to transform point sets from standard space
+    coordinates back into image space. Accepts plain text coordinate
+    files.
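+
+    The transformed coordinates are read from the command's standard
+    output, which nipype captures to ``stdout.nipype`` in the working
+    directory and returns as ``out_file`` (see ``_list_outputs`` below).
+    A sketch of loading them once a run ``res`` has completed (the first
+    line of std2imgcoord output may be a header, depending on the FSL
+    version)::
+
+        import numpy as np
+        points = np.loadtxt(res.outputs.out_file, skiprows=1)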
+
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.fsl import WarpPointsFromStd
+    >>> warppoints = WarpPointsFromStd()
+    >>> warppoints.inputs.in_coords = 'surf.txt'
+    >>> warppoints.inputs.img_file = 'T1.nii'
+    >>> warppoints.inputs.std_file = 'mni.nii'
+    >>> warppoints.inputs.warp_file = 'warpfield.nii'
+    >>> warppoints.inputs.coord_mm = True
+    >>> warppoints.cmdline # doctest: +ELLIPSIS
+    'std2imgcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt'
+    >>> res = warppoints.run() # doctest: +SKIP
+
+
+    """
+
+    input_spec = WarpPointsFromStdInputSpec
+    output_spec = WarpPointsOutputSpec
+    _cmd = 'std2imgcoord'
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath('stdout.nipype')
+        return outputs
+
+
+class MotionOutliersInputSpec(FSLCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc="unfiltered 4D image",
+        argstr="-i %s")
+    out_file = File(
+        argstr="-o %s",
+        name_source='in_file',
+        name_template='%s_outliers.txt',
+        keep_extension=True,
+        desc='output outlier file name',
+        hash_files=False)
+    mask = File(
+        exists=True, argstr="-m %s", desc="mask image for calculating metric")
+    metric = traits.Enum(
+        'refrms', ['refrms', 'dvars', 'refmse', 'fd', 'fdrms'],
+        argstr="--%s",
+        desc='metrics: refrms - RMS intensity difference to reference volume '
+        'as metric [default metric], refmse - Mean Square Error version '
+        'of refrms (used in original version of fsl_motion_outliers), '
+        'dvars - DVARS, fd - frame displacement, fdrms - FD with RMS '
+        'matrix calculation')
+    threshold = traits.Float(
+        argstr="--thresh=%g",
+        desc=("specify absolute threshold value "
+              "(otherwise use box-plot cutoff = P75 + "
+              "1.5*IQR)"))
+    no_motion_correction = traits.Bool(
+        argstr="--nomoco",
+        desc="do not run motion correction (assumed already done)")
+    dummy = traits.Int(
+        argstr="--dummy=%d",
+        desc='number of dummy scans to delete (before running anything and '
+        'creating EVs)')
+    out_metric_values = File(
+        argstr="-s %s",
+        name_source='in_file',
+        name_template='%s_metrics.txt',
+        keep_extension=True,
+        desc='output metric values (DVARS etc.) file name',
+        hash_files=False)
+    out_metric_plot = File(
+        argstr="-p %s",
+        name_source='in_file',
+        name_template='%s_metrics.png',
+        hash_files=False,
+        keep_extension=True,
+        desc='output metric values plot (DVARS etc.) file name')
+
+
+class MotionOutliersOutputSpec(TraitedSpec):
+    out_file = File(exists=True)
+    out_metric_values = File(exists=True)
+    out_metric_plot = File(exists=True)
+
+
+class MotionOutliers(FSLCommand):
+    """
+    Use FSL `fsl_motion_outliers <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FSLMotionOutliers>`_
+    to find outliers in timeseries (4D) data.
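+
+    The matrix written to ``out_file`` has one row per timepoint and one
+    column per detected outlier volume, so it can be passed directly as
+    extra confound regressors to a GLM. A loading sketch (the file name
+    matches the example below)::
+
+        import numpy as np
+        outlier_evs = np.loadtxt('epi_outliers.txt', ndmin=2)
+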
+ Examples + -------- + >>> from nipype.interfaces.fsl import MotionOutliers + >>> mo = MotionOutliers() + >>> mo.inputs.in_file = "epi.nii" + >>> mo.cmdline # doctest: +ELLIPSIS + 'fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt' + >>> res = mo.run() # doctest: +SKIP + """ + + input_spec = MotionOutliersInputSpec + output_spec = MotionOutliersOutputSpec + _cmd = 'fsl_motion_outliers' diff --git a/nipype/interfaces/image.py b/nipype/interfaces/image.py new file mode 100644 index 0000000000..8c47420063 --- /dev/null +++ b/nipype/interfaces/image.py @@ -0,0 +1,234 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import numpy as np +import nibabel as nb + +from ..utils.filemanip import fname_presuffix +from .base import (SimpleInterface, TraitedSpec, BaseInterfaceInputSpec, + traits, File) + + +class RescaleInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, + desc='Skull-stripped image to rescale') + ref_file = File(exists=True, mandatory=True, + desc='Skull-stripped reference image') + invert = traits.Bool(desc='Invert contrast of rescaled image') + percentile = traits.Range(low=0., high=50., value=0., usedefault=True, + desc='Percentile to use for reference to allow ' + 'for outliers - 1 indicates the 1st and ' + '99th percentiles in the input file will ' + 'be mapped to the 99th and 1st percentiles ' + 'in the reference; 0 indicates minima and ' + 'maxima will be mapped') + + +class RescaleOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Rescaled image') + + +class Rescale(SimpleInterface): + """Rescale an image + + Rescales the non-zero portion of ``in_file`` to match the bounds of the + non-zero portion of ``ref_file``. + Reference values in the input and reference images are defined by the + ``percentile`` parameter, and the reference values in each image are + identified and the remaining values are scaled accordingly. + In the case of ``percentile == 0``, the reference values are the maxima + and minima of each image. + If the ``invert`` parameter is set, the input file is inverted prior to + rescaling. + + Examples + -------- + + To use a high-resolution T1w image as a registration target for a T2\* + image, it may be useful to invert the T1w image and rescale to the T2\* + range. + Using the 1st and 99th percentiles may reduce the impact of outlier + voxels. + + >>> from nipype.interfaces.image import Rescale + >>> invert_t1w = Rescale(invert=True) + >>> invert_t1w.inputs.in_file = 'structural.nii' + >>> invert_t1w.inputs.ref_file = 'functional.nii' + >>> invert_t1w.inputs.percentile = 1. + >>> res = invert_t1w.run() # doctest: +SKIP + + """ + input_spec = RescaleInputSpec + output_spec = RescaleOutputSpec + + def _run_interface(self, runtime): + img = nb.load(self.inputs.in_file) + data = img.get_data() + ref_data = nb.load(self.inputs.ref_file).get_data() + + in_mask = data > 0 + ref_mask = ref_data > 0 + + q = [self.inputs.percentile, 100. 
- self.inputs.percentile] + in_low, in_high = np.percentile(data[in_mask], q) + ref_low, ref_high = np.percentile(ref_data[ref_mask], q) + scale_factor = (ref_high - ref_low) / (in_high - in_low) + + signal = in_high - data if self.inputs.invert else data - in_low + out_data = in_mask * (signal * scale_factor + ref_low) + + suffix = '_inv' if self.inputs.invert else '_rescaled' + out_file = fname_presuffix(self.inputs.in_file, suffix=suffix, + newpath=runtime.cwd) + img.__class__(out_data, img.affine, img.header).to_filename(out_file) + + self._results['out_file'] = out_file + return runtime + + +_axes = ('RL', 'AP', 'SI') +_orientations = tuple( + ''.join((x[i], y[j], z[k])) + for x in _axes for y in _axes for z in _axes + if x != y != z != x + for i in (0, 1) for j in (0, 1) for k in (0, 1)) + + +class ReorientInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc='Input image') + orientation = traits.Enum(_orientations, usedefault=True, + desc='Target axis orientation') + + +class ReorientOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Reoriented image') + transform = File(exists=True, + desc='Affine transform from input orientation to output') + + +class Reorient(SimpleInterface): + """Conform an image to a given orientation + +Flips and reorder the image data array so that the axes match the +directions indicated in ``orientation``. +The default ``RAS`` orientation corresponds to the first axis being ordered +from left to right, the second axis from posterior to anterior, and the +third axis from inferior to superior. + +For oblique images, the original orientation is considered to be the +closest plumb orientation. + +No resampling is performed, and thus the output image is not de-obliqued +or registered to any other image or template. + +The effective transform is calculated from the original affine matrix to +the reoriented affine matrix. + +Examples +-------- + +If an image is not reoriented, the original file is not modified + +.. testsetup:: + + >>> def print_affine(matrix): + ... print(str(matrix).replace(']', ' ').replace('[', ' ')) + +>>> import numpy as np +>>> from nipype.interfaces.image import Reorient +>>> reorient = Reorient(orientation='LPS') +>>> reorient.inputs.in_file = 'segmentation0.nii.gz' +>>> res = reorient.run() +>>> res.outputs.out_file +'segmentation0.nii.gz' + +>>> print_affine(np.loadtxt(res.outputs.transform)) +1. 0. 0. 0. +0. 1. 0. 0. +0. 0. 1. 0. +0. 0. 0. 1. + +>>> reorient.inputs.orientation = 'RAS' +>>> res = reorient.run() +>>> res.outputs.out_file # doctest: +ELLIPSIS +'.../segmentation0_ras.nii.gz' + +>>> print_affine(np.loadtxt(res.outputs.transform)) +-1. 0. 0. 60. + 0. -1. 0. 72. + 0. 0. 1. 0. + 0. 0. 0. 1. + +.. 
testcleanup:: + + >>> import os + >>> os.unlink(res.outputs.out_file) + >>> os.unlink(res.outputs.transform) + +""" + input_spec = ReorientInputSpec + output_spec = ReorientOutputSpec + + def _run_interface(self, runtime): + from nibabel.orientations import ( + axcodes2ornt, ornt_transform, inv_ornt_aff) + + fname = self.inputs.in_file + orig_img = nb.load(fname) + + # Find transform from current (approximate) orientation to + # target, in nibabel orientation matrix and affine forms + orig_ornt = nb.io_orientation(orig_img.affine) + targ_ornt = axcodes2ornt(self.inputs.orientation) + transform = ornt_transform(orig_ornt, targ_ornt) + affine_xfm = inv_ornt_aff(transform, orig_img.shape) + + # Check can be eliminated when minimum nibabel version >= 2.2 + if hasattr(orig_img, 'as_reoriented'): + reoriented = orig_img.as_reoriented(transform) + else: + reoriented = _as_reoriented_backport(orig_img, transform) + + # Image may be reoriented + if reoriented is not orig_img: + suffix = '_' + self.inputs.orientation.lower() + out_name = fname_presuffix(fname, suffix=suffix, + newpath=runtime.cwd) + reoriented.to_filename(out_name) + else: + out_name = fname + + mat_name = fname_presuffix(fname, suffix='.mat', + newpath=runtime.cwd, use_ext=False) + np.savetxt(mat_name, affine_xfm, fmt='%.08f') + + self._results['out_file'] = out_name + self._results['transform'] = mat_name + + return runtime + + +def _as_reoriented_backport(img, ornt): + """Backport of img.as_reoriented as of nibabel 2.2.0""" + from nibabel.orientations import inv_ornt_aff + if np.array_equal(ornt, [[0, 1], [1, 1], [2, 1]]): + return img + + t_arr = nb.apply_orientation(img.get_data(), ornt) + new_aff = img.affine.dot(inv_ornt_aff(ornt, img.shape)) + reoriented = img.__class__(t_arr, new_aff, img.header) + + if isinstance(reoriented, nb.Nifti1Pair): + # Also apply the transform to the dim_info fields + new_dim = list(reoriented.header.get_dim_info()) + for idx, value in enumerate(new_dim): + # For each value, leave as None if it was that way, + # otherwise check where we have mapped it to + if value is None: + continue + new_dim[idx] = np.where(ornt[:, 0] == idx)[0] + + reoriented.header.set_dim_info(*new_dim) + + return reoriented diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py new file mode 100644 index 0000000000..7a89675e8d --- /dev/null +++ b/nipype/interfaces/io.py @@ -0,0 +1,2864 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Set of interfaces that allow interaction with data. Currently + available interfaces are: + + DataSource: Generic nifti to named Nifti interface + DataSink: Generic named output from interfaces to data store + XNATSource: preliminary interface to XNAT + + To come : + XNATSink +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object, zip, filter, range, open, str + +import glob +import fnmatch +import string +import json +import os +import os.path as op +import shutil +import subprocess +import re +import copy +import tempfile +from os.path import join, dirname +from warnings import warn + +import sqlite3 + +from .. 
import config, logging +from ..utils.filemanip import ( + copyfile, simplify_list, ensure_list, + get_related_files, related_filetype_sets) +from ..utils.misc import human_order_sorted, str2bool +from .base import ( + TraitedSpec, traits, Str, File, Directory, BaseInterface, InputMultiPath, + isdefined, OutputMultiPath, DynamicTraitedSpec, Undefined, BaseInterfaceInputSpec) + +have_pybids = True +try: + from bids import grabbids as gb +except ImportError: + have_pybids = False + +try: + import pyxnat +except: + pass + +try: + import paramiko +except: + pass + +try: + import boto + from boto.s3.connection import S3Connection, OrdinaryCallingFormat +except: + pass + +iflogger = logging.getLogger('nipype.interface') + + +def copytree(src, dst, use_hardlink=False): + """Recursively copy a directory tree using + nipype.utils.filemanip.copyfile() + + This is not a thread-safe routine. However, in the case of creating new + directories, it checks to see if a particular directory has already been + created by another process. + """ + names = os.listdir(src) + try: + os.makedirs(dst) + except OSError as why: + if 'File exists' in why.strerror: + pass + else: + raise why + errors = [] + for name in names: + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + try: + if os.path.isdir(srcname): + copytree(srcname, dstname, use_hardlink) + else: + copyfile( + srcname, + dstname, + True, + hashmethod='content', + use_hardlink=use_hardlink) + except (IOError, os.error) as why: + errors.append((srcname, dstname, str(why))) + # catch the Error from the recursive copytree so that we can + # continue with other files + except Exception as err: + errors.extend(err.args[0]) + if errors: + raise Exception(errors) + + +def add_traits(base, names, trait_type=None): + """ Add traits to a traited class. + + All traits are set to Undefined by default + """ + if trait_type is None: + trait_type = traits.Any + undefined_traits = {} + for key in names: + base.add_trait(key, trait_type) + undefined_traits[key] = Undefined + base.trait_set(trait_change_notify=False, **undefined_traits) + # access each trait + for key in names: + _ = getattr(base, key) + return base + + +def _get_head_bucket(s3_resource, bucket_name): + """ Try to get the header info of a bucket, in order to + check if it exists and its permissions + """ + + import botocore + + # Try fetch the bucket with the name argument + try: + s3_resource.meta.client.head_bucket(Bucket=bucket_name) + except botocore.exceptions.ClientError as exc: + error_code = int(exc.response['Error']['Code']) + if error_code == 403: + err_msg = 'Access to bucket: %s is denied; check credentials'\ + % bucket_name + raise Exception(err_msg) + elif error_code == 404: + err_msg = 'Bucket: %s does not exist; check spelling and try '\ + 'again' % bucket_name + raise Exception(err_msg) + else: + err_msg = 'Unable to connect to bucket: %s. Error message:\n%s'\ + % (bucket_name, exc) + except Exception as exc: + err_msg = 'Unable to connect to bucket: %s. 
Error message:\n%s'\
+                  % (bucket_name, exc)
+        raise Exception(err_msg)
+
+
+class IOBase(BaseInterface):
+    def _run_interface(self, runtime):
+        return runtime
+
+    def _list_outputs(self):
+        raise NotImplementedError
+
+    def _outputs(self):
+        return self._add_output_traits(super(IOBase, self)._outputs())
+
+    def _add_output_traits(self, base):
+        return base
+
+
+# Class to track percentage of S3 file upload
+class ProgressPercentage(object):
+    '''
+    Callable class instance (via __call__ method) that displays
+    upload percentage of a file to S3
+    '''
+
+    def __init__(self, filename):
+        '''
+        '''
+
+        # Import packages
+        import threading
+
+        # Initialize data attributes
+        self._filename = filename
+        self._size = float(os.path.getsize(filename))
+        self._seen_so_far = 0
+        self._lock = threading.Lock()
+
+    def __call__(self, bytes_amount):
+        '''
+        '''
+
+        # Import packages
+        import sys
+
+        # With the lock on, print upload status
+        with self._lock:
+            self._seen_so_far += bytes_amount
+            if self._size != 0:
+                percentage = (self._seen_so_far / self._size) * 100
+            else:
+                percentage = 0
+            progress_str = '%d / %d (%.2f%%)\r'\
+                           % (self._seen_so_far, self._size, percentage)
+
+            # Write to stdout
+            sys.stdout.write(progress_str)
+            sys.stdout.flush()
+
+
+# DataSink inputs
+class DataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
+    '''
+    '''
+
+    # Init inputspec data attributes
+    base_directory = Directory(
+        desc='Path to the base directory for storing data.')
+    container = Str(
+        desc='Folder within base directory in which to store output')
+    parameterization = traits.Bool(
+        True, usedefault=True, desc='store output in parametrized structure')
+    strip_dir = Directory(desc='path to strip out of filename')
+    substitutions = InputMultiPath(
+        traits.Tuple(Str, Str),
+        desc=('List of 2-tuples reflecting string '
+              'to substitute and string to replace '
+              'it with'))
+    regexp_substitutions = \
+        InputMultiPath(traits.Tuple(Str, Str),
+                       desc=('List of 2-tuples reflecting a pair of a '
+                             'Python regexp pattern and a replacement '
+                             'string. Invoked after string `substitutions`'))
+
+    _outputs = traits.Dict(Str, value={}, usedefault=True)
+    remove_dest_dir = traits.Bool(
+        False, usedefault=True, desc='remove dest directory when copying dirs')
+
+    # AWS S3 data attributes
+    creds_path = Str(desc='Filepath to AWS credentials file for S3 bucket '
+                     'access; if not specified, the credentials will '
+                     'be taken from the AWS_ACCESS_KEY_ID and '
+                     'AWS_SECRET_ACCESS_KEY environment variables')
+    encrypt_bucket_keys = traits.Bool(desc='Flag indicating whether to use S3 '
+                                      'server-side AES-256 encryption')
+    # Set this if user wishes to override the bucket with their own
+    bucket = traits.Any(desc='Boto3 S3 bucket for manual override of bucket')
+    # Set this if user wishes to have local copy of files as well
+    local_copy = Str(desc='Copy files locally as well as to S3 bucket')
+
+    # Set call-able inputs attributes
+    def __setattr__(self, key, value):
+
+        if key not in self.copyable_trait_names():
+            if not isdefined(value):
+                super(DataSinkInputSpec, self).__setattr__(key, value)
+            self._outputs[key] = value
+        else:
+            if key in self._outputs:
+                self._outputs[key] = value
+            super(DataSinkInputSpec, self).__setattr__(key, value)
+
+
+# DataSink outputs
+class DataSinkOutputSpec(TraitedSpec):
+
+    # Init out file
+    out_file = traits.Any(desc='datasink output')
+
+
+# Custom DataSink class
+class DataSink(IOBase):
+    """ Generic datasink module to store structured outputs
+
+    Primarily for use within a workflow. This interface allows arbitrary
+    creation of input attributes. The names of these attributes define the
+    directory structure to create for storage of the files or directories.
+
+    The attributes take the following form:
+
+    string[[.[@]]string[[.[@]]string]] ...
+
+    where parts between [] are optional.
+
+    An attribute such as contrasts.@con will create a 'contrasts' directory
+    to store the results linked to the attribute. If the @ is left out, such
+    as in 'contrasts.con', a subdirectory 'con' will be created under
+    'contrasts'.
+
+    The general form of the output is::
+
+        'base_directory/container/parameterization/destloc/filename'
+
+    destloc = string[[.[@]]string[[.[@]]string]] and
+    filename comes from the input to the connect statement.
+
+    .. warning::
+
+        This is not a thread-safe node because it can write to a common
+        shared location. It will not complain when it overwrites a file.
+
+    .. note::
+
+        If both substitutions and regexp_substitutions are used, then
+        substitutions are applied first followed by regexp_substitutions.
+
+        This interface **cannot** be used in a MapNode as the inputs are
+        defined only when the connect statement is executed.
+
+    Examples
+    --------
+
+    >>> ds = DataSink()
+    >>> ds.inputs.base_directory = 'results_dir'
+    >>> ds.inputs.container = 'subject'
+    >>> ds.inputs.structural = 'structural.nii'
+    >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii'])
+    >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii'])
+    >>> ds.run() # doctest: +SKIP
+
+    To use DataSink in a MapNode, its inputs have to be defined at the
+    time the interface is created.
+
+    >>> ds = DataSink(infields=['contrasts.@con'])
+    >>> ds.inputs.base_directory = 'results_dir'
+    >>> ds.inputs.container = 'subject'
+    >>> ds.inputs.structural = 'structural.nii'
+    >>> setattr(ds.inputs, 'contrasts.@con', ['cont1.nii', 'cont2.nii'])
+    >>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii'])
+    >>> ds.run() # doctest: +SKIP
+
+    """
+
+    # Give obj .inputs and .outputs
+    input_spec = DataSinkInputSpec
+    output_spec = DataSinkOutputSpec
+
+    # Initialization method to set up datasink
+    def __init__(self, infields=None, force_run=True, **kwargs):
+        """
+        Parameters
+        ----------
+        infields : list of str
+            Indicates the input fields to be dynamically created
+        """
+
+        super(DataSink, self).__init__(**kwargs)
+        undefined_traits = {}
+        # used for mandatory inputs check
+        self._infields = infields
+        if infields:
+            for key in infields:
+                self.inputs.add_trait(key, traits.Any)
+                self.inputs._outputs[key] = Undefined
+                undefined_traits[key] = Undefined
+        self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
+        if force_run:
+            self._always_run = True
+
+    # Get destination paths
+    def _get_dst(self, src):
+        # If path is directory with trailing os.path.sep,
+        # then remove that for a more robust behavior
+        src = src.rstrip(os.path.sep)
+        path, fname = os.path.split(src)
+        if self.inputs.parameterization:
+            dst = path
+            if isdefined(self.inputs.strip_dir):
+                dst = dst.replace(self.inputs.strip_dir, '')
+            folders = [
+                folder for folder in dst.split(os.path.sep)
+                if folder.startswith('_')
+            ]
+            dst = os.path.sep.join(folders)
+            if fname:
+                dst = os.path.join(dst, fname)
+        else:
+            if fname:
+                dst = fname
+            else:
+                dst = path.split(os.path.sep)[-1]
+        if dst[0] == os.path.sep:
+            dst = dst[1:]
+        return dst
+
+    # Substitute paths in substitutions dictionary parameter
+    def _substitute(self, pathstr):
+        pathstr_ = pathstr
+        if isdefined(self.inputs.substitutions):
+            for key, val in self.inputs.substitutions:
+                oldpathstr = pathstr
+                pathstr = pathstr.replace(key, val)
+                if pathstr != oldpathstr:
+                    iflogger.debug('sub.str: %s -> %s using %r -> %r',
+                                   oldpathstr, pathstr, key, val)
+        if isdefined(self.inputs.regexp_substitutions):
+            for key, val in self.inputs.regexp_substitutions:
+                oldpathstr = pathstr
+                pathstr, _ = re.subn(key, val, pathstr)
+                if pathstr != oldpathstr:
+                    iflogger.debug('sub.regexp: %s -> %s using %r -> %r',
+                                   oldpathstr, pathstr, key, val)
+        if pathstr_ != pathstr:
+            iflogger.info('sub: %s -> %s', pathstr_, pathstr)
+        return pathstr
+
+    # Check for s3 in base directory
+    def _check_s3_base_dir(self):
+        '''
+        Method to see if the datasink's base directory specifies an
+        S3 bucket path; if it does, it parses the path for the bucket
+        name in the form 's3://bucket_name/...'
and returns it + + Parameters + ---------- + + Returns + ------- + s3_flag : boolean + flag indicating whether the base_directory contained an + S3 bucket path + bucket_name : string + name of the S3 bucket to connect to; if the base directory + is not a valid S3 path, defaults to '' + ''' + + # Init variables + s3_str = 's3://' + bucket_name = '' + base_directory = self.inputs.base_directory + + if not isdefined(base_directory): + s3_flag = False + return s3_flag, bucket_name + + # Explicitly lower-case the "s3" + if base_directory.lower().startswith(s3_str): + base_dir_sp = base_directory.split('/') + base_dir_sp[0] = base_dir_sp[0].lower() + base_directory = '/'.join(base_dir_sp) + + # Check if 's3://' in base dir + if base_directory.startswith(s3_str): + # Expects bucket name to be 's3://bucket_name/base_dir/..' + bucket_name = base_directory.split(s3_str)[1].split('/')[0] + s3_flag = True + # Otherwise it's just a normal datasink + else: + s3_flag = False + + # Return s3_flag + return s3_flag, bucket_name + + # Function to return AWS secure environment variables + def _return_aws_keys(self): + ''' + Method to return AWS access key id and secret access key using + credentials found in a local file. + + Parameters + ---------- + self : nipype.interfaces.io.DataSink + self for instance method + + Returns + ------- + aws_access_key_id : string + string of the AWS access key ID + aws_secret_access_key : string + string of the AWS secret access key + ''' + + # Import packages + import os + + # Init variables + creds_path = self.inputs.creds_path + + # Check if creds exist + if creds_path and os.path.exists(creds_path): + with open(creds_path, 'r') as creds_in: + # Grab csv rows + row1 = creds_in.readline() + row2 = creds_in.readline() + + # Are they root or user keys + if 'User Name' in row1: + # And split out for keys + aws_access_key_id = row2.split(',')[1] + aws_secret_access_key = row2.split(',')[2] + elif 'AWSAccessKeyId' in row1: + # And split out for keys + aws_access_key_id = row1.split('=')[1] + aws_secret_access_key = row2.split('=')[1] + else: + err_msg = 'Credentials file not recognized, check file is correct' + raise Exception(err_msg) + + # Strip any carriage return/line feeds + aws_access_key_id = aws_access_key_id.replace('\r', '').replace( + '\n', '') + aws_secret_access_key = aws_secret_access_key.replace('\r', + '').replace( + '\n', '') + else: + aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID') + aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY') + + # Return keys + return aws_access_key_id, aws_secret_access_key + + # Fetch bucket object + def _fetch_bucket(self, bucket_name): + ''' + Method to return a bucket object which can be used to interact + with an AWS S3 bucket using credentials found in a local file. + + Parameters + ---------- + self : nipype.interfaces.io.DataSink + self for instance method + bucket_name : string + string corresponding to the name of the bucket on S3 + + Returns + ------- + bucket : boto3.resources.factory.s3.Bucket + boto3 s3 Bucket object which is used to interact with files + in an S3 bucket on AWS + ''' + + # Import packages + import logging + + try: + import boto3 + import botocore + except ImportError as exc: + err_msg = 'Boto3 package is not installed - install boto3 and '\ + 'try again.' 
+ raise Exception(err_msg) + + # Init variables + creds_path = self.inputs.creds_path + + # Get AWS credentials + try: + aws_access_key_id, aws_secret_access_key = \ + self._return_aws_keys() + except Exception as exc: + err_msg = 'There was a problem extracting the AWS credentials '\ + 'from the credentials file provided: %s. Error:\n%s'\ + % (creds_path, exc) + raise Exception(err_msg) + + # Try and get AWS credentials if a creds_path is specified + if aws_access_key_id and aws_secret_access_key: + # Init connection + iflogger.info('Connecting to S3 bucket: %s with credentials...', + bucket_name) + # Use individual session for each instance of DataSink + # Better when datasinks are being used in multi-threading, see: + # http://boto3.readthedocs.org/en/latest/guide/resources.html#multithreading + session = boto3.session.Session( + aws_access_key_id=aws_access_key_id, + aws_secret_access_key=aws_secret_access_key) + + else: + iflogger.info('Connecting to S3 bucket: %s with IAM role...', + bucket_name) + + # Lean on AWS environment / IAM role authentication and authorization + session = boto3.session.Session() + + s3_resource = session.resource('s3', use_ssl=True) + + # And try fetch the bucket with the name argument + try: + _get_head_bucket(s3_resource, bucket_name) + except Exception as exc: + + # Try to connect anonymously + s3_resource.meta.client.meta.events.register( + 'choose-signer.s3.*', botocore.handlers.disable_signing) + + iflogger.info('Connecting to AWS: %s anonymously...', bucket_name) + _get_head_bucket(s3_resource, bucket_name) + + # Explicitly declare a secure SSL connection for bucket object + bucket = s3_resource.Bucket(bucket_name) + + # Return the bucket + return bucket + + + # Send up to S3 method + def _upload_to_s3(self, bucket, src, dst): + ''' + Method to upload outputs to S3 bucket instead of on local disk + ''' + + # Import packages + import hashlib + import logging + import os + + from botocore.exceptions import ClientError + + # Init variables + s3_str = 's3://' + s3_prefix = s3_str + bucket.name + + # Explicitly lower-case the "s3" + if dst[:len(s3_str)].lower() == s3_str: + dst = s3_str + dst[len(s3_str):] + + # If src is a directory, collect files (this assumes dst is a dir too) + if os.path.isdir(src): + src_files = [] + for root, dirs, files in os.walk(src): + src_files.extend([os.path.join(root, fil) for fil in files]) + # Make the dst files have the dst folder as base dir + dst_files = [ + os.path.join(dst, + src_f.split(src)[1]) for src_f in src_files + ] + else: + src_files = [src] + dst_files = [dst] + + # Iterate over src and copy to dst + for src_idx, src_f in enumerate(src_files): + # Get destination filename/keyname + dst_f = dst_files[src_idx] + dst_k = dst_f.replace(s3_prefix, '').lstrip('/') + + # See if same file is already up there + try: + dst_obj = bucket.Object(key=dst_k) + dst_md5 = dst_obj.e_tag.strip('"') + + # See if same file is already there + src_read = open(src_f, 'rb').read() + src_md5 = hashlib.md5(src_read).hexdigest() + # Move to next loop iteration + if dst_md5 == src_md5: + iflogger.info('File %s already exists on S3, skipping...', + dst_f) + continue + else: + iflogger.info('Overwriting previous S3 file...') + + except ClientError: + iflogger.info('New file to S3') + + # Copy file up to S3 (either encrypted or not) + iflogger.info('Uploading %s to S3 bucket, %s, as %s...', src_f, + bucket.name, dst_f) + if self.inputs.encrypt_bucket_keys: + extra_args = {'ServerSideEncryption': 'AES256'} + else: + extra_args = {} + 
bucket.upload_file( + src_f, + dst_k, + ExtraArgs=extra_args, + Callback=ProgressPercentage(src_f)) + + # List outputs, main run routine + def _list_outputs(self): + """Execute this module. + """ + + # Init variables + outputs = self.output_spec().get() + out_files = [] + # Use hardlink + use_hardlink = str2bool( + config.get('execution', 'try_hard_link_datasink')) + + # Set local output directory if specified + if isdefined(self.inputs.local_copy): + outdir = self.inputs.local_copy + else: + outdir = self.inputs.base_directory + # If base directory isn't given, assume current directory + if not isdefined(outdir): + outdir = '.' + + # Check if base directory reflects S3 bucket upload + s3_flag, bucket_name = self._check_s3_base_dir() + if s3_flag: + s3dir = self.inputs.base_directory + # If user overrides bucket object, use that + if self.inputs.bucket: + bucket = self.inputs.bucket + # Otherwise fetch bucket object using name + else: + try: + bucket = self._fetch_bucket(bucket_name) + # If encountering an exception during bucket access, set output + # base directory to a local folder + except Exception as exc: + s3dir = '' + if not isdefined(self.inputs.local_copy): + local_out_exception = os.path.join( + os.path.expanduser('~'), + 's3_datasink_' + bucket_name) + outdir = local_out_exception + # Log local copying directory + iflogger.info( + 'Access to S3 failed! Storing outputs locally at: ' + '%s\nError: %s', outdir, exc) + else: + s3dir = '' + + # If container input is given, append that to outdir + if isdefined(self.inputs.container): + outdir = os.path.join(outdir, self.inputs.container) + s3dir = os.path.join(s3dir, self.inputs.container) + + # If sinking to local folder + if outdir != s3dir: + outdir = os.path.abspath(outdir) + # Create the directory if it doesn't exist + if not os.path.exists(outdir): + try: + os.makedirs(outdir) + except OSError as inst: + if 'File exists' in inst.strerror: + pass + else: + raise (inst) + + # Iterate through outputs attributes {key : path(s)} + for key, files in list(self.inputs._outputs.items()): + if not isdefined(files): + continue + iflogger.debug("key: %s files: %s", key, str(files)) + files = ensure_list(files) + tempoutdir = outdir + if s3_flag: + s3tempoutdir = s3dir + for d in key.split('.'): + if d[0] == '@': + continue + tempoutdir = os.path.join(tempoutdir, d) + if s3_flag: + s3tempoutdir = os.path.join(s3tempoutdir, d) + + # flattening list + if isinstance(files, list): + if isinstance(files[0], list): + files = [item for sublist in files for item in sublist] + + # Iterate through passed-in source files + for src in ensure_list(files): + # Format src and dst files + src = os.path.abspath(src) + if not os.path.isfile(src): + src = os.path.join(src, '') + dst = self._get_dst(src) + if s3_flag: + s3dst = os.path.join(s3tempoutdir, dst) + s3dst = self._substitute(s3dst) + dst = os.path.join(tempoutdir, dst) + dst = self._substitute(dst) + path, _ = os.path.split(dst) + + # If we're uploading to S3 + if s3_flag: + self._upload_to_s3(bucket, src, s3dst) + out_files.append(s3dst) + # Otherwise, copy locally src -> dst + if not s3_flag or isdefined(self.inputs.local_copy): + # Create output directory if it doesnt exist + if not os.path.exists(path): + try: + os.makedirs(path) + except OSError as inst: + if 'File exists' in inst.strerror: + pass + else: + raise (inst) + # If src is a file, copy it to dst + if os.path.isfile(src): + iflogger.debug('copyfile: %s %s', src, dst) + copyfile( + src, + dst, + copy=True, + hashmethod='content', + 
use_hardlink=use_hardlink) + out_files.append(dst) + # If src is a directory, copy entire contents to dst dir + elif os.path.isdir(src): + if os.path.exists(dst) and self.inputs.remove_dest_dir: + iflogger.debug('removing: %s', dst) + shutil.rmtree(dst) + iflogger.debug('copydir: %s %s', src, dst) + copytree(src, dst) + out_files.append(dst) + + # Return outputs dictionary + outputs['out_file'] = out_files + + return outputs + + +class S3DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + anon = traits.Bool( + False, + usedefault=True, + desc= + 'Use anonymous connection to s3. If this is set to True, boto may print' + + + ' a urlopen error, but this does not prevent data from being downloaded.' + ) + region = Str('us-east-1', usedefault=True, desc='Region of s3 bucket') + bucket = Str( + mandatory=True, desc='Amazon S3 bucket where your data is stored') + bucket_path = Str( + '', + usedefault=True, + desc='Location within your bucket for subject data.') + local_directory = Directory( + exists=True, + desc='Path to the local directory for subject data to be downloaded ' + 'and accessed. Should be on HDFS for Spark jobs.') + raise_on_empty = traits.Bool( + True, + usedefault=True, + desc='Generate exception if list is empty for a given field') + sort_filelist = traits.Bool( + mandatory=True, desc='Sort the filelist that matches the template') + template = Str( + mandatory=True, + desc='Layout used to get files. Relative to bucket_path if defined.' + 'Uses regex rather than glob style formatting.') + template_args = traits.Dict( + key_trait=Str, + value_trait=traits.List(traits.List), + desc='Information to plug into template') + + +class S3DataGrabber(IOBase): + """ Generic datagrabber module that wraps around glob in an + intelligent way for neuroimaging tasks to grab files from + Amazon S3 + + Works exactly like DataGrabber, except, you must specify an + S3 "bucket" and "bucket_path" to search for your data and a + "local_directory" to store the data. "local_directory" + should be a location on HDFS for Spark jobs. Additionally, + "template" uses regex style formatting, rather than the + glob-style found in the original DataGrabber. 
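+
+    Examples
+    --------
+
+    A minimal configuration sketch (the bucket name, template and paths
+    here are hypothetical)::
+
+        s3grab = S3DataGrabber(infields=['subject_id'], outfields=['func'])
+        s3grab.inputs.bucket = 'my-bucket'
+        s3grab.inputs.bucket_path = 'data/'
+        s3grab.inputs.local_directory = '/tmp/data'
+        s3grab.inputs.sort_filelist = True
+        s3grab.inputs.template = 'sub-%s/func/.*_bold.nii.gz'
+        s3grab.inputs.template_args = {'func': [['subject_id']]}
+        s3grab.inputs.subject_id = '01'
+        res = s3grab.run()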
+ + """ + input_spec = S3DataGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def __init__(self, infields=None, outfields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + + outfields: list of str + Indicates output fields to be dynamically created + + See class examples for usage + + """ + if not outfields: + outfields = ['outfiles'] + super(S3DataGrabber, self).__init__(**kwargs) + undefined_traits = {} + # used for mandatory inputs check + self._infields = infields + self._outfields = outfields + if infields: + for key in infields: + self.inputs.add_trait(key, traits.Any) + undefined_traits[key] = Undefined + # add ability to insert field specific templates + self.inputs.add_trait('field_template', + traits.Dict( + traits.Enum(outfields), + desc="arguments that fit into template")) + undefined_traits['field_template'] = Undefined + if not isdefined(self.inputs.template_args): + self.inputs.template_args = {} + for key in outfields: + if key not in self.inputs.template_args: + if infields: + self.inputs.template_args[key] = [infields] + else: + self.inputs.template_args[key] = [] + + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + def _add_output_traits(self, base): + """ + S3 specific: Downloads relevant files to a local folder specified + + Using traits.Any instead out OutputMultiPath till add_trait bug + is fixed. + """ + return add_traits(base, list(self.inputs.template_args.keys())) + + def _list_outputs(self): + # infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically + # hence manual check + if self._infields: + for key in self._infields: + value = getattr(self.inputs, key) + if not isdefined(value): + msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ + (self.__class__.__name__, key) + raise ValueError(msg) + + outputs = {} + # get list of all files in s3 bucket + conn = boto.connect_s3(anon=self.inputs.anon) + bkt = conn.get_bucket(self.inputs.bucket) + bkt_files = list( + k.key for k in bkt.list(prefix=self.inputs.bucket_path)) + + # keys are outfields, args are template args for the outfield + for key, args in list(self.inputs.template_args.items()): + outputs[key] = [] + template = self.inputs.template + if hasattr(self.inputs, 'field_template') and \ + isdefined(self.inputs.field_template) and \ + key in self.inputs.field_template: + template = self.inputs.field_template[ + key] # template override for multiple outfields + if isdefined(self.inputs.bucket_path): + template = os.path.join(self.inputs.bucket_path, template) + if not args: + filelist = [] + for fname in bkt_files: + if re.match(template, fname): + filelist.append(fname) + if len(filelist) == 0: + msg = 'Output key: %s Template: %s returned no files' % ( + key, template) + if self.inputs.raise_on_empty: + raise IOError(msg) + else: + warn(msg) + else: + if self.inputs.sort_filelist: + filelist = human_order_sorted(filelist) + outputs[key] = simplify_list(filelist) + for argnum, arglist in enumerate(args): + maxlen = 1 + for arg in arglist: + if isinstance(arg, + (str, bytes)) and hasattr(self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + if (maxlen > 1) and (len(arg) != maxlen): + raise ValueError( + 'incompatible number of arguments for %s' % + key) + if len(arg) > maxlen: + maxlen = len(arg) + outfiles = [] + for i in range(maxlen): + argtuple = [] + for arg in arglist: + if 
isinstance(arg, (str, bytes)) and hasattr( + self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + argtuple.append(arg[i]) + else: + argtuple.append(arg) + filledtemplate = template + if argtuple: + try: + filledtemplate = template % tuple(argtuple) + except TypeError as e: + raise TypeError( + e.message + + ": Template %s failed to convert with args %s" + % (template, str(tuple(argtuple)))) + outfiles = [] + for fname in bkt_files: + if re.match(filledtemplate, fname): + outfiles.append(fname) + if len(outfiles) == 0: + msg = 'Output key: %s Template: %s returned no files' % ( + key, filledtemplate) + if self.inputs.raise_on_empty: + raise IOError(msg) + else: + warn(msg) + outputs[key].append(None) + else: + if self.inputs.sort_filelist: + outfiles = human_order_sorted(outfiles) + outputs[key].append(simplify_list(outfiles)) + if any([val is None for val in outputs[key]]): + outputs[key] = [] + if len(outputs[key]) == 0: + outputs[key] = None + elif len(outputs[key]) == 1: + outputs[key] = outputs[key][0] + # Outputs are currently stored as locations on S3. + # We must convert to the local location specified + # and download the files. + for key, val in outputs.items(): + # This will basically be either list-like or string-like: + # if it's an instance of a list, we'll iterate through it. + # If it isn't, it's string-like (string, unicode), we + # convert that value directly. + if isinstance(val, (list, tuple, set)): + for i, path in enumerate(val): + outputs[key][i] = self.s3tolocal(path, bkt) + else: + outputs[key] = self.s3tolocal(val, bkt) + + return outputs + + # Takes an s3 address and downloads the file to a local + # directory, returning the local path. + def s3tolocal(self, s3path, bkt): + # path formatting + if not os.path.split(self.inputs.local_directory)[1] == '': + self.inputs.local_directory += '/' + if not os.path.split(self.inputs.bucket_path)[1] == '': + self.inputs.bucket_path += '/' + if self.inputs.template[0] == '/': + self.inputs.template = self.inputs.template[1:] + + localpath = s3path.replace(self.inputs.bucket_path, + self.inputs.local_directory) + localdir = os.path.split(localpath)[0] + if not os.path.exists(localdir): + os.makedirs(localdir) + k = boto.s3.key.Key(bkt) + k.key = s3path + k.get_contents_to_filename(localpath) + return localpath + + +class DataGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + base_directory = Directory( + exists=True, + desc='Path to the base directory consisting of subject data.') + raise_on_empty = traits.Bool( + True, + usedefault=True, + desc='Generate exception if list is empty for a given field') + drop_blank_outputs = traits.Bool( + False, usedefault=True, + desc="Remove ``None`` entries from output lists" + ) + sort_filelist = traits.Bool( + mandatory=True, desc='Sort the filelist that matches the template') + template = Str( + mandatory=True, + desc='Layout used to get files. relative to base directory if defined') + template_args = traits.Dict( + key_trait=Str, + value_trait=traits.List(traits.List), + desc='Information to plug into template') + + +class DataGrabber(IOBase): + """ Generic datagrabber module that wraps around glob in an + intelligent way for neuroimaging tasks to grab files + + + .. 
attention:: + + Doesn't support directories currently + + Examples + -------- + + >>> from nipype.interfaces.io import DataGrabber + + Pick all files from current directory + + >>> dg = DataGrabber() + >>> dg.inputs.template = '*' + + Pick file foo/foo.nii from current directory + + >>> dg.inputs.template = '%s/%s.dcm' + >>> dg.inputs.template_args['outfiles']=[['dicomdir','123456-1-1.dcm']] + + Same thing but with dynamically created fields + + >>> dg = DataGrabber(infields=['arg1','arg2']) + >>> dg.inputs.template = '%s/%s.nii' + >>> dg.inputs.arg1 = 'foo' + >>> dg.inputs.arg2 = 'foo' + + however this latter form can be used with iterables and iterfield in a + pipeline. + + Dynamically created, user-defined input and output fields + + >>> dg = DataGrabber(infields=['sid'], outfields=['func','struct','ref']) + >>> dg.inputs.base_directory = '.' + >>> dg.inputs.template = '%s/%s.nii' + >>> dg.inputs.template_args['func'] = [['sid',['f3','f5']]] + >>> dg.inputs.template_args['struct'] = [['sid',['struct']]] + >>> dg.inputs.template_args['ref'] = [['sid','ref']] + >>> dg.inputs.sid = 's1' + + Change the template only for output field struct. The rest use the + general template + + >>> dg.inputs.field_template = dict(struct='%s/struct.nii') + >>> dg.inputs.template_args['struct'] = [['sid']] + + """ + input_spec = DataGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def __init__(self, infields=None, outfields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + + outfields: list of str + Indicates output fields to be dynamically created + + See class examples for usage + + """ + if not outfields: + outfields = ['outfiles'] + super(DataGrabber, self).__init__(**kwargs) + undefined_traits = {} + # used for mandatory inputs check + self._infields = infields + self._outfields = outfields + if infields: + for key in infields: + self.inputs.add_trait(key, traits.Any) + undefined_traits[key] = Undefined + # add ability to insert field specific templates + self.inputs.add_trait('field_template', + traits.Dict( + traits.Enum(outfields), + desc="arguments that fit into template")) + undefined_traits['field_template'] = Undefined + if not isdefined(self.inputs.template_args): + self.inputs.template_args = {} + for key in outfields: + if key not in self.inputs.template_args: + if infields: + self.inputs.template_args[key] = [infields] + else: + self.inputs.template_args[key] = [] + + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + def _add_output_traits(self, base): + """ + + Using traits.Any instead out OutputMultiPath till add_trait bug + is fixed. 
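+
+        For example, with ``template_args = {'func': [...], 'struct': [...]}``
+        (hypothetical values), the interface exposes ``func`` and ``struct``
+        as output fields.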
+        """
+        return add_traits(base, list(self.inputs.template_args.keys()))
+
+    def _list_outputs(self):
+        # infields are mandatory; since the 'mandatory' flag cannot be set
+        # dynamically, they are checked manually here
+        if self._infields:
+            for key in self._infields:
+                value = getattr(self.inputs, key)
+                if not isdefined(value):
+                    msg = ("%s requires a value for input '%s' because it "
+                           "was listed in 'infields'" %
+                           (self.__class__.__name__, key))
+                    raise ValueError(msg)
+
+        outputs = {}
+        for key, args in list(self.inputs.template_args.items()):
+            outputs[key] = []
+            template = self.inputs.template
+            if hasattr(self.inputs, 'field_template') and \
+                    isdefined(self.inputs.field_template) and \
+                    key in self.inputs.field_template:
+                template = self.inputs.field_template[key]
+            if isdefined(self.inputs.base_directory):
+                template = os.path.join(
+                    os.path.abspath(self.inputs.base_directory), template)
+            else:
+                template = os.path.abspath(template)
+            if not args:
+                filelist = glob.glob(template)
+                if len(filelist) == 0:
+                    msg = 'Output key: %s Template: %s returned no files' % (
+                        key, template)
+                    if self.inputs.raise_on_empty:
+                        raise IOError(msg)
+                    else:
+                        warn(msg)
+                else:
+                    if self.inputs.sort_filelist:
+                        filelist = human_order_sorted(filelist)
+                    outputs[key] = simplify_list(filelist)
+            for argnum, arglist in enumerate(args):
+                maxlen = 1
+                for arg in arglist:
+                    if isinstance(arg,
+                                  (str, bytes)) and hasattr(self.inputs, arg):
+                        arg = getattr(self.inputs, arg)
+                    if isinstance(arg, list):
+                        if (maxlen > 1) and (len(arg) != maxlen):
+                            raise ValueError(
+                                'incompatible number of arguments for %s' %
+                                key)
+                        if len(arg) > maxlen:
+                            maxlen = len(arg)
+                outfiles = []
+                for i in range(maxlen):
+                    argtuple = []
+                    for arg in arglist:
+                        if isinstance(arg, (str, bytes)) and hasattr(
+                                self.inputs, arg):
+                            arg = getattr(self.inputs, arg)
+                        if isinstance(arg, list):
+                            argtuple.append(arg[i])
+                        else:
+                            argtuple.append(arg)
+                    filledtemplate = template
+                    if argtuple:
+                        try:
+                            filledtemplate = template % tuple(argtuple)
+                        except TypeError as e:
+                            raise TypeError(
+                                str(e) +
+                                ": Template %s failed to convert with args %s"
+                                % (template, str(tuple(argtuple))))
+                    outfiles = glob.glob(filledtemplate)
+                    if len(outfiles) == 0:
+                        msg = 'Output key: %s Template: %s returned no files' % (
+                            key, filledtemplate)
+                        if self.inputs.raise_on_empty:
+                            raise IOError(msg)
+                        else:
+                            warn(msg)
+                        outputs[key].append(None)
+                    else:
+                        if self.inputs.sort_filelist:
+                            outfiles = human_order_sorted(outfiles)
+                        outputs[key].append(simplify_list(outfiles))
+            if self.inputs.drop_blank_outputs:
+                outputs[key] = [x for x in outputs[key] if x is not None]
+            else:
+                if any([val is None for val in outputs[key]]):
+                    outputs[key] = []
+            if len(outputs[key]) == 0:
+                outputs[key] = None
+            elif len(outputs[key]) == 1:
+                outputs[key] = outputs[key][0]
+        return outputs
+
+
+class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
+
+    base_directory = Directory(
+        exists=True, desc="Root path common to templates.")
+    sort_filelist = traits.Bool(
+        True,
+        usedefault=True,
+        desc="When matching multiple files, return them"
+        " in sorted order.")
+    raise_on_empty = traits.Bool(
+        True,
+        usedefault=True,
+        desc="Raise an exception if a template pattern "
+        "matches no files.")
+    force_lists = traits.Either(
+        traits.Bool(),
+        traits.List(Str()),
+        default=False,
+        usedefault=True,
+        desc=("Whether to return outputs as a list even"
+              " when only one file matches the template. "
+              "Either a boolean that applies to all output "
+              "fields or a list of output field names to "
+              "coerce to a list"))
+
+
+class SelectFiles(IOBase):
+    """Flexibly collect data from disk to feed into workflows.
+
+    This interface uses the {}-based string formatting syntax to plug
+    values (possibly known only at workflow execution time) into string
+    templates and collect files from persistent storage. These templates
+    can also be combined with glob wildcards. The field names in the
+    formatting template (i.e. the terms in braces) will become input
+    fields on the interface, and the keys in the templates dictionary
+    will form the output fields.
+
+    Examples
+    --------
+
+    >>> import pprint
+    >>> from nipype import SelectFiles, Node
+    >>> templates={"T1": "{subject_id}/struct/T1.nii",
+    ...            "epi": "{subject_id}/func/f[0,1].nii"}
+    >>> dg = Node(SelectFiles(templates), "selectfiles")
+    >>> dg.inputs.subject_id = "subj1"
+    >>> pprint.pprint(dg.outputs.get())
+    {'T1': <undefined>, 'epi': <undefined>}
+
+    The same thing with dynamic grabbing of specific files:
+
+    >>> templates["epi"] = "{subject_id}/func/f{run!s}.nii"
+    >>> dg = Node(SelectFiles(templates), "selectfiles")
+    >>> dg.inputs.subject_id = "subj1"
+    >>> dg.inputs.run = [2, 4]
+
+    """
+    input_spec = SelectFilesInputSpec
+    output_spec = DynamicTraitedSpec
+    _always_run = True
+
+    def __init__(self, templates, **kwargs):
+        """Create an instance with specific input fields.
+
+        Parameters
+        ----------
+        templates : dictionary
+            Mapping from string keys to string template values.
+            The keys become output fields on the interface.
+            The templates should use {}-formatting syntax, where
+            the names in curly braces become input fields on the interface.
+            Format strings can also use glob wildcards to match multiple
+            files. At runtime, the values of the interface inputs will be
+            plugged into these templates, and the resulting strings will be
+            used to select files.
+
+        """
+        super(SelectFiles, self).__init__(**kwargs)
+
+        # Infer the infields and outfields from the template
+        infields = []
+        for name, template in list(templates.items()):
+            for _, field_name, _, _ in string.Formatter().parse(template):
+                if field_name is not None:
+                    field_name = re.match(r"\w+", field_name).group()
+                    if field_name not in infields:
+                        infields.append(field_name)
+
+        self._infields = infields
+        self._outfields = list(templates)
+        self._templates = templates
+
+        # Add the dynamic input fields
+        undefined_traits = {}
+        for field in infields:
+            self.inputs.add_trait(field, traits.Any)
+            undefined_traits[field] = Undefined
+        self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
+
+    def _add_output_traits(self, base):
+        """Add the dynamic output fields"""
+        return add_traits(base, list(self._templates.keys()))
+
+    def _list_outputs(self):
+        """Find the files and expose them as interface outputs."""
+        outputs = {}
+        info = dict([(k, v) for k, v in list(self.inputs.__dict__.items())
+                     if k in self._infields])
+
+        force_lists = self.inputs.force_lists
+        if isinstance(force_lists, bool):
+            force_lists = self._outfields if force_lists else []
+        bad_fields = set(force_lists) - set(self._outfields)
+        if bad_fields:
+            bad_fields = sorted(bad_fields)
+            plural = "s" if len(bad_fields) > 1 else ""
+            verb = "were" if len(bad_fields) > 1 else "was"
+            msg = ("The field%s '%s' %s set in 'force_lists' and not in "
+                   "'templates'.") % (plural, ", ".join(bad_fields), verb)
+            raise ValueError(msg)
+
+        for field, template in list(self._templates.items()):
+
+            find_dirs = template[-1] == os.sep
+
+            # Build the full template path
+            if isdefined(self.inputs.base_directory):
+                template = op.abspath(
+                    op.join(self.inputs.base_directory, template))
+            else:
+                template = op.abspath(template)
+
+            # re-add separator if searching exclusively for directories
+            if find_dirs:
+                template += os.sep
+
+            # Fill in the template and glob for files
+            filled_template = template.format(**info)
+            filelist = glob.glob(filled_template)
+
+            # Handle the case where nothing matched
+            if not filelist:
+                msg = "No files were found matching %s template: %s" % (
+                    field, filled_template)
+                if self.inputs.raise_on_empty:
+                    raise IOError(msg)
+                else:
+                    warn(msg)
+
+            # Possibly sort the list
+            if self.inputs.sort_filelist:
+                filelist = human_order_sorted(filelist)
+
+            # Handle whether this must be a list or not
+            if field not in force_lists:
+                filelist = simplify_list(filelist)
+
+            outputs[field] = filelist
+
+        return outputs
+
+
+class DataFinderInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
+    root_paths = traits.Either(
+        traits.List(),
+        Str(),
+        mandatory=True,
+    )
+    match_regex = Str(
+        '(.+)',
+        usedefault=True,
+        desc=("Regular expression for matching paths."))
+    ignore_regexes = traits.List(
+        desc=("List of regular expressions; "
+              "if any match the path it will be "
+              "ignored."))
+    max_depth = traits.Int(desc="The maximum depth to search beneath "
+                           "the root_paths")
+    min_depth = traits.Int(desc="The minimum depth to search beneath "
+                           "the root paths")
+    unpack_single = traits.Bool(
+        False, usedefault=True, desc="Unpack single results from list")
+
+
+class DataFinder(IOBase):
+    """Search for paths that match a given regular expression. Allows a less
+    prescriptive approach to gathering input files compared to DataGrabber.
+    Will recursively search any subdirectories by default. This can be limited
+    with the min/max depth options.
+    Matched paths are available in the output 'out_paths'. Any named groups of
+    captured text from the regular expression are also available as outputs of
+    the same name.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.io import DataFinder
+    >>> df = DataFinder()
+    >>> df.inputs.root_paths = '.'
+    >>> df.inputs.match_regex = '.+/(?P<series_dir>.+(qT1|ep2d_fid_T1).+)/(?P<basename>.+)\.nii.gz'
+    >>> result = df.run() # doctest: +SKIP
+    >>> result.outputs.out_paths  # doctest: +SKIP
+    ['./027-ep2d_fid_T1_Gd4/acquisition.nii.gz',
+     './018-ep2d_fid_T1_Gd2/acquisition.nii.gz',
+     './016-ep2d_fid_T1_Gd1/acquisition.nii.gz',
+     './013-ep2d_fid_T1_pre/acquisition.nii.gz']
+    >>> result.outputs.series_dir  # doctest: +SKIP
+    ['027-ep2d_fid_T1_Gd4',
+     '018-ep2d_fid_T1_Gd2',
+     '016-ep2d_fid_T1_Gd1',
+     '013-ep2d_fid_T1_pre']
+    >>> result.outputs.basename  # doctest: +SKIP
+    ['acquisition',
+     'acquisition',
+     'acquisition',
+     'acquisition']
+
+    """
+
+    input_spec = DataFinderInputSpec
+    output_spec = DynamicTraitedSpec
+    _always_run = True
+
+    def _match_path(self, target_path):
+        # Check if we should ignore the path
+        for ignore_re in self.ignore_regexes:
+            if ignore_re.search(target_path):
+                return
+        # Check if we can match the path
+        match = self.match_regex.search(target_path)
+        if match is not None:
+            match_dict = match.groupdict()
+            if self.result is None:
+                self.result = {'out_paths': []}
+                for key in list(match_dict.keys()):
+                    self.result[key] = []
+            self.result['out_paths'].append(target_path)
+            for key, val in list(match_dict.items()):
+                self.result[key].append(val)
+
+    def _run_interface(self, runtime):
+        # Prepare some of the inputs
+        if isinstance(self.inputs.root_paths, (str, bytes)):
+            self.inputs.root_paths = [self.inputs.root_paths]
+        self.match_regex = re.compile(self.inputs.match_regex)
+        if self.inputs.max_depth is Undefined:
+            max_depth = None
+        else:
+            max_depth = self.inputs.max_depth
+        if self.inputs.min_depth is Undefined:
+            min_depth = 0
+        else:
+            min_depth = self.inputs.min_depth
+        if self.inputs.ignore_regexes is Undefined:
+            self.ignore_regexes = []
+        else:
+            self.ignore_regexes = \
+                [re.compile(regex)
+                 for regex in self.inputs.ignore_regexes]
+        self.result = None
+        for root_path in self.inputs.root_paths:
+            # Handle tilde/env variables and remove extra separators
+            root_path = os.path.normpath(
+                os.path.expandvars(os.path.expanduser(root_path)))
+            # Check if the root_path is a file
+            if os.path.isfile(root_path):
+                if min_depth == 0:
+                    self._match_path(root_path)
+                continue
+            # Walk through directory structure checking paths
+            for curr_dir, sub_dirs, files in os.walk(root_path):
+                # Determine the current depth from the root_path
+                curr_depth = (curr_dir.count(os.sep) - root_path.count(os.sep))
+                # If the max path depth has been reached, clear sub_dirs
+                # and files
+                if max_depth is not None and curr_depth >= max_depth:
+                    sub_dirs[:] = []
+                    files = []
+                # Test the path for the curr_dir and all files
+                if curr_depth >= min_depth:
+                    self._match_path(curr_dir)
+                if curr_depth >= (min_depth - 1):
+                    for infile in files:
+                        full_path = os.path.join(curr_dir, infile)
+                        self._match_path(full_path)
+        if (self.inputs.unpack_single and len(self.result['out_paths']) == 1):
+            for key, vals in list(self.result.items()):
+                self.result[key] = vals[0]
+        else:
+            # sort all keys according to out_paths
+            for key in list(self.result.keys()):
+                if key == "out_paths":
+                    continue
+                sort_tuples = human_order_sorted(
+                    list(zip(self.result["out_paths"], self.result[key])))
+                self.result[key] = [x for (_, x) in sort_tuples]
+            self.result["out_paths"] = human_order_sorted(
self.result["out_paths"]) + + if not self.result: + raise RuntimeError("Regular expression did not match any files!") + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs.update(self.result) + return outputs + + +class FSSourceInputSpec(BaseInterfaceInputSpec): + subjects_dir = Directory(exists=True, mandatory=True, + desc='Freesurfer subjects directory.') + subject_id = Str(mandatory=True, + desc='Subject name for whom to retrieve data') + hemi = traits.Enum('both', 'lh', 'rh', usedefault=True, + desc='Selects hemisphere specific outputs') + + +class FSSourceOutputSpec(TraitedSpec): + T1 = File( + exists=True, desc='Intensity normalized whole-head volume', loc='mri') + aseg = File( + exists=True, + loc='mri', + desc='Volumetric map of regions from automatic segmentation') + brain = File( + exists=True, desc='Intensity normalized brain-only volume', loc='mri') + brainmask = File( + exists=True, desc='Skull-stripped (brain-only) volume', loc='mri') + filled = File(exists=True, desc='Subcortical mass volume', loc='mri') + norm = File( + exists=True, desc='Normalized skull-stripped volume', loc='mri') + nu = File( + exists=True, + desc='Non-uniformity corrected whole-head volume', + loc='mri') + orig = File( + exists=True, + desc='Base image conformed to Freesurfer space', + loc='mri') + rawavg = File( + exists=True, desc='Volume formed by averaging input images', loc='mri') + ribbon = OutputMultiPath( + File(exists=True), + desc='Volumetric maps of cortical ribbons', + loc='mri', + altkey='*ribbon') + wm = File(exists=True, desc='Segmented white-matter volume', loc='mri') + wmparc = File( + exists=True, + loc='mri', + desc='Aparc parcellation projected into subcortical white matter') + curv = OutputMultiPath( + File(exists=True), desc='Maps of surface curvature', loc='surf') + avg_curv = OutputMultiPath( + File(exists=True), + desc='Average atlas curvature, sampled to subject', + loc='surf') + inflated = OutputMultiPath( + File(exists=True), desc='Inflated surface meshes', loc='surf') + pial = OutputMultiPath( + File(exists=True), + desc='Gray matter/pia mater surface meshes', + loc='surf') + area_pial = OutputMultiPath( + File(exists=True), + desc='Mean area of triangles each vertex on the pial surface is ' + 'associated with', + loc='surf', + altkey='area.pial') + curv_pial = OutputMultiPath( + File(exists=True), + desc='Curvature of pial surface', + loc='surf', + altkey='curv.pial') + smoothwm = OutputMultiPath( + File(exists=True), loc='surf', desc='Smoothed original surface meshes') + sphere = OutputMultiPath( + File(exists=True), desc='Spherical surface meshes', loc='surf') + sulc = OutputMultiPath( + File(exists=True), desc='Surface maps of sulcal depth', loc='surf') + thickness = OutputMultiPath( + File(exists=True), + loc='surf', + desc='Surface maps of cortical thickness') + volume = OutputMultiPath( + File(exists=True), desc='Surface maps of cortical volume', loc='surf') + white = OutputMultiPath( + File(exists=True), desc='White/gray matter surface meshes', loc='surf') + jacobian_white = OutputMultiPath( + File(exists=True), + desc='Distortion required to register to spherical atlas', + loc='surf') + graymid = OutputMultiPath( + File(exists=True), + desc='Graymid/midthickness surface meshes', + loc='surf', + altkey=['graymid', 'midthickness']) + label = OutputMultiPath( + File(exists=True), + desc='Volume and surface label files', + loc='label', + altkey='*label') + annot = OutputMultiPath( + File(exists=True), + desc='Surface annotation 
files', + loc='label', + altkey='*annot') + aparc_aseg = OutputMultiPath( + File(exists=True), + loc='mri', + altkey='aparc*aseg', + desc='Aparc parcellation projected into aseg volume') + sphere_reg = OutputMultiPath( + File(exists=True), + loc='surf', + altkey='sphere.reg', + desc='Spherical registration file') + aseg_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='aseg', + desc='Automated segmentation statistics file') + wmparc_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='wmparc', + desc='White matter parcellation statistics file') + aparc_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='aparc', + desc='Aparc parcellation statistics files') + BA_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='BA', + desc='Brodmann Area statistics files') + aparc_a2009s_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='aparc.a2009s', + desc='Aparc a2009s parcellation statistics files') + curv_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='curv', + desc='Curvature statistics files') + entorhinal_exvivo_stats = OutputMultiPath( + File(exists=True), + loc='stats', + altkey='entorhinal_exvivo', + desc='Entorhinal exvivo statistics files') + + +class FreeSurferSource(IOBase): + """Generates freesurfer subject info from their directories + + Examples + -------- + + >>> from nipype.interfaces.io import FreeSurferSource + >>> fs = FreeSurferSource() + >>> #fs.inputs.subjects_dir = '.' + >>> fs.inputs.subject_id = 'PWS04' + >>> res = fs.run() # doctest: +SKIP + + >>> fs.inputs.hemi = 'lh' + >>> res = fs.run() # doctest: +SKIP + + """ + input_spec = FSSourceInputSpec + output_spec = FSSourceOutputSpec + _always_run = True + _additional_metadata = ['loc', 'altkey'] + + def _get_files(self, path, key, dirval, altkey=None): + globsuffix = '' + if dirval == 'mri': + globsuffix = '.mgz' + elif dirval == 'stats': + globsuffix = '.stats' + globprefix = '' + if dirval in ('surf', 'label', 'stats'): + if self.inputs.hemi != 'both': + globprefix = self.inputs.hemi + '.' + else: + globprefix = '?h.' + if key in ('aseg_stats', 'wmparc_stats'): + globprefix = '' + elif key == 'ribbon': + if self.inputs.hemi != 'both': + globprefix = self.inputs.hemi + '.' + else: + globprefix = '*' + keys = ensure_list(altkey) if altkey else [key] + globfmt = os.path.join(path, dirval, ''.join((globprefix, '{}', + globsuffix))) + return [ + os.path.abspath(f) for key in keys + for f in glob.glob(globfmt.format(key)) + ] + + def _list_outputs(self): + subjects_dir = self.inputs.subjects_dir + subject_path = os.path.join(subjects_dir, self.inputs.subject_id) + output_traits = self._outputs() + outputs = output_traits.get() + for k in list(outputs.keys()): + val = self._get_files(subject_path, k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey) + if val: + outputs[k] = simplify_list(val) + return outputs + + +class XNATSourceInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + + query_template = Str( + mandatory=True, + desc=('Layout used to get files. 
Relative to base ' + 'directory if defined')) + + query_template_args = traits.Dict( + Str, + traits.List(traits.List), + value=dict(outfiles=[]), + usedefault=True, + desc='Information to plug into template') + + server = Str(mandatory=True, requires=['user', 'pwd'], xor=['config']) + + user = Str() + pwd = traits.Password() + config = File(mandatory=True, xor=['server']) + + cache_dir = Directory(desc='Cache directory') + + +class XNATSource(IOBase): + """ Generic XNATSource module that wraps around the pyxnat module in + an intelligent way for neuroimaging tasks to grab files and data + from an XNAT server. + + Examples + -------- + + >>> from nipype.interfaces.io import XNATSource + + Pick all files from current directory + + >>> dg = XNATSource() + >>> dg.inputs.template = '*' + + >>> dg = XNATSource(infields=['project','subject','experiment','assessor','inout']) + >>> dg.inputs.query_template = '/projects/%s/subjects/%s/experiments/%s' \ + '/assessors/%s/%s_resources/files' + >>> dg.inputs.project = 'IMAGEN' + >>> dg.inputs.subject = 'IMAGEN_000000001274' + >>> dg.inputs.experiment = '*SessionA*' + >>> dg.inputs.assessor = '*ADNI_MPRAGE_nii' + >>> dg.inputs.inout = 'out' + + >>> dg = XNATSource(infields=['sid'],outfields=['struct','func']) + >>> dg.inputs.query_template = '/projects/IMAGEN/subjects/%s/experiments/*SessionA*' \ + '/assessors/*%s_nii/out_resources/files' + >>> dg.inputs.query_template_args['struct'] = [['sid','ADNI_MPRAGE']] + >>> dg.inputs.query_template_args['func'] = [['sid','EPI_faces']] + >>> dg.inputs.sid = 'IMAGEN_000000001274' + + + """ + input_spec = XNATSourceInputSpec + output_spec = DynamicTraitedSpec + + def __init__(self, infields=None, outfields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + + outfields: list of str + Indicates output fields to be dynamically created + + See class examples for usage + + """ + super(XNATSource, self).__init__(**kwargs) + undefined_traits = {} + # used for mandatory inputs check + self._infields = infields + if infields: + for key in infields: + self.inputs.add_trait(key, traits.Any) + undefined_traits[key] = Undefined + self.inputs.query_template_args['outfiles'] = [infields] + if outfields: + # add ability to insert field specific templates + self.inputs.add_trait( + 'field_template', + traits.Dict( + traits.Enum(outfields), + desc="arguments that fit into query_template")) + undefined_traits['field_template'] = Undefined + # self.inputs.remove_trait('query_template_args') + outdict = {} + for key in outfields: + outdict[key] = [] + self.inputs.query_template_args = outdict + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + def _add_output_traits(self, base): + """ + + Using traits.Any instead out OutputMultiPath till add_trait bug + is fixed. 
+ """ + return add_traits(base, list(self.inputs.query_template_args.keys())) + + def _list_outputs(self): + # infields are mandatory, however I could not figure out + # how to set 'mandatory' flag dynamically, hence manual check + + cache_dir = self.inputs.cache_dir or tempfile.gettempdir() + + if self.inputs.config: + xnat = pyxnat.Interface(config=self.inputs.config) + else: + xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, + self.inputs.pwd, cache_dir) + + if self._infields: + for key in self._infields: + value = getattr(self.inputs, key) + if not isdefined(value): + msg = ("%s requires a value for input '%s' " + "because it was listed in 'infields'" % + (self.__class__.__name__, key)) + raise ValueError(msg) + + outputs = {} + for key, args in list(self.inputs.query_template_args.items()): + outputs[key] = [] + template = self.inputs.query_template + if hasattr(self.inputs, 'field_template') and \ + isdefined(self.inputs.field_template) and \ + key in self.inputs.field_template: + template = self.inputs.field_template[key] + if not args: + file_objects = xnat.select(template).get('obj') + if file_objects == []: + raise IOError('Template %s returned no files' % template) + outputs[key] = simplify_list([ + str(file_object.get()) for file_object in file_objects + if file_object.exists() + ]) + for argnum, arglist in enumerate(args): + maxlen = 1 + for arg in arglist: + if isinstance(arg, + (str, bytes)) and hasattr(self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + if (maxlen > 1) and (len(arg) != maxlen): + raise ValueError('incompatible number ' + 'of arguments for %s' % key) + if len(arg) > maxlen: + maxlen = len(arg) + outfiles = [] + for i in range(maxlen): + argtuple = [] + for arg in arglist: + if isinstance(arg, (str, bytes)) and \ + hasattr(self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + argtuple.append(arg[i]) + else: + argtuple.append(arg) + if argtuple: + target = template % tuple(argtuple) + file_objects = xnat.select(target).get('obj') + + if file_objects == []: + raise IOError('Template %s ' + 'returned no files' % target) + + outfiles = simplify_list([ + str(file_object.get()) + for file_object in file_objects + if file_object.exists() + ]) + else: + file_objects = xnat.select(template).get('obj') + + if file_objects == []: + raise IOError('Template %s ' + 'returned no files' % template) + + outfiles = simplify_list([ + str(file_object.get()) + for file_object in file_objects + if file_object.exists() + ]) + + outputs[key].insert(i, outfiles) + if len(outputs[key]) == 0: + outputs[key] = None + elif len(outputs[key]) == 1: + outputs[key] = outputs[key][0] + return outputs + + +class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + + _outputs = traits.Dict(Str, value={}, usedefault=True) + + server = Str(mandatory=True, requires=['user', 'pwd'], xor=['config']) + + user = Str() + pwd = traits.Password() + config = File(mandatory=True, xor=['server']) + cache_dir = Directory(desc='') + + project_id = Str( + desc='Project in which to store the outputs', mandatory=True) + + subject_id = Str(desc='Set to subject id', mandatory=True) + + experiment_id = Str(desc='Set to workflow name', mandatory=True) + + assessor_id = Str( + desc=('Option to customize ouputs representation in XNAT - ' + 'assessor level will be used with specified id'), + xor=['reconstruction_id']) + + reconstruction_id = Str( + desc=('Option to customize ouputs representation in XNAT - ' + 'reconstruction level 
will be used with specified id'), + xor=['assessor_id']) + + share = traits.Bool( + False, + desc=('Option to share the subjects from the original project' + 'instead of creating new ones when possible - the created ' + 'experiments are then shared back to the original project'), + usedefault=True) + + def __setattr__(self, key, value): + if key not in self.copyable_trait_names(): + self._outputs[key] = value + else: + super(XNATSinkInputSpec, self).__setattr__(key, value) + + +class XNATSink(IOBase): + """ Generic datasink module that takes a directory containing a + list of nifti files and provides a set of structured output + fields. + """ + input_spec = XNATSinkInputSpec + + def _list_outputs(self): + """Execute this module. + """ + + # setup XNAT connection + cache_dir = self.inputs.cache_dir or tempfile.gettempdir() + + if self.inputs.config: + xnat = pyxnat.Interface(config=self.inputs.config) + else: + xnat = pyxnat.Interface(self.inputs.server, self.inputs.user, + self.inputs.pwd, cache_dir) + + # if possible share the subject from the original project + if self.inputs.share: + subject_id = self.inputs.subject_id + result = xnat.select( + 'xnat:subjectData', + ['xnat:subjectData/PROJECT', 'xnat:subjectData/SUBJECT_ID' + ]).where('xnat:subjectData/SUBJECT_ID = %s AND' % subject_id) + + # subject containing raw data exists on the server + if (result.data and isinstance(result.data[0], dict)): + result = result.data[0] + shared = xnat.select('/project/%s/subject/%s' % + (self.inputs.project_id, + self.inputs.subject_id)) + + if not shared.exists(): # subject not in share project + + share_project = xnat.select( + '/project/%s' % self.inputs.project_id) + + if not share_project.exists(): # check project exists + share_project.insert() + + subject = xnat.select('/project/%(project)s' + '/subject/%(subject_id)s' % result) + + subject.share(str(self.inputs.project_id)) + + # setup XNAT resource + uri_template_args = dict( + project_id=quote_id(self.inputs.project_id), + subject_id=self.inputs.subject_id, + experiment_id=quote_id(self.inputs.experiment_id)) + + if self.inputs.share: + uri_template_args['original_project'] = result['project'] + + if self.inputs.assessor_id: + uri_template_args['assessor_id'] = quote_id( + self.inputs.assessor_id) + elif self.inputs.reconstruction_id: + uri_template_args['reconstruction_id'] = quote_id( + self.inputs.reconstruction_id) + + # gather outputs and upload them + for key, files in list(self.inputs._outputs.items()): + + for name in ensure_list(files): + + if isinstance(name, list): + for i, file_name in enumerate(name): + push_file(self, xnat, file_name, '%s_' % i + key, + uri_template_args) + else: + push_file(self, xnat, name, key, uri_template_args) + + +def quote_id(string): + return str(string).replace('_', '---') + + +def unquote_id(string): + return str(string).replace('---', '_') + + +def push_file(self, xnat, file_name, out_key, uri_template_args): + + # grab info from output file names + val_list = [ + unquote_id(val) for part in os.path.split(file_name)[0].split(os.sep) + for val in part.split('_')[1:] + if part.startswith('_') and len(part.split('_')) % 2 + ] + + keymap = dict(list(zip(val_list[1::2], val_list[2::2]))) + + _label = [] + for key, val in sorted(keymap.items()): + if str(self.inputs.subject_id) not in val: + _label.extend([key, val]) + + # select and define container level + uri_template_args['container_type'] = None + + for container in ['assessor_id', 'reconstruction_id']: + if getattr(self.inputs, container): + 
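+            # strip the '_id' suffix: e.g. 'assessor_id' selects the
+            # 'assessor' container level on the XNAT server; the id value
+            # itself was quoted earlier by quote_id()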
uri_template_args['container_type'] = container.split('_id')[0] + uri_template_args['container_id'] = uri_template_args[container] + + if uri_template_args['container_type'] is None: + uri_template_args['container_type'] = 'reconstruction' + + uri_template_args['container_id'] = unquote_id( + uri_template_args['experiment_id']) + + if _label: + uri_template_args['container_id'] += ( + '_results_%s' % '_'.join(_label)) + else: + uri_template_args['container_id'] += '_results' + + # define resource level + uri_template_args['resource_label'] = ('%s_%s' % + (uri_template_args['container_id'], + out_key.split('.')[0])) + + # define file level + uri_template_args['file_name'] = os.path.split( + os.path.abspath(unquote_id(file_name)))[1] + + uri_template = ( + '/project/%(project_id)s/subject/%(subject_id)s' + '/experiment/%(experiment_id)s/%(container_type)s/%(container_id)s' + '/out/resource/%(resource_label)s/file/%(file_name)s') + + # unquote values before uploading + for key in list(uri_template_args.keys()): + uri_template_args[key] = unquote_id(uri_template_args[key]) + + # upload file + remote_file = xnat.select(uri_template % uri_template_args) + remote_file.insert( + file_name, experiments='xnat:imageSessionData', use_label=True) + + # shares the experiment back to the original project if relevant + if 'original_project' in uri_template_args: + + experiment_template = ( + '/project/%(original_project)s' + '/subject/%(subject_id)s/experiment/%(experiment_id)s') + + xnat.select(experiment_template % uri_template_args).share( + uri_template_args['original_project']) + + +def capture_provenance(): + pass + + +def push_provenance(): + pass + + +class SQLiteSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + database_file = File(exists=True, mandatory=True) + table_name = Str(mandatory=True) + + +class SQLiteSink(IOBase): + """ Very simple frontend for storing values into SQLite database. + + .. warning:: + + This is not a thread-safe node because it can write to a common + shared location. It will not complain when it overwrites a file. + + Examples + -------- + + >>> sql = SQLiteSink(input_names=['subject_id', 'some_measurement']) + >>> sql.inputs.database_file = 'my_database.db' + >>> sql.inputs.table_name = 'experiment_results' + >>> sql.inputs.subject_id = 's1' + >>> sql.inputs.some_measurement = 11.4 + >>> sql.run() # doctest: +SKIP + + """ + input_spec = SQLiteSinkInputSpec + + def __init__(self, input_names, **inputs): + + super(SQLiteSink, self).__init__(**inputs) + + self._input_names = ensure_list(input_names) + add_traits(self.inputs, [name for name in self._input_names]) + + def _list_outputs(self): + """Execute this module. 
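+
+        For illustration, with the inputs from the class docstring example
+        (``input_names=['subject_id', 'some_measurement']``,
+        ``table_name='experiment_results'``), the parameterized statement
+        executed below is::
+
+            INSERT OR REPLACE INTO experiment_results
+                (subject_id,some_measurement) VALUES (?,?)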
+ """ + conn = sqlite3.connect( + self.inputs.database_file, check_same_thread=False) + c = conn.cursor() + c.execute("INSERT OR REPLACE INTO %s (" % self.inputs.table_name + + ",".join(self._input_names) + ") VALUES (" + + ",".join(["?"] * len(self._input_names)) + ")", + [getattr(self.inputs, name) for name in self._input_names]) + conn.commit() + c.close() + return None + + +class MySQLSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + host = Str( + 'localhost', + mandatory=True, + requires=['username', 'password'], + xor=['config'], + usedefault=True) + config = File( + mandatory=True, + xor=['host'], + desc="MySQL Options File (same format as my.cnf)") + database_name = Str( + mandatory=True, desc='Otherwise known as the schema name') + table_name = Str(mandatory=True) + username = Str() + password = Str() + + +class MySQLSink(IOBase): + """ Very simple frontend for storing values into MySQL database. + + Examples + -------- + + >>> sql = MySQLSink(input_names=['subject_id', 'some_measurement']) + >>> sql.inputs.database_name = 'my_database' + >>> sql.inputs.table_name = 'experiment_results' + >>> sql.inputs.username = 'root' + >>> sql.inputs.password = 'secret' + >>> sql.inputs.subject_id = 's1' + >>> sql.inputs.some_measurement = 11.4 + >>> sql.run() # doctest: +SKIP + + """ + input_spec = MySQLSinkInputSpec + + def __init__(self, input_names, **inputs): + + super(MySQLSink, self).__init__(**inputs) + + self._input_names = ensure_list(input_names) + add_traits(self.inputs, [name for name in self._input_names]) + + def _list_outputs(self): + """Execute this module. + """ + import MySQLdb + if isdefined(self.inputs.config): + conn = MySQLdb.connect( + db=self.inputs.database_name, + read_default_file=self.inputs.config) + else: + conn = MySQLdb.connect( + host=self.inputs.host, + user=self.inputs.username, + passwd=self.inputs.password, + db=self.inputs.database_name) + c = conn.cursor() + c.execute("REPLACE INTO %s (" % self.inputs.table_name + + ",".join(self._input_names) + ") VALUES (" + + ",".join(["%s"] * len(self._input_names)) + ")", + [getattr(self.inputs, name) for name in self._input_names]) + conn.commit() + c.close() + return None + + +class SSHDataGrabberInputSpec(DataGrabberInputSpec): + hostname = Str(mandatory=True, desc='Server hostname.') + username = Str(desc='Server username.') + password = traits.Password(desc='Server password.') + download_files = traits.Bool( + True, + usedefault=True, + desc='If false it will return the file names without downloading them') + base_directory = Str( + mandatory=True, + desc='Path to the base directory consisting of subject data.') + template_expression = traits.Enum( + ['fnmatch', 'regexp'], + usedefault=True, + desc='Use either fnmatch or regexp to express templates') + ssh_log_to_file = Str( + '', + usedefault=True, + desc='If set SSH commands will be logged to the given file') + + +class SSHDataGrabber(DataGrabber): + """ Extension of DataGrabber module that downloads the file list and + optionally the files from a SSH server. The SSH operation must + not need user and password so an SSH agent must be active in + where this module is being run. + + + .. 
attention:: + + Doesn't support directories currently + + Examples + -------- + + >>> from nipype.interfaces.io import SSHDataGrabber + >>> dg = SSHDataGrabber() + >>> dg.inputs.hostname = 'test.rebex.net' + >>> dg.inputs.user = 'demo' + >>> dg.inputs.password = 'password' + >>> dg.inputs.base_directory = 'pub/example' + + Pick all files from the base directory + + >>> dg.inputs.template = '*' + + Pick all files starting with "s" and a number from current directory + + >>> dg.inputs.template_expression = 'regexp' + >>> dg.inputs.template = 'pop[0-9].*' + + Same thing but with dynamically created fields + + >>> dg = SSHDataGrabber(infields=['arg1','arg2']) + >>> dg.inputs.hostname = 'test.rebex.net' + >>> dg.inputs.user = 'demo' + >>> dg.inputs.password = 'password' + >>> dg.inputs.base_directory = 'pub' + >>> dg.inputs.template = '%s/%s.txt' + >>> dg.inputs.arg1 = 'example' + >>> dg.inputs.arg2 = 'foo' + + however this latter form can be used with iterables and iterfield in a + pipeline. + + Dynamically created, user-defined input and output fields + + >>> dg = SSHDataGrabber(infields=['sid'], outfields=['func','struct','ref']) + >>> dg.inputs.hostname = 'myhost.com' + >>> dg.inputs.base_directory = '/main_folder/my_remote_dir' + >>> dg.inputs.template_args['func'] = [['sid',['f3','f5']]] + >>> dg.inputs.template_args['struct'] = [['sid',['struct']]] + >>> dg.inputs.template_args['ref'] = [['sid','ref']] + >>> dg.inputs.sid = 's1' + + Change the template only for output field struct. The rest use the + general template + + >>> dg.inputs.field_template = dict(struct='%s/struct.nii') + >>> dg.inputs.template_args['struct'] = [['sid']] + + """ + input_spec = SSHDataGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = False + + def __init__(self, infields=None, outfields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + + outfields: list of str + Indicates output fields to be dynamically created + + See class examples for usage + + """ + try: + paramiko + except NameError: + warn("The library paramiko needs to be installed" + " for this module to run.") + if not outfields: + outfields = ['outfiles'] + kwargs = kwargs.copy() + kwargs['infields'] = infields + kwargs['outfields'] = outfields + super(SSHDataGrabber, self).__init__(**kwargs) + if (None in (self.inputs.username, self.inputs.password)): + raise ValueError("either both username and password " + "are provided or none of them") + + if (self.inputs.template_expression == 'regexp' + and self.inputs.template[-1] != '$'): + self.inputs.template += '$' + + def _get_files_over_ssh(self, template): + """Get the files matching template over an SSH connection.""" + # Connect over SSH + client = self._get_ssh_client() + sftp = client.open_sftp() + sftp.chdir(self.inputs.base_directory) + + # Get all files in the dir, and filter for desired files + template_dir = os.path.dirname(template) + template_base = os.path.basename(template) + every_file_in_dir = sftp.listdir(template_dir) + if self.inputs.template_expression == 'fnmatch': + outfiles = fnmatch.filter(every_file_in_dir, template_base) + elif self.inputs.template_expression == 'regexp': + regexp = re.compile(template_base) + outfiles = list(filter(regexp.match, every_file_in_dir)) + else: + raise ValueError('template_expression value invalid') + + if len(outfiles) == 0: + # no files + msg = 'Output template: %s returned no files' % template + if self.inputs.raise_on_empty: + raise IOError(msg) + 
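+            # otherwise just warn below and return None for this key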
else: + warn(msg) + + # return value + outfiles = None + + else: + # found files, sort and save to outputs + if self.inputs.sort_filelist: + outfiles = human_order_sorted(outfiles) + + # actually download the files, if desired + if self.inputs.download_files: + files_to_download = copy.copy(outfiles) # make sure new list! + + # check to see if there are any related files to download + for file_to_download in files_to_download: + related_to_current = get_related_files( + file_to_download, include_this_file=False) + existing_related_not_downloading = [ + f for f in related_to_current + if f in every_file_in_dir and f not in files_to_download] + files_to_download.extend(existing_related_not_downloading) + + for f in files_to_download: + try: + sftp.get(os.path.join(template_dir, f), f) + except IOError: + iflogger.info('remote file %s not found' % f) + + # return value + outfiles = simplify_list(outfiles) + + return outfiles + + def _list_outputs(self): + try: + paramiko + except NameError: + raise ImportError("The library paramiko needs to be installed" + " for this module to run.") + + if len(self.inputs.ssh_log_to_file) > 0: + paramiko.util.log_to_file(self.inputs.ssh_log_to_file) + # infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically + # hence manual check + if self._infields: + for key in self._infields: + value = getattr(self.inputs, key) + if not isdefined(value): + msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \ + (self.__class__.__name__, key) + raise ValueError(msg) + + outputs = {} + for key, args in list(self.inputs.template_args.items()): + outputs[key] = [] + template = self.inputs.template + if hasattr(self.inputs, 'field_template') and \ + isdefined(self.inputs.field_template) and \ + key in self.inputs.field_template: + template = self.inputs.field_template[key] + + if not args: + outputs[key] = self._get_files_over_ssh(template) + + for argnum, arglist in enumerate(args): + maxlen = 1 + for arg in arglist: + if isinstance(arg, + (str, bytes)) and hasattr(self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + if (maxlen > 1) and (len(arg) != maxlen): + raise ValueError( + 'incompatible number of arguments for %s' % + key) + if len(arg) > maxlen: + maxlen = len(arg) + outfiles = [] + for i in range(maxlen): + argtuple = [] + for arg in arglist: + if isinstance(arg, (str, bytes)) and hasattr( + self.inputs, arg): + arg = getattr(self.inputs, arg) + if isinstance(arg, list): + argtuple.append(arg[i]) + else: + argtuple.append(arg) + filledtemplate = template + if argtuple: + try: + filledtemplate = template % tuple(argtuple) + except TypeError as e: + raise TypeError( + e.message + + ": Template %s failed to convert with args %s" + % (template, str(tuple(argtuple)))) + + outputs[key].append(self._get_files_over_ssh(filledtemplate)) + + # disclude where there was any invalid matches + if any([val is None for val in outputs[key]]): + outputs[key] = [] + + # no outputs is None, not empty list + if len(outputs[key]) == 0: + outputs[key] = None + + # one output is the item, not a list + elif len(outputs[key]) == 1: + outputs[key] = outputs[key][0] + + for k, v in list(outputs.items()): + outputs[k] = os.path.join(os.getcwd(), v) + + return outputs + + def _get_ssh_client(self): + config = paramiko.SSHConfig() + config.parse(open(os.path.expanduser('~/.ssh/config'))) + host = config.lookup(self.inputs.hostname) + if 'proxycommand' in host: + proxy = paramiko.ProxyCommand( + 
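+                # expand any shell variables in the ProxyCommand value
+                # from ~/.ssh/config before handing it to paramiko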
subprocess.check_output([ + os.environ['SHELL'], '-c', + 'echo %s' % host['proxycommand'] + ]).strip()) + else: + proxy = None + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect(host['hostname'], username=host['user'], sock=proxy) + return client + + +class JSONFileGrabberInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + in_file = File(exists=True, desc='JSON source file') + defaults = traits.Dict( + desc=('JSON dictionary that sets default output' + 'values, overridden by values found in in_file')) + + +class JSONFileGrabber(IOBase): + """ + Datagrabber interface that loads a json file and generates an output for + every first-level object + + Example + ------- + + >>> import pprint + >>> from nipype.interfaces.io import JSONFileGrabber + >>> jsonSource = JSONFileGrabber() + >>> jsonSource.inputs.defaults = {'param1': 'overrideMe', 'param3': 1.0} + >>> res = jsonSource.run() + >>> pprint.pprint(res.outputs.get()) + {'param1': 'overrideMe', 'param3': 1.0} + >>> jsonSource.inputs.in_file = os.path.join(datadir, 'jsongrabber.txt') + >>> res = jsonSource.run() + >>> pprint.pprint(res.outputs.get()) # doctest:, +ELLIPSIS + {'param1': 'exampleStr', 'param2': 4, 'param3': 1.0} + """ + input_spec = JSONFileGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def _list_outputs(self): + import simplejson + + outputs = {} + if isdefined(self.inputs.in_file): + with open(self.inputs.in_file, 'r') as f: + data = simplejson.load(f) + + if not isinstance(data, dict): + raise RuntimeError('JSON input has no dictionary structure') + + for key, value in list(data.items()): + outputs[key] = value + + if isdefined(self.inputs.defaults): + defaults = self.inputs.defaults + for key, value in list(defaults.items()): + if key not in list(outputs.keys()): + outputs[key] = value + + return outputs + + +class JSONFileSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + out_file = File(desc='JSON sink file') + in_dict = traits.Dict( + value={}, usedefault=True, desc='input JSON dictionary') + _outputs = traits.Dict(value={}, usedefault=True) + + def __setattr__(self, key, value): + if key not in self.copyable_trait_names(): + if not isdefined(value): + super(JSONFileSinkInputSpec, self).__setattr__(key, value) + self._outputs[key] = value + else: + if key in self._outputs: + self._outputs[key] = value + super(JSONFileSinkInputSpec, self).__setattr__(key, value) + + +class JSONFileSinkOutputSpec(TraitedSpec): + out_file = File(desc='JSON sink file') + + +class JSONFileSink(IOBase): + """ + Very simple frontend for storing values into a JSON file. + Entries already existing in in_dict will be overridden by matching + entries dynamically added as inputs. + + .. warning:: + + This is not a thread-safe node because it can write to a common + shared location. It will not complain when it overwrites a file. + + Examples + -------- + + >>> jsonsink = JSONFileSink(input_names=['subject_id', + ... 'some_measurement']) + >>> jsonsink.inputs.subject_id = 's1' + >>> jsonsink.inputs.some_measurement = 11.4 + >>> jsonsink.run() # doctest: +SKIP + + Using a dictionary as input: + + >>> dictsink = JSONFileSink() + >>> dictsink.inputs.in_dict = {'subject_id': 's1', + ... 
'some_measurement': 11.4} + >>> dictsink.run() # doctest: +SKIP + + """ + input_spec = JSONFileSinkInputSpec + output_spec = JSONFileSinkOutputSpec + + def __init__(self, infields=[], force_run=True, **inputs): + super(JSONFileSink, self).__init__(**inputs) + self._input_names = infields + + undefined_traits = {} + for key in infields: + self.inputs.add_trait(key, traits.Any) + self.inputs._outputs[key] = Undefined + undefined_traits[key] = Undefined + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + if force_run: + self._always_run = True + + def _process_name(self, name, val): + if '.' in name: + newkeys = name.split('.') + name = newkeys.pop(0) + nested_dict = {newkeys.pop(): val} + + for nk in reversed(newkeys): + nested_dict = {nk: nested_dict} + val = nested_dict + + return name, val + + def _list_outputs(self): + import simplejson + import os.path as op + + if not isdefined(self.inputs.out_file): + out_file = op.abspath('datasink.json') + else: + out_file = op.abspath(self.inputs.out_file) + + out_dict = self.inputs.in_dict + + # Overwrite in_dict entries automatically + for key, val in list(self.inputs._outputs.items()): + if not isdefined(val) or key == 'trait_added': + continue + key, val = self._process_name(key, val) + out_dict[key] = val + + with open(out_file, 'w') as f: + f.write(str(simplejson.dumps(out_dict, ensure_ascii=False))) + + outputs = self.output_spec().get() + outputs['out_file'] = out_file + return outputs + + +class BIDSDataGrabberInputSpec(DynamicTraitedSpec): + base_dir = Directory(exists=True, + desc='Path to BIDS Directory.', + mandatory=True) + output_query = traits.Dict(key_trait=Str, + value_trait=traits.Dict, + desc='Queries for outfield outputs') + raise_on_empty = traits.Bool(True, usedefault=True, + desc='Generate exception if list is empty ' + 'for a given field') + return_type = traits.Enum('file', 'namedtuple', usedefault=True) + strict = traits.Bool(desc='Return only BIDS "proper" files (e.g., ' + 'ignore derivatives/, sourcedata/, etc.)') + + +class BIDSDataGrabber(IOBase): + + """ BIDS datagrabber module that wraps around pybids to allow arbitrary + querying of BIDS datasets. + + Examples + -------- + + By default, the BIDSDataGrabber fetches anatomical and functional images + from a project, and makes BIDS entities (e.g. subject) available for + filtering outputs. + + >>> bg = BIDSDataGrabber() + >>> bg.inputs.base_dir = 'ds005/' + >>> bg.inputs.subject = '01' + >>> results = bg.run() # doctest: +SKIP + + + Dynamically created, user-defined output fields can also be defined to + return different types of outputs from the same project. All outputs + are filtered on common entities, which can be explicitly defined as + infields. 
+ + >>> bg = BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) + >>> bg.inputs.base_dir = 'ds005/' + >>> bg.inputs.subject = '01' + >>> bg.inputs.output_query['dwi'] = dict(modality='dwi') + >>> results = bg.run() # doctest: +SKIP + + """ + input_spec = BIDSDataGrabberInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def __init__(self, infields=None, **kwargs): + """ + Parameters + ---------- + infields : list of str + Indicates the input fields to be dynamically created + """ + super(BIDSDataGrabber, self).__init__(**kwargs) + + if not isdefined(self.inputs.output_query): + self.inputs.output_query = { + "func": {"modality": "func", 'extensions': ['nii', '.nii.gz']}, + "anat": {"modality": "anat", 'extensions': ['nii', '.nii.gz']}, + } + + # If infields is empty, use all BIDS entities + if infields is None and have_pybids: + bids_config = join(dirname(gb.__file__), 'config', 'bids.json') + bids_config = json.load(open(bids_config, 'r')) + infields = [i['name'] for i in bids_config['entities']] + + self._infields = infields or [] + + # used for mandatory inputs check + undefined_traits = {} + for key in self._infields: + self.inputs.add_trait(key, traits.Any) + undefined_traits[key] = kwargs[key] if key in kwargs else Undefined + + self.inputs.trait_set(trait_change_notify=False, **undefined_traits) + + def _run_interface(self, runtime): + if not have_pybids: + raise ImportError( + "The BIDSEventsGrabber interface requires pybids." + " Please make sure it is installed.") + return runtime + + def _list_outputs(self): + exclude = None + if self.inputs.strict: + exclude = ['derivatives/', 'code/', 'sourcedata/'] + layout = gb.BIDSLayout(self.inputs.base_dir, exclude=exclude) + + # If infield is not given nm input value, silently ignore + filters = {} + for key in self._infields: + value = getattr(self.inputs, key) + if isdefined(value): + filters[key] = value + + outputs = {} + for key, query in self.inputs.output_query.items(): + args = query.copy() + args.update(filters) + filelist = layout.get(return_type=self.inputs.return_type, **args) + if len(filelist) == 0: + msg = 'Output key: %s returned no files' % key + if self.inputs.raise_on_empty: + raise IOError(msg) + else: + iflogger.warning(msg) + filelist = Undefined + + outputs[key] = filelist + return outputs + + def _add_output_traits(self, base): + return add_traits(base, list(self.inputs.output_query.keys())) diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py new file mode 100644 index 0000000000..fed7bfeb57 --- /dev/null +++ b/nipype/interfaces/matlab.py @@ -0,0 +1,224 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" General matlab interface code """ +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open +import os + +from .. 
import config + from .base import (CommandLineInputSpec, InputMultiPath, isdefined, + CommandLine, traits, File, Directory) + + + def get_matlab_command(): + if 'NIPYPE_NO_MATLAB' in os.environ: + return None + + try: + matlab_cmd = os.environ['MATLABCMD'] + except KeyError: + matlab_cmd = 'matlab' + + try: + res = CommandLine( + command='which', + args=matlab_cmd, + resource_monitor=False, + terminal_output='allatonce').run() + matlab_path = res.runtime.stdout.strip() + except Exception: + return None + return matlab_cmd + + + no_matlab = get_matlab_command() is None + + + class MatlabInputSpec(CommandLineInputSpec): + """ Basic expected inputs to Matlab interface """ + + script = traits.Str( + argstr='-r \"%s;exit\"', + desc='m-code to run', + mandatory=True, + position=-1) + uses_mcr = traits.Bool( + desc='use MCR interface', + xor=['nodesktop', 'nosplash', 'single_comp_thread'], + nohash=True) + nodesktop = traits.Bool( + True, + argstr='-nodesktop', + usedefault=True, + desc='Switch off desktop mode on unix platforms', + nohash=True) + nosplash = traits.Bool( + True, + argstr='-nosplash', + usedefault=True, + desc='Switch off the splash screen', + nohash=True) + logfile = File(argstr='-logfile %s', desc='Save matlab output to log') + single_comp_thread = traits.Bool( + argstr="-singleCompThread", + desc="force single threaded operation", + nohash=True) + # non-commandline options + mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) + script_file = File( + 'pyscript.m', usedefault=True, desc='Name of file to write m-code to') + paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') + prescript = traits.List( + ["ver,", "try,"], + usedefault=True, + desc='prescript to be added before code') + postscript = traits.List( + [ + "\n,catch ME,", "fprintf(2,'MATLAB code threw an exception:\\n');", + "fprintf(2,'%s\\n',ME.message);", + "if length(ME.stack) ~= 0, fprintf(2,'File:%s\\nName:%s\\nLine:%d\\n',ME.stack.file,ME.stack.name,ME.stack.line);, end;", + "end;" + ], + desc='script added after code', + usedefault=True) + + + class MatlabCommand(CommandLine): + """Interface that runs matlab code + + >>> import nipype.interfaces.matlab as matlab + >>> mlab = matlab.MatlabCommand(mfile=False) # don't write script file + >>> mlab.inputs.script = "which('who')" + >>> out = mlab.run() # doctest: +SKIP + """ + + _cmd = 'matlab' + _default_matlab_cmd = None + _default_mfile = None + _default_paths = None + input_spec = MatlabInputSpec + + def __init__(self, matlab_cmd=None, **inputs): + """initializes interface to matlab + (default 'matlab -nodesktop -nosplash') + """ + super(MatlabCommand, self).__init__(**inputs) + if matlab_cmd and isdefined(matlab_cmd): + self._cmd = matlab_cmd + elif self._default_matlab_cmd: + self._cmd = self._default_matlab_cmd + + if self._default_mfile and not isdefined(self.inputs.mfile): + self.inputs.mfile = self._default_mfile + + if self._default_paths and not isdefined(self.inputs.paths): + self.inputs.paths = self._default_paths + + if not isdefined(self.inputs.single_comp_thread) and \ + not isdefined(self.inputs.uses_mcr): + if config.getboolean('execution', 'single_thread_matlab'): + self.inputs.single_comp_thread = True + # For matlab commands force all output to be returned since matlab + # does not have a clean way of notifying an error + self.terminal_output = 'allatonce' + + @classmethod + def set_default_matlab_cmd(cls, matlab_cmd): + """Set the default MATLAB command line for MATLAB classes.
+ + This method is used to set values for all MATLAB + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.matlab_cmd. + """ + cls._default_matlab_cmd = matlab_cmd + + @classmethod + def set_default_mfile(cls, mfile): + """Set the default MATLAB script file format for MATLAB classes. + + This method is used to set values for all MATLAB + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.mfile. + """ + cls._default_mfile = mfile + + @classmethod + def set_default_paths(cls, paths): + """Set the default MATLAB paths for MATLAB classes. + + This method is used to set values for all MATLAB + subclasses. However, setting this will not update the output + type for any existing instances. For these, assign the + .inputs.paths. + """ + cls._default_paths = paths + + def _run_interface(self, runtime): + self.terminal_output = 'allatonce' + runtime = super(MatlabCommand, self)._run_interface(runtime) + try: + # Matlab can leave the terminal in a garbled state + os.system('stty sane') + except Exception: + # We might be on a system where stty doesn't exist + pass + if 'MATLAB code threw an exception' in runtime.stderr: + self.raise_exception(runtime) + return runtime + + def _format_arg(self, name, trait_spec, value): + if name in ['script']: + argstr = trait_spec.argstr + if self.inputs.uses_mcr: + argstr = '%s' + return self._gen_matlab_command(argstr, value) + return super(MatlabCommand, self)._format_arg(name, trait_spec, value) + + def _gen_matlab_command(self, argstr, script_lines): + """ Generates commands and, if mfile specified, writes it to disk.""" + cwd = os.getcwd() + mfile = self.inputs.mfile or self.inputs.uses_mcr + paths = [] + if isdefined(self.inputs.paths): + paths = self.inputs.paths + # prescript + prescript = self.inputs.prescript + postscript = self.inputs.postscript + + # prescript takes different default value depending on the mfile argument + if mfile: + prescript.insert( + 0, + "fprintf(1,'Executing %s at %s:\\n',mfilename(),datestr(now));" + ) + else: + prescript.insert( + 0, "fprintf(1,'Executing code at %s:\\n',datestr(now));") + for path in paths: + prescript.append("addpath('%s');\n" % path) + + if not mfile: + # clean up the code of comments and replace newlines with commas + script_lines = ','.join([ + line for line in script_lines.split("\n") + if not line.strip().startswith("%") + ]) + + script_lines = '\n'.join(prescript) + script_lines + '\n'.join( + postscript) + if mfile: + with open(os.path.join(cwd, self.inputs.script_file), + 'wt') as mfile: + mfile.write(script_lines) + if self.inputs.uses_mcr: + script = '%s' % (os.path.join(cwd, self.inputs.script_file)) + else: + script = "addpath('%s');%s" % ( + cwd, self.inputs.script_file.split('.')[0]) + else: + script = ''.join(script_lines.split('\n')) + return argstr % script diff --git a/nipype/interfaces/meshfix.py b/nipype/interfaces/meshfix.py new file mode 100644 index 0000000000..4b9db519a9 --- /dev/null +++ b/nipype/interfaces/meshfix.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Fixes meshes: +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op +from ..utils.filemanip import split_filename +from .base import (CommandLine, CommandLineInputSpec, traits, TraitedSpec, + isdefined,
File) + + + class MeshFixInputSpec(CommandLineInputSpec): + number_of_biggest_shells = traits.Int( + argstr='--shells %d', desc="Only the N biggest shells are kept") + + epsilon_angle = traits.Range( + argstr='-a %f', + low=0.0, + high=2.0, + desc="Epsilon angle in degrees (must be between 0 and 2)") + + join_overlapping_largest_components = traits.Bool( + argstr='-j', + xor=['join_closest_components'], + desc='Join 2 biggest components if they overlap, remove the rest.') + + join_closest_components = traits.Bool( + argstr='-jc', + xor=['join_overlapping_largest_components'], + desc='Join the closest pair of components.') + + quiet_mode = traits.Bool( + argstr='-q', desc="Quiet mode, don't write much to stdout.") + + dont_clean = traits.Bool(argstr='--no-clean', desc="Don't Clean") + + save_as_stl = traits.Bool( + xor=['save_as_vmrl', 'save_as_freesurfer_mesh'], + argstr='--stl', + desc="Result is saved in stereolithographic format (.stl)") + save_as_vmrl = traits.Bool( + argstr='--wrl', + xor=['save_as_stl', 'save_as_freesurfer_mesh'], + desc="Result is saved in VRML1.0 format (.wrl)") + save_as_freesurfer_mesh = traits.Bool( + argstr='--fsmesh', + xor=['save_as_vmrl', 'save_as_stl'], + desc="Result is saved in freesurfer mesh format") + + remove_handles = traits.Bool( + argstr='--remove-handles', desc="Remove handles") + + uniform_remeshing_steps = traits.Int( + argstr='-u %d', + requires=['uniform_remeshing_vertices'], + desc="Number of steps for uniform remeshing of the whole mesh") + + uniform_remeshing_vertices = traits.Int( + argstr='--vertices %d', + requires=['uniform_remeshing_steps'], + desc="Constrains the number of vertices." + "Must be used with uniform_remeshing_steps") + + laplacian_smoothing_steps = traits.Int( + argstr='--smooth %d', + desc="The number of laplacian smoothing steps to apply") + + # FIXME the argstr below duplicates the laplacian_smoothing_steps flag + # ('--smooth %d'); check the intended meshfix flag for x_shift. + x_shift = traits.Int( + argstr='--smooth %d', + desc= + "Shifts the coordinates of the vertices when saving. Output must be in FreeSurfer format" + ) + + # Cutting, decoupling, dilation + cut_outer = traits.Int( + argstr='--cut-outer %d', + desc="Remove triangles of 1st that are outside of the 2nd shell.") + cut_inner = traits.Int( + argstr='--cut-inner %d', + desc= + "Remove triangles of 1st that are inside of the 2nd shell. Dilate 2nd by N; Fill holes and keep only 1st afterwards." + ) + decouple_inin = traits.Int( + argstr='--decouple-inin %d', + desc="Treat 1st file as inner, 2nd file as outer component." + "Resolve overlaps by moving inner triangles inwards. Constrain the min distance between the components > d." + ) + decouple_outin = traits.Int( + argstr='--decouple-outin %d', + desc="Treat 1st file as outer, 2nd file as inner component." + "Resolve overlaps by moving outer triangles inwards. Constrain the min distance between the components > d." + ) + decouple_outout = traits.Int( + argstr='--decouple-outout %d', + desc="Treat 1st file as outer, 2nd file as inner component." + "Resolve overlaps by moving outer triangles outwards. Constrain the min distance between the components > d."
+ ) + + finetuning_inwards = traits.Bool( + argstr='--fineTuneIn ', + requires=['finetuning_distance', 'finetuning_substeps'], + desc='Fine-tune the minimal distance between surfaces (inwards).') + finetuning_outwards = traits.Bool( + argstr='--fineTuneOut ', + requires=['finetuning_distance', 'finetuning_substeps'], + xor=['finetuning_inwards'], + desc= + 'Similar to finetuning_inwards, but ensures minimal distance in the other direction' + ) + finetuning_distance = traits.Float( + argstr='%f', + requires=['finetuning_substeps'], + desc="Used to fine-tune the minimal distance between surfaces." + "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevents too flat tetrahedra)" + ) + finetuning_substeps = traits.Int( + argstr='%d', + requires=['finetuning_distance'], + desc="Used to fine-tune the minimal distance between surfaces." + "A minimal distance d is ensured, and reached in n substeps. When using the surfaces for subsequent volume meshing by gmsh, this step prevents too flat tetrahedra)" + ) + + dilation = traits.Int( + argstr='--dilate %d', + desc="Dilate the surface by d. d < 0 means shrinking.") + set_intersections_to_one = traits.Bool( + argstr='--intersect', + desc="If the mesh contains intersections, return value = 1." + "If saved in gmsh format, intersections will be highlighted.") + + in_file1 = File(exists=True, argstr="%s", position=1, mandatory=True) + in_file2 = File(exists=True, argstr="%s", position=2) + output_type = traits.Enum( + 'off', ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'], + usedefault=True, + desc='The output type to save the file as.') + out_filename = File( + genfile=True, + argstr="-o %s", + desc='The output filename for the fixed mesh file') + + + class MeshFixOutputSpec(TraitedSpec): + mesh_file = File(exists=True, desc='The output mesh file') + + + class MeshFix(CommandLine): + """ + MeshFix v1.2-alpha - by Marco Attene, Mirko Windhoff, Axel Thielscher. + + .. seealso:: + + http://jmeshlib.sourceforge.net + Sourceforge page + + http://simnibs.de/installation/meshfixandgetfem + Ubuntu installation instructions + + If MeshFix is used for research purposes, please cite the following paper: + M. Attene - A lightweight approach to repairing digitized polygon meshes. + The Visual Computer, 2010. (c) Springer. + + Accepted input formats are OFF, PLY and STL. + Other formats (like .msh for gmsh) are supported only partially. + + Example + ------- + + >>> import nipype.interfaces.meshfix as mf + >>> fix = mf.MeshFix() + >>> fix.inputs.in_file1 = 'lh-pial.stl' + >>> fix.inputs.in_file2 = 'rh-pial.stl' + >>> fix.run() # doctest: +SKIP + >>> fix.cmdline + 'meshfix lh-pial.stl rh-pial.stl -o lh-pial_fixed.off' + """ + _cmd = 'meshfix' + input_spec = MeshFixInputSpec + output_spec = MeshFixOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + if isdefined(self.inputs.out_filename): + path, name, ext = split_filename(self.inputs.out_filename) + ext = ext.replace('.', '') + out_types = ['stl', 'msh', 'wrl', 'vrml', 'fs', 'off'] + # Make sure that the output filename uses one of the possible file types + if any(ext == out_type.lower() for out_type in out_types): + outputs['mesh_file'] = op.abspath(self.inputs.out_filename) + else: + outputs['mesh_file'] = op.abspath( + name + '.'
+ self.inputs.output_type) + else: + outputs['mesh_file'] = op.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file1) + if self.inputs.save_as_freesurfer_mesh or self.inputs.output_type == 'fs': + self.inputs.output_type = 'fs' + self.inputs.save_as_freesurfer_mesh = True + if self.inputs.save_as_stl or self.inputs.output_type == 'stl': + self.inputs.output_type = 'stl' + self.inputs.save_as_stl = True + if self.inputs.save_as_vmrl or self.inputs.output_type == 'vmrl': + self.inputs.output_type = 'vmrl' + self.inputs.save_as_vmrl = True + return name + '_fixed.' + self.inputs.output_type diff --git a/nipype/interfaces/minc/__init__.py b/nipype/interfaces/minc/__init__.py new file mode 100644 index 0000000000..1ebea58b64 --- /dev/null +++ b/nipype/interfaces/minc/__init__.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The minc module provides classes for interfacing with the `MINC +`_ command line tools. This +module was written to work with MINC version 2.2.00. + +Author: Carlo Hamalainen + http://carlo-hamalainen.net +""" + +from .base import (Info) + +from .minc import ( + Average, + BBox, + Beast, + BestLinReg, + BigAverage, + Blob, + Blur, + Calc, + Convert, + Copy, + Dump, + Extract, + Gennlxfm, + Math, + NlpFit, + Norm, + Pik, + Resample, + Reshape, + ToEcat, + ToRaw, + Volcentre, + Voliso, + Volpad, + VolSymm, + XfmAvg, + XfmConcat, + XfmInvert, +) diff --git a/nipype/interfaces/minc/base.py b/nipype/interfaces/minc/base.py new file mode 100644 index 0000000000..67b7938176 --- /dev/null +++ b/nipype/interfaces/minc/base.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The minc module provides classes for interfacing with the `MINC +`_ command line tools. +This module was written to work with MINC version 2.2.00. + +Author: Carlo Hamalainen + http://carlo-hamalainen.net +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object +import os +import os.path +import warnings + +from ..base import CommandLine + +warnings.filterwarnings('always', category=UserWarning) + + +def check_minc(): + """Returns True if and only if MINC is installed.' + """ + + return Info.version() is not None + + +def no_minc(): + """Returns True if and only if MINC is *not* installed. + """ + return not check_minc() + + +class Info(object): + """Handle MINC version information. 
+ + version refers to the version of MINC on the system + """ + + @staticmethod + def version(): + """Check for minc version on the system + + Parameters + ---------- + None + + Returns + ------- + version : dict + Version number as dict or None if MINC not found + + """ + try: + clout = CommandLine( + command='mincinfo', + args='-version', + terminal_output='allatonce').run() + except IOError: + return None + + out = clout.runtime.stdout + + def read_program_version(s): + if 'program' in s: + return s.split(':')[1].strip() + return None + + def read_libminc_version(s): + if 'libminc' in s: + return s.split(':')[1].strip() + return None + + def read_netcdf_version(s): + if 'netcdf' in s: + return ' '.join(s.split(':')[1:]).strip() + return None + + def read_hdf5_version(s): + if 'HDF5' in s: + return s.split(':')[1].strip() + return None + + versions = { + 'minc': None, + 'libminc': None, + 'netcdf': None, + 'hdf5': None, + } + + for l in out.split('\n'): + for (name, f) in [ + ('minc', read_program_version), + ('libminc', read_libminc_version), + ('netcdf', read_netcdf_version), + ('hdf5', read_hdf5_version), + ]: + if f(l) is not None: + versions[name] = f(l) + + return versions + + +def aggregate_filename(files, new_suffix): + """ + Try to work out a sensible name given a set of files that have + been combined in some way (e.g. averaged). If we can't work out a + sensible prefix, we use the first filename in the list. + + Examples + -------- + + >>> from nipype.interfaces.minc.base import aggregate_filename + >>> f = aggregate_filename(['/tmp/foo1.mnc', '/tmp/foo2.mnc', '/tmp/foo3.mnc'], 'averaged') + >>> os.path.split(f)[1] # This has a full path, so just check the filename. + 'foo_averaged.mnc' + + >>> f = aggregate_filename(['/tmp/foo1.mnc', '/tmp/blah1.mnc'], 'averaged') + >>> os.path.split(f)[1] # This has a full path, so just check the filename. + 'foo1_averaged.mnc' + + """ + + path = os.path.split(files[0])[0] + names = [os.path.splitext(os.path.split(x)[1])[0] for x in files] + common_prefix = os.path.commonprefix(names) + + path = os.getcwd() + + if common_prefix == '': + return os.path.abspath( + os.path.join( + path, + os.path.splitext(files[0])[0] + '_' + new_suffix + '.mnc')) + else: + return os.path.abspath( + os.path.join(path, common_prefix + '_' + new_suffix + '.mnc')) diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py new file mode 100644 index 0000000000..8ac8babe52 --- /dev/null +++ b/nipype/interfaces/minc/minc.py @@ -0,0 +1,3685 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The minc module provides classes for interfacing with the `MINC +`_ command line tools. This +module was written to work with MINC version 2.2.00. 
+ +Author: Carlo Hamalainen + http://carlo-hamalainen.net +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import glob +import os +import os.path +import re +import warnings + +from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine, + StdOutCommandLineInputSpec, StdOutCommandLine, File, + Directory, InputMultiPath, OutputMultiPath, traits, + isdefined) +from .base import aggregate_filename + +warnings.filterwarnings('always', category=UserWarning) + + +class ExtractInputSpec(StdOutCommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s.raw', + keep_extension=False) + + _xor_write = ( + 'write_ascii', + 'write_ascii', + 'write_byte', + 'write_short', + 'write_int', + 'write_long', + 'write_float', + 'write_double', + 'write_signed', + 'write_unsigned', + ) + + write_ascii = traits.Bool( + desc='Write out data as ascii strings (default).', + argstr='-ascii', + xor=_xor_write) + + write_byte = traits.Bool( + desc='Write out data as bytes.', argstr='-byte', xor=_xor_write) + + write_short = traits.Bool( + desc='Write out data as short integers.', + argstr='-short', + xor=_xor_write) + + write_int = traits.Bool( + desc='Write out data as 32-bit integers.', + argstr='-int', + xor=_xor_write) + + write_long = traits.Bool( + desc='Superseded by write_int.', argstr='-long', xor=_xor_write) + + write_float = traits.Bool( + desc='Write out data as single precision floating-point values.', + argstr='-float', + xor=_xor_write) + + write_double = traits.Bool( + desc='Write out data as double precision floating-point values.', + argstr='-double', + xor=_xor_write) + + _xor_signed = ('write_signed', 'write_unsigned') + + write_signed = traits.Bool( + desc='Write out signed data.', argstr='-signed', xor=_xor_signed) + + write_unsigned = traits.Bool( + desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed) + + write_range = traits.Tuple( + traits.Float, + traits.Float, + argstr='-range %s %s', + desc= + 'Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.', + ) + + _xor_normalize = ( + 'normalize', + 'nonormalize', + ) + + normalize = traits.Bool( + desc='Normalize integer pixel values to file max and min.', + argstr='-normalize', + xor=_xor_normalize) + + nonormalize = traits.Bool( + desc='Turn off pixel normalization.', + argstr='-nonormalize', + xor=_xor_normalize) + + image_range = traits.Tuple( + traits.Float, + traits.Float, + desc='Specify the range of real image values for normalization.', + argstr='-image_range %s %s') + + image_minimum = traits.Float( + desc=('Specify the minimum real image value for normalization.' + 'Default value: 1.79769e+308.'), + argstr='-image_minimum %s') + + image_maximum = traits.Float( + desc=('Specify the maximum real image value for normalization.' + 'Default value: 1.79769e+308.'), + argstr='-image_maximum %s') + + start = InputMultiPath( + traits.Int, + desc='Specifies corner of hyperslab (C conventions for indices).', + sep=',', + argstr='-start %s', + ) + + count = InputMultiPath( + traits.Int, + desc='Specifies edge lengths of hyperslab to read.', + sep=',', + argstr='-count %s', + ) + + # FIXME Can we make sure that len(start) == len(count)? 
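+ # One possible answer to the FIXME above (a sketch only, not part of this + # patch): validate the two lists when the command line is assembled, e.g. + # by overriding _parse_inputs on the Extract interface defined below: + # + # def _parse_inputs(self, skip=None): + # if (isdefined(self.inputs.start) and isdefined(self.inputs.count) + # and len(self.inputs.start) != len(self.inputs.count)): + # raise ValueError('start and count must have the same length') + # return super(Extract, self)._parse_inputs(skip=skip)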
+ + _xor_flip = ('flip_positive_direction', 'flip_negative_direction', + 'flip_any_direction') + + flip_positive_direction = traits.Bool( + desc='Flip images to always have positive direction.', + argstr='-positive_direction', + xor=_xor_flip) + flip_negative_direction = traits.Bool( + desc='Flip images to always have negative direction.', + argstr='-negative_direction', + xor=_xor_flip) + flip_any_direction = traits.Bool( + desc='Do not flip images (Default).', + argstr='-any_direction', + xor=_xor_flip) + + _xor_x_flip = ('flip_x_positive', 'flip_x_negative', 'flip_x_any') + + flip_x_positive = traits.Bool( + desc='Flip images to give positive xspace:step value (left-to-right).', + argstr='+xdirection', + xor=_xor_x_flip) + flip_x_negative = traits.Bool( + desc='Flip images to give negative xspace:step value (right-to-left).', + argstr='-xdirection', + xor=_xor_x_flip) + flip_x_any = traits.Bool( + desc='Don\'t flip images along x-axis (default).', + argstr='-xanydirection', + xor=_xor_x_flip) + + _xor_y_flip = ('flip_y_positive', 'flip_y_negative', 'flip_y_any') + + flip_y_positive = traits.Bool( + desc='Flip images to give positive yspace:step value (post-to-ant).', + argstr='+ydirection', + xor=_xor_y_flip) + flip_y_negative = traits.Bool( + desc='Flip images to give negative yspace:step value (ant-to-post).', + argstr='-ydirection', + xor=_xor_y_flip) + flip_y_any = traits.Bool( + desc='Don\'t flip images along y-axis (default).', + argstr='-yanydirection', + xor=_xor_y_flip) + + _xor_z_flip = ('flip_z_positive', 'flip_z_negative', 'flip_z_any') + + flip_z_positive = traits.Bool( + desc='Flip images to give positive zspace:step value (inf-to-sup).', + argstr='+zdirection', + xor=_xor_z_flip) + flip_z_negative = traits.Bool( + desc='Flip images to give negative zspace:step value (sup-to-inf).', + argstr='-zdirection', + xor=_xor_z_flip) + flip_z_any = traits.Bool( + desc='Don\'t flip images along z-axis (default).', + argstr='-zanydirection', + xor=_xor_z_flip) + + +class ExtractOutputSpec(TraitedSpec): + output_file = File(desc='output file in raw/text format', exists=True) + + +class Extract(StdOutCommandLine): + """Dump a hyperslab of MINC file data. 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import Extract + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + >>> extract = Extract(input_file=minc2Dfile) + >>> extract.run() # doctest: +SKIP + + >>> extract = Extract(input_file=minc2Dfile, start=[3, 10, 5], count=[4, 4, 4]) # extract a 4x4x4 slab at offset [3, 10, 5] + >>> extract.run() # doctest: +SKIP + """ + + input_spec = ExtractInputSpec + output_spec = ExtractOutputSpec + _cmd = 'mincextract' + + +class ToRawInputSpec(StdOutCommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s.raw', + keep_extension=False) + + _xor_write = ('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double') + + write_byte = traits.Bool( + desc='Write out data as bytes.', argstr='-byte', xor=_xor_write) + + write_short = traits.Bool( + desc='Write out data as short integers.', + argstr='-short', + xor=_xor_write) + + write_int = traits.Bool( + desc='Write out data as 32-bit integers.', + argstr='-int', + xor=_xor_write) + + write_long = traits.Bool( + desc='Superseded by write_int.', argstr='-long', xor=_xor_write) + + write_float = traits.Bool( + desc='Write out data as single precision floating-point values.', + argstr='-float', + xor=_xor_write) + + write_double = traits.Bool( + desc='Write out data as double precision floating-point values.', + argstr='-double', + xor=_xor_write) + + _xor_signed = ('write_signed', 'write_unsigned') + + write_signed = traits.Bool( + desc='Write out signed data.', argstr='-signed', xor=_xor_signed) + + write_unsigned = traits.Bool( + desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed) + + write_range = traits.Tuple( + traits.Float, + traits.Float, + argstr='-range %s %s', + desc=('Specify the range of output values.' + 'Default value: 1.79769e+308 1.79769e+308.'), + ) + + _xor_normalize = ( + 'normalize', + 'nonormalize', + ) + + normalize = traits.Bool( + desc='Normalize integer pixel values to file max and min.', + argstr='-normalize', + xor=_xor_normalize) + + nonormalize = traits.Bool( + desc='Turn off pixel normalization.', + argstr='-nonormalize', + xor=_xor_normalize) + + +class ToRawOutputSpec(TraitedSpec): + output_file = File(desc='output file in raw format', exists=True) + + +class ToRaw(StdOutCommandLine): + """Dump a chunk of MINC file data. This program is largely + superceded by mincextract (see Extract). 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import ToRaw + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + >>> toraw = ToRaw(input_file=minc2Dfile) + >>> toraw.run() # doctest: +SKIP + + >>> toraw = ToRaw(input_file=minc2Dfile, write_range=(0, 100)) + >>> toraw.run() # doctest: +SKIP + """ + + input_spec = ToRawInputSpec + output_spec = ToRawOutputSpec + _cmd = 'minctoraw' + + + class ConvertInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file for converting', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_convert_output.mnc') + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + + template = traits.Bool( + desc= + ('Create a template file. The dimensions, variables, and' + 'attributes of the input file are preserved but all data is set to zero.' + ), + argstr='-template', + ) + + compression = traits.Enum( + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + argstr='-compress %s', + desc='Set the compression level, from 0 (disabled) to 9 (maximum).', + ) + + chunk = traits.Range( + low=0, + desc= + 'Set the target block size for chunking (0 default, >1 block size).', + argstr='-chunk %d', + ) + + + class ConvertOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + + class Convert(CommandLine): + """Convert between MINC 1 and MINC 2 formats. + + Examples + -------- + + >>> from nipype.interfaces.minc import Convert + >>> from nipype.interfaces.minc.testdata import minc2Dfile + >>> c = Convert(input_file=minc2Dfile, output_file='/tmp/out.mnc', two=True) # Convert to MINC2 format. + >>> c.run() # doctest: +SKIP + """ + + input_spec = ConvertInputSpec + output_spec = ConvertOutputSpec + _cmd = 'mincconvert' + + + class CopyInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file to copy', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_copy.mnc') + + _xor_pixel = ('pixel_values', 'real_values') + + pixel_values = traits.Bool( + desc='Copy pixel values as is.', + argstr='-pixel_values', + xor=_xor_pixel) + + real_values = traits.Bool( + desc='Copy real pixel intensities (default).', + argstr='-real_values', + xor=_xor_pixel) + + + class CopyOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + + class Copy(CommandLine): + """ + Copy image values from one MINC file to another. Both the input + and output files must exist, and the images in both files must + have an equal number of dimensions and equal dimension lengths. + + NOTE: This program is intended primarily for use with scripts + such as mincedit. It does not follow the typical design rules of + most MINC command-line tools and therefore should be used only + with caution.
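+ + Examples + -------- + + A hypothetical invocation (sketch only; minccopy requires that the + output file already exists, so the run is skipped): + + >>> from nipype.interfaces.minc import Copy + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> file0 = nonempty_minc_data(0) + >>> file1 = nonempty_minc_data(1) + >>> copy = Copy(input_file=file0, output_file=file1) + >>> copy.run() # doctest: +SKIP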
+ """ + + input_spec = CopyInputSpec + output_spec = CopyOutputSpec + _cmd = 'minccopy' + + +class ToEcatInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file to convert', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_to_ecat.v', + keep_extension=False) + + ignore_patient_variable = traits.Bool( + desc='Ignore informations from the minc patient variable.', + argstr='-ignore_patient_variable', + ) + + ignore_study_variable = traits.Bool( + desc='Ignore informations from the minc study variable.', + argstr='-ignore_study_variable', + ) + + ignore_acquisition_variable = traits.Bool( + desc='Ignore informations from the minc acquisition variable.', + argstr='-ignore_acquisition_variable', + ) + + ignore_ecat_acquisition_variable = traits.Bool( + desc='Ignore informations from the minc ecat_acquisition variable.', + argstr='-ignore_ecat_acquisition_variable', + ) + + ignore_ecat_main = traits.Bool( + desc='Ignore informations from the minc ecat-main variable.', + argstr='-ignore_ecat_main', + ) + + ignore_ecat_subheader_variable = traits.Bool( + desc='Ignore informations from the minc ecat-subhdr variable.', + argstr='-ignore_ecat_subheader_variable', + ) + + no_decay_corr_fctr = traits.Bool( + desc='Do not compute the decay correction factors', + argstr='-no_decay_corr_fctr', + ) + + voxels_as_integers = traits.Bool( + desc=('Voxel values are treated as integers, scale and' + 'calibration factors are set to unity'), + argstr='-label', + ) + + +class ToEcatOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class ToEcat(CommandLine): + """Convert a 2D image, a 3D volumes or a 4D dynamic volumes + written in MINC file format to a 2D, 3D or 4D Ecat7 file. 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import ToEcat + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + >>> c = ToEcat(input_file=minc2Dfile) + >>> c.run() # doctest: +SKIP + + >>> c = ToEcat(input_file=minc2Dfile, voxels_as_integers=True) + >>> c.run() # doctest: +SKIP + + """ + + input_spec = ToEcatInputSpec + output_spec = ToEcatOutputSpec + _cmd = 'minctoecat' + + +class DumpInputSpec(StdOutCommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_dump.txt', + keep_extension=False) + + _xor_coords_or_header = ( + 'coordinate_data', + 'header_data', + ) + + coordinate_data = traits.Bool( + desc='Coordinate variable data and header information.', + argstr='-c', + xor=_xor_coords_or_header) + + header_data = traits.Bool( + desc='Header information only, no data.', + argstr='-h', + xor=_xor_coords_or_header) + + _xor_annotations = ( + 'annotations_brief', + 'annotations_full', + ) + + annotations_brief = traits.Enum( + 'c', + 'f', + argstr='-b %s', + desc='Brief annotations for C or Fortran indices in data.', + xor=_xor_annotations) + + annotations_full = traits.Enum( + 'c', + 'f', + argstr='-f %s', + desc='Full annotations for C or Fortran indices in data.', + xor=_xor_annotations) + + variables = InputMultiPath( + traits.Str, + desc='Output data for specified variables only.', + sep=',', + argstr='-v %s') + + line_length = traits.Range( + low=0, + desc='Line length maximum in data section (default 80).', + argstr='-l %d') + + netcdf_name = traits.Str( + desc='Name for netCDF (default derived from file name).', + argstr='-n %s') + + precision = traits.Either( + traits.Int(), + traits.Tuple(traits.Int, traits.Int), + desc='Display floating-point values with less precision', + argstr='%s', + ) # See _format_arg in Dump for actual formatting. + + +class DumpOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Dump(StdOutCommandLine): + """Dump a MINC file. Typically used in conjunction with mincgen (see Gen). 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import Dump + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + >>> dump = Dump(input_file=minc2Dfile) + >>> dump.run() # doctest: +SKIP + + >>> dump = Dump(input_file=minc2Dfile, output_file='/tmp/out.txt', precision=(3, 4)) + >>> dump.run() # doctest: +SKIP + + """ + + input_spec = DumpInputSpec + output_spec = DumpOutputSpec + _cmd = 'mincdump' + + def _format_arg(self, name, spec, value): + if name == 'precision': + if isinstance(value, int): + return '-p %d' % value + elif isinstance(value, tuple) and isinstance( + value[0], int) and isinstance(value[1], int): + return '-p %d,%d' % ( + value[0], + value[1], + ) + else: + raise ValueError('Invalid precision argument: ' + str(value)) + return super(Dump, self)._format_arg(name, spec, value) + + +class AverageInputSpec(CommandLineInputSpec): + _xor_input_files = ('input_files', 'filelist') + + input_files = InputMultiPath( + traits.File(exists=True), + desc='input file(s)', + mandatory=True, + sep=' ', + argstr='%s', + position=-2, + xor=_xor_input_files) + + filelist = traits.File( + desc='Specify the name of a file containing input file names.', + argstr='-filelist %s', + exists=True, + mandatory=True, + xor=_xor_input_files) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_files'], + hash_files=False, + name_template='%s_averaged.mnc') + + two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + _xor_verbose = ( + 'verbose', + 'quiet', + ) + + verbose = traits.Bool( + desc='Print out log messages (default).', + argstr='-verbose', + xor=_xor_verbose) + quiet = traits.Bool( + desc='Do not print out log messages.', + argstr='-quiet', + xor=_xor_verbose) + + debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug') + + _xor_check_dimensions = ( + 'check_dimensions', + 'no_check_dimensions', + ) + + check_dimensions = traits.Bool( + desc='Check that dimension info matches across files (default).', + argstr='-check_dimensions', + xor=_xor_check_dimensions) + no_check_dimensions = traits.Bool( + desc='Do not check dimension info.', + argstr='-nocheck_dimensions', + xor=_xor_check_dimensions) + + _xor_format = ( + 'format_filetype', + 'format_byte', + 'format_short', + 'format_int', + 'format_long', + 'format_float', + 'format_double', + 'format_signed', + 'format_unsigned', + ) + + format_filetype = traits.Bool( + desc='Use data type of first file (default).', + argstr='-filetype', + xor=_xor_format) + format_byte = traits.Bool( + desc='Write out byte data.', argstr='-byte', xor=_xor_format) + format_short = traits.Bool( + desc='Write out short integer data.', argstr='-short', xor=_xor_format) + format_int = traits.Bool( + desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + format_long = traits.Bool( + desc='Superseded by -int.', argstr='-long', xor=_xor_format) + format_float = traits.Bool( + desc='Write out single-precision floating-point data.', + argstr='-float', + xor=_xor_format) + format_double = traits.Bool( + desc='Write out double-precision floating-point data.', + argstr='-double', + xor=_xor_format) + format_signed = traits.Bool( + desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + format_unsigned = traits.Bool( + desc='Write unsigned integer data (default).', + argstr='-unsigned', + xor=_xor_format) 
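+ # For orientation (an illustrative sketch, not part of the original spec): + # with format_short=True and normalize=True set on this spec, the assembled + # command line would look roughly like + # mincaverage -clobber -short -normalize in1.mnc in2.mnc in1_averaged.mnc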
+ + max_buffer_size_in_kb = traits.Range( + low=0, + desc='Specify the maximum size of the internal buffers (in kbytes).', + value=4096, + usedefault=True, + argstr='-max_buffer_size_in_kb %d', + ) + + _xor_normalize = ( + 'normalize', + 'nonormalize', + ) + + normalize = traits.Bool( + desc='Normalize data sets for mean intensity.', + argstr='-normalize', + xor=_xor_normalize) + nonormalize = traits.Bool( + desc='Do not normalize data sets (default).', + argstr='-nonormalize', + xor=_xor_normalize) + + voxel_range = traits.Tuple( + traits.Int, + traits.Int, + argstr='-range %d %d', + desc='Valid range for output data.') + + sdfile = traits.File( + desc='Specify an output sd file (default=none).', argstr='-sdfile %s') + + _xor_copy_header = ('copy_header', 'no_copy_header') + + copy_header = traits.Bool( + desc= + 'Copy all of the header from the first file (default for one file).', + argstr='-copy_header', + xor=_xor_copy_header) + no_copy_header = traits.Bool( + desc= + 'Do not copy all of the header from the first file (default for many files)).', + argstr='-nocopy_header', + xor=_xor_copy_header) + + avgdim = traits.Str( + desc='Specify a dimension along which we wish to average.', + argstr='-avgdim %s') + + binarize = traits.Bool( + desc='Binarize the volume by looking for values in a given range.', + argstr='-binarize') + + binrange = traits.Tuple( + traits.Float, + traits.Float, + argstr='-binrange %s %s', + desc= + 'Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.' + ) + + binvalue = traits.Float( + desc=('Specify a target value (+/- 0.5) for' + 'binarization. Default value: -1.79769e+308'), + argstr='-binvalue %s') + + weights = InputMultiPath( + traits.Str, + desc='Specify weights for averaging (",,...").', + sep=',', + argstr='-weights %s', + ) + + width_weighted = traits.Bool( + desc='Weight by dimension widths when -avgdim is used.', + argstr='-width_weighted', + requires=('avgdim', )) + + +class AverageOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Average(CommandLine): + """Average a number of MINC files. + + Examples + -------- + + >>> from nipype.interfaces.minc import Average + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> files = [nonempty_minc_data(i) for i in range(3)] + >>> average = Average(input_files=files, output_file='/tmp/tmp.mnc') + >>> average.run() # doctest: +SKIP + + """ + + input_spec = AverageInputSpec + output_spec = AverageOutputSpec + _cmd = 'mincaverage' + + +class BlobInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file to blob', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_blob.mnc') + + trace = traits.Bool( + desc='compute the trace (approximate growth and shrinkage) -- FAST', + argstr='-trace') + determinant = traits.Bool( + desc='compute the determinant (exact growth and shrinkage) -- SLOW', + argstr='-determinant') + translation = traits.Bool( + desc='compute translation (structure displacement)', + argstr='-translation') + magnitude = traits.Bool( + desc='compute the magnitude of the displacement vector', + argstr='-magnitude') + + +class BlobOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Blob(CommandLine): + """Calculate blobs from minc deformation grids. 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import Blob + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + >>> blob = Blob(input_file=minc2Dfile, output_file='/tmp/tmp.mnc', trace=True) + >>> blob.run() # doctest: +SKIP + """ + + input_spec = BlobInputSpec + output_spec = BlobOutputSpec + _cmd = 'mincblob' + + +class CalcInputSpec(CommandLineInputSpec): + _xor_input_files = ('input_files', 'filelist') + + input_files = InputMultiPath( + traits.File(exists=True), + desc='input file(s) for calculation', + mandatory=True, + sep=' ', + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_files'], + hash_files=False, + name_template='%s_calc.mnc') + + two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + _xor_verbose = ( + 'verbose', + 'quiet', + ) + + verbose = traits.Bool( + desc='Print out log messages (default).', + argstr='-verbose', + xor=_xor_verbose) + quiet = traits.Bool( + desc='Do not print out log messages.', + argstr='-quiet', + xor=_xor_verbose) + + debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug') + + filelist = traits.File( + desc='Specify the name of a file containing input file names.', + argstr='-filelist %s', + mandatory=True, + xor=_xor_input_files) + + _xor_copy_header = ('copy_header', 'no_copy_header') + + copy_header = traits.Bool( + desc='Copy all of the header from the first file.', + argstr='-copy_header', + xor=_xor_copy_header) + no_copy_header = traits.Bool( + desc='Do not copy all of the header from the first file.', + argstr='-nocopy_header', + xor=_xor_copy_header) + + _xor_format = ( + 'format_filetype', + 'format_byte', + 'format_short', + 'format_int', + 'format_long', + 'format_float', + 'format_double', + 'format_signed', + 'format_unsigned', + ) + + format_filetype = traits.Bool( + desc='Use data type of first file (default).', + argstr='-filetype', + xor=_xor_format) + format_byte = traits.Bool( + desc='Write out byte data.', argstr='-byte', xor=_xor_format) + format_short = traits.Bool( + desc='Write out short integer data.', argstr='-short', xor=_xor_format) + format_int = traits.Bool( + desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + format_long = traits.Bool( + desc='Superseded by -int.', argstr='-long', xor=_xor_format) + format_float = traits.Bool( + desc='Write out single-precision floating-point data.', + argstr='-float', + xor=_xor_format) + format_double = traits.Bool( + desc='Write out double-precision floating-point data.', + argstr='-double', + xor=_xor_format) + format_signed = traits.Bool( + desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + format_unsigned = traits.Bool( + desc='Write unsigned integer data (default).', + argstr='-unsigned', + xor=_xor_format) + + voxel_range = traits.Tuple( + traits.Int, + traits.Int, + argstr='-range %d %d', + desc='Valid range for output data.', + ) + + max_buffer_size_in_kb = traits.Range( + low=0, + desc='Specify the maximum size of the internal buffers (in kbytes).', + argstr='-max_buffer_size_in_kb %d') + + _xor_check_dimensions = ( + 'check_dimensions', + 'no_check_dimensions', + ) + + check_dimensions = traits.Bool( + desc='Check that files have matching dimensions (default).', + argstr='-check_dimensions', + xor=_xor_check_dimensions) + no_check_dimensions = 
traits.Bool( + desc='Do not check that files have matching dimensions.', + argstr='-nocheck_dimensions', + xor=_xor_check_dimensions) + + # FIXME Is it sensible to use ignore_nan and propagate_nan at the same + # time? Document this. + ignore_nan = traits.Bool( + desc='Ignore invalid data (NaN) for accumulations.', + argstr='-ignore_nan') + propagate_nan = traits.Bool( + desc='Invalid data in any file at a voxel produces a NaN (default).', + argstr='-propagate_nan') + + # FIXME Double-check that these are mutually exclusive? + _xor_nan_zero_illegal = ('output_nan', 'output_zero', + 'output_illegal_value') + + output_nan = traits.Bool( + desc='Output NaN when an illegal operation is done (default).', + argstr='-nan', + xor=_xor_nan_zero_illegal) + output_zero = traits.Bool( + desc='Output zero when an illegal operation is done.', + argstr='-zero', + xor=_xor_nan_zero_illegal) + output_illegal = traits.Bool( + desc= + 'Value to write out when an illegal operation is done. Default value: 1.79769e+308', + argstr='-illegal_value', + xor=_xor_nan_zero_illegal) + + _xor_expression = ('expression', 'expfile') + + expression = traits.Str( + desc='Expression to use in calculations.', + argstr='-expression \'%s\'', + xor=_xor_expression, + mandatory=True) + expfile = traits.File( + desc='Name of file containing expression.', + argstr='-expfile %s', + xor=_xor_expression, + mandatory=True) + + # FIXME test this one, the argstr will probably need tweaking, see + # _format_arg. + outfiles = traits.List( + traits.Tuple( + traits.Str, + traits.File, + argstr='-outfile %s %s', + desc= + ('List of (symbol, file) tuples indicating that output should be written' + 'to the specified file, taking values from the symbol which should be' + 'created in the expression (see the EXAMPLES section). If this option' + 'is given, then all non-option arguments are taken as input files.' + 'This option can be used multiple times for multiple output files.' + ))) + + eval_width = traits.Int( + desc='Number of voxels to evaluate simultaneously.', + argstr='-eval_width %s') + + +class CalcOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Calc(CommandLine): + """Compute an expression using MINC files as input. + + Examples + -------- + + >>> from nipype.interfaces.minc import Calc + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> file0 = nonempty_minc_data(0) + >>> file1 = nonempty_minc_data(1) + >>> calc = Calc(input_files=[file0, file1], output_file='/tmp/calc.mnc', expression='A[0] + A[1]') # add files together + >>> calc.run() # doctest: +SKIP + """ + + input_spec = CalcInputSpec + output_spec = CalcOutputSpec + _cmd = 'minccalc' + + +# FIXME mincbbox produces output like +# +# -5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000 +# +# so perhaps this would be better returned as a pair of Python +# lists instead of sending to an output file? + + +class BBoxInputSpec(StdOutCommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file containing bounding box corners', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_bbox.txt', + keep_extension=False) + + threshold = traits.Int( + 0, + desc='VIO_Real value threshold for bounding box. 
Default value: 0.', + argstr='-threshold') + + _xor_one_two = ('one_line', 'two_lines') + + one_line = traits.Bool( + desc='Output on one line (default): start_x y z width_x y z', + argstr='-one_line', + xor=_xor_one_two) + two_lines = traits.Bool( + desc='Output on two lines: start_x y z \n width_x y z', + argstr='-two_lines', + xor=_xor_one_two) + + format_mincresample = traits.Bool( + desc= + 'Output format for mincresample: (-step x y z -start x y z -nelements x y z', + argstr='-mincresample') + format_mincreshape = traits.Bool( + desc='Output format for mincreshape: (-start x,y,z -count dx,dy,dz', + argstr='-mincreshape') + format_minccrop = traits.Bool( + desc='Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2', + argstr='-minccrop') + + # FIXME Not implemented, will clash with our parsing of the output? + # Command-specific options: + # Options for logging progress. Default = -verbose. + # -verbose: Write messages indicating progress + # -quiet: Do not write log messages + # -debug: Print out debug info. + + +class BBoxOutputSpec(TraitedSpec): + output_file = File( + desc='output file containing bounding box corners', exists=True) + + +class BBox(StdOutCommandLine): + """Determine a bounding box of image. + + Examples + -------- + + >>> from nipype.interfaces.minc import BBox + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> file0 = nonempty_minc_data(0) + >>> bbox = BBox(input_file=file0) + >>> bbox.run() # doctest: +SKIP + """ + + input_spec = BBoxInputSpec + output_spec = BBoxOutputSpec + _cmd = 'mincbbox' + + +class BeastInputSpec(CommandLineInputSpec): + """ + + TODO: + + Command-specific options: + -verbose: Enable verbose output. + -positive: Specify mask of positive segmentation (inside mask) instead of the default mask. + -output_selection: Specify file to output selected files. + -count: Specify file to output the patch count. + -mask: Specify a segmentation mask instead of the the default mask. + -no_mask: Do not apply a segmentation mask. Perform the segmentation over the entire image. + -no_positive: Do not apply a positive mask. + Generic options for all commands: + -help: Print summary of command-line options and abort + -version: Print version number of program and exit + Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov, + Pierrick Coupe, Jose V. Manjon + + This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'. + This is free software, and you are welcome to redistribute it under certain + conditions; type 'cat COPYING' for details. + + Usage: mincbeast [options] + mincbeast -help + + Get this example to work? + + https://github.com/BIC-MNI/BEaST/blob/master/README.library + + + 2.3 Source the minc-toolkit (if installed): + $ source /opt/minc/minc-toolkit-config.sh + + 2.4 Generate library by running: + $ beast_prepareADNIlib -flip + Example: + $ sudo beast_prepareADNIlib -flip Downloads/ADNI /opt/minc/share/beast-library-1.1 + + 3. 
Test the setup + 3.1 Normalize your data + $ beast_normalize -modeldir /opt/minc/share/icbm152_model_09c input.mnc normal.mnc normal.xfm + 3.2 Run BEaST + $ mincbeast /opt/minc/share/beast-library-1.1 normal.mnc brainmask.mnc -conf /opt/minc/share/beast-library-1.1/default.2mm.conf -same_res + """ + + probability_map = traits.Bool( + desc='Output the probability map instead of crisp mask.', + argstr='-probability') + flip_images = traits.Bool( + desc= + 'Flip images around the mid-sagittal plane to increase patch count.', + argstr='-flip') + load_moments = traits.Bool( + desc=('Do not calculate moments instead use precalculated' + 'library moments. (for optimization purposes)'), + argstr='-load_moments') + fill_holes = traits.Bool( + desc='Fill holes in the binary output.', argstr='-fill') + median_filter = traits.Bool( + desc='Apply a median filter on the probability map.', argstr='-median') + nlm_filter = traits.Bool( + desc='Apply an NLM filter on the probability map (experimental).', + argstr='-nlm_filter') + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + configuration_file = traits.File( + desc='Specify configuration file.', argstr='-configuration %s') + + voxel_size = traits.Int( + 4, usedefault=True, + desc=('Specify voxel size for calculations (4, 2, or 1).' + 'Default value: 4. Assumes no multiscale. Use configuration' + 'file for multiscale.'), + argstr='-voxel_size %s') + + abspath = traits.Bool( + desc= + 'File paths in the library are absolute (default is relative to library root).', + argstr='-abspath', + usedefault=True, + default_value=True) + + patch_size = traits.Int( + 1, usedefault=True, + desc='Specify patch size for single scale approach. Default value: 1.', + argstr='-patch_size %s') + + search_area = traits.Int( + 2, usedefault=True, + desc= + 'Specify size of search area for single scale approach. Default value: 2.', + argstr='-search_area %s') + + confidence_level_alpha = traits.Float( + 0.5, usedefault=True, + desc='Specify confidence level Alpha. Default value: 0.5', + argstr='-alpha %s') + smoothness_factor_beta = traits.Float( + 0.5, usedefault=True, + desc='Specify smoothness factor Beta. Default value: 0.25', + argstr='-beta %s') + threshold_patch_selection = traits.Float( + 0.95, usedefault=True, + desc='Specify threshold for patch selection. Default value: 0.95', + argstr='-threshold %s') + number_selected_images = traits.Int( + 20, usedefault=True, + desc='Specify number of selected images. Default value: 20', + argstr='-selection_num %s') + + same_resolution = traits.Bool( + desc='Output final mask with the same resolution as input file.', + argstr='-same_resolution') + + library_dir = traits.Directory( + desc='library directory', position=-3, argstr='%s', mandatory=True) + input_file = traits.File( + desc='input file', position=-2, argstr='%s', mandatory=True) + output_file = traits.File( + desc='output file', + position=-1, + argstr='%s', + name_source=['input_file'], + hash_files=False, + name_template='%s_beast_mask.mnc') + + +class BeastOutputSpec(TraitedSpec): + output_file = File(desc='output mask file', exists=True) + + +class Beast(CommandLine): + """Extract brain image using BEaST (Brain Extraction using + non-local Segmentation Technique). 
+ + Examples + -------- + + >>> from nipype.interfaces.minc import Beast + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> file0 = nonempty_minc_data(0) + >>> beast = Beast(input_file=file0) + >>> beast .run() # doctest: +SKIP + """ + + input_spec = BeastInputSpec + output_spec = BeastOutputSpec + _cmd = 'mincbeast' + + +class PikInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + _xor_image_type = ('jpg', 'png') + + jpg = traits.Bool(desc='Output a jpg file.', xor=_xor_image_type) + png = traits.Bool(desc='Output a png file (default).', xor=_xor_image_type) + + output_file = File( + desc='output file', + argstr='%s', + genfile=True, + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s.png', + keep_extension=False) + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + # FIXME not implemented: --verbose + # --fake + # --lookup ==> arguments to pass to minclookup + + scale = traits.Int( + 2, usedefault=True, + desc=('Scaling factor for resulting image. By default images are' + 'output at twice their original resolution.'), + argstr='--scale %s') + + width = traits.Int( + desc= + 'Autoscale the resulting image to have a fixed image width (in pixels).', + argstr='--width %s') + + depth = traits.Enum( + 8, + 16, + desc='Bitdepth for resulting image 8 or 16 (MSB machines only!)', + argstr='--depth %s') + + _xor_title = ('title_string', 'title_with_filename') + + title = traits.Either( + traits.Bool(desc='Use input filename as title in resulting image.'), + traits.Str(desc='Add a title to the resulting image.'), + argstr='%s') # see _format_arg for actual arg string + + title_size = traits.Int( + desc='Font point size for the title.', + argstr='--title_size %s', + requires=['title']) + + annotated_bar = traits.Bool( + desc= + 'create an annotated bar to match the image (use height of the output image)', + argstr='--anot_bar') + + # FIXME tuple of floats? Not voxel values? Man page doesn't specify. + minc_range = traits.Tuple( + traits.Float, + traits.Float, + desc='Valid range of values for MINC file.', + argstr='--range %s %s') + + _xor_image_range = ('image_range', 'auto_range') + + image_range = traits.Tuple( + traits.Float, + traits.Float, + desc='Range of image values to use for pixel intensity.', + argstr='--image_range %s %s', + xor=_xor_image_range) + + auto_range = traits.Bool( + desc= + 'Automatically determine image range using a 5 and 95% PcT. (histogram)', + argstr='--auto_range', + xor=_xor_image_range) + + start = traits.Int( + desc='Slice number to get. (note this is in voxel co-ordinates).', + argstr='--slice %s') # FIXME Int is correct? + + _xor_slice = ('slice_z', 'slice_y', 'slice_x') + + slice_z = traits.Bool( + desc='Get an axial/transverse (z) slice.', argstr='-z', xor=_xor_slice) + slice_y = traits.Bool( + desc='Get a coronal (y) slice.', argstr='-y', xor=_xor_slice) + slice_x = traits.Bool( + desc='Get a sagittal (x) slice.', argstr='-x', + xor=_xor_slice) # FIXME typo in man page? sagital? 
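+ # An illustrative sketch (not part of the original spec): combining the + # traits above, Pik(input_file=..., slice_z=True, start=30) would + # contribute '-z --slice 30' to the mincpik command line.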
+ + triplanar = traits.Bool( + desc='Create a triplanar view of the input file.', + argstr='--triplanar') + tile_size = traits.Int( + desc='Pixel size for each image in a triplanar.', + argstr='--tilesize %s') + + _xor_sagittal_offset = ('sagittal_offset', 'sagittal_offset_perc') + + sagittal_offset = traits.Int( + desc='Offset the sagittal slice from the centre.', + argstr='--sagittal_offset %s') + sagittal_offset_perc = traits.Range( + low=0, + high=100, + desc='Offset the sagittal slice by a percentage from the centre.', + argstr='--sagittal_offset_perc %d', + ) + + _xor_vertical_horizontal = ('vertical_triplanar_view', + 'horizontal_triplanar_view') + + vertical_triplanar_view = traits.Bool( + desc='Create a vertical triplanar view (Default).', + argstr='--vertical', + xor=_xor_vertical_horizontal) + horizontal_triplanar_view = traits.Bool( + desc='Create a horizontal triplanar view.', + argstr='--horizontal', + xor=_xor_vertical_horizontal) + + lookup = traits.Str( + desc='Arguments to pass to minclookup', argstr='--lookup %s') + + +class PikOutputSpec(TraitedSpec): + output_file = File(desc='output image', exists=True) + + +class Pik(CommandLine): + """Generate images from minc files. + + Mincpik uses Imagemagick to generate images + from Minc files. + + Examples + -------- + + >>> from nipype.interfaces.minc import Pik + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> file0 = nonempty_minc_data(0) + >>> pik = Pik(input_file=file0, title='foo') + >>> pik .run() # doctest: +SKIP + + """ + + input_spec = PikInputSpec + output_spec = PikOutputSpec + _cmd = 'mincpik' + + def _format_arg(self, name, spec, value): + if name == 'title': + if isinstance(value, bool) and value: + return '--title' + elif isinstance(value, str): + return '--title --title_text %s' % (value, ) + else: + raise ValueError( + 'Unknown value for "title" argument: ' + str(value)) + return super(Pik, self)._format_arg(name, spec, value) + + +class BlurInputSpec(CommandLineInputSpec): + input_file = File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file_base = File(desc='output file base', argstr='%s', position=-1) + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + _xor_kernel = ('gaussian', 'rect') + + gaussian = traits.Bool( + desc='Use a gaussian smoothing kernel (default).', + argstr='-gaussian', + xor=_xor_kernel) + rect = traits.Bool( + desc='Use a rect (box) smoothing kernel.', + argstr='-rect', + xor=_xor_kernel) + + gradient = traits.Bool( + desc='Create the gradient magnitude volume as well.', + argstr='-gradient') + partial = traits.Bool( + desc= + 'Create the partial derivative and gradient magnitude volumes as well.', + argstr='-partial') + + no_apodize = traits.Bool( + desc='Do not apodize the data before blurring.', argstr='-no_apodize') + + _xor_main_options = ('fwhm', 'fwhm3d', 'standard_dev') + + fwhm = traits.Float( + 0, + desc='Full-width-half-maximum of gaussian kernel. Default value: 0.', + argstr='-fwhm %s', + xor=_xor_main_options, + mandatory=True) + + standard_dev = traits.Float( + 0, + desc='Standard deviation of gaussian kernel. Default value: 0.', + argstr='-standarddev %s', + xor=_xor_main_options, + mandatory=True) + + fwhm3d = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='-3dfwhm %s %s %s', + desc=('Full-width-half-maximum of gaussian kernel.' 
+ 'Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308.'), + xor=_xor_main_options, + mandatory=True) + + dimensions = traits.Enum( + 3, + 1, + 2, + desc= + 'Number of dimensions to blur (either 1,2 or 3). Default value: 3.', + argstr='-dimensions %s') + + +class BlurOutputSpec(TraitedSpec): + output_file = File(desc='Blurred output file.', exists=True) + + gradient_dxyz = File(desc='Gradient dxyz.') + partial_dx = File(desc='Partial gradient dx.') + partial_dy = File(desc='Partial gradient dy.') + partial_dz = File(desc='Partial gradient dz.') + partial_dxyz = File(desc='Partial gradient dxyz.') + + +class Blur(StdOutCommandLine): + """ + Convolve an input volume with a Gaussian blurring kernel of + user-defined width. Optionally, the first partial derivatives + and the gradient magnitude volume can be calculated. + + Examples + -------- + + >>> from nipype.interfaces.minc import Blur + >>> from nipype.interfaces.minc.testdata import minc3Dfile + + (1) Blur an input volume with a 6mm fwhm isotropic Gaussian + blurring kernel: + + >>> blur = Blur(input_file=minc3Dfile, fwhm=6, output_file_base='/tmp/out_6') + >>> blur.run() # doctest: +SKIP + + mincblur will create /tmp/out_6_blur.mnc. + + (2) Calculate the blurred and gradient magnitude data: + + >>> blur = Blur(input_file=minc3Dfile, fwhm=6, gradient=True, output_file_base='/tmp/out_6') + >>> blur.run() # doctest: +SKIP + + will create /tmp/out_6_blur.mnc and /tmp/out_6_dxyz.mnc. + + (3) Calculate the blurred data, the partial derivative volumes + and the gradient magnitude for the same data: + + >>> blur = Blur(input_file=minc3Dfile, fwhm=6, partial=True, output_file_base='/tmp/out_6') + >>> blur.run() # doctest: +SKIP + + will create /tmp/out_6_blur.mnc, /tmp/out_6_dx.mnc, + /tmp/out_6_dy.mnc, /tmp/out_6_dz.mnc and /tmp/out_6_dxyz.mnc. + """ + + input_spec = BlurInputSpec + output_spec = BlurOutputSpec + _cmd = 'mincblur' + + def _gen_output_base(self): + output_file_base = self.inputs.output_file_base + + if isdefined(output_file_base): + return output_file_base + else: + base_file_name = os.path.split( + self.inputs.input_file)[1] # e.g. 'foo.mnc' + base_file_name_no_ext = os.path.splitext(base_file_name)[ + 0] # e.g. 'foo' + output_base = os.path.join( + os.getcwd(), base_file_name_no_ext + + '_bluroutput') # e.g. '/tmp/blah/foo_bluroutput' + # return os.path.splitext(self.inputs.input_file)[0] + + # '_bluroutput' + return output_base + + def _list_outputs(self): + outputs = self.output_spec().get() + + output_file_base = self._gen_output_base() + + outputs['output_file'] = output_file_base + '_blur.mnc' + + if isdefined(self.inputs.gradient): + outputs['gradient_dxyz'] = output_file_base + '_dxyz.mnc' + + if isdefined(self.inputs.partial): + outputs['partial_dx'] = output_file_base + '_dx.mnc' + outputs['partial_dy'] = output_file_base + '_dy.mnc' + outputs['partial_dz'] = output_file_base + '_dz.mnc' + outputs['partial_dxyz'] = output_file_base + '_dxyz.mnc' + + return outputs + + @property + def cmdline(self): + output_file_base = self.inputs.output_file_base + orig_cmdline = super(Blur, self).cmdline + + if isdefined(output_file_base): + return orig_cmdline + else: + # FIXME this seems like a bit of a hack. Can we force output_file + # to show up in cmdline by default, even if it isn't specified in + # the instantiation of Pik? 
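+            # e.g. with input_file='/tmp/foo.mnc' and no output_file_base,
+            # the generated base '<cwd>/foo_bluroutput' is appended to the
+            # command line, and mincblur then writes foo_bluroutput_blur.mnc
+            # and friends (cf. _list_outputs above).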
+ return '%s %s' % (orig_cmdline, self._gen_output_base()) + + +class MathInputSpec(CommandLineInputSpec): + _xor_input_files = ('input_files', 'filelist') + + input_files = InputMultiPath( + traits.File(exists=True), + desc='input file(s) for calculation', + mandatory=True, + sep=' ', + argstr='%s', + position=-2, + xor=_xor_input_files) + + output_file = File( + desc='output file', + argstr='%s', + genfile=True, + position=-1, + name_source=['input_files'], + hash_files=False, + name_template='%s_mincmath.mnc') + + filelist = traits.File( + desc='Specify the name of a file containing input file names.', + argstr='-filelist %s', + exists=True, + mandatory=True, + xor=_xor_input_files) + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + + _xor_copy_header = ('copy_header', 'no_copy_header') + + copy_header = traits.Bool( + desc= + 'Copy all of the header from the first file (default for one file).', + argstr='-copy_header', + xor=_xor_copy_header) + no_copy_header = traits.Bool( + desc= + 'Do not copy all of the header from the first file (default for many files)).', + argstr='-nocopy_header', + xor=_xor_copy_header) + + _xor_format = ( + 'format_filetype', + 'format_byte', + 'format_short', + 'format_int', + 'format_long', + 'format_float', + 'format_double', + 'format_signed', + 'format_unsigned', + ) + + format_filetype = traits.Bool( + desc='Use data type of first file (default).', + argstr='-filetype', + xor=_xor_format) + format_byte = traits.Bool( + desc='Write out byte data.', argstr='-byte', xor=_xor_format) + format_short = traits.Bool( + desc='Write out short integer data.', argstr='-short', xor=_xor_format) + format_int = traits.Bool( + desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + format_long = traits.Bool( + desc='Superseded by -int.', argstr='-long', xor=_xor_format) + format_float = traits.Bool( + desc='Write out single-precision floating-point data.', + argstr='-float', + xor=_xor_format) + format_double = traits.Bool( + desc='Write out double-precision floating-point data.', + argstr='-double', + xor=_xor_format) + format_signed = traits.Bool( + desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + format_unsigned = traits.Bool( + desc='Write unsigned integer data (default).', + argstr='-unsigned', + xor=_xor_format) + + voxel_range = traits.Tuple( + traits.Int, + traits.Int, + argstr='-range %d %d', + desc='Valid range for output data.') + + max_buffer_size_in_kb = traits.Range( + low=0, + desc='Specify the maximum size of the internal buffers (in kbytes).', + value=4096, + usedefault=True, + argstr='-max_buffer_size_in_kb %d', + ) + + _xor_check_dimensions = ( + 'check_dimensions', + 'no_check_dimensions', + ) + + check_dimensions = traits.Bool( + desc='Check that dimension info matches across files (default).', + argstr='-check_dimensions', + xor=_xor_check_dimensions) + no_check_dimensions = traits.Bool( + desc='Do not check dimension info.', + argstr='-nocheck_dimensions', + xor=_xor_check_dimensions) + + dimension = traits.Str( + desc= + 'Specify a dimension along which we wish to perform a calculation.', + argstr='-dimension %s') + + # FIXME Is it sensible to use ignore_nan and propagate_nan at the same + # time? Document this. 
+    ignore_nan = traits.Bool(
+        desc='Ignore invalid data (NaN) for accumulations.',
+        argstr='-ignore_nan')
+    propagate_nan = traits.Bool(
+        desc='Invalid data in any file at a voxel produces a NaN (default).',
+        argstr='-propagate_nan')
+
+    # FIXME Double-check that these are mutually exclusive?
+    _xor_nan_zero_illegal = ('output_nan', 'output_zero', 'output_illegal')
+
+    output_nan = traits.Bool(
+        desc='Output NaN when an illegal operation is done (default).',
+        argstr='-nan',
+        xor=_xor_nan_zero_illegal)
+    output_zero = traits.Bool(
+        desc='Output zero when an illegal operation is done.',
+        argstr='-zero',
+        xor=_xor_nan_zero_illegal)
+    output_illegal = traits.Bool(
+        desc=('Value to write out when an illegal operation '
+              'is done. Default value: 1.79769e+308'),
+        argstr='-illegal_value',
+        xor=_xor_nan_zero_illegal)
+
+    # FIXME A whole bunch of the parameters will be mutually exclusive,
+    # e.g. surely can't do sqrt and abs at the same time?
+    # Or does mincmath do one and then the next?
+
+    ##########################################################################
+    # Traits that expect a bool (compare two volumes) or constant (manipulate one volume) #
+    ##########################################################################
+
+    bool_or_const_traits = [
+        'test_gt', 'test_lt', 'test_eq', 'test_ne', 'test_ge', 'test_le',
+        'calc_add', 'calc_sub', 'calc_mul', 'calc_div'
+    ]
+
+    test_gt = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for vol1 > vol2 or vol1 > constant.',
+        argstr='-gt')
+    test_lt = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for vol1 < vol2 or vol1 < constant.',
+        argstr='-lt')
+    test_eq = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for integer vol1 == vol2 or vol1 == constant.',
+        argstr='-eq')
+    test_ne = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for integer vol1 != vol2 or vol1 != const.',
+        argstr='-ne')
+    test_ge = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for vol1 >= vol2 or vol1 >= const.',
+        argstr='-ge')
+    test_le = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Test for vol1 <= vol2 or vol1 <= const.',
+        argstr='-le')
+
+    calc_add = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Add N volumes or volume + constant.',
+        argstr='-add')
+    calc_sub = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Subtract 2 volumes or volume - constant.',
+        argstr='-sub')
+    calc_mul = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Multiply N volumes or volume * constant.',
+        argstr='-mult')
+    calc_div = traits.Either(
+        traits.Bool(),
+        traits.Float(),
+        desc='Divide 2 volumes or volume / constant.',
+        argstr='-div')
+
+    ######################################
+    # Traits that expect a single volume #
+    ######################################
+
+    single_volume_traits = [
+        'invert', 'calc_not', 'sqrt', 'square', 'abs', 'exp', 'log', 'scale',
+        'clamp', 'segment', 'nsegment', 'isnan', 'nisnan'
+    ]  # FIXME enforce this in _parse_inputs and check for other members
+
+    invert = traits.Either(
+        traits.Float(), desc='Calculate 1/c.', argstr='-invert -const %s')
+
+    calc_not = traits.Bool(desc='Calculate !vol1.', argstr='-not')
+
+    sqrt = traits.Bool(desc='Take square root of a volume.', argstr='-sqrt')
+    square = traits.Bool(desc='Take square of a volume.', argstr='-square')
+    abs = traits.Bool(desc='Take absolute value of a volume.', argstr='-abs')
+
+    exp = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        argstr='-exp -const2 %s %s',
+
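+        # e.g. exp=(0.5, 2.0) renders as '-exp -const2 0.5 2.0'.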
desc='Calculate c2*exp(c1*x). Both constants must be specified.') + + log = traits.Tuple( + traits.Float, + traits.Float, + argstr='-log -const2 %s %s', + desc='Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.') + + scale = traits.Tuple( + traits.Float, + traits.Float, + argstr='-scale -const2 %s %s', + desc='Scale a volume: volume * c1 + c2.') + + clamp = traits.Tuple( + traits.Float, + traits.Float, + argstr='-clamp -const2 %s %s', + desc='Clamp a volume to lie between two values.') + + segment = traits.Tuple( + traits.Float, + traits.Float, + argstr='-segment -const2 %s %s', + desc= + 'Segment a volume using range of -const2: within range = 1, outside range = 0.' + ) + + nsegment = traits.Tuple( + traits.Float, + traits.Float, + argstr='-nsegment -const2 %s %s', + desc='Opposite of -segment: within range = 0, outside range = 1.') + + isnan = traits.Bool(desc='Test for NaN values in vol1.', argstr='-isnan') + + nisnan = traits.Bool(desc='Negation of -isnan.', argstr='-nisnan') + + ############################################ + # Traits that expect precisely two volumes # + ############################################ + + two_volume_traits = ['percentdiff'] + + percentdiff = traits.Float( + desc= + 'Percent difference between 2 volumes, thresholded (const def=0.0).', + argstr='-percentdiff') + + ##################################### + # Traits that expect N >= 1 volumes # + ##################################### + + n_volume_traits = [ + 'count_valid', 'maximum', 'minimum', 'calc_add', 'calc_or' + ] + + count_valid = traits.Bool( + desc='Count the number of valid values in N volumes.', + argstr='-count_valid') + + maximum = traits.Bool(desc='Find maximum of N volumes.', argstr='-maximum') + minimum = traits.Bool(desc='Find minimum of N volumes.', argstr='-minimum') + + calc_and = traits.Bool( + desc='Calculate vol1 && vol2 (&& ...).', argstr='-and') + calc_or = traits.Bool( + desc='Calculate vol1 || vol2 (|| ...).', argstr='-or') + + +class MathOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Math(StdOutCommandLine): + """ + Various mathematical operations supplied by mincmath. + + Examples + -------- + + >>> from nipype.interfaces.minc import Math + >>> from nipype.interfaces.minc.testdata import minc2Dfile + + Scale: volume*3.0 + 2: + + >>> scale = Math(input_files=[minc2Dfile], scale=(3.0, 2)) + >>> scale.run() # doctest: +SKIP + + Test if >= 1.5: + + >>> gt = Math(input_files=[minc2Dfile], test_gt=1.5) + >>> gt.run() # doctest: +SKIP + """ + + input_spec = MathInputSpec + output_spec = MathOutputSpec + _cmd = 'mincmath' + + def _format_arg(self, name, spec, value): + assert value is not None + + if name in self.input_spec.bool_or_const_traits: + # t is unused, what was I trying to do with it? + # t = self.inputs.__getattribute__(name) + + if isinstance(value, bool) and value: + return spec.argstr + elif isinstance(value, bool) and not value: + raise ValueError('Does not make sense to specify %s=False' % + (name, )) + elif isinstance(value, float): + return '%s -const %s' % ( + spec.argstr, + value, + ) + else: + raise ValueError('Invalid %s argument: %s' % ( + name, + value, + )) + + return super(Math, self)._format_arg(name, spec, value) + + def _parse_inputs(self): + """A number of the command line options expect precisely one or two files. 
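+
+        For example, test_gt=True compares two input volumes, so exactly two
+        input_files are required; test_gt=2.0 compares one volume against the
+        constant 2.0, so exactly one input file is required.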
+ """ + + nr_input_files = len(self.inputs.input_files) + + for n in self.input_spec.bool_or_const_traits: + t = self.inputs.__getattribute__(n) + + if isdefined(t): + if isinstance(t, bool): + if nr_input_files != 2: + raise ValueError( + 'Due to the %s option we expected 2 files but input_files is of length %d' + % ( + n, + nr_input_files, + )) + elif isinstance(t, float): + if nr_input_files != 1: + raise ValueError( + 'Due to the %s option we expected 1 file but input_files is of length %d' + % ( + n, + nr_input_files, + )) + else: + raise ValueError( + 'Argument should be a bool or const, but got: %s' % t) + + for n in self.input_spec.single_volume_traits: + t = self.inputs.__getattribute__(n) + + if isdefined(t): + if nr_input_files != 1: + raise ValueError( + 'Due to the %s option we expected 1 file but input_files is of length %d' + % ( + n, + nr_input_files, + )) + + for n in self.input_spec.two_volume_traits: + t = self.inputs.__getattribute__(n) + + if isdefined(t): + if nr_input_files != 2: + raise ValueError( + 'Due to the %s option we expected 2 files but input_files is of length %d' + % ( + n, + nr_input_files, + )) + + for n in self.input_spec.n_volume_traits: + t = self.inputs.__getattribute__(n) + + if isdefined(t): + if not nr_input_files >= 1: + raise ValueError( + 'Due to the %s option we expected at least one file but input_files is of length %d' + % ( + n, + nr_input_files, + )) + + return super(Math, self)._parse_inputs() + + +class ResampleInputSpec(CommandLineInputSpec): + """ + not implemented: + -size: synonym for -nelements) + -xsize: synonym for -xnelements + -ysize: synonym for -ynelements + -zsize: synonym for -ynelements + + """ + + input_file = File( + desc='input file for resampling', + exists=True, + mandatory=True, + argstr='%s', + position=-2, + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_resample.mnc') + + # This is a dummy input. + input_grid_files = InputMultiPath( + traits.File, + desc='input grid file(s)', + ) + + two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2') + + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + _xor_interpolation = ('trilinear_interpolation', 'tricubic_interpolation', + 'nearest_neighbour_interpolation', + 'sinc_interpolation') + + trilinear_interpolation = traits.Bool( + desc='Do trilinear interpolation.', + argstr='-trilinear', + xor=_xor_interpolation) + tricubic_interpolation = traits.Bool( + desc='Do tricubic interpolation.', + argstr='-tricubic', + xor=_xor_interpolation) + + nearest_neighbour_interpolation = traits.Bool( + desc='Do nearest neighbour interpolation.', + argstr='-nearest_neighbour', + xor=_xor_interpolation) + + sinc_interpolation = traits.Bool( + desc='Do windowed sinc interpolation.', + argstr='-sinc', + xor=_xor_interpolation) + + half_width_sinc_window = traits.Enum( + 5, + 1, + 2, + 3, + 4, + 6, + 7, + 8, + 9, + 10, + desc='Set half-width of sinc window (1-10). 
Default value: 5.', + argstr='-width %s', + requires=['sinc_interpolation']) + + _xor_sinc_window_type = ('sinc_window_hanning', 'sinc_window_hamming') + + sinc_window_hanning = traits.Bool( + desc='Set sinc window type to Hanning.', + argstr='-hanning', + xor=_xor_sinc_window_type, + requires=['sinc_interpolation']) + + sinc_window_hamming = traits.Bool( + desc='Set sinc window type to Hamming.', + argstr='-hamming', + xor=_xor_sinc_window_type, + requires=['sinc_interpolation']) + + transformation = traits.File( + desc='File giving world transformation. (Default = identity).', + exists=True, + argstr='-transformation %s') + + invert_transformation = traits.Bool( + desc='Invert the transformation before using it.', + argstr='-invert_transformation') + + _xor_input_sampling = ('vio_transform', 'no_input_sampling') + + vio_transform = traits.Bool( + desc='VIO_Transform the input sampling with the transform (default).', + argstr='-tfm_input_sampling', + xor=_xor_input_sampling) + + no_input_sampling = traits.Bool( + desc='Use the input sampling without transforming (old behaviour).', + argstr='-use_input_sampling', + xor=_xor_input_sampling) + + like = traits.File( + desc='Specifies a model file for the resampling.', + argstr='-like %s', + exists=True) + + _xor_format = ( + 'format_byte', + 'format_short', + 'format_int', + 'format_long', + 'format_float', + 'format_double', + 'format_signed', + 'format_unsigned', + ) + + format_byte = traits.Bool( + desc='Write out byte data.', argstr='-byte', xor=_xor_format) + format_short = traits.Bool( + desc='Write out short integer data.', argstr='-short', xor=_xor_format) + format_int = traits.Bool( + desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format) + format_long = traits.Bool( + desc='Superseded by -int.', argstr='-long', xor=_xor_format) + format_float = traits.Bool( + desc='Write out single-precision floating-point data.', + argstr='-float', + xor=_xor_format) + format_double = traits.Bool( + desc='Write out double-precision floating-point data.', + argstr='-double', + xor=_xor_format) + format_signed = traits.Bool( + desc='Write signed integer data.', argstr='-signed', xor=_xor_format) + format_unsigned = traits.Bool( + desc='Write unsigned integer data (default).', + argstr='-unsigned', + xor=_xor_format) + + output_range = traits.Tuple( + traits.Float, + traits.Float, + argstr='-range %s %s', + desc= + 'Valid range for output data. Default value: -1.79769e+308 -1.79769e+308.' + ) + + _xor_slices = ('transverse', 'sagittal', 'coronal') + + transverse_slices = traits.Bool( + desc='Write out transverse slices.', + argstr='-transverse', + xor=_xor_slices) + + sagittal_slices = traits.Bool( + desc='Write out sagittal slices', argstr='-sagittal', xor=_xor_slices) + + coronal_slices = traits.Bool( + desc='Write out coronal slices', argstr='-coronal', xor=_xor_slices) + + _xor_fill = ('nofill', 'fill') + + no_fill = traits.Bool( + desc='Use value zero for points outside of input volume.', + argstr='-nofill', + xor=_xor_fill) + fill = traits.Bool( + desc='Use a fill value for points outside of input volume.', + argstr='-fill', + xor=_xor_fill) + + fill_value = traits.Float( + desc=('Specify a fill value for points outside of input volume.' 
+              'Default value: 1.79769e+308.'),
+        argstr='-fillvalue %s',
+        requires=['fill'])
+
+    _xor_scale = ('keep_real_range', 'nokeep_real_range')
+
+    keep_real_range = traits.Bool(
+        desc='Keep the real scale of the input volume.',
+        argstr='-keep_real_range',
+        xor=_xor_scale)
+
+    nokeep_real_range = traits.Bool(
+        desc='Do not keep the real scale of the data (default).',
+        argstr='-nokeep_real_range',
+        xor=_xor_scale)
+
+    _xor_spacetype = ('spacetype', 'talairach')
+
+    spacetype = traits.Str(
+        desc='Set the spacetype attribute to a specified string.',
+        argstr='-spacetype %s')
+    talairach = traits.Bool(
+        desc='Output is in Talairach space.', argstr='-talairach')
+
+    origin = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc=('Origin of first pixel in 3D space. '
+              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
+        argstr='-origin %s %s %s')
+
+    standard_sampling = traits.Bool(
+        desc='Set the sampling to standard values (step, start and dircos).',
+        argstr='-standard_sampling')  # FIXME Bool?
+    units = traits.Str(
+        desc='Specify the units of the output sampling.',
+        argstr='-units %s')  # FIXME String?
+
+    # Elements along each dimension.
+    # FIXME Ints? Ranges?
+    # FIXME Check that this xor behaves correctly.
+    _xor_nelements = ('nelements', 'nelements_x_y_or_z')
+
+    # nr elements along each dimension
+    nelements = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        traits.Int,
+        desc='Number of elements along each dimension (X, Y, Z).',
+        argstr='-nelements %s %s %s',
+        xor=_xor_nelements)
+
+    # FIXME Is mincresample happy if we only specify one of these, or do we
+    # need the requires=...?
+    xnelements = traits.Int(
+        desc='Number of elements along the X dimension.',
+        argstr='-xnelements %s',
+        requires=('ynelements', 'znelements'),
+        xor=_xor_nelements)
+
+    ynelements = traits.Int(
+        desc='Number of elements along the Y dimension.',
+        argstr='-ynelements %s',
+        requires=('xnelements', 'znelements'),
+        xor=_xor_nelements)
+
+    znelements = traits.Int(
+        desc='Number of elements along the Z dimension.',
+        argstr='-znelements %s',
+        requires=('xnelements', 'ynelements'),
+        xor=_xor_nelements)
+
+    # step size along each dimension
+    _xor_step = ('step', 'step_x_y_or_z')
+
+    step = traits.Tuple(
+        traits.Int,
+        traits.Int,
+        traits.Int,
+        desc='Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).',
+        argstr='-step %s %s %s',
+        xor=_xor_step)
+
+    # FIXME Use the requires=...?
+    xstep = traits.Int(
+        desc='Step size along the X dimension. Default value: 0.',
+        argstr='-xstep %s',
+        requires=('ystep', 'zstep'),
+        xor=_xor_step)
+
+    ystep = traits.Int(
+        desc='Step size along the Y dimension. Default value: 0.',
+        argstr='-ystep %s',
+        requires=('xstep', 'zstep'),
+        xor=_xor_step)
+
+    zstep = traits.Int(
+        desc='Step size along the Z dimension. Default value: 0.',
+        argstr='-zstep %s',
+        requires=('xstep', 'ystep'),
+        xor=_xor_step)
+
+    # start point along each dimension
+    _xor_start = ('start', 'start_x_y_or_z')
+
+    start = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc=('Start point along each dimension (X, Y, Z). '
+              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
+        argstr='-start %s %s %s',
+        xor=_xor_start)
+
+    # FIXME Use the requires=...?
+    xstart = traits.Float(
+        desc='Start point along the X dimension. Default value: 1.79769e+308.',
+        argstr='-xstart %s',
+        requires=('ystart', 'zstart'),
+        xor=_xor_start)
+
+    ystart = traits.Float(
+        desc='Start point along the Y dimension. Default value: 1.79769e+308.',
+        argstr='-ystart %s',
+        requires=('xstart', 'zstart'),
+        xor=_xor_start)
+
+    zstart = traits.Float(
+        desc='Start point along the Z dimension. Default value: 1.79769e+308.',
+        argstr='-zstart %s',
+        requires=('xstart', 'ystart'),
+        xor=_xor_start)
+
+    # dircos along each dimension
+    _xor_dircos = ('dircos', 'dircos_x_y_or_z')
+
+    dircos = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc=(
+            'Direction cosines along each dimension (X, Y, Z). Default value: '
+            '1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ...'
+            ' 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308.'
+        ),
+        argstr='-dircos %s %s %s',
+        xor=_xor_dircos)
+
+    # FIXME Use the requires=...?
+    xdircos = traits.Float(
+        desc=('Direction cosines along the X dimension. '
+              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
+        argstr='-xdircos %s',
+        requires=('ydircos', 'zdircos'),
+        xor=_xor_dircos)
+
+    ydircos = traits.Float(
+        desc=('Direction cosines along the Y dimension. '
+              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
+        argstr='-ydircos %s',
+        requires=('xdircos', 'zdircos'),
+        xor=_xor_dircos)
+
+    zdircos = traits.Float(
+        desc=('Direction cosines along the Z dimension. '
+              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
+        argstr='-zdircos %s',
+        requires=('xdircos', 'ydircos'),
+        xor=_xor_dircos)
+
+
+class ResampleOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+
+
+class Resample(StdOutCommandLine):
+    """
+    Resample a minc file.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import Resample
+    >>> from nipype.interfaces.minc.testdata import minc2Dfile
+    >>> r = Resample(input_file=minc2Dfile, output_file='/tmp/out.mnc')  # Resample the file.
+    >>> r.run()  # doctest: +SKIP
+
+    """
+
+    input_spec = ResampleInputSpec
+    output_spec = ResampleOutputSpec
+    _cmd = 'mincresample'
+
+
+class NormInputSpec(CommandLineInputSpec):
+    """
+
+    Not implemented:
+
+    -version      print version and exit
+    -verbose      be verbose
+    -noverbose    opposite of -verbose [default]
+    -quiet        be quiet
+    -noquiet      opposite of -quiet [default]
+    -fake         do a dry run, (echo cmds only)
+    -nofake       opposite of -fake [default]
+    """
+
+    input_file = File(
+        desc='input file to normalise',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2,
+    )
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_norm.mnc')
+
+    output_threshold_mask = traits.File(
+        desc='File in which to store the threshold mask.',
+        argstr='-threshold_mask %s',
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_norm_threshold_mask.mnc')
+
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    # Normalisation Options
+    mask = traits.File(
+        desc='Calculate the image normalisation within a mask.',
+        argstr='-mask %s',
+        exists=True)
+    clamp = traits.Bool(
+        desc='Force the output range between limits [default].',
+        argstr='-clamp',
+        usedefault=True,
+        default_value=True)
+
+    cutoff = traits.Range(
+        low=0.0,
+        high=100.0,
+        desc='Cutoff value to use to calculate thresholds by a histogram PcT in %.
[default: 0.01]',
+        argstr='-cutoff %s',
+    )
+
+    lower = traits.Float(desc='Lower real value to use.', argstr='-lower %s')
+    upper = traits.Float(desc='Upper real value to use.', argstr='-upper %s')
+
+    out_floor = traits.Float(
+        desc='Output files minimum [default: 0]',
+        argstr='-out_floor %s')  # FIXME is this a float?
+    out_ceil = traits.Float(
+        desc='Output files maximum [default: 100]',
+        argstr='-out_ceil %s')  # FIXME is this a float?
+
+    # Threshold Options
+    threshold = traits.Bool(
+        desc='Threshold the image (set values below threshold_perc to -out_floor).',
+        argstr='-threshold')
+
+    threshold_perc = traits.Range(
+        low=0.0,
+        high=100.0,
+        desc='Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].',
+        argstr='-threshold_perc %s')
+
+    threshold_bmt = traits.Bool(
+        desc='Use the resulting image BiModalT as the threshold.',
+        argstr='-threshold_bmt')
+
+    threshold_blur = traits.Float(
+        desc='Blur FWHM for intensity edges then thresholding [default: 2].',
+        argstr='-threshold_blur %s')
+
+
+class NormOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+    output_threshold_mask = File(desc='threshold mask file')
+
+
+class Norm(CommandLine):
+    """Normalise a file between a maximum and a minimum, possibly
+    using two histogram percentiles.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import Norm
+    >>> from nipype.interfaces.minc.testdata import minc2Dfile
+    >>> n = Norm(input_file=minc2Dfile, output_file='/tmp/out.mnc')  # Normalise the file.
+    >>> n.run()  # doctest: +SKIP
+    """
+
+    input_spec = NormInputSpec
+    output_spec = NormOutputSpec
+    _cmd = 'mincnorm'
+
+
+"""
+| volcentre will centre a MINC image's sampling about a point (0,0,0 typically)
+|
+| NB: It will modify the file in-place unless an outfile is given
+|
+| Problems or comments should be sent to: a.janke@gmail.com
+
+Summary of options:
+   -version      print version and exit
+   -verbose      be verbose
+   -noverbose    opposite of -verbose [default]
+   -clobber      clobber existing check files
+   -noclobber    opposite of -clobber [default]
+   -fake         do a dry run, (echo cmds only)
+   -nofake       opposite of -fake [default]
+   -com          Use the CoM of the volume for the new centre (via mincstats)
+   -nocom        opposite of -com [default]
+   -centre <x> <y> <z>
+                 Centre to use (x,y,z) [default: 0 0 0]
+   -zero_dircos  Set the direction cosines to identity [default]
+   -nozero_dirco opposite of -zero_dircos
+
+Usage: volcentre [options] <infile.mnc> [<outfile.mnc>]
+       volcentre -help to list options
+
+"""
+
+
+class VolcentreInputSpec(CommandLineInputSpec):
+    """
+    Not implemented:
+
+    -fake         do a dry run, (echo cmds only)
+    -nofake       opposite of -fake [default]
+
+    """
+
+    input_file = File(
+        desc='input file to centre',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2,
+    )
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_volcentre.mnc')
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    com = traits.Bool(
+        desc='Use the CoM of the volume for the new centre (via mincstats).
Default: False',
+        argstr='-com')
+
+    centre = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        argstr='-centre %s %s %s',
+        desc='Centre to use (x,y,z) [default: 0 0 0].',
+    )
+
+    zero_dircos = traits.Bool(
+        desc='Set the direction cosines to identity [default].',
+        argstr='-zero_dircos')
+
+
+class VolcentreOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+
+
+class Volcentre(CommandLine):
+    """Centre a MINC image's sampling about a point, typically (0,0,0).
+
+    Example
+    --------
+
+    >>> from nipype.interfaces.minc import Volcentre
+    >>> from nipype.interfaces.minc.testdata import minc2Dfile
+    >>> vc = Volcentre(input_file=minc2Dfile)
+    >>> vc.run()  # doctest: +SKIP
+    """
+
+    input_spec = VolcentreInputSpec
+    output_spec = VolcentreOutputSpec
+    _cmd = 'volcentre'
+
+
+class VolpadInputSpec(CommandLineInputSpec):
+    """
+    Not implemented:
+
+    -fake         do a dry run, (echo cmds only)
+    -nofake       opposite of -fake [default]
+
+    | volpad pads a MINC volume
+    |
+    | Problems or comments should be sent to: a.janke@gmail.com
+
+    Summary of options:
+
+    -- General Options -------------------------------------------------------------
+    -verbose      be verbose
+    -noverbose    opposite of -verbose [default]
+    -clobber      clobber existing files
+    -noclobber    opposite of -clobber [default]
+    -fake         do a dry run, (echo cmds only)
+    -nofake       opposite of -fake [default]
+
+
+    """
+
+    input_file = File(
+        desc='input file to pad',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2,
+    )
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_volpad.mnc')
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    auto = traits.Bool(
+        desc='Automatically determine padding distances (uses -distance as max). Default: False.',
+        argstr='-auto')
+
+    auto_freq = traits.Float(
+        desc='Frequency of voxels over bimodalt threshold to stop at [default: 500].',
+        argstr='-auto_freq %s')
+
+    distance = traits.Int(
+        desc='Padding distance (in voxels) [default: 4].',
+        argstr='-distance %s')
+
+    smooth = traits.Bool(
+        desc='Smooth (blur) edges before padding. Default: False.',
+        argstr='-smooth')
+
+    smooth_distance = traits.Int(
+        desc='Smoothing distance (in voxels) [default: 4].',
+        argstr='-smooth_distance %s')
+
+
+class VolpadOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+
+
+class Volpad(CommandLine):
+    """Pad a MINC volume along its edges.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import Volpad
+    >>> from nipype.interfaces.minc.testdata import minc2Dfile
+    >>> vp = Volpad(input_file=minc2Dfile, smooth=True, smooth_distance=4)
+    >>> vp.run()  # doctest: +SKIP
+    """
+
+    input_spec = VolpadInputSpec
+    output_spec = VolpadOutputSpec
+    _cmd = 'volpad'
+
+
+class VolisoInputSpec(CommandLineInputSpec):
+
+    input_file = File(
+        desc='input file to convert to isotropic sampling',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2,
+    )
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_voliso.mnc')
+
+    verbose = traits.Bool(
+        desc='Print out log messages.
Default: False.', argstr='--verbose') + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='--clobber', + usedefault=True, + default_value=True) + + maxstep = traits.Float( + desc='The target maximum step desired in the output volume.', + argstr='--maxstep %s') + + minstep = traits.Float( + desc='The target minimum step desired in the output volume.', + argstr='--minstep %s') + + avgstep = traits.Bool( + desc= + 'Calculate the maximum step from the average steps of the input volume.', + argstr='--avgstep') + + +class VolisoOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + + +class Voliso(CommandLine): + """Changes the steps and starts in order that the output volume + has isotropic sampling. + + Examples + -------- + + >>> from nipype.interfaces.minc import Voliso + >>> from nipype.interfaces.minc.testdata import minc2Dfile + >>> viso = Voliso(input_file=minc2Dfile, minstep=0.1, avgstep=True) + >>> viso.run() # doctest: +SKIP + """ + + input_spec = VolisoInputSpec + output_spec = VolisoOutputSpec + _cmd = 'voliso' + + +class GennlxfmInputSpec(CommandLineInputSpec): + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['like'], + hash_files=False, + name_template='%s_gennlxfm.xfm') + + verbose = traits.Bool( + desc='Print out log messages. Default: False.', argstr='-verbose') + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + ident = traits.Bool( + desc='Generate an identity xfm. Default: False.', argstr='-ident') + step = traits.Int( + desc='Output ident xfm step [default: 1].', argstr='-step %s') + + like = File( + desc='Generate a nlxfm like this file.', + exists=True, + argstr='-like %s', + ) + + +class GennlxfmOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + output_grid = File(desc='output grid', exists=True) + + +class Gennlxfm(CommandLine): + """Generate nonlinear xfms. Currently only identity xfms + are supported! + + This tool is part of minc-widgets: + + https://github.com/BIC-MNI/minc-widgets/blob/master/gennlxfm/gennlxfm + + Examples + -------- + + >>> from nipype.interfaces.minc import Gennlxfm + >>> from nipype.interfaces.minc.testdata import minc2Dfile + >>> gennlxfm = Gennlxfm(step=1, like=minc2Dfile) + >>> gennlxfm.run() # doctest: +SKIP + + """ + + input_spec = GennlxfmInputSpec + output_spec = GennlxfmOutputSpec + _cmd = 'gennlxfm' + + def _list_outputs(self): + outputs = super(Gennlxfm, self)._list_outputs() + outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc', + outputs['output_file']) + return outputs + + +class XfmConcatInputSpec(CommandLineInputSpec): + input_files = InputMultiPath( + traits.File(exists=True), + desc='input file(s)', + mandatory=True, + sep=' ', + argstr='%s', + position=-2) + + # This is a dummy input. + input_grid_files = InputMultiPath( + traits.File, + desc='input grid file(s)', + ) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_files'], + hash_files=False, + name_template='%s_xfmconcat.xfm') + + verbose = traits.Bool( + desc='Print out log messages. 
Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+
+class XfmConcatOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+    output_grids = OutputMultiPath(File(exists=True), desc='output grids')
+
+
+class XfmConcat(CommandLine):
+    """Concatenate transforms together. The output transformation
+    is equivalent to applying input1.xfm, then input2.xfm, ..., in
+    that order.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import XfmConcat
+    >>> from nipype.interfaces.minc.testdata import minc2Dfile
+    >>> conc = XfmConcat(input_files=['input1.xfm', 'input1.xfm'])
+    >>> conc.run()  # doctest: +SKIP
+    """
+
+    input_spec = XfmConcatInputSpec
+    output_spec = XfmConcatOutputSpec
+    _cmd = 'xfmconcat'
+
+    def _list_outputs(self):
+        outputs = super(XfmConcat, self)._list_outputs()
+
+        if os.path.exists(outputs['output_file']):
+            if 'grid' in open(outputs['output_file'], 'r').read():
+                outputs['output_grids'] = glob.glob(
+                    re.sub('.(nlxfm|xfm)$', '_grid_*.mnc',
+                           outputs['output_file']))
+
+        return outputs
+
+
+class BestLinRegInputSpec(CommandLineInputSpec):
+    source = File(
+        desc='source Minc file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-4,
+    )
+
+    target = File(
+        desc='target Minc file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-3,
+    )
+
+    output_xfm = File(
+        desc='output xfm file',
+        genfile=True,
+        argstr='%s',
+        position=-2,
+        name_source=['source'],
+        hash_files=False,
+        name_template='%s_bestlinreg.xfm',
+        keep_extension=False)
+
+    output_mnc = File(
+        desc='output mnc file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['source'],
+        hash_files=False,
+        name_template='%s_bestlinreg.mnc',
+        keep_extension=False)
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    # FIXME Very bare implementation, none of these are done yet:
+    """
+    -init_xfm     initial transformation (default identity)
+    -source_mask  source mask to use during fitting
+    -target_mask  target mask to use during fitting
+    -lsq9         use 9-parameter transformation (default)
+    -lsq12        use 12-parameter transformation (default -lsq9)
+    -lsq6         use 6-parameter transformation
+    """
+
+
+class BestLinRegOutputSpec(TraitedSpec):
+    output_xfm = File(desc='output xfm file', exists=True)
+    output_mnc = File(desc='output mnc file', exists=True)
+
+
+class BestLinReg(CommandLine):
+    """Hierarchical linear fitting between two files.
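+
+    (The fit is hierarchical in the sense that the transformation is
+    estimated at progressively finer blurring/sampling steps, with each
+    stage initialised by the previous one.)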
+
+    The bestlinreg script is part of the EZminc package:
+
+    https://github.com/BIC-MNI/EZminc/blob/master/scripts/bestlinreg.pl
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import BestLinReg
+    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
+
+    >>> input_file = nonempty_minc_data(0)
+    >>> target_file = nonempty_minc_data(1)
+    >>> linreg = BestLinReg(source=input_file, target=target_file)
+    >>> linreg.run()  # doctest: +SKIP
+    """
+
+    input_spec = BestLinRegInputSpec
+    output_spec = BestLinRegOutputSpec
+    _cmd = 'bestlinreg'
+
+
+class NlpFitInputSpec(CommandLineInputSpec):
+    source = File(
+        desc='source Minc file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-3,
+    )
+
+    target = File(
+        desc='target Minc file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2,
+    )
+
+    output_xfm = File(
+        desc='output xfm file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+    )
+
+    # This is a dummy input.
+    input_grid_files = InputMultiPath(
+        traits.File,
+        desc='input grid file(s)',
+    )
+
+    config_file = File(
+        desc='File containing the fitting configuration to use.',
+        argstr='-config_file %s',
+        mandatory=True,
+        exists=True)
+
+    init_xfm = File(
+        desc='Initial transformation (default identity).',
+        argstr='-init_xfm %s',
+        mandatory=True,
+        exists=True)
+
+    source_mask = File(
+        desc='Source mask to use during fitting.',
+        argstr='-source_mask %s',
+        mandatory=True,
+        exists=True)
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+
+class NlpFitOutputSpec(TraitedSpec):
+    output_xfm = File(desc='output xfm file', exists=True)
+    output_grid = File(desc='output grid file', exists=True)
+
+
+class NlpFit(CommandLine):
+    """Hierarchical non-linear fitting with blurring.
+
+    This tool is part of the minc-widgets package:
+
+    https://github.com/BIC-MNI/minc-widgets/blob/master/nlpfit/nlpfit
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import NlpFit
+    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
+    >>> from nipype.testing import example_data
+
+    >>> source = nonempty_minc_data(0)
+    >>> target = nonempty_minc_data(1)
+    >>> source_mask = nonempty_minc_data(2)
+    >>> config = nlp_config
+    >>> initial = example_data('minc_initial.xfm')
+    >>> nlpfit = NlpFit(config_file=config, init_xfm=initial, source_mask=source_mask, source=source, target=target)
+    >>> nlpfit.run()  # doctest: +SKIP
+    """
+
+    input_spec = NlpFitInputSpec
+    output_spec = NlpFitOutputSpec
+    _cmd = 'nlpfit'
+
+    def _gen_filename(self, name):
+        if name == 'output_xfm':
+            output_xfm = self.inputs.output_xfm
+
+            if isdefined(output_xfm):
+                return os.path.abspath(output_xfm)
+            else:
+                return aggregate_filename(
+                    [self.inputs.source, self.inputs.target],
+                    'nlpfit_xfm_output') + '.xfm'
+        else:
+            raise NotImplementedError
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['output_xfm'] = os.path.abspath(
+            self._gen_filename('output_xfm'))
+
+        assert os.path.exists(outputs['output_xfm'])
+        if 'grid' in open(outputs['output_xfm'], 'r').read():
+            outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
+                                            outputs['output_xfm'])
+
+        return outputs
+
+
+class XfmAvgInputSpec(CommandLineInputSpec):
+    input_files = InputMultiPath(
+        traits.File(exists=True),
+        desc='input file(s)',
+        mandatory=True,
+        sep=' ',
+        argstr='%s',
+        position=-2)
+
+    # This is a dummy input.
+    input_grid_files = InputMultiPath(
+        traits.File,
+        desc='input grid file(s)',
+    )
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+    )
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    # FIXME xor these:
+
+    avg_linear = traits.Bool(
+        desc='average the linear part [default].', argstr='-avg_linear')
+    avg_nonlinear = traits.Bool(
+        desc='average the non-linear part [default].', argstr='-avg_nonlinear')
+
+    ignore_linear = traits.Bool(
+        desc='opposite of -avg_linear.', argstr='-ignore_linear')
+    ignore_nonlinear = traits.Bool(
+        desc='opposite of -avg_nonlinear.', argstr='-ignore_nonlinear')
+
+
+class XfmAvgOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+    output_grid = File(desc='output grid file', exists=True)
+
+
+class XfmAvg(CommandLine):
+    """Average a number of xfm transforms using matrix logs and exponents.
+    The program xfmavg calls Octave for numerical work.
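
For intuition, the matrix-log averaging that xfmavg delegates to Octave can be
sketched in a few lines of Python. This is an illustration of the technique
only, not part of the interface; it assumes NumPy and SciPy are available:

    import numpy as np
    from scipy.linalg import expm, logm

    def average_affines(mats):
        # xfmavg-style averaging of 4x4 affine matrices: take the mean in
        # the matrix-log domain, then map back with the matrix exponential.
        # Unlike an element-wise mean, this keeps rotation/scaling parts
        # well-behaved.
        logs = [logm(m) for m in mats]
        return expm(np.mean(logs, axis=0))

    print(average_affines([np.eye(4), np.eye(4)]))  # ~ identity
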
+
+    This tool is part of the minc-widgets package:
+
+    https://github.com/BIC-MNI/minc-widgets/tree/master/xfmavg
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import XfmAvg
+    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
+    >>> from nipype.testing import example_data
+
+    >>> xfm1 = example_data('minc_initial.xfm')
+    >>> xfm2 = example_data('minc_initial.xfm')  # cheating for doctest
+    >>> xfmavg = XfmAvg(input_files=[xfm1, xfm2])
+    >>> xfmavg.run()  # doctest: +SKIP
+    """
+
+    input_spec = XfmAvgInputSpec
+    output_spec = XfmAvgOutputSpec
+    _cmd = 'xfmavg'
+
+    def _gen_filename(self, name):
+        if name == 'output_file':
+            output_file = self.inputs.output_file
+
+            if isdefined(output_file):
+                return os.path.abspath(output_file)
+            else:
+                return aggregate_filename(self.inputs.input_files,
+                                          'xfmavg_output') + '.xfm'
+        else:
+            raise NotImplementedError
+
+    def _gen_outfilename(self):
+        return self._gen_filename('output_file')
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
+
+        assert os.path.exists(outputs['output_file'])
+        if 'grid' in open(outputs['output_file'], 'r').read():
+            outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
+                                            outputs['output_file'])
+
+        return outputs
+
+
+class XfmInvertInputSpec(CommandLineInputSpec):
+    input_file = traits.File(
+        desc='input file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2)
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+    )
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+
+class XfmInvertOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+    output_grid = File(desc='output grid file', exists=True)
+
+
+class XfmInvert(CommandLine):
+    """Invert an xfm transform file.
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import XfmInvert
+    >>> from nipype.testing import example_data
+
+    >>> xfm = example_data('minc_initial.xfm')
+    >>> invert = XfmInvert(input_file=xfm)
+    >>> invert.run()  # doctest: +SKIP
+    """
+
+    input_spec = XfmInvertInputSpec
+    output_spec = XfmInvertOutputSpec
+    _cmd = 'xfminvert'
+
+    def _gen_filename(self, name):
+        if name == 'output_file':
+            output_file = self.inputs.output_file
+
+            if isdefined(output_file):
+                return os.path.abspath(output_file)
+            else:
+                return aggregate_filename([self.inputs.input_file],
+                                          'xfminvert_output') + '.xfm'
+        else:
+            raise NotImplementedError
+
+    def _gen_outfilename(self):
+        return self._gen_filename('output_file')
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
+
+        assert os.path.exists(outputs['output_file'])
+        if 'grid' in open(outputs['output_file'], 'r').read():
+            outputs['output_grid'] = re.sub('.(nlxfm|xfm)$', '_grid_0.mnc',
+                                            outputs['output_file'])
+
+        return outputs
+
+
+class BigAverageInputSpec(CommandLineInputSpec):
+    input_files = InputMultiPath(
+        traits.File(exists=True),
+        desc='input file(s)',
+        mandatory=True,
+        sep=' ',
+        argstr='%s',
+        position=-2)
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_files'],
+        hash_files=False,
+        name_template='%s_bigaverage.mnc')
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='--verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='--clobber',
+        usedefault=True,
+        default_value=True)
+
+    # FIXME Rudimentary implementation, various parameters not implemented.
+    # TODO!
+    output_float = traits.Bool(
+        desc='Output files with float precision.', argstr='--float')
+
+    robust = traits.Bool(
+        desc=('Perform robust averaging: features that are outside one '
+              'standard deviation from the mean are downweighted. Works '
+              'well for noisy data with artifacts. See the --tmpdir option '
+              'if you have a large number of input files.'),
+        argstr='-robust')
+
+    # Should Nipype deal with where the temp directory is?
+    tmpdir = Directory(desc='temporary files directory', argstr='-tmpdir %s')
+    sd_file = File(
+        desc='Place standard deviation image in specified file.',
+        argstr='--sdfile %s',
+        name_source=['input_files'],
+        hash_files=False,
+        name_template='%s_bigaverage_stdev.mnc')
+
+
+class BigAverageOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+    sd_file = File(desc='standard deviation image', exists=True)
+
+
+class BigAverage(CommandLine):
+    """Average thousands of MINC files in linear time.
+
+    mincbigaverage is designed to discretise the problem of averaging either
+    a large number of input files or a smaller number of large files
+    (>1GB each). There is also some code included to perform "robust"
+    averaging, in which only the most common features are kept by
+    down-weighting outliers beyond a standard deviation.
+
+    One advantage of mincbigaverage is that it avoids issues around the
+    number of possible open files in HDF/netCDF. In short, if you have more
+    than 100 files open at once while averaging, things will slow down
+    significantly.
+
+    mincbigaverage does this via an iterative approach to averaging files
+    and is a direct drop-in replacement for mincaverage. That said, not all
+    the arguments of mincaverage are supported in mincbigaverage, but they
+    should be.
+
+    This tool is part of the minc-widgets package:
+
+    https://github.com/BIC-MNI/minc-widgets/blob/master/mincbigaverage/mincbigaverage
+
+    Examples
+    --------
+
+    >>> from nipype.interfaces.minc import BigAverage
+    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
+
+    >>> files = [nonempty_minc_data(i) for i in range(3)]
+    >>> average = BigAverage(input_files=files, output_float=True, robust=True)
+    >>> average.run()  # doctest: +SKIP
+    """
+
+    input_spec = BigAverageInputSpec
+    output_spec = BigAverageOutputSpec
+    _cmd = 'mincbigaverage'
+
+
+class ReshapeInputSpec(CommandLineInputSpec):
+    input_file = traits.File(
+        desc='input file',
+        exists=True,
+        mandatory=True,
+        argstr='%s',
+        position=-2)
+
+    output_file = File(
+        desc='output file',
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        name_source=['input_file'],
+        hash_files=False,
+        name_template='%s_reshape.mnc')
+
+    verbose = traits.Bool(
+        desc='Print out log messages. Default: False.', argstr='-verbose')
+    clobber = traits.Bool(
+        desc='Overwrite existing file.',
+        argstr='-clobber',
+        usedefault=True,
+        default_value=True)
+
+    # FIXME MANY options not implemented!
+
+    write_short = traits.Bool(
+        desc='Convert to short integer data.', argstr='-short')
+
+
+class ReshapeOutputSpec(TraitedSpec):
+    output_file = File(desc='output file', exists=True)
+
+
+class Reshape(CommandLine):
+    """Cut a hyperslab out of a minc file, with dimension reordering.
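+
+    (A hyperslab is a rectangular sub-block of the volume, selected by a
+    start point and a count along each dimension.)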
+ + This is also useful for rewriting with a different format, for + example converting to short (see example below). + + Examples + -------- + + >>> from nipype.interfaces.minc import Reshape + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> input_file = nonempty_minc_data(0) + >>> reshape_to_short = Reshape(input_file=input_file, write_short=True) + >>> reshape_to_short.run() # doctest: +SKIP + + """ + + input_spec = ReshapeInputSpec + output_spec = ReshapeOutputSpec + _cmd = 'mincreshape' + + +class VolSymmInputSpec(CommandLineInputSpec): + input_file = traits.File( + desc='input file', + exists=True, + mandatory=True, + argstr='%s', + position=-3) + + trans_file = traits.File( + desc='output xfm trans file', + genfile=True, + argstr='%s', + position=-2, + name_source=['input_file'], + hash_files=False, + name_template='%s_vol_symm.xfm', + keep_extension=False) + + output_file = File( + desc='output file', + genfile=True, + argstr='%s', + position=-1, + name_source=['input_file'], + hash_files=False, + name_template='%s_vol_symm.mnc') + + # This is a dummy input. + input_grid_files = InputMultiPath( + traits.File, + desc='input grid file(s)', + ) + + verbose = traits.Bool( + desc='Print out log messages. Default: False.', argstr='-verbose') + clobber = traits.Bool( + desc='Overwrite existing file.', + argstr='-clobber', + usedefault=True, + default_value=True) + + # FIXME MANY options not implemented! + + fit_linear = traits.Bool(desc='Fit using a linear xfm.', argstr='-linear') + fit_nonlinear = traits.Bool( + desc='Fit using a non-linear xfm.', argstr='-nonlinear') + + # FIXME This changes the input/output behaviour of trans_file! Split into + # two separate interfaces? + nofit = traits.Bool( + desc='Use the input transformation instead of generating one.', + argstr='-nofit') + + config_file = File( + desc= + 'File containing the fitting configuration (nlpfit -help for info).', + argstr='-config_file %s', + exists=True) + + x = traits.Bool(desc='Flip volume in x-plane (default).', argstr='-x') + y = traits.Bool(desc='Flip volume in y-plane.', argstr='-y') + z = traits.Bool(desc='Flip volume in z-plane.', argstr='-z') + + +class VolSymmOutputSpec(TraitedSpec): + output_file = File(desc='output file', exists=True) + trans_file = File(desc='xfm trans file', exists=True) + output_grid = File( + desc='output grid file', exists=True) # FIXME Is exists=True correct? + + +class VolSymm(CommandLine): + """Make a volume symmetric about an axis either linearly + and/or nonlinearly. This is done by registering a volume + to a flipped image of itself. + + This tool is part of the minc-widgets package: + + https://github.com/BIC-MNI/minc-widgets/blob/master/volsymm/volsymm + + Examples + -------- + + >>> from nipype.interfaces.minc import VolSymm + >>> from nipype.interfaces.minc.testdata import nonempty_minc_data + + >>> input_file = nonempty_minc_data(0) + >>> volsymm = VolSymm(input_file=input_file) + >>> volsymm.run() # doctest: +SKIP + + """ + + input_spec = VolSymmInputSpec + output_spec = VolSymmOutputSpec + _cmd = 'volsymm' + + def _list_outputs(self): + outputs = super(VolSymm, self)._list_outputs() + + # Have to manually check for the grid files. 
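+        # A nonlinear .xfm is a small text file that points at a companion
+        # displacement volume (named <xfm>_grid_0.mnc by convention in this
+        # module), so scanning the transform text for 'grid' detects that
+        # case.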
if os.path.exists(outputs['trans_file']): + if 'grid' in open(outputs['trans_file'], 'r').read(): + outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$', '_grid_0.mnc', + outputs['trans_file']) + + return outputs diff --git a/nipype/interfaces/minc/testdata.py b/nipype/interfaces/minc/testdata.py new file mode 100644 index 0000000000..0a8d6bfd88 --- /dev/null +++ b/nipype/interfaces/minc/testdata.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +from ...testing import example_data + +minc2Dfile = example_data('minc_test_2D_00.mnc') +minc3Dfile = example_data('minc_test_3D_00.mnc') + +nlp_config = example_data('minc_nlp.conf') + + +def nonempty_minc_data(i, shape='2D'): + return example_data('minc_test_%s_%.2d.mnc' % ( + shape, + i, + )) diff --git a/nipype/interfaces/minc/tests/__init__.py b/nipype/interfaces/minc/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/minc/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/minc/tests/test_auto_Average.py b/nipype/interfaces/minc/tests/test_auto_Average.py new file mode 100644 index 0000000000..57bdc5ccd2 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Average.py @@ -0,0 +1,155 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Average + + +def test_Average_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avgdim=dict(argstr='-avgdim %s', ), + binarize=dict(argstr='-binarize', ), + binrange=dict(argstr='-binrange %s %s', ), + binvalue=dict(argstr='-binvalue %s', ), + check_dimensions=dict( + argstr='-check_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), + ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + copy_header=dict( + argstr='-copy_header', + xor=('copy_header', 'no_copy_header'), + ), + debug=dict(argstr='-debug', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filelist=dict( + argstr='-filelist %s', + mandatory=True, + xor=('input_files', 'filelist'), + ), + format_byte=dict( + argstr='-byte', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_double=dict( + argstr='-double', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_filetype=dict( + argstr='-filetype', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_float=dict( + argstr='-float', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_int=dict( + argstr='-int', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_long=dict( + argstr='-long', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_short=dict( + argstr='-short', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', +
'format_signed', 'format_unsigned'), + ), + format_signed=dict( + argstr='-signed', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_unsigned=dict( + argstr='-unsigned', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + xor=('input_files', 'filelist'), + ), + max_buffer_size_in_kb=dict( + argstr='-max_buffer_size_in_kb %d', + usedefault=True, + ), + no_check_dimensions=dict( + argstr='-nocheck_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), + ), + no_copy_header=dict( + argstr='-nocopy_header', + xor=('copy_header', 'no_copy_header'), + ), + nonormalize=dict( + argstr='-nonormalize', + xor=('normalize', 'nonormalize'), + ), + normalize=dict( + argstr='-normalize', + xor=('normalize', 'nonormalize'), + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_files'], + name_template='%s_averaged.mnc', + position=-1, + ), + quiet=dict( + argstr='-quiet', + xor=('verbose', 'quiet'), + ), + sdfile=dict(argstr='-sdfile %s', ), + two=dict(argstr='-2', ), + verbose=dict( + argstr='-verbose', + xor=('verbose', 'quiet'), + ), + voxel_range=dict(argstr='-range %d %d', ), + weights=dict( + argstr='-weights %s', + sep=',', + ), + width_weighted=dict( + argstr='-width_weighted', + requires=('avgdim', ), + ), + ) + inputs = Average.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Average_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Average.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_BBox.py b/nipype/interfaces/minc/tests/test_auto_BBox.py new file mode 100644 index 0000000000..a34aeab9b5 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_BBox.py @@ -0,0 +1,54 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import BBox + + +def test_BBox_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + format_minccrop=dict(argstr='-minccrop', ), + format_mincresample=dict(argstr='-mincresample', ), + format_mincreshape=dict(argstr='-mincreshape', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + one_line=dict( + argstr='-one_line', + xor=('one_line', 'two_lines'), + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + output_file=dict( + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s_bbox.txt', + position=-1, + ), + threshold=dict(argstr='-threshold', ), + two_lines=dict( + argstr='-two_lines', + xor=('one_line', 'two_lines'), + ), + ) + inputs = BBox.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BBox_outputs(): + output_map = dict(output_file=dict(), ) + outputs = BBox.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Beast.py b/nipype/interfaces/minc/tests/test_auto_Beast.py new file mode 100644 index 0000000000..4834cf3c4a --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Beast.py @@ -0,0 +1,86 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Beast + + +def test_Beast_inputs(): + input_map = dict( + abspath=dict( + argstr='-abspath', + usedefault=True, + ), + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + confidence_level_alpha=dict( + argstr='-alpha %s', + usedefault=True, + ), + configuration_file=dict(argstr='-configuration %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill_holes=dict(argstr='-fill', ), + flip_images=dict(argstr='-flip', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + library_dir=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + load_moments=dict(argstr='-load_moments', ), + median_filter=dict(argstr='-median', ), + nlm_filter=dict(argstr='-nlm_filter', ), + number_selected_images=dict( + argstr='-selection_num %s', + usedefault=True, + ), + output_file=dict( + argstr='%s', + hash_files=False, + name_source=['input_file'], + name_template='%s_beast_mask.mnc', + position=-1, + ), + patch_size=dict( + argstr='-patch_size %s', + usedefault=True, + ), + probability_map=dict(argstr='-probability', ), + same_resolution=dict(argstr='-same_resolution', ), + search_area=dict( + argstr='-search_area %s', + usedefault=True, + ), + smoothness_factor_beta=dict( + argstr='-beta %s', + usedefault=True, + ), + threshold_patch_selection=dict( + argstr='-threshold %s', + usedefault=True, + ), + voxel_size=dict( + argstr='-voxel_size %s', + usedefault=True, + ), + ) + inputs = Beast.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Beast_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Beast.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_BestLinReg.py b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py new file mode 100644 index 0000000000..fb9061040a --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_BestLinReg.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import BestLinReg + + +def test_BestLinReg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + output_mnc=dict( + argstr='%s', + genfile=True, + hash_files=False, + keep_extension=False, + name_source=['source'], + name_template='%s_bestlinreg.mnc', + position=-1, + ), + output_xfm=dict( + argstr='%s', + genfile=True, + hash_files=False, + keep_extension=False, + name_source=['source'], + name_template='%s_bestlinreg.xfm', + position=-2, + ), + source=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + target=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = BestLinReg.input_spec() + + for key, metadata in list(input_map.items()): + for 
metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BestLinReg_outputs(): + output_map = dict( + output_mnc=dict(), + output_xfm=dict(), + ) + outputs = BestLinReg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_BigAverage.py b/nipype/interfaces/minc/tests/test_auto_BigAverage.py new file mode 100644 index 0000000000..ce1fb2b91e --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_BigAverage.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import BigAverage + + +def test_BigAverage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='--clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_files'], + name_template='%s_bigaverage.mnc', + position=-1, + ), + output_float=dict(argstr='--float', ), + robust=dict(argstr='-robust', ), + sd_file=dict( + argstr='--sdfile %s', + hash_files=False, + name_source=['input_files'], + name_template='%s_bigaverage_stdev.mnc', + ), + tmpdir=dict(argstr='-tmpdir %s', ), + verbose=dict(argstr='--verbose', ), + ) + inputs = BigAverage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BigAverage_outputs(): + output_map = dict( + output_file=dict(), + sd_file=dict(), + ) + outputs = BigAverage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Blob.py b/nipype/interfaces/minc/tests/test_auto_Blob.py new file mode 100644 index 0000000000..b489ac944a --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Blob.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Blob + + +def test_Blob_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + determinant=dict(argstr='-determinant', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + magnitude=dict(argstr='-magnitude', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_blob.mnc', + position=-1, + ), + trace=dict(argstr='-trace', ), + translation=dict(argstr='-translation', ), + ) + inputs = Blob.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Blob_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Blob.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Blur.py b/nipype/interfaces/minc/tests/test_auto_Blur.py new file mode 100644 index 0000000000..fb6e405012 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Blur.py 
@@ -0,0 +1,72 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Blur + + +def test_Blur_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + dimensions=dict(argstr='-dimensions %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm=dict( + argstr='-fwhm %s', + mandatory=True, + xor=('fwhm', 'fwhm3d', 'standard_dev'), + ), + fwhm3d=dict( + argstr='-3dfwhm %s %s %s', + mandatory=True, + xor=('fwhm', 'fwhm3d', 'standard_dev'), + ), + gaussian=dict( + argstr='-gaussian', + xor=('gaussian', 'rect'), + ), + gradient=dict(argstr='-gradient', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + no_apodize=dict(argstr='-no_apodize', ), + output_file_base=dict( + argstr='%s', + position=-1, + ), + partial=dict(argstr='-partial', ), + rect=dict( + argstr='-rect', + xor=('gaussian', 'rect'), + ), + standard_dev=dict( + argstr='-standarddev %s', + mandatory=True, + xor=('fwhm', 'fwhm3d', 'standard_dev'), + ), + ) + inputs = Blur.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Blur_outputs(): + output_map = dict( + gradient_dxyz=dict(), + output_file=dict(), + partial_dx=dict(), + partial_dxyz=dict(), + partial_dy=dict(), + partial_dz=dict(), + ) + outputs = Blur.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Calc.py b/nipype/interfaces/minc/tests/test_auto_Calc.py new file mode 100644 index 0000000000..7bec782c1c --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Calc.py @@ -0,0 +1,156 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Calc + + +def test_Calc_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + check_dimensions=dict( + argstr='-check_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), + ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + copy_header=dict( + argstr='-copy_header', + xor=('copy_header', 'no_copy_header'), + ), + debug=dict(argstr='-debug', ), + environ=dict( + nohash=True, + usedefault=True, + ), + eval_width=dict(argstr='-eval_width %s', ), + expfile=dict( + argstr='-expfile %s', + mandatory=True, + xor=('expression', 'expfile'), + ), + expression=dict( + argstr="-expression '%s'", + mandatory=True, + xor=('expression', 'expfile'), + ), + filelist=dict( + argstr='-filelist %s', + mandatory=True, + xor=('input_files', 'filelist'), + ), + format_byte=dict( + argstr='-byte', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_double=dict( + argstr='-double', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_filetype=dict( + argstr='-filetype', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_float=dict( + argstr='-float', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 
'format_double', + 'format_signed', 'format_unsigned'), + ), + format_int=dict( + argstr='-int', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_long=dict( + argstr='-long', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_short=dict( + argstr='-short', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_signed=dict( + argstr='-signed', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_unsigned=dict( + argstr='-unsigned', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + ignore_nan=dict(argstr='-ignore_nan', ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + ), + max_buffer_size_in_kb=dict(argstr='-max_buffer_size_in_kb %d', ), + no_check_dimensions=dict( + argstr='-nocheck_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), + ), + no_copy_header=dict( + argstr='-nocopy_header', + xor=('copy_header', 'no_copy_header'), + ), + outfiles=dict(), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_files'], + name_template='%s_calc.mnc', + position=-1, + ), + output_illegal=dict( + argstr='-illegal_value', + xor=('output_nan', 'output_zero', 'output_illegal_value'), + ), + output_nan=dict( + argstr='-nan', + xor=('output_nan', 'output_zero', 'output_illegal_value'), + ), + output_zero=dict( + argstr='-zero', + xor=('output_nan', 'output_zero', 'output_illegal_value'), + ), + propagate_nan=dict(argstr='-propagate_nan', ), + quiet=dict( + argstr='-quiet', + xor=('verbose', 'quiet'), + ), + two=dict(argstr='-2', ), + verbose=dict( + argstr='-verbose', + xor=('verbose', 'quiet'), + ), + voxel_range=dict(argstr='-range %d %d', ), + ) + inputs = Calc.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Calc_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Calc.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Convert.py b/nipype/interfaces/minc/tests/test_auto_Convert.py new file mode 100644 index 0000000000..6df596c682 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Convert.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Convert + + +def test_Convert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + chunk=dict(argstr='-chunk %d', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + compression=dict(argstr='-compress %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + 
name_template='%s_convert_output.mnc', + position=-1, + ), + template=dict(argstr='-template', ), + two=dict(argstr='-2', ), + ) + inputs = Convert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Convert_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Convert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Copy.py b/nipype/interfaces/minc/tests/test_auto_Copy.py new file mode 100644 index 0000000000..e91470ba6d --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Copy.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Copy + + +def test_Copy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_copy.mnc', + position=-1, + ), + pixel_values=dict( + argstr='-pixel_values', + xor=('pixel_values', 'real_values'), + ), + real_values=dict( + argstr='-real_values', + xor=('pixel_values', 'real_values'), + ), + ) + inputs = Copy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Copy_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Copy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Dump.py b/nipype/interfaces/minc/tests/test_auto_Dump.py new file mode 100644 index 0000000000..bcca2a4801 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Dump.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Dump + + +def test_Dump_inputs(): + input_map = dict( + annotations_brief=dict( + argstr='-b %s', + xor=('annotations_brief', 'annotations_full'), + ), + annotations_full=dict( + argstr='-f %s', + xor=('annotations_brief', 'annotations_full'), + ), + args=dict(argstr='%s', ), + coordinate_data=dict( + argstr='-c', + xor=('coordinate_data', 'header_data'), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + header_data=dict( + argstr='-h', + xor=('coordinate_data', 'header_data'), + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + line_length=dict(argstr='-l %d', ), + netcdf_name=dict(argstr='-n %s', ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + output_file=dict( + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s_dump.txt', + position=-1, + ), + precision=dict(argstr='%s', ), + variables=dict( + argstr='-v %s', + sep=',', + ), + ) + inputs = Dump.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dump_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Dump.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Extract.py b/nipype/interfaces/minc/tests/test_auto_Extract.py new file mode 100644 index 0000000000..77126eac18 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Extract.py @@ -0,0 +1,163 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Extract + + +def test_Extract_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + count=dict( + argstr='-count %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_any_direction=dict( + argstr='-any_direction', + xor=('flip_positive_direction', 'flip_negative_direction', + 'flip_any_direction'), + ), + flip_negative_direction=dict( + argstr='-negative_direction', + xor=('flip_positive_direction', 'flip_negative_direction', + 'flip_any_direction'), + ), + flip_positive_direction=dict( + argstr='-positive_direction', + xor=('flip_positive_direction', 'flip_negative_direction', + 'flip_any_direction'), + ), + flip_x_any=dict( + argstr='-xanydirection', + xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + ), + flip_x_negative=dict( + argstr='-xdirection', + xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + ), + flip_x_positive=dict( + argstr='+xdirection', + xor=('flip_x_positive', 'flip_x_negative', 'flip_x_any'), + ), + flip_y_any=dict( + argstr='-yanydirection', + xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + ), + flip_y_negative=dict( + argstr='-ydirection', + xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + ), + flip_y_positive=dict( + argstr='+ydirection', + xor=('flip_y_positive', 'flip_y_negative', 'flip_y_any'), + ), + flip_z_any=dict( + argstr='-zanydirection', + xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), + ), + flip_z_negative=dict( + argstr='-zdirection', + xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), + ), + flip_z_positive=dict( + argstr='+zdirection', + xor=('flip_z_positive', 'flip_z_negative', 'flip_z_any'), + ), + image_maximum=dict(argstr='-image_maximum %s', ), + image_minimum=dict(argstr='-image_minimum %s', ), + image_range=dict(argstr='-image_range %s %s', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nonormalize=dict( + argstr='-nonormalize', + xor=('normalize', 'nonormalize'), + ), + normalize=dict( + argstr='-normalize', + xor=('normalize', 'nonormalize'), + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + output_file=dict( + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s.raw', + position=-1, + ), + start=dict( + argstr='-start %s', + sep=',', + ), + write_ascii=dict( + argstr='-ascii', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_byte=dict( + argstr='-byte', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_double=dict( + argstr='-double', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_float=dict( + argstr='-float', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 
'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_int=dict( + argstr='-int', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_long=dict( + argstr='-long', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_range=dict(argstr='-range %s %s', ), + write_short=dict( + argstr='-short', + xor=('write_ascii', 'write_ascii', 'write_byte', 'write_short', + 'write_int', 'write_long', 'write_float', 'write_double', + 'write_signed', 'write_unsigned'), + ), + write_signed=dict( + argstr='-signed', + xor=('write_signed', 'write_unsigned'), + ), + write_unsigned=dict( + argstr='-unsigned', + xor=('write_signed', 'write_unsigned'), + ), + ) + inputs = Extract.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Extract_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Extract.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py new file mode 100644 index 0000000000..c89dc65de6 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Gennlxfm.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Gennlxfm + + +def test_Gennlxfm_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ident=dict(argstr='-ident', ), + like=dict(argstr='-like %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['like'], + name_template='%s_gennlxfm.xfm', + position=-1, + ), + step=dict(argstr='-step %s', ), + verbose=dict(argstr='-verbose', ), + ) + inputs = Gennlxfm.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Gennlxfm_outputs(): + output_map = dict( + output_file=dict(), + output_grid=dict(), + ) + outputs = Gennlxfm.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Math.py b/nipype/interfaces/minc/tests/test_auto_Math.py new file mode 100644 index 0000000000..1d011034d2 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Math.py @@ -0,0 +1,169 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Math + + +def test_Math_inputs(): + input_map = dict( + abs=dict(argstr='-abs', ), + args=dict(argstr='%s', ), + calc_add=dict(argstr='-add', ), + calc_and=dict(argstr='-and', ), + calc_div=dict(argstr='-div', ), + calc_mul=dict(argstr='-mult', ), + calc_not=dict(argstr='-not', ), + calc_or=dict(argstr='-or', ), + calc_sub=dict(argstr='-sub', ), + check_dimensions=dict( + argstr='-check_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), 
+ ), + clamp=dict(argstr='-clamp -const2 %s %s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + copy_header=dict( + argstr='-copy_header', + xor=('copy_header', 'no_copy_header'), + ), + count_valid=dict(argstr='-count_valid', ), + dimension=dict(argstr='-dimension %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exp=dict(argstr='-exp -const2 %s %s', ), + filelist=dict( + argstr='-filelist %s', + mandatory=True, + xor=('input_files', 'filelist'), + ), + format_byte=dict( + argstr='-byte', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_double=dict( + argstr='-double', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_filetype=dict( + argstr='-filetype', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_float=dict( + argstr='-float', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_int=dict( + argstr='-int', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_long=dict( + argstr='-long', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_short=dict( + argstr='-short', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_signed=dict( + argstr='-signed', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + format_unsigned=dict( + argstr='-unsigned', + xor=('format_filetype', 'format_byte', 'format_short', + 'format_int', 'format_long', 'format_float', 'format_double', + 'format_signed', 'format_unsigned'), + ), + ignore_nan=dict(argstr='-ignore_nan', ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + xor=('input_files', 'filelist'), + ), + invert=dict(argstr='-invert -const %s', ), + isnan=dict(argstr='-isnan', ), + log=dict(argstr='-log -const2 %s %s', ), + max_buffer_size_in_kb=dict( + argstr='-max_buffer_size_in_kb %d', + usedefault=True, + ), + maximum=dict(argstr='-maximum', ), + minimum=dict(argstr='-minimum', ), + nisnan=dict(argstr='-nisnan', ), + no_check_dimensions=dict( + argstr='-nocheck_dimensions', + xor=('check_dimensions', 'no_check_dimensions'), + ), + no_copy_header=dict( + argstr='-nocopy_header', + xor=('copy_header', 'no_copy_header'), + ), + nsegment=dict(argstr='-nsegment -const2 %s %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_files'], + name_template='%s_mincmath.mnc', + position=-1, + ), + output_illegal=dict( + argstr='-illegal_value', + xor=('output_nan', 'output_zero', 'output_illegal_value'), + ), + output_nan=dict( + argstr='-nan', + xor=('output_nan', 'output_zero', 'output_illegal_value'), + ), + output_zero=dict( + argstr='-zero', + xor=('output_nan', 'output_zero', 
'output_illegal_value'), + ), + percentdiff=dict(argstr='-percentdiff', ), + propagate_nan=dict(argstr='-propagate_nan', ), + scale=dict(argstr='-scale -const2 %s %s', ), + segment=dict(argstr='-segment -const2 %s %s', ), + sqrt=dict(argstr='-sqrt', ), + square=dict(argstr='-square', ), + test_eq=dict(argstr='-eq', ), + test_ge=dict(argstr='-ge', ), + test_gt=dict(argstr='-gt', ), + test_le=dict(argstr='-le', ), + test_lt=dict(argstr='-lt', ), + test_ne=dict(argstr='-ne', ), + two=dict(argstr='-2', ), + voxel_range=dict(argstr='-range %d %d', ), + ) + inputs = Math.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Math_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Math.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_NlpFit.py b/nipype/interfaces/minc/tests/test_auto_NlpFit.py new file mode 100644 index 0000000000..1a728a90dd --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_NlpFit.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import NlpFit + + +def test_NlpFit_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + config_file=dict( + argstr='-config_file %s', + mandatory=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + init_xfm=dict( + argstr='-init_xfm %s', + mandatory=True, + ), + input_grid_files=dict(), + output_xfm=dict( + argstr='%s', + genfile=True, + position=-1, + ), + source=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + source_mask=dict( + argstr='-source_mask %s', + mandatory=True, + ), + target=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = NlpFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NlpFit_outputs(): + output_map = dict( + output_grid=dict(), + output_xfm=dict(), + ) + outputs = NlpFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Norm.py b/nipype/interfaces/minc/tests/test_auto_Norm.py new file mode 100644 index 0000000000..3a0d28f06d --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Norm.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Norm + + +def test_Norm_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clamp=dict( + argstr='-clamp', + usedefault=True, + ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + cutoff=dict(argstr='-cutoff %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + lower=dict(argstr='-lower %s', ), + mask=dict(argstr='-mask %s', ), + out_ceil=dict(argstr='-out_ceil %s', ), + out_floor=dict(argstr='-out_floor %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_norm.mnc', + position=-1, + ), + 
output_threshold_mask=dict( + argstr='-threshold_mask %s', + hash_files=False, + name_source=['input_file'], + name_template='%s_norm_threshold_mask.mnc', + ), + threshold=dict(argstr='-threshold', ), + threshold_blur=dict(argstr='-threshold_blur %s', ), + threshold_bmt=dict(argstr='-threshold_bmt', ), + threshold_perc=dict(argstr='-threshold_perc %s', ), + upper=dict(argstr='-upper %s', ), + ) + inputs = Norm.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Norm_outputs(): + output_map = dict( + output_file=dict(), + output_threshold_mask=dict(), + ) + outputs = Norm.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Pik.py b/nipype/interfaces/minc/tests/test_auto_Pik.py new file mode 100644 index 0000000000..d74d9a86ad --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Pik.py @@ -0,0 +1,92 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Pik + + +def test_Pik_inputs(): + input_map = dict( + annotated_bar=dict(argstr='--anot_bar', ), + args=dict(argstr='%s', ), + auto_range=dict( + argstr='--auto_range', + xor=('image_range', 'auto_range'), + ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + depth=dict(argstr='--depth %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + horizontal_triplanar_view=dict( + argstr='--horizontal', + xor=('vertical_triplanar_view', 'horizontal_triplanar_view'), + ), + image_range=dict( + argstr='--image_range %s %s', + xor=('image_range', 'auto_range'), + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + jpg=dict(xor=('jpg', 'png'), ), + lookup=dict(argstr='--lookup %s', ), + minc_range=dict(argstr='--range %s %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s.png', + position=-1, + ), + png=dict(xor=('jpg', 'png'), ), + sagittal_offset=dict(argstr='--sagittal_offset %s', ), + sagittal_offset_perc=dict(argstr='--sagittal_offset_perc %d', ), + scale=dict( + argstr='--scale %s', + usedefault=True, + ), + slice_x=dict( + argstr='-x', + xor=('slice_z', 'slice_y', 'slice_x'), + ), + slice_y=dict( + argstr='-y', + xor=('slice_z', 'slice_y', 'slice_x'), + ), + slice_z=dict( + argstr='-z', + xor=('slice_z', 'slice_y', 'slice_x'), + ), + start=dict(argstr='--slice %s', ), + tile_size=dict(argstr='--tilesize %s', ), + title=dict(argstr='%s', ), + title_size=dict( + argstr='--title_size %s', + requires=['title'], + ), + triplanar=dict(argstr='--triplanar', ), + vertical_triplanar_view=dict( + argstr='--vertical', + xor=('vertical_triplanar_view', 'horizontal_triplanar_view'), + ), + width=dict(argstr='--width %s', ), + ) + inputs = Pik.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Pik_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Pik.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Resample.py 
b/nipype/interfaces/minc/tests/test_auto_Resample.py new file mode 100644 index 0000000000..bd00bd224d --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Resample.py @@ -0,0 +1,251 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Resample + + +def test_Resample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + coronal_slices=dict( + argstr='-coronal', + xor=('transverse', 'sagittal', 'coronal'), + ), + dircos=dict( + argstr='-dircos %s %s %s', + xor=('nelements', 'nelements_x_y_or_z'), + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fill=dict( + argstr='-fill', + xor=('nofill', 'fill'), + ), + fill_value=dict( + argstr='-fillvalue %s', + requires=['fill'], + ), + format_byte=dict( + argstr='-byte', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_double=dict( + argstr='-double', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_float=dict( + argstr='-float', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_int=dict( + argstr='-int', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_long=dict( + argstr='-long', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_short=dict( + argstr='-short', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_signed=dict( + argstr='-signed', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + format_unsigned=dict( + argstr='-unsigned', + xor=('format_byte', 'format_short', 'format_int', 'format_long', + 'format_float', 'format_double', 'format_signed', + 'format_unsigned'), + ), + half_width_sinc_window=dict( + argstr='-width %s', + requires=['sinc_interpolation'], + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + input_grid_files=dict(), + invert_transformation=dict(argstr='-invert_transformation', ), + keep_real_range=dict( + argstr='-keep_real_range', + xor=('keep_real_range', 'nokeep_real_range'), + ), + like=dict(argstr='-like %s', ), + nearest_neighbour_interpolation=dict( + argstr='-nearest_neighbour', + xor=('trilinear_interpolation', 'tricubic_interpolation', + 'nearest_neighbour_interpolation', 'sinc_interpolation'), + ), + nelements=dict( + argstr='-nelements %s %s %s', + xor=('nelements', 'nelements_x_y_or_z'), + ), + no_fill=dict( + argstr='-nofill', + xor=('nofill', 'fill'), + ), + no_input_sampling=dict( + argstr='-use_input_sampling', + xor=('vio_transform', 'no_input_sampling'), + ), + nokeep_real_range=dict( + argstr='-nokeep_real_range', + xor=('keep_real_range', 'nokeep_real_range'), + ), + origin=dict(argstr='-origin %s %s %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_resample.mnc', + position=-1, + ), + output_range=dict(argstr='-range %s %s', ), + 
sagittal_slices=dict( + argstr='-sagittal', + xor=('transverse', 'sagittal', 'coronal'), + ), + sinc_interpolation=dict( + argstr='-sinc', + xor=('trilinear_interpolation', 'tricubic_interpolation', + 'nearest_neighbour_interpolation', 'sinc_interpolation'), + ), + sinc_window_hamming=dict( + argstr='-hamming', + requires=['sinc_interpolation'], + xor=('sinc_window_hanning', 'sinc_window_hamming'), + ), + sinc_window_hanning=dict( + argstr='-hanning', + requires=['sinc_interpolation'], + xor=('sinc_window_hanning', 'sinc_window_hamming'), + ), + spacetype=dict(argstr='-spacetype %s', ), + standard_sampling=dict(argstr='-standard_sampling', ), + start=dict( + argstr='-start %s %s %s', + xor=('nelements', 'nelements_x_y_or_z'), + ), + step=dict( + argstr='-step %s %s %s', + xor=('nelements', 'nelements_x_y_or_z'), + ), + talairach=dict(argstr='-talairach', ), + transformation=dict(argstr='-transformation %s', ), + transverse_slices=dict( + argstr='-transverse', + xor=('transverse', 'sagittal', 'coronal'), + ), + tricubic_interpolation=dict( + argstr='-tricubic', + xor=('trilinear_interpolation', 'tricubic_interpolation', + 'nearest_neighbour_interpolation', 'sinc_interpolation'), + ), + trilinear_interpolation=dict( + argstr='-trilinear', + xor=('trilinear_interpolation', 'tricubic_interpolation', + 'nearest_neighbour_interpolation', 'sinc_interpolation'), + ), + two=dict(argstr='-2', ), + units=dict(argstr='-units %s', ), + vio_transform=dict( + argstr='-tfm_input_sampling', + xor=('vio_transform', 'no_input_sampling'), + ), + xdircos=dict( + argstr='-xdircos %s', + requires=('ydircos', 'zdircos'), + xor=('dircos', 'dircos_x_y_or_z'), + ), + xnelements=dict( + argstr='-xnelements %s', + requires=('ynelements', 'znelements'), + xor=('nelements', 'nelements_x_y_or_z'), + ), + xstart=dict( + argstr='-xstart %s', + requires=('ystart', 'zstart'), + xor=('start', 'start_x_y_or_z'), + ), + xstep=dict( + argstr='-xstep %s', + requires=('ystep', 'zstep'), + xor=('step', 'step_x_y_or_z'), + ), + ydircos=dict( + argstr='-ydircos %s', + requires=('xdircos', 'zdircos'), + xor=('dircos', 'dircos_x_y_or_z'), + ), + ynelements=dict( + argstr='-ynelements %s', + requires=('xnelements', 'znelements'), + xor=('nelements', 'nelements_x_y_or_z'), + ), + ystart=dict( + argstr='-ystart %s', + requires=('xstart', 'zstart'), + xor=('start', 'start_x_y_or_z'), + ), + ystep=dict( + argstr='-ystep %s', + requires=('xstep', 'zstep'), + xor=('step', 'step_x_y_or_z'), + ), + zdircos=dict( + argstr='-zdircos %s', + requires=('xdircos', 'ydircos'), + xor=('dircos', 'dircos_x_y_or_z'), + ), + znelements=dict( + argstr='-znelements %s', + requires=('xnelements', 'ynelements'), + xor=('nelements', 'nelements_x_y_or_z'), + ), + zstart=dict( + argstr='-zstart %s', + requires=('xstart', 'ystart'), + xor=('start', 'start_x_y_or_z'), + ), + zstep=dict( + argstr='-zstep %s', + requires=('xstep', 'ystep'), + xor=('step', 'step_x_y_or_z'), + ), + ) + inputs = Resample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Resample_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Resample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Reshape.py b/nipype/interfaces/minc/tests/test_auto_Reshape.py new file mode 100644 index 
0000000000..45e6ddeb4a --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Reshape.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Reshape + + +def test_Reshape_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_reshape.mnc', + position=-1, + ), + verbose=dict(argstr='-verbose', ), + write_short=dict(argstr='-short', ), + ) + inputs = Reshape.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Reshape_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Reshape.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_ToEcat.py b/nipype/interfaces/minc/tests/test_auto_ToEcat.py new file mode 100644 index 0000000000..26a3ac0436 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_ToEcat.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import ToEcat + + +def test_ToEcat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_acquisition_variable=dict( + argstr='-ignore_acquisition_variable', ), + ignore_ecat_acquisition_variable=dict( + argstr='-ignore_ecat_acquisition_variable', ), + ignore_ecat_main=dict(argstr='-ignore_ecat_main', ), + ignore_ecat_subheader_variable=dict( + argstr='-ignore_ecat_subheader_variable', ), + ignore_patient_variable=dict(argstr='-ignore_patient_variable', ), + ignore_study_variable=dict(argstr='-ignore_study_variable', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + no_decay_corr_fctr=dict(argstr='-no_decay_corr_fctr', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s_to_ecat.v', + position=-1, + ), + voxels_as_integers=dict(argstr='-label', ), + ) + inputs = ToEcat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ToEcat_outputs(): + output_map = dict(output_file=dict(), ) + outputs = ToEcat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_ToRaw.py b/nipype/interfaces/minc/tests/test_auto_ToRaw.py new file mode 100644 index 0000000000..e010da322d --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_ToRaw.py @@ -0,0 +1,89 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import ToRaw + + +def test_ToRaw_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nonormalize=dict( + 
argstr='-nonormalize', + xor=('normalize', 'nonormalize'), + ), + normalize=dict( + argstr='-normalize', + xor=('normalize', 'nonormalize'), + ), + out_file=dict( + argstr='> %s', + genfile=True, + position=-1, + ), + output_file=dict( + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s.raw', + position=-1, + ), + write_byte=dict( + argstr='-byte', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_double=dict( + argstr='-double', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_float=dict( + argstr='-float', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_int=dict( + argstr='-int', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_long=dict( + argstr='-long', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_range=dict(argstr='-range %s %s', ), + write_short=dict( + argstr='-short', + xor=('write_byte', 'write_short', 'write_int', 'write_long', + 'write_float', 'write_double'), + ), + write_signed=dict( + argstr='-signed', + xor=('write_signed', 'write_unsigned'), + ), + write_unsigned=dict( + argstr='-unsigned', + xor=('write_signed', 'write_unsigned'), + ), + ) + inputs = ToRaw.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ToRaw_outputs(): + output_map = dict(output_file=dict(), ) + outputs = ToRaw.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_VolSymm.py b/nipype/interfaces/minc/tests/test_auto_VolSymm.py new file mode 100644 index 0000000000..048ffcde9b --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_VolSymm.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import VolSymm + + +def test_VolSymm_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + config_file=dict(argstr='-config_file %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fit_linear=dict(argstr='-linear', ), + fit_nonlinear=dict(argstr='-nonlinear', ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + input_grid_files=dict(), + nofit=dict(argstr='-nofit', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_vol_symm.mnc', + position=-1, + ), + trans_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + keep_extension=False, + name_source=['input_file'], + name_template='%s_vol_symm.xfm', + position=-2, + ), + verbose=dict(argstr='-verbose', ), + x=dict(argstr='-x', ), + y=dict(argstr='-y', ), + z=dict(argstr='-z', ), + ) + inputs = VolSymm.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VolSymm_outputs(): + output_map = dict( + output_file=dict(), + output_grid=dict(), + trans_file=dict(), + ) + outputs = VolSymm.output_spec() + + for key, metadata in list(output_map.items()): + for 
metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Volcentre.py b/nipype/interfaces/minc/tests/test_auto_Volcentre.py new file mode 100644 index 0000000000..b095751a4d --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Volcentre.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Volcentre + + +def test_Volcentre_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + centre=dict(argstr='-centre %s %s %s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + com=dict(argstr='-com', ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_volcentre.mnc', + position=-1, + ), + verbose=dict(argstr='-verbose', ), + zero_dircos=dict(argstr='-zero_dircos', ), + ) + inputs = Volcentre.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Volcentre_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Volcentre.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Voliso.py b/nipype/interfaces/minc/tests/test_auto_Voliso.py new file mode 100644 index 0000000000..967642a328 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Voliso.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Voliso + + +def test_Voliso_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avgstep=dict(argstr='--avgstep', ), + clobber=dict( + argstr='--clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + maxstep=dict(argstr='--maxstep %s', ), + minstep=dict(argstr='--minstep %s', ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_voliso.mnc', + position=-1, + ), + verbose=dict(argstr='--verbose', ), + ) + inputs = Voliso.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Voliso_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Voliso.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_Volpad.py b/nipype/interfaces/minc/tests/test_auto_Volpad.py new file mode 100644 index 0000000000..865bc79e69 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_Volpad.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import Volpad + + +def test_Volpad_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + auto=dict(argstr='-auto', ), + auto_freq=dict(argstr='-auto_freq %s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + distance=dict(argstr='-distance %s', 
), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_file'], + name_template='%s_volpad.mnc', + position=-1, + ), + smooth=dict(argstr='-smooth', ), + smooth_distance=dict(argstr='-smooth_distance %s', ), + verbose=dict(argstr='-verbose', ), + ) + inputs = Volpad.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Volpad_outputs(): + output_map = dict(output_file=dict(), ) + outputs = Volpad.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_XfmAvg.py b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py new file mode 100644 index 0000000000..6d036a0c0e --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_XfmAvg.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import XfmAvg + + +def test_XfmAvg_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avg_linear=dict(argstr='-avg_linear', ), + avg_nonlinear=dict(argstr='-avg_nonlinear', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_linear=dict(argstr='-ignore_linear', ), + ignore_nonlinear=dict(argstr='-ignore_nonline', ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + ), + input_grid_files=dict(), + output_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = XfmAvg.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_XfmAvg_outputs(): + output_map = dict( + output_file=dict(), + output_grid=dict(), + ) + outputs = XfmAvg.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_XfmConcat.py b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py new file mode 100644 index 0000000000..eb748953ef --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_XfmConcat.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import XfmConcat + + +def test_XfmConcat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_files=dict( + argstr='%s', + mandatory=True, + position=-2, + sep=' ', + ), + input_grid_files=dict(), + output_file=dict( + argstr='%s', + genfile=True, + hash_files=False, + name_source=['input_files'], + name_template='%s_xfmconcat.xfm', + position=-1, + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = XfmConcat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_XfmConcat_outputs(): + output_map = dict( + output_file=dict(), + output_grids=dict(), + ) + outputs = XfmConcat.output_spec() + + for key, 
metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/minc/tests/test_auto_XfmInvert.py b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py new file mode 100644 index 0000000000..d729e90639 --- /dev/null +++ b/nipype/interfaces/minc/tests/test_auto_XfmInvert.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..minc import XfmInvert + + +def test_XfmInvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clobber=dict( + argstr='-clobber', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + input_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + output_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + verbose=dict(argstr='-verbose', ), + ) + inputs = XfmInvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_XfmInvert_outputs(): + output_map = dict( + output_file=dict(), + output_grid=dict(), + ) + outputs = XfmInvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/__init__.py b/nipype/interfaces/mipav/__init__.py new file mode 100644 index 0000000000..8a9e08fd9b --- /dev/null +++ b/nipype/interfaces/mipav/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from .developer import ( + JistLaminarVolumetricLayering, JistBrainMgdmSegmentation, + JistLaminarProfileGeometry, JistLaminarProfileCalculator, MedicAlgorithmN3, + JistLaminarROIAveraging, MedicAlgorithmLesionToads, + JistBrainMp2rageSkullStripping, JistCortexSurfaceMeshInflation, RandomVol, + MedicAlgorithmImageCalculator, JistBrainMp2rageDuraEstimation, + JistLaminarProfileSampling, MedicAlgorithmMipavReorient, + MedicAlgorithmSPECTRE2010, JistBrainPartialVolumeFilter, + JistIntensityMp2rageMasking, MedicAlgorithmThresholdToBinaryMask) diff --git a/nipype/interfaces/mipav/developer.py b/nipype/interfaces/mipav/developer.py new file mode 100644 index 0000000000..ffb9e10cc3 --- /dev/null +++ b/nipype/interfaces/mipav/developer.py @@ -0,0 +1,1616 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class JistLaminarVolumetricLayeringInputSpec(CommandLineInputSpec): + inInner = File( + desc="Inner Distance Image (GM/WM boundary)", + exists=True, + argstr="--inInner %s") + inOuter = File( + desc="Outer Distance Image (CSF/GM boundary)", + exists=True, + argstr="--inOuter %s") + inNumber = traits.Int(desc="Number of layers", argstr="--inNumber %d") + inMax = traits.Int( + desc="Max iterations for narrow band evolution", argstr="--inMax %d") + inMin = traits.Float( + desc="Min change ratio for narrow band evolution", argstr="--inMin %f") + inLayering = traits.Enum( + "distance-preserving", + "volume-preserving", + desc="Layering method", + argstr="--inLayering %s") + inLayering2 = traits.Enum( + "outward", + 
"inward", + desc="Layering direction", + argstr="--inLayering2 %s") + incurvature = traits.Int( + desc="curvature approximation scale (voxels)", + argstr="--incurvature %d") + inratio = traits.Float( + desc="ratio smoothing kernel size (voxels)", argstr="--inratio %f") + inpresmooth = traits.Enum( + "true", + "false", + desc="pre-smooth cortical surfaces", + argstr="--inpresmooth %s") + inTopology = traits.Enum( + "26/6", + "6/26", + "18/6", + "6/18", + "6/6", + "wcs", + "wco", + "no", + desc="Topology", + argstr="--inTopology %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outContinuous = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Continuous depth measurement", + argstr="--outContinuous %s") + outDiscrete = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Discrete sampled layers", + argstr="--outDiscrete %s") + outLayer = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Layer boundary surfaces", + argstr="--outLayer %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistLaminarVolumetricLayeringOutputSpec(TraitedSpec): + outContinuous = File(desc="Continuous depth measurement", exists=True) + outDiscrete = File(desc="Discrete sampled layers", exists=True) + outLayer = File(desc="Layer boundary surfaces", exists=True) + + +class JistLaminarVolumetricLayering(SEMLikeCommandLine): + """title: Volumetric Layering + +category: Developer Tools + +description: Builds a continuous layering of the cortex following distance-preserving or volume-preserving models of cortical folding. +Waehnert MD, Dinse J, Weiss M, Streicher MN, Waehnert P, Geyer S, Turner R, Bazin PL, Anatomically motivated modeling of cortical laminae, Neuroimage, 2013. 
+ +version: 3.0.RC + +contributor: Miriam Waehnert (waehnert@cbs.mpg.de) http://www.cbs.mpg.de/ + +""" + + input_spec = JistLaminarVolumetricLayeringInputSpec + output_spec = JistLaminarVolumetricLayeringOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarVolumetricLayering " + _outputs_filenames = { + 'outContinuous': 'outContinuous.nii', + 'outLayer': 'outLayer.nii', + 'outDiscrete': 'outDiscrete.nii' + } + _redirect_x = True + + +class JistBrainMgdmSegmentationInputSpec(CommandLineInputSpec): + inMP2RAGE = File( + desc="MP2RAGE T1 Map Image", exists=True, argstr="--inMP2RAGE %s") + inMP2RAGE2 = File( + desc="MP2RAGE T1-weighted Image", + exists=True, + argstr="--inMP2RAGE2 %s") + inPV = File(desc="PV / Dura Image", exists=True, argstr="--inPV %s") + inMPRAGE = File( + desc="MPRAGE T1-weighted Image", exists=True, argstr="--inMPRAGE %s") + inFLAIR = File(desc="FLAIR Image", exists=True, argstr="--inFLAIR %s") + inAtlas = File(desc="Atlas file", exists=True, argstr="--inAtlas %s") + inData = traits.Float(desc="Data weight", argstr="--inData %f") + inCurvature = traits.Float( + desc="Curvature weight", argstr="--inCurvature %f") + inPosterior = traits.Float( + desc="Posterior scale (mm)", argstr="--inPosterior %f") + inMax = traits.Int(desc="Max iterations", argstr="--inMax %d") + inMin = traits.Float(desc="Min change", argstr="--inMin %f") + inSteps = traits.Int(desc="Steps", argstr="--inSteps %d") + inTopology = traits.Enum( + "26/6", + "6/26", + "18/6", + "6/18", + "6/6", + "wcs", + "wco", + "no", + desc="Topology", + argstr="--inTopology %s") + inCompute = traits.Enum( + "true", "false", desc="Compute posteriors", argstr="--inCompute %s") + inAdjust = traits.Enum( + "true", + "false", + desc="Adjust intensity priors", + argstr="--inAdjust %s") + inOutput = traits.Enum( + "segmentation", + "memberships", + desc="Output images", + argstr="--inOutput %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outSegmented = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Segmented Brain Image", + argstr="--outSegmented %s") + outLevelset = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Levelset Boundary Image", + argstr="--outLevelset %s") + outPosterior2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Posterior Maximum Memberships (4D)", + argstr="--outPosterior2 %s") + outPosterior3 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Posterior Maximum Labels (4D)", + argstr="--outPosterior3 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistBrainMgdmSegmentationOutputSpec(TraitedSpec): + outSegmented = File(desc="Segmented Brain Image", exists=True) + outLevelset = File(desc="Levelset Boundary Image", exists=True) + outPosterior2 = File( + desc="Posterior Maximum Memberships (4D)", exists=True) + outPosterior3 = File(desc="Posterior Maximum Labels (4D)", exists=True) + + +class JistBrainMgdmSegmentation(SEMLikeCommandLine): + """title: MGDM Whole Brain Segmentation + +category: Developer Tools + +description: Estimate brain structures from an atlas for a MRI dataset (multiple input combinations are possible). 
+ +version: 2.0.RC + +""" + + input_spec = JistBrainMgdmSegmentationInputSpec + output_spec = JistBrainMgdmSegmentationOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMgdmSegmentation " + _outputs_filenames = { + 'outSegmented': 'outSegmented.nii', + 'outPosterior2': 'outPosterior2.nii', + 'outPosterior3': 'outPosterior3.nii', + 'outLevelset': 'outLevelset.nii' + } + _redirect_x = True + + +class JistLaminarProfileGeometryInputSpec(CommandLineInputSpec): + inProfile = File( + desc="Profile Surface Image", exists=True, argstr="--inProfile %s") + incomputed = traits.Enum( + "thickness", + "curvedness", + "shape_index", + "mean_curvature", + "gauss_curvature", + "profile_length", + "profile_curvature", + "profile_torsion", + desc="computed measure", + argstr="--incomputed %s") + inregularization = traits.Enum( + "none", + "Gaussian", + desc="regularization", + argstr="--inregularization %s") + insmoothing = traits.Float( + desc="smoothing parameter", argstr="--insmoothing %f") + inoutside = traits.Float( + desc="outside extension (mm)", argstr="--inoutside %f") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outResult = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Result", + argstr="--outResult %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistLaminarProfileGeometryOutputSpec(TraitedSpec): + outResult = File(desc="Result", exists=True) + + +class JistLaminarProfileGeometry(SEMLikeCommandLine): + """title: Profile Geometry + +category: Developer Tools + +description: Compute various geometric quantities for a cortical layers. + +version: 3.0.RC + +""" + + input_spec = JistLaminarProfileGeometryInputSpec + output_spec = JistLaminarProfileGeometryOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileGeometry " + _outputs_filenames = {'outResult': 'outResult.nii'} + _redirect_x = True + + +class JistLaminarProfileCalculatorInputSpec(CommandLineInputSpec): + inIntensity = File( + desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s") + inMask = File( + desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") + incomputed = traits.Enum( + "mean", + "stdev", + "skewness", + "kurtosis", + desc="computed statistic", + argstr="--incomputed %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outResult = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Result", + argstr="--outResult %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistLaminarProfileCalculatorOutputSpec(TraitedSpec): + outResult = File(desc="Result", exists=True) + + +class JistLaminarProfileCalculator(SEMLikeCommandLine): + """title: Profile Calculator + +category: Developer Tools + +description: Compute various moments for intensities mapped along a cortical profile. 
+ +version: 3.0.RC + +""" + + input_spec = JistLaminarProfileCalculatorInputSpec + output_spec = JistLaminarProfileCalculatorOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileCalculator " + _outputs_filenames = {'outResult': 'outResult.nii'} + _redirect_x = True + + +class MedicAlgorithmN3InputSpec(CommandLineInputSpec): + inInput = File(desc="Input Volume", exists=True, argstr="--inInput %s") + inSignal = traits.Float( + desc= + "Default = min + 1, Values at less than threshold are treated as part of the background", + argstr="--inSignal %f") + inMaximum = traits.Int( + desc="Maximum number of Iterations", argstr="--inMaximum %d") + inEnd = traits.Float( + desc= + "Usually 0.01-0.00001, The measure used to terminate the iterations is the coefficient of variation of change in field estimates between successive iterations.", + argstr="--inEnd %f") + inField = traits.Float( + desc= + "Characteristic distance over which the field varies. The distance between adjacent knots in bspline fitting with at least 4 knots going in every dimension. The default in the dialog is one third the distance (resolution * extents) of the smallest dimension.", + argstr="--inField %f") + inSubsample = traits.Float( + desc= + "Usually between 1-32, The factor by which the data is subsampled to a lower resolution in estimating the slowly varying non-uniformity field. Reduce sampling in the finest sampling direction by the shrink factor.", + argstr="--inSubsample %f") + inKernel = traits.Float( + desc= + "Usually between 0.05-0.50, Width of deconvolution kernel used to sharpen the histogram. Larger values give faster convergence while smaller values give greater accuracy.", + argstr="--inKernel %f") + inWeiner = traits.Float( + desc="Usually between 0.0-1.0", argstr="--inWeiner %f") + inAutomatic = traits.Enum( + "true", + "false", + desc= + "If true determines the threshold by histogram analysis. If true a VOI cannot be used and the input threshold is ignored.", + argstr="--inAutomatic %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outInhomogeneity = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Inhomogeneity Corrected Volume", + argstr="--outInhomogeneity %s") + outInhomogeneity2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Inhomogeneity Field", + argstr="--outInhomogeneity2 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmN3OutputSpec(TraitedSpec): + outInhomogeneity = File(desc="Inhomogeneity Corrected Volume", exists=True) + outInhomogeneity2 = File(desc="Inhomogeneity Field", exists=True) + + +class MedicAlgorithmN3(SEMLikeCommandLine): + """title: N3 Correction + +category: Developer Tools + +description: Non-parametric Intensity Non-uniformity Correction, N3, originally by J.G. Sled. 
+ +version: 1.8.R + +""" + + input_spec = MedicAlgorithmN3InputSpec + output_spec = MedicAlgorithmN3OutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmN3 " + _outputs_filenames = { + 'outInhomogeneity2': 'outInhomogeneity2.nii', + 'outInhomogeneity': 'outInhomogeneity.nii' + } + _redirect_x = True + + +class JistLaminarROIAveragingInputSpec(CommandLineInputSpec): + inIntensity = File( + desc="Intensity Profile Image", exists=True, argstr="--inIntensity %s") + inROI = File(desc="ROI Mask", exists=True, argstr="--inROI %s") + inROI2 = traits.Str(desc="ROI Name", argstr="--inROI2 %s") + inMask = File( + desc="Mask Image (opt, 3D or 4D)", exists=True, argstr="--inMask %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outROI3 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="ROI Average", + argstr="--outROI3 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistLaminarROIAveragingOutputSpec(TraitedSpec): + outROI3 = File(desc="ROI Average", exists=True) + + +class JistLaminarROIAveraging(SEMLikeCommandLine): + """title: Profile ROI Averaging + +category: Developer Tools + +description: Compute an average profile over a given ROI. + +version: 3.0.RC + +""" + + input_spec = JistLaminarROIAveragingInputSpec + output_spec = JistLaminarROIAveragingOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarROIAveraging " + _outputs_filenames = {'outROI3': 'outROI3'} + _redirect_x = True + + +class MedicAlgorithmLesionToadsInputSpec(CommandLineInputSpec): + inT1_MPRAGE = File( + desc="T1_MPRAGE Image", exists=True, argstr="--inT1_MPRAGE %s") + inT1_SPGR = File( + desc="T1_SPGR Image", exists=True, argstr="--inT1_SPGR %s") + inFLAIR = File(desc="FLAIR Image", exists=True, argstr="--inFLAIR %s") + inAtlas = traits.Enum( + "With Lesion", "No Lesion", desc="Atlas to Use", argstr="--inAtlas %s") + inOutput = traits.Enum( + "hard segmentation", + "hard segmentation+memberships", + "cruise inputs", + "dura removal inputs", + desc="Output images", + argstr="--inOutput %s") + inOutput2 = traits.Enum( + "true", + "false", + desc= + "Output the hard classification using maximum membership (not necessarily topologically correct)", + argstr="--inOutput2 %s") + inCorrect = traits.Enum( + "true", + "false", + desc="Correct MR field inhomogeneity.", + argstr="--inCorrect %s") + inOutput3 = traits.Enum( + "true", + "false", + desc="Output the estimated inhomogeneity field", + argstr="--inOutput3 %s") + inAtlas2 = File( + desc="Atlas File - With Lesions", exists=True, argstr="--inAtlas2 %s") + inAtlas3 = File( + desc="Atlas File - No Lesion - T1 and FLAIR", + exists=True, + argstr="--inAtlas3 %s") + inAtlas4 = File( + desc="Atlas File - No Lesion - T1 Only", + exists=True, + argstr="--inAtlas4 %s") + inMaximum = traits.Int( + desc= + "Maximum distance from the interventricular WM boundary to downweight the lesion membership to avoid false positives", + argstr="--inMaximum %d") + inMaximum2 = traits.Int( + desc="Maximum Ventricle Distance", argstr="--inMaximum2 %d") + inMaximum3 = traits.Int( + desc="Maximum InterVentricular Distance", argstr="--inMaximum3 %d") + inInclude = traits.Enum( + "true", + "false", +
desc="Include lesion in WM class in hard classification", + argstr="--inInclude %s") + inAtlas5 = traits.Float( + desc="Controls the effect of the statistical atlas on the segmentation", + argstr="--inAtlas5 %f") + inSmooting = traits.Float( + desc="Controls the effect of neighberhood voxels on the membership", + argstr="--inSmooting %f") + inMaximum4 = traits.Float( + desc= + "Maximum amount of relative change in the energy function considered as the convergence criteria", + argstr="--inMaximum4 %f") + inMaximum5 = traits.Int( + desc="Maximum iterations", argstr="--inMaximum5 %d") + inAtlas6 = traits.Enum( + "rigid", + "multi_fully_affine", + desc="Atlas alignment", + argstr="--inAtlas6 %s") + inConnectivity = traits.Enum( + "(26,6)", + "(6,26)", + "(6,18)", + "(18,6)", + desc="Connectivity (foreground,background)", + argstr="--inConnectivity %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outHard = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Hard segmentation", + argstr="--outHard %s") + outHard2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Hard segmentationfrom memberships", + argstr="--outHard2 %s") + outInhomogeneity = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Inhomogeneity Field", + argstr="--outInhomogeneity %s") + outMembership = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Membership Functions", + argstr="--outMembership %s") + outLesion = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Lesion Segmentation", + argstr="--outLesion %s") + outSulcal = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Sulcal CSF Membership", + argstr="--outSulcal %s") + outCortical = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Cortical GM Membership", + argstr="--outCortical %s") + outFilled = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Filled WM Membership", + argstr="--outFilled %s") + outWM = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="WM Mask", + argstr="--outWM %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmLesionToadsOutputSpec(TraitedSpec): + outHard = File(desc="Hard segmentation", exists=True) + outHard2 = File(desc="Hard segmentationfrom memberships", exists=True) + outInhomogeneity = File(desc="Inhomogeneity Field", exists=True) + outMembership = File(desc="Membership Functions", exists=True) + outLesion = File(desc="Lesion Segmentation", exists=True) + outSulcal = File(desc="Sulcal CSF Membership", exists=True) + outCortical = File(desc="Cortical GM Membership", exists=True) + outFilled = File(desc="Filled WM Membership", exists=True) + outWM = File(desc="WM Mask", exists=True) + + +class MedicAlgorithmLesionToads(SEMLikeCommandLine): + """title: Lesion TOADS + +category: Developer Tools + +description: Algorithm for simulataneous brain structures and MS lesion segmentation of MS Brains. The brain segmentation is topologically consistent and the algorithm can use multiple MR sequences as input data. +N. Shiee, P.-L. Bazin, A.Z. Ozturk, P.A. Calabresi, D.S. Reich, D.L. 
Pham, "A Topology-Preserving Approach to the Segmentation of Brain Images with Multiple Sclerosis", NeuroImage, vol. 49, no. 2, pp. 1524-1535, 2010. + +version: 1.9.R + +contributor: Navid Shiee (navid.shiee@nih.gov) http://iacl.ece.jhu.edu/~nshiee/ + +""" + + input_spec = MedicAlgorithmLesionToadsInputSpec + output_spec = MedicAlgorithmLesionToadsOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmLesionToads " + _outputs_filenames = { + 'outWM': 'outWM.nii', + 'outHard': 'outHard.nii', + 'outFilled': 'outFilled.nii', + 'outMembership': 'outMembership.nii', + 'outInhomogeneity': 'outInhomogeneity.nii', + 'outCortical': 'outCortical.nii', + 'outHard2': 'outHard2.nii', + 'outLesion': 'outLesion.nii', + 'outSulcal': 'outSulcal.nii' + } + _redirect_x = True + + +class JistBrainMp2rageSkullStrippingInputSpec(CommandLineInputSpec): + inSecond = File( + desc="Second inversion (Inv2) Image", + exists=True, + argstr="--inSecond %s") + inT1 = File( + desc="T1 Map (T1_Images) Image (opt)", exists=True, argstr="--inT1 %s") + inT1weighted = File( + desc="T1-weighted (UNI) Image (opt)", + exists=True, + argstr="--inT1weighted %s") + inFilter = File( + desc="Filter Image (opt)", exists=True, argstr="--inFilter %s") + inSkip = traits.Enum( + "true", "false", desc="Skip zero values", argstr="--inSkip %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outBrain = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Brain Mask Image", + argstr="--outBrain %s") + outMasked = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Masked T1 Map Image", + argstr="--outMasked %s") + outMasked2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Masked T1-weighted Image", + argstr="--outMasked2 %s") + outMasked3 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Masked Filter Image", + argstr="--outMasked3 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistBrainMp2rageSkullStrippingOutputSpec(TraitedSpec): + outBrain = File(desc="Brain Mask Image", exists=True) + outMasked = File(desc="Masked T1 Map Image", exists=True) + outMasked2 = File(desc="Masked T1-weighted Image", exists=True) + outMasked3 = File(desc="Masked Filter Image", exists=True) + + +class JistBrainMp2rageSkullStripping(SEMLikeCommandLine): + """title: MP2RAGE Skull Stripping + +category: Developer Tools + +description: Estimate a brain mask for a MP2RAGE dataset. At least a T1-weighted or a T1 map image is required. 
+ +version: 3.0.RC + +""" + + input_spec = JistBrainMp2rageSkullStrippingInputSpec + output_spec = JistBrainMp2rageSkullStrippingOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMp2rageSkullStripping " + _outputs_filenames = { + 'outBrain': 'outBrain.nii', + 'outMasked3': 'outMasked3.nii', + 'outMasked2': 'outMasked2.nii', + 'outMasked': 'outMasked.nii' + } + _redirect_x = True + + +class JistCortexSurfaceMeshInflationInputSpec(CommandLineInputSpec): + inLevelset = File( + desc="Levelset Image", exists=True, argstr="--inLevelset %s") + inSOR = traits.Float(desc="SOR Parameter", argstr="--inSOR %f") + inMean = traits.Float( + desc="Mean Curvature Threshold", argstr="--inMean %f") + inStep = traits.Int(desc="Step Size", argstr="--inStep %d") + inMax = traits.Int(desc="Max Iterations", argstr="--inMax %d") + inLorentzian = traits.Enum( + "true", "false", desc="Lorentzian Norm", argstr="--inLorentzian %s") + inTopology = traits.Enum( + "26/6", + "6/26", + "18/6", + "6/18", + "6/6", + "wcs", + "wco", + "no", + desc="Topology", + argstr="--inTopology %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outOriginal = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Original Surface", + argstr="--outOriginal %s") + outInflated = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Inflated Surface", + argstr="--outInflated %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistCortexSurfaceMeshInflationOutputSpec(TraitedSpec): + outOriginal = File(desc="Original Surface", exists=True) + outInflated = File(desc="Inflated Surface", exists=True) + + +class JistCortexSurfaceMeshInflation(SEMLikeCommandLine): + """title: Surface Mesh Inflation + +category: Developer Tools + +description: Inflates a cortical surface mesh. +D. Tosun, M. E. Rettmann, X. Han, X. Tao, C. Xu, S. M. Resnick, D. Pham, and J. L. Prince, Cortical Surface Segmentation and Mapping, NeuroImage, vol. 23, pp. S108--S118, 2004. 
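+
+Example: a usage sketch (hypothetical levelset input):
+
+>>> from nipype.interfaces.mipav import JistCortexSurfaceMeshInflation
+>>> inflate = JistCortexSurfaceMeshInflation()
+>>> inflate.inputs.inLevelset = 'cortex_levelset.nii'
+>>> inflate.inputs.inMax = 500  # cap on inflation iterations
+>>> inflate.run()  # doctest: +SKIP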
+ +version: 3.0.RC + +contributor: Duygu Tosun + +""" + + input_spec = JistCortexSurfaceMeshInflationInputSpec + output_spec = JistCortexSurfaceMeshInflationOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.cortex.JistCortexSurfaceMeshInflation " + _outputs_filenames = { + 'outOriginal': 'outOriginal', + 'outInflated': 'outInflated' + } + _redirect_x = True + + +class RandomVolInputSpec(CommandLineInputSpec): + inSize = traits.Int( + desc="Size of Volume in X direction", argstr="--inSize %d") + inSize2 = traits.Int( + desc="Size of Volume in Y direction", argstr="--inSize2 %d") + inSize3 = traits.Int( + desc="Size of Volume in Z direction", argstr="--inSize3 %d") + inSize4 = traits.Int( + desc="Size of Volume in t direction", argstr="--inSize4 %d") + inStandard = traits.Int( + desc="Standard Deviation for Normal Distribution", + argstr="--inStandard %d") + inLambda = traits.Float( + desc="Lambda Value for Exponential Distribution", + argstr="--inLambda %f") + inMaximum = traits.Int(desc="Maximum Value", argstr="--inMaximum %d") + inMinimum = traits.Int(desc="Minimum Value", argstr="--inMinimum %d") + inField = traits.Enum( + "Uniform", + "Normal", + "Exponential", + desc="Field", + argstr="--inField %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outRand1 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Rand1", + argstr="--outRand1 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class RandomVolOutputSpec(TraitedSpec): + outRand1 = File(desc="Rand1", exists=True) + + +class RandomVol(SEMLikeCommandLine): + """title: Random Volume Generator + +category: Developer Tools + +description: Generate a random scalar volume. + +version: 1.12.RC + +documentation-url: http://www.nitrc.org/projects/jist/ + +""" + + input_spec = RandomVolInputSpec + output_spec = RandomVolOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.bme.smile.demo.RandomVol " + _outputs_filenames = {'outRand1': 'outRand1.nii'} + _redirect_x = True + + +class MedicAlgorithmImageCalculatorInputSpec(CommandLineInputSpec): + inVolume = File(desc="Volume 1", exists=True, argstr="--inVolume %s") + inVolume2 = File(desc="Volume 2", exists=True, argstr="--inVolume2 %s") + inOperation = traits.Enum( + "Add", + "Subtract", + "Multiply", + "Divide", + "Min", + "Max", + desc="Operation", + argstr="--inOperation %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outResult = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Result Volume", + argstr="--outResult %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmImageCalculatorOutputSpec(TraitedSpec): + outResult = File(desc="Result Volume", exists=True) + + +class MedicAlgorithmImageCalculator(SEMLikeCommandLine): + """title: Image Calculator + +category: Developer Tools + +description: Perform simple image calculator operations on two images. 
The operations include 'Add', 'Subtract', 'Multiply', 'Divide', 'Min', and 'Max'. + +version: 1.10.RC + +documentation-url: http://www.iacl.ece.jhu.edu/ + +""" + + input_spec = MedicAlgorithmImageCalculatorInputSpec + output_spec = MedicAlgorithmImageCalculatorOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.utilities.math.MedicAlgorithmImageCalculator " + _outputs_filenames = {'outResult': 'outResult.nii'} + _redirect_x = True + + +class JistBrainMp2rageDuraEstimationInputSpec(CommandLineInputSpec): + inSecond = File( + desc="Second inversion (Inv2) Image", + exists=True, + argstr="--inSecond %s") + inSkull = File( + desc="Skull Stripping Mask", exists=True, argstr="--inSkull %s") + inDistance = traits.Float( + desc="Distance to background (mm)", argstr="--inDistance %f") + inoutput = traits.Enum( + "dura_region", + "boundary", + "dura_prior", + "bg_prior", + "intens_prior", + desc= + "Outputs an estimate of the dura / CSF boundary or an estimate of the entire dura region.", + argstr="--inoutput %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outDura = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Dura Image", + argstr="--outDura %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistBrainMp2rageDuraEstimationOutputSpec(TraitedSpec): + outDura = File(desc="Dura Image", exists=True) + + +class JistBrainMp2rageDuraEstimation(SEMLikeCommandLine): + """title: MP2RAGE Dura Estimation + +category: Developer Tools + +description: Filters a MP2RAGE brain image to obtain a probability map of dura mater.
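+
+Example: a sketch with hypothetical file names (inSkull would typically be
+the mask produced by the MP2RAGE skull stripping module above):
+
+>>> from nipype.interfaces.mipav import JistBrainMp2rageDuraEstimation
+>>> dura = JistBrainMp2rageDuraEstimation()
+>>> dura.inputs.inSecond = 'inv2.nii'
+>>> dura.inputs.inSkull = 'brain_mask.nii'
+>>> dura.inputs.inDistance = 5.0  # mm to background
+>>> dura.run()  # doctest: +SKIP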
+ +version: 3.0.RC + +""" + + input_spec = JistBrainMp2rageDuraEstimationInputSpec + output_spec = JistBrainMp2rageDuraEstimationOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation " + _outputs_filenames = {'outDura': 'outDura.nii'} + _redirect_x = True + + +class JistLaminarProfileSamplingInputSpec(CommandLineInputSpec): + inProfile = File( + desc="Profile Surface Image", exists=True, argstr="--inProfile %s") + inIntensity = File( + desc="Intensity Image", exists=True, argstr="--inIntensity %s") + inCortex = File( + desc="Cortex Mask (opt)", exists=True, argstr="--inCortex %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outProfilemapped = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Profile-mapped Intensity Image", + argstr="--outProfilemapped %s") + outProfile2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Profile 4D Mask", + argstr="--outProfile2 %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistLaminarProfileSamplingOutputSpec(TraitedSpec): + outProfilemapped = File(desc="Profile-mapped Intensity Image", exists=True) + outProfile2 = File(desc="Profile 4D Mask", exists=True) + + +class JistLaminarProfileSampling(SEMLikeCommandLine): + """title: Profile Sampling + +category: Developer Tools + +description: Sample some intensity image along a cortical profile across layer surfaces. + +version: 3.0.RC + +""" + + input_spec = JistLaminarProfileSamplingInputSpec + output_spec = JistLaminarProfileSamplingOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.laminar.JistLaminarProfileSampling " + _outputs_filenames = { + 'outProfile2': 'outProfile2.nii', + 'outProfilemapped': 'outProfilemapped.nii' + } + _redirect_x = True + + +class MedicAlgorithmMipavReorientInputSpec(CommandLineInputSpec): + inSource = InputMultiPath( + File, desc="Source", sep=";", argstr="--inSource %s") + inTemplate = File(desc="Template", exists=True, argstr="--inTemplate %s") + inNew = traits.Enum( + "Dicom axial", + "Dicom coronal", + "Dicom sagittal", + "User defined", + desc="New image orientation", + argstr="--inNew %s") + inUser = traits.Enum( + "Unknown", + "Patient Right to Left", + "Patient Left to Right", + "Patient Posterior to Anterior", + "Patient Anterior to Posterior", + "Patient Inferior to Superior", + "Patient Superior to Inferior", + desc="User defined X-axis orientation (image left to right)", + argstr="--inUser %s") + inUser2 = traits.Enum( + "Unknown", + "Patient Right to Left", + "Patient Left to Right", + "Patient Posterior to Anterior", + "Patient Anterior to Posterior", + "Patient Inferior to Superior", + "Patient Superior to Inferior", + desc="User defined Y-axis orientation (image top to bottom)", + argstr="--inUser2 %s") + inUser3 = traits.Enum( + "Unknown", + "Patient Right to Left", + "Patient Left to Right", + "Patient Posterior to Anterior", + "Patient Anterior to Posterior", + "Patient Inferior to Superior", + "Patient Superior to Inferior", + desc="User defined Z-axis orientation (into the screen)", + argstr="--inUser3 %s") + inUser4 = traits.Enum( + "Axial", + "Coronal", + "Sagittal", + "Unknown", + desc="User defined Image Orientation", + argstr="--inUser4 %s") + 
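+    # NOTE: the inUser* axis traits above describe a user-defined frame and
+    # presumably take effect only when inNew is set to "User defined" (an
+    # assumption based on the trait descriptions, not verified against MIPAV).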
inInterpolation = traits.Enum( + "Nearest Neighbor", + "Trilinear", + "Bspline 3rd order", + "Bspline 4th order", + "Cubic Lagrangian", + "Quintic Lagrangian", + "Heptic Lagrangian", + "Windowed Sinc", + desc="Interpolation", + argstr="--inInterpolation %s") + inResolution = traits.Enum( + "Unchanged", + "Finest cubic", + "Coarsest cubic", + "Same as template", + desc="Resolution", + argstr="--inResolution %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outReoriented = InputMultiPath( + File, desc="Reoriented Volume", sep=";", argstr="--outReoriented %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmMipavReorientOutputSpec(TraitedSpec): + pass + + +class MedicAlgorithmMipavReorient(SEMLikeCommandLine): + """title: Reorient Volume + +category: Developer Tools + +description: Reorient a volume to a particular anatomical orientation. + +version: .alpha + +""" + + input_spec = MedicAlgorithmMipavReorientInputSpec + output_spec = MedicAlgorithmMipavReorientOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmMipavReorient " + _outputs_filenames = {} + _redirect_x = True + + +class MedicAlgorithmSPECTRE2010InputSpec(CommandLineInputSpec): + inInput = File( + desc="Input volume to be skullstripped.", + exists=True, + argstr="--inInput %s") + inAtlas = File( + desc= + "SPECTRE atlas description file. A text file enumerating atlas files and landmarks.", + exists=True, + argstr="--inAtlas %s") + inInitial = traits.Int( + desc= + "Erosion of the initial mask, which is based on the probability mask and the classification. The initial mask is output as the d0 volume at the conclusion of SPECTRE.", + argstr="--inInitial %d") + inImage = traits.Enum( + "T1_SPGR", + "T1_ALT", + "T1_MPRAGE", + "T2", + "FLAIR", + desc= + "Set the image modality. 
MP-RAGE is recommended for most T1 sequence images.", + argstr="--inImage %s") + inOutput = traits.Enum( + "true", + "false", + desc= + "Determines if the output results are transformed back into the space of the original input image.", + argstr="--inOutput %s") + inFind = traits.Enum( + "true", "false", desc="Find Midsagittal Plane", argstr="--inFind %s") + inRun = traits.Enum( + "true", "false", desc="Run Smooth Brain Mask", argstr="--inRun %s") + inResample = traits.Enum( + "true", + "false", + desc= + "Determines if the data is resampled to be isotropic during the processing.", + argstr="--inResample %s") + inInitial2 = traits.Float( + desc="Initial probability threshold", argstr="--inInitial2 %f") + inMinimum = traits.Float( + desc="Minimum probability threshold", argstr="--inMinimum %f") + inMMC = traits.Int( + desc= + "The size of the dilation step within the Modified Morphological Closing.", + argstr="--inMMC %d") + inMMC2 = traits.Int( + desc= + "The size of the erosion step within the Modified Morphological Closing.", + argstr="--inMMC2 %d") + inInhomogeneity = traits.Enum( + "true", + "false", + desc= + "Set to false by default, this parameter will make FANTASM try to do inhomogeneity correction during its iterative cycle.", + argstr="--inInhomogeneity %s") + inSmoothing = traits.Float(argstr="--inSmoothing %f") + inBackground = traits.Float(argstr="--inBackground %f") + inOutput2 = traits.Enum( + "true", "false", desc="Output Plane?", argstr="--inOutput2 %s") + inOutput3 = traits.Enum( + "true", "false", desc="Output Split-Halves?", argstr="--inOutput3 %s") + inOutput4 = traits.Enum( + "true", + "false", + desc="Output Segmentation on Plane?", + argstr="--inOutput4 %s") + inDegrees = traits.Enum( + "Rigid - 6", + "Global rescale - 7", + "Specific rescale - 9", + "Affine - 12", + desc="Degrees of freedom", + argstr="--inDegrees %s") + inCost = traits.Enum( + "Correlation ratio", + "Least squares", + "Normalized cross correlation", + "Normalized mutual information", + desc="Cost function", + argstr="--inCost %s") + inRegistration = traits.Enum( + "Trilinear", + "Bspline 3rd order", + "Bspline 4th order", + "Cubic Lagrangian", + "Quintic Lagrangian", + "Heptic Lagrangian", + "Windowed sinc", + desc="Registration interpolation", + argstr="--inRegistration %s") + inOutput5 = traits.Enum( + "Trilinear", + "Bspline 3rd order", + "Bspline 4th order", + "Cubic Lagrangian", + "Quintic Lagrangian", + "Heptic Lagrangian", + "Windowed sinc", + "Nearest Neighbor", + desc="Output interpolation", + argstr="--inOutput5 %s") + inApply = traits.Enum( + "All", "X", "Y", "Z", desc="Apply rotation", argstr="--inApply %s") + inMinimum2 = traits.Float(desc="Minimum angle", argstr="--inMinimum2 %f") + inMaximum = traits.Float(desc="Maximum angle", argstr="--inMaximum %f") + inCoarse = traits.Float( + desc="Coarse angle increment", argstr="--inCoarse %f") + inFine = traits.Float(desc="Fine angle increment", argstr="--inFine %f") + inMultiple = traits.Int( + desc="Multiple of tolerance to bracket the minimum", + argstr="--inMultiple %d") + inNumber = traits.Int(desc="Number of iterations", argstr="--inNumber %d") + inNumber2 = traits.Int( + desc="Number of minima from Level 8 to test at Level 4", + argstr="--inNumber2 %d") + inUse = traits.Enum( + "true", + "false", + desc= + "Use the max of the min resolutions of the two datasets when resampling", + argstr="--inUse %s") + inSubsample = traits.Enum( + "true", + "false", + desc="Subsample image for speed", + argstr="--inSubsample %s") + inSkip = 
traits.Enum( + "true", + "false", + desc="Skip multilevel search (Assume images are close to alignment)", + argstr="--inSkip %s") + inMultithreading = traits.Enum( + "true", + "false", + desc= + "Set to false by default, this parameter controls the multithreaded behavior of the linear registration.", + argstr="--inMultithreading %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outOriginal = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "If Output in Original Space Flag is true then outputs the original input volume. Otherwise outputs the axially reoriented input volume.", + argstr="--outOriginal %s") + outStripped = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Skullstripped result of the input volume with just the brain.", + argstr="--outStripped %s") + outMask = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Binary Mask of the skullstripped result with just the brain", + argstr="--outMask %s") + outPrior = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Probability prior from the atlas registrations", + argstr="--outPrior %s") + outFANTASM = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Tissue classification of the whole input volume.", + argstr="--outFANTASM %s") + outd0 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Initial Brainmask", + argstr="--outd0 %s") + outMidsagittal = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Plane dividing the brain hemispheres", + argstr="--outMidsagittal %s") + outSplitHalves = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Skullstripped mask of the brain with the hemispheres divided.", + argstr="--outSplitHalves %s") + outSegmentation = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "2D image showing the tissue classification on the midsagittal plane", + argstr="--outSegmentation %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmSPECTRE2010OutputSpec(TraitedSpec): + outOriginal = File( + desc= + "If Output in Original Space Flag is true then outputs the original input volume. Otherwise outputs the axially reoriented input volume.", + exists=True) + outStripped = File( + desc="Skullstripped result of the input volume with just the brain.", + exists=True) + outMask = File( + desc="Binary Mask of the skullstripped result with just the brain", + exists=True) + outPrior = File( + desc="Probability prior from the atlas registrations", exists=True) + outFANTASM = File( + desc="Tissue classification of the whole input volume.", + exists=True) + outd0 = File(desc="Initial Brainmask", exists=True) + outMidsagittal = File( + desc="Plane dividing the brain hemispheres", exists=True) + outSplitHalves = File( + desc="Skullstripped mask of the brain with the hemispheres divided.", + exists=True) + outSegmentation = File( + desc= + "2D image showing the tissue classification on the midsagittal plane", + exists=True) + + +class MedicAlgorithmSPECTRE2010(SEMLikeCommandLine): + """title: SPECTRE 2010 + +category: Developer Tools + +description: Simple Paradigm for Extra-Cranial Tissue REmoval + +Algorithm Version: 1.6 +GUI Version: 1.10 + +A. Carass, M.B. Wheeler, J. 
Cuzzocreo, P.-L. Bazin, S.S. Bassett, and J.L. Prince, 'A Joint Registration and Segmentation Approach to Skull Stripping', Fourth IEEE International Symposium on Biomedical Imaging (ISBI 2007), Arlington, VA, April 12-15, 2007. +A. Carass, J. Cuzzocreo, M.B. Wheeler, P.-L. Bazin, S.M. Resnick, and J.L. Prince, 'Simple paradigm for extra-cerebral tissue removal: Algorithm and analysis', NeuroImage 56(4):1982-1992, 2011. + +version: 1.6.R + +documentation-url: http://www.iacl.ece.jhu.edu/ + +contributor: Aaron Carass (aaron_carass@jhu.edu) http://www.iacl.ece.jhu.edu/ +Hanlin Wan (hanlinwan@gmail.com) + +""" + + input_spec = MedicAlgorithmSPECTRE2010InputSpec + output_spec = MedicAlgorithmSPECTRE2010OutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010 " + _outputs_filenames = { + 'outd0': 'outd0.nii', + 'outOriginal': 'outOriginal.nii', + 'outMask': 'outMask.nii', + 'outSplitHalves': 'outSplitHalves.nii', + 'outMidsagittal': 'outMidsagittal.nii', + 'outPrior': 'outPrior.nii', + 'outFANTASM': 'outFANTASM.nii', + 'outSegmentation': 'outSegmentation.nii', + 'outStripped': 'outStripped.nii' + } + _redirect_x = True + + +class JistBrainPartialVolumeFilterInputSpec(CommandLineInputSpec): + inInput = File(desc="Input Image", exists=True, argstr="--inInput %s") + inPV = traits.Enum( + "bright", + "dark", + "both", + desc= + "Outputs the raw intensity values or a probability score for the partial volume regions.", + argstr="--inPV %s") + inoutput = traits.Enum( + "probability", "intensity", desc="output", argstr="--inoutput %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outPartial = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Partial Volume Image", + argstr="--outPartial %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistBrainPartialVolumeFilterOutputSpec(TraitedSpec): + outPartial = File(desc="Partial Volume Image", exists=True) + + +class JistBrainPartialVolumeFilter(SEMLikeCommandLine): + """title: Partial Volume Filter + +category: Developer Tools + +description: Filters an image for regions of partial voluming assuming a ridge-like model of intensity. 
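+
+Example: a sketch flagging bright partial-volume structures in a hypothetical
+T1-weighted input:
+
+>>> from nipype.interfaces.mipav import JistBrainPartialVolumeFilter
+>>> pvf = JistBrainPartialVolumeFilter()
+>>> pvf.inputs.inInput = 'uni.nii'
+>>> pvf.inputs.inPV = 'bright'
+>>> pvf.inputs.inoutput = 'probability'
+>>> pvf.run()  # doctest: +SKIP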
+ +version: 2.0.RC + +""" + + input_spec = JistBrainPartialVolumeFilterInputSpec + output_spec = JistBrainPartialVolumeFilterOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter " + _outputs_filenames = {'outPartial': 'outPartial.nii'} + _redirect_x = True + + +class JistIntensityMp2rageMaskingInputSpec(CommandLineInputSpec): + inSecond = File( + desc="Second inversion (Inv2) Image", + exists=True, + argstr="--inSecond %s") + inQuantitative = File( + desc="Quantitative T1 Map (T1_Images) Image", + exists=True, + argstr="--inQuantitative %s") + inT1weighted = File( + desc="T1-weighted (UNI) Image", + exists=True, + argstr="--inT1weighted %s") + inBackground = traits.Enum( + "exponential", + "half-normal", + desc= + "Model distribution for background noise (default is half-normal, exponential is more stringent).", + argstr="--inBackground %s") + inSkip = traits.Enum( + "true", "false", desc="Skip zero values", argstr="--inSkip %s") + inMasking = traits.Enum( + "binary", + "proba", + desc= + "Whether to use a binary threshold or a weighted average based on the probability.", + argstr="--inMasking %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outSignal = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Signal Proba Image", + argstr="--outSignal_Proba %s") + outSignal2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Signal Mask Image", + argstr="--outSignal_Mask %s") + outMasked = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Masked T1 Map Image", + argstr="--outMasked_T1_Map %s") + outMasked2 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Masked Iso Image", + argstr="--outMasked_T1weighted %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class JistIntensityMp2rageMaskingOutputSpec(TraitedSpec): + outSignal = File(desc="Signal Proba Image", exists=True) + outSignal2 = File(desc="Signal Mask Image", exists=True) + outMasked = File(desc="Masked T1 Map Image", exists=True) + outMasked2 = File(desc="Masked Iso Image", exists=True) + + +class JistIntensityMp2rageMasking(SEMLikeCommandLine): + """title: MP2RAGE Background Masking + +category: Developer Tools + +description: Estimate a background signal mask for a MP2RAGE dataset. 
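+
+Example: a sketch with hypothetical names for the three MP2RAGE contrasts:
+
+>>> from nipype.interfaces.mipav import JistIntensityMp2rageMasking
+>>> masking = JistIntensityMp2rageMasking()
+>>> masking.inputs.inSecond = 'inv2.nii'
+>>> masking.inputs.inQuantitative = 't1map.nii'
+>>> masking.inputs.inT1weighted = 'uni.nii'
+>>> masking.inputs.outSignal2 = True  # binary signal mask
+>>> masking.run()  # doctest: +SKIP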
+ +version: 3.0.RC + +""" + + input_spec = JistIntensityMp2rageMaskingInputSpec + output_spec = JistIntensityMp2rageMaskingOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run de.mpg.cbs.jist.intensity.JistIntensityMp2rageMasking " + _outputs_filenames = { + 'outSignal2': 'outSignal2.nii', + 'outSignal': 'outSignal.nii', + 'outMasked2': 'outMasked2.nii', + 'outMasked': 'outMasked.nii' + } + _redirect_x = True + + +class MedicAlgorithmThresholdToBinaryMaskInputSpec(CommandLineInputSpec): + inLabel = InputMultiPath( + File, desc="Input volumes", sep=";", argstr="--inLabel %s") + inMinimum = traits.Float( + desc="Minimum threshold value.", argstr="--inMinimum %f") + inMaximum = traits.Float( + desc="Maximum threshold value.", argstr="--inMaximum %f") + inUse = traits.Enum( + "true", + "false", + desc="Use the image's max intensity as the max value of the range.", + argstr="--inUse %s") + xPrefExt = traits.Enum( + "nrrd", desc="Output File Type", argstr="--xPrefExt %s") + outBinary = InputMultiPath( + File, desc="Binary Mask", sep=";", argstr="--outBinary %s") + null = traits.Str(desc="Execution Time", argstr="--null %s") + xDefaultMem = traits.Int( + desc="Set default maximum heap size", argstr="-xDefaultMem %d") + xMaxProcess = traits.Int( + 1, + desc="Set default maximum number of processes.", + argstr="-xMaxProcess %d", + usedefault=True) + + +class MedicAlgorithmThresholdToBinaryMaskOutputSpec(TraitedSpec): + pass + + +class MedicAlgorithmThresholdToBinaryMask(SEMLikeCommandLine): + """title: Threshold to Binary Mask + +category: Developer Tools + +description: Given a volume and an intensity range, create a binary mask for values within that range. + +version: 1.2.RC + +documentation-url: http://www.iacl.ece.jhu.edu/ + +""" + + input_spec = MedicAlgorithmThresholdToBinaryMaskInputSpec + output_spec = MedicAlgorithmThresholdToBinaryMaskOutputSpec + _cmd = "java edu.jhu.ece.iacl.jist.cli.run edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmThresholdToBinaryMask " + _outputs_filenames = {} + _redirect_x = True diff --git a/nipype/interfaces/mipav/generate_classes.py b/nipype/interfaces/mipav/generate_classes.py new file mode 100644 index 0000000000..df48a2d2da --- /dev/null +++ b/nipype/interfaces/mipav/generate_classes.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +if __name__ == "__main__": + from nipype.interfaces.slicer.generate_classes import generate_all_classes + + # NOTE: For now either the launcher needs to be found on the default path, or + # every tool in the modules list must be found on the default path + # AND calling the module with --xml must be supported and compliant. 
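+    # For example, wrapping an additional JIST module is a matter of appending
+    # its fully qualified Java class name to one of the lists below and
+    # re-running this file as a script (a sketch; assumes nipype is importable
+    # and the JIST launcher is resolvable as described above):
+    #
+    #     python nipype/interfaces/mipav/generate_classes.py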
+    modules_list = [
+        'edu.jhu.bme.smile.demo.RandomVol',
+        'de.mpg.cbs.jist.laminar.JistLaminarProfileCalculator',
+        'de.mpg.cbs.jist.laminar.JistLaminarProfileSampling',
+        'de.mpg.cbs.jist.laminar.JistLaminarROIAveraging',
+        'de.mpg.cbs.jist.laminar.JistLaminarVolumetricLayering',
+        'de.mpg.cbs.jist.laminar.JistLaminarProfileGeometry',
+        'de.mpg.cbs.jist.brain.JistBrainMgdmSegmentation',
+        'de.mpg.cbs.jist.brain.JistBrainMp2rageSkullStripping',
+        'de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter',
+        'de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation'
+    ]
+
+    modules_from_chris = [
+        'edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010',
+        'edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmMipavReorient',
+        'edu.jhu.ece.iacl.plugins.utilities.math.MedicAlgorithmImageCalculator',
+        'de.mpg.cbs.jist.brain.JistBrainMp2rageDuraEstimation',
+        'de.mpg.cbs.jist.brain.JistBrainPartialVolumeFilter',
+        'edu.jhu.ece.iacl.plugins.utilities.volume.MedicAlgorithmThresholdToBinaryMask',
+        # 'de.mpg.cbs.jist.cortex.JistCortexFullCRUISE', # waiting for http://www.nitrc.org/tracker/index.php?func=detail&aid=7236&group_id=228&atid=942 to be fixed
+        'de.mpg.cbs.jist.cortex.JistCortexSurfaceMeshInflation'
+    ]
+
+    modules_from_julia = [
+        'de.mpg.cbs.jist.intensity.JistIntensityMp2rageMasking',
+        'edu.jhu.ece.iacl.plugins.segmentation.skull_strip.MedicAlgorithmSPECTRE2010'
+    ]
+
+    modules_from_leonie = [
+        'edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmLesionToads'
+    ]
+
+    modules_from_yasinyazici = [
+        'edu.jhu.ece.iacl.plugins.classification.MedicAlgorithmN3'
+    ]
+
+    modules_list = list(
+        set(modules_list).union(modules_from_chris).union(modules_from_leonie)
+        .union(modules_from_julia).union(modules_from_yasinyazici))
+
+    generate_all_classes(
+        modules_list=modules_list,
+        launcher=["java edu.jhu.ece.iacl.jist.cli.run"],
+        redirect_x=True,
+        mipav_hacks=True)
diff --git a/nipype/interfaces/mipav/tests/__init__.py b/nipype/interfaces/mipav/tests/__init__.py
new file mode 100644
index 0000000000..40a96afc6f
--- /dev/null
+++ b/nipype/interfaces/mipav/tests/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py
new file mode 100644
index 0000000000..16605acbb7
--- /dev/null
+++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMgdmSegmentation.py
@@ -0,0 +1,69 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..developer import JistBrainMgdmSegmentation
+
+
+def test_JistBrainMgdmSegmentation_inputs():
+    input_map = dict(
+        args=dict(argstr='%s', ),
+        environ=dict(
+            nohash=True,
+            usedefault=True,
+        ),
+        inAdjust=dict(argstr='--inAdjust %s', ),
+        inAtlas=dict(argstr='--inAtlas %s', ),
+        inCompute=dict(argstr='--inCompute %s', ),
+        inCurvature=dict(argstr='--inCurvature %f', ),
+        inData=dict(argstr='--inData %f', ),
+        inFLAIR=dict(argstr='--inFLAIR %s', ),
+        inMP2RAGE=dict(argstr='--inMP2RAGE %s', ),
+        inMP2RAGE2=dict(argstr='--inMP2RAGE2 %s', ),
+        inMPRAGE=dict(argstr='--inMPRAGE %s', ),
+        inMax=dict(argstr='--inMax %d', ),
+        inMin=dict(argstr='--inMin %f', ),
+        inOutput=dict(argstr='--inOutput %s', ),
+        inPV=dict(argstr='--inPV %s', ),
+        inPosterior=dict(argstr='--inPosterior %f', ),
+        inSteps=dict(argstr='--inSteps %d', ),
+        inTopology=dict(argstr='--inTopology %s', ),
+        null=dict(argstr='--null %s', ),
+        outLevelset=dict(
+
argstr='--outLevelset %s', + hash_files=False, + ), + outPosterior2=dict( + argstr='--outPosterior2 %s', + hash_files=False, + ), + outPosterior3=dict( + argstr='--outPosterior3 %s', + hash_files=False, + ), + outSegmented=dict( + argstr='--outSegmented %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistBrainMgdmSegmentation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistBrainMgdmSegmentation_outputs(): + output_map = dict( + outLevelset=dict(), + outPosterior2=dict(), + outPosterior3=dict(), + outSegmented=dict(), + ) + outputs = JistBrainMgdmSegmentation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py new file mode 100644 index 0000000000..ebb65bb789 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageDuraEstimation.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistBrainMp2rageDuraEstimation + + +def test_JistBrainMp2rageDuraEstimation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inDistance=dict(argstr='--inDistance %f', ), + inSecond=dict(argstr='--inSecond %s', ), + inSkull=dict(argstr='--inSkull %s', ), + inoutput=dict(argstr='--inoutput %s', ), + null=dict(argstr='--null %s', ), + outDura=dict( + argstr='--outDura %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistBrainMp2rageDuraEstimation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistBrainMp2rageDuraEstimation_outputs(): + output_map = dict(outDura=dict(), ) + outputs = JistBrainMp2rageDuraEstimation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py new file mode 100644 index 0000000000..abf794b662 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistBrainMp2rageSkullStripping + + +def test_JistBrainMp2rageSkullStripping_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inFilter=dict(argstr='--inFilter %s', ), + inSecond=dict(argstr='--inSecond %s', ), + inSkip=dict(argstr='--inSkip %s', ), + inT1=dict(argstr='--inT1 %s', ), + inT1weighted=dict(argstr='--inT1weighted %s', ), + null=dict(argstr='--null %s', ), + outBrain=dict( + argstr='--outBrain %s', + 
hash_files=False, + ), + outMasked=dict( + argstr='--outMasked %s', + hash_files=False, + ), + outMasked2=dict( + argstr='--outMasked2 %s', + hash_files=False, + ), + outMasked3=dict( + argstr='--outMasked3 %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistBrainMp2rageSkullStripping.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistBrainMp2rageSkullStripping_outputs(): + output_map = dict( + outBrain=dict(), + outMasked=dict(), + outMasked2=dict(), + outMasked3=dict(), + ) + outputs = JistBrainMp2rageSkullStripping.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py new file mode 100644 index 0000000000..bb86144c20 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistBrainPartialVolumeFilter.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistBrainPartialVolumeFilter + + +def test_JistBrainPartialVolumeFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInput=dict(argstr='--inInput %s', ), + inPV=dict(argstr='--inPV %s', ), + inoutput=dict(argstr='--inoutput %s', ), + null=dict(argstr='--null %s', ), + outPartial=dict( + argstr='--outPartial %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistBrainPartialVolumeFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistBrainPartialVolumeFilter_outputs(): + output_map = dict(outPartial=dict(), ) + outputs = JistBrainPartialVolumeFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py new file mode 100644 index 0000000000..407b9755ca --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistCortexSurfaceMeshInflation.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistCortexSurfaceMeshInflation + + +def test_JistCortexSurfaceMeshInflation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inLevelset=dict(argstr='--inLevelset %s', ), + inLorentzian=dict(argstr='--inLorentzian %s', ), + inMax=dict(argstr='--inMax %d', ), + inMean=dict(argstr='--inMean %f', ), + inSOR=dict(argstr='--inSOR %f', ), + inStep=dict(argstr='--inStep %d', ), + inTopology=dict(argstr='--inTopology %s', ), + null=dict(argstr='--null %s', ), + outInflated=dict( + argstr='--outInflated %s', + hash_files=False, + ), 
+ outOriginal=dict( + argstr='--outOriginal %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistCortexSurfaceMeshInflation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistCortexSurfaceMeshInflation_outputs(): + output_map = dict( + outInflated=dict(), + outOriginal=dict(), + ) + outputs = JistCortexSurfaceMeshInflation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py new file mode 100644 index 0000000000..bfdace4944 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistIntensityMp2rageMasking.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistIntensityMp2rageMasking + + +def test_JistIntensityMp2rageMasking_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inBackground=dict(argstr='--inBackground %s', ), + inMasking=dict(argstr='--inMasking %s', ), + inQuantitative=dict(argstr='--inQuantitative %s', ), + inSecond=dict(argstr='--inSecond %s', ), + inSkip=dict(argstr='--inSkip %s', ), + inT1weighted=dict(argstr='--inT1weighted %s', ), + null=dict(argstr='--null %s', ), + outMasked=dict( + argstr='--outMasked_T1_Map %s', + hash_files=False, + ), + outMasked2=dict( + argstr='--outMasked_T1weighted %s', + hash_files=False, + ), + outSignal=dict( + argstr='--outSignal_Proba %s', + hash_files=False, + ), + outSignal2=dict( + argstr='--outSignal_Mask %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistIntensityMp2rageMasking.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistIntensityMp2rageMasking_outputs(): + output_map = dict( + outMasked=dict(), + outMasked2=dict(), + outSignal=dict(), + outSignal2=dict(), + ) + outputs = JistIntensityMp2rageMasking.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py new file mode 100644 index 0000000000..12203a1aa6 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileCalculator.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistLaminarProfileCalculator + + +def test_JistLaminarProfileCalculator_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict(argstr='--inIntensity %s', ), + inMask=dict(argstr='--inMask %s', ), + incomputed=dict(argstr='--incomputed %s', ), + 
null=dict(argstr='--null %s', ), + outResult=dict( + argstr='--outResult %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistLaminarProfileCalculator.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistLaminarProfileCalculator_outputs(): + output_map = dict(outResult=dict(), ) + outputs = JistLaminarProfileCalculator.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py new file mode 100644 index 0000000000..ddc4d5d922 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileGeometry.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistLaminarProfileGeometry + + +def test_JistLaminarProfileGeometry_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inProfile=dict(argstr='--inProfile %s', ), + incomputed=dict(argstr='--incomputed %s', ), + inoutside=dict(argstr='--inoutside %f', ), + inregularization=dict(argstr='--inregularization %s', ), + insmoothing=dict(argstr='--insmoothing %f', ), + null=dict(argstr='--null %s', ), + outResult=dict( + argstr='--outResult %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistLaminarProfileGeometry.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistLaminarProfileGeometry_outputs(): + output_map = dict(outResult=dict(), ) + outputs = JistLaminarProfileGeometry.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py new file mode 100644 index 0000000000..58de472b85 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarProfileSampling.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistLaminarProfileSampling + + +def test_JistLaminarProfileSampling_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inCortex=dict(argstr='--inCortex %s', ), + inIntensity=dict(argstr='--inIntensity %s', ), + inProfile=dict(argstr='--inProfile %s', ), + null=dict(argstr='--null %s', ), + outProfile2=dict( + argstr='--outProfile2 %s', + hash_files=False, + ), + outProfilemapped=dict( + argstr='--outProfilemapped %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = 
JistLaminarProfileSampling.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistLaminarProfileSampling_outputs(): + output_map = dict( + outProfile2=dict(), + outProfilemapped=dict(), + ) + outputs = JistLaminarProfileSampling.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py new file mode 100644 index 0000000000..bb9577ccee --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarROIAveraging.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistLaminarROIAveraging + + +def test_JistLaminarROIAveraging_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inIntensity=dict(argstr='--inIntensity %s', ), + inMask=dict(argstr='--inMask %s', ), + inROI=dict(argstr='--inROI %s', ), + inROI2=dict(argstr='--inROI2 %s', ), + null=dict(argstr='--null %s', ), + outROI3=dict( + argstr='--outROI3 %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistLaminarROIAveraging.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistLaminarROIAveraging_outputs(): + output_map = dict(outROI3=dict(), ) + outputs = JistLaminarROIAveraging.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py new file mode 100644 index 0000000000..4aa9f9d77b --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_JistLaminarVolumetricLayering.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import JistLaminarVolumetricLayering + + +def test_JistLaminarVolumetricLayering_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInner=dict(argstr='--inInner %s', ), + inLayering=dict(argstr='--inLayering %s', ), + inLayering2=dict(argstr='--inLayering2 %s', ), + inMax=dict(argstr='--inMax %d', ), + inMin=dict(argstr='--inMin %f', ), + inNumber=dict(argstr='--inNumber %d', ), + inOuter=dict(argstr='--inOuter %s', ), + inTopology=dict(argstr='--inTopology %s', ), + incurvature=dict(argstr='--incurvature %d', ), + inpresmooth=dict(argstr='--inpresmooth %s', ), + inratio=dict(argstr='--inratio %f', ), + null=dict(argstr='--null %s', ), + outContinuous=dict( + argstr='--outContinuous %s', + hash_files=False, + ), + outDiscrete=dict( + argstr='--outDiscrete %s', + hash_files=False, + ), + outLayer=dict( + argstr='--outLayer %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + 
xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = JistLaminarVolumetricLayering.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JistLaminarVolumetricLayering_outputs(): + output_map = dict( + outContinuous=dict(), + outDiscrete=dict(), + outLayer=dict(), + ) + outputs = JistLaminarVolumetricLayering.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py new file mode 100644 index 0000000000..016beee263 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmImageCalculator.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmImageCalculator + + +def test_MedicAlgorithmImageCalculator_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inOperation=dict(argstr='--inOperation %s', ), + inVolume=dict(argstr='--inVolume %s', ), + inVolume2=dict(argstr='--inVolume2 %s', ), + null=dict(argstr='--null %s', ), + outResult=dict( + argstr='--outResult %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmImageCalculator.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmImageCalculator_outputs(): + output_map = dict(outResult=dict(), ) + outputs = MedicAlgorithmImageCalculator.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py new file mode 100644 index 0000000000..87f9ab6a72 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmLesionToads.py @@ -0,0 +1,99 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmLesionToads + + +def test_MedicAlgorithmLesionToads_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAtlas=dict(argstr='--inAtlas %s', ), + inAtlas2=dict(argstr='--inAtlas2 %s', ), + inAtlas3=dict(argstr='--inAtlas3 %s', ), + inAtlas4=dict(argstr='--inAtlas4 %s', ), + inAtlas5=dict(argstr='--inAtlas5 %f', ), + inAtlas6=dict(argstr='--inAtlas6 %s', ), + inConnectivity=dict(argstr='--inConnectivity %s', ), + inCorrect=dict(argstr='--inCorrect %s', ), + inFLAIR=dict(argstr='--inFLAIR %s', ), + inInclude=dict(argstr='--inInclude %s', ), + inMaximum=dict(argstr='--inMaximum %d', ), + inMaximum2=dict(argstr='--inMaximum2 %d', ), + inMaximum3=dict(argstr='--inMaximum3 %d', ), + inMaximum4=dict(argstr='--inMaximum4 %f', ), + inMaximum5=dict(argstr='--inMaximum5 %d', ), + inOutput=dict(argstr='--inOutput %s', ), + inOutput2=dict(argstr='--inOutput2 %s', ), + 
inOutput3=dict(argstr='--inOutput3 %s', ), + inSmooting=dict(argstr='--inSmooting %f', ), + inT1_MPRAGE=dict(argstr='--inT1_MPRAGE %s', ), + inT1_SPGR=dict(argstr='--inT1_SPGR %s', ), + null=dict(argstr='--null %s', ), + outCortical=dict( + argstr='--outCortical %s', + hash_files=False, + ), + outFilled=dict( + argstr='--outFilled %s', + hash_files=False, + ), + outHard=dict( + argstr='--outHard %s', + hash_files=False, + ), + outHard2=dict( + argstr='--outHard2 %s', + hash_files=False, + ), + outInhomogeneity=dict( + argstr='--outInhomogeneity %s', + hash_files=False, + ), + outLesion=dict( + argstr='--outLesion %s', + hash_files=False, + ), + outMembership=dict( + argstr='--outMembership %s', + hash_files=False, + ), + outSulcal=dict( + argstr='--outSulcal %s', + hash_files=False, + ), + outWM=dict( + argstr='--outWM %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmLesionToads.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmLesionToads_outputs(): + output_map = dict( + outCortical=dict(), + outFilled=dict(), + outHard=dict(), + outHard2=dict(), + outInhomogeneity=dict(), + outLesion=dict(), + outMembership=dict(), + outSulcal=dict(), + outWM=dict(), + ) + outputs = MedicAlgorithmLesionToads.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py new file mode 100644 index 0000000000..7c8c7248ad --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmMipavReorient.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmMipavReorient + + +def test_MedicAlgorithmMipavReorient_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inInterpolation=dict(argstr='--inInterpolation %s', ), + inNew=dict(argstr='--inNew %s', ), + inResolution=dict(argstr='--inResolution %s', ), + inSource=dict( + argstr='--inSource %s', + sep=';', + ), + inTemplate=dict(argstr='--inTemplate %s', ), + inUser=dict(argstr='--inUser %s', ), + inUser2=dict(argstr='--inUser2 %s', ), + inUser3=dict(argstr='--inUser3 %s', ), + inUser4=dict(argstr='--inUser4 %s', ), + null=dict(argstr='--null %s', ), + outReoriented=dict( + argstr='--outReoriented %s', + sep=';', + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmMipavReorient.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmMipavReorient_outputs(): + output_map = dict() + outputs = MedicAlgorithmMipavReorient.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py new file mode 100644 index 0000000000..9d5a148a24 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmN3.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmN3 + + +def test_MedicAlgorithmN3_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inAutomatic=dict(argstr='--inAutomatic %s', ), + inEnd=dict(argstr='--inEnd %f', ), + inField=dict(argstr='--inField %f', ), + inInput=dict(argstr='--inInput %s', ), + inKernel=dict(argstr='--inKernel %f', ), + inMaximum=dict(argstr='--inMaximum %d', ), + inSignal=dict(argstr='--inSignal %f', ), + inSubsample=dict(argstr='--inSubsample %f', ), + inWeiner=dict(argstr='--inWeiner %f', ), + null=dict(argstr='--null %s', ), + outInhomogeneity=dict( + argstr='--outInhomogeneity %s', + hash_files=False, + ), + outInhomogeneity2=dict( + argstr='--outInhomogeneity2 %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmN3.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmN3_outputs(): + output_map = dict( + outInhomogeneity=dict(), + outInhomogeneity2=dict(), + ) + outputs = MedicAlgorithmN3.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py new file mode 100644 index 0000000000..b2d247e9dc --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmSPECTRE2010.py @@ -0,0 +1,112 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmSPECTRE2010 + + +def test_MedicAlgorithmSPECTRE2010_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inApply=dict(argstr='--inApply %s', ), + inAtlas=dict(argstr='--inAtlas %s', ), + inBackground=dict(argstr='--inBackground %f', ), + inCoarse=dict(argstr='--inCoarse %f', ), + inCost=dict(argstr='--inCost %s', ), + inDegrees=dict(argstr='--inDegrees %s', ), + inFind=dict(argstr='--inFind %s', ), + inFine=dict(argstr='--inFine %f', ), + inImage=dict(argstr='--inImage %s', ), + inInhomogeneity=dict(argstr='--inInhomogeneity %s', ), + inInitial=dict(argstr='--inInitial %d', ), + inInitial2=dict(argstr='--inInitial2 %f', ), + inInput=dict(argstr='--inInput %s', ), + inMMC=dict(argstr='--inMMC %d', ), + inMMC2=dict(argstr='--inMMC2 %d', ), + inMaximum=dict(argstr='--inMaximum %f', ), + inMinimum=dict(argstr='--inMinimum %f', ), + inMinimum2=dict(argstr='--inMinimum2 %f', ), + inMultiple=dict(argstr='--inMultiple %d', ), + inMultithreading=dict(argstr='--inMultithreading %s', ), + inNumber=dict(argstr='--inNumber %d', ), + inNumber2=dict(argstr='--inNumber2 %d', ), + inOutput=dict(argstr='--inOutput %s', ), + inOutput2=dict(argstr='--inOutput2 %s', ), + inOutput3=dict(argstr='--inOutput3 %s', ), + 
inOutput4=dict(argstr='--inOutput4 %s', ), + inOutput5=dict(argstr='--inOutput5 %s', ), + inRegistration=dict(argstr='--inRegistration %s', ), + inResample=dict(argstr='--inResample %s', ), + inRun=dict(argstr='--inRun %s', ), + inSkip=dict(argstr='--inSkip %s', ), + inSmoothing=dict(argstr='--inSmoothing %f', ), + inSubsample=dict(argstr='--inSubsample %s', ), + inUse=dict(argstr='--inUse %s', ), + null=dict(argstr='--null %s', ), + outFANTASM=dict( + argstr='--outFANTASM %s', + hash_files=False, + ), + outMask=dict( + argstr='--outMask %s', + hash_files=False, + ), + outMidsagittal=dict( + argstr='--outMidsagittal %s', + hash_files=False, + ), + outOriginal=dict( + argstr='--outOriginal %s', + hash_files=False, + ), + outPrior=dict( + argstr='--outPrior %s', + hash_files=False, + ), + outSegmentation=dict( + argstr='--outSegmentation %s', + hash_files=False, + ), + outSplitHalves=dict( + argstr='--outSplitHalves %s', + hash_files=False, + ), + outStripped=dict( + argstr='--outStripped %s', + hash_files=False, + ), + outd0=dict( + argstr='--outd0 %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmSPECTRE2010.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmSPECTRE2010_outputs(): + output_map = dict( + outFANTASM=dict(), + outMask=dict(), + outMidsagittal=dict(), + outOriginal=dict(), + outPrior=dict(), + outSegmentation=dict(), + outSplitHalves=dict(), + outStripped=dict(), + outd0=dict(), + ) + outputs = MedicAlgorithmSPECTRE2010.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py new file mode 100644 index 0000000000..b498025401 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_MedicAlgorithmThresholdToBinaryMask.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import MedicAlgorithmThresholdToBinaryMask + + +def test_MedicAlgorithmThresholdToBinaryMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inLabel=dict( + argstr='--inLabel %s', + sep=';', + ), + inMaximum=dict(argstr='--inMaximum %f', ), + inMinimum=dict(argstr='--inMinimum %f', ), + inUse=dict(argstr='--inUse %s', ), + null=dict(argstr='--null %s', ), + outBinary=dict( + argstr='--outBinary %s', + sep=';', + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = MedicAlgorithmThresholdToBinaryMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedicAlgorithmThresholdToBinaryMask_outputs(): + output_map = dict() + outputs = MedicAlgorithmThresholdToBinaryMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == 
value diff --git a/nipype/interfaces/mipav/tests/test_auto_RandomVol.py b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py new file mode 100644 index 0000000000..d3d92142c4 --- /dev/null +++ b/nipype/interfaces/mipav/tests/test_auto_RandomVol.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..developer import RandomVol + + +def test_RandomVol_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inField=dict(argstr='--inField %s', ), + inLambda=dict(argstr='--inLambda %f', ), + inMaximum=dict(argstr='--inMaximum %d', ), + inMinimum=dict(argstr='--inMinimum %d', ), + inSize=dict(argstr='--inSize %d', ), + inSize2=dict(argstr='--inSize2 %d', ), + inSize3=dict(argstr='--inSize3 %d', ), + inSize4=dict(argstr='--inSize4 %d', ), + inStandard=dict(argstr='--inStandard %d', ), + null=dict(argstr='--null %s', ), + outRand1=dict( + argstr='--outRand1 %s', + hash_files=False, + ), + xDefaultMem=dict(argstr='-xDefaultMem %d', ), + xMaxProcess=dict( + argstr='-xMaxProcess %d', + usedefault=True, + ), + xPrefExt=dict(argstr='--xPrefExt %s', ), + ) + inputs = RandomVol.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RandomVol_outputs(): + output_map = dict(outRand1=dict(), ) + outputs = RandomVol.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mixins/__init__.py b/nipype/interfaces/mixins/__init__.py new file mode 100644 index 0000000000..587d3a22a8 --- /dev/null +++ b/nipype/interfaces/mixins/__init__.py @@ -0,0 +1,2 @@ +from .reporting import ( + ReportCapableInterface, ReportCapableInputSpec, ReportCapableOutputSpec) diff --git a/nipype/interfaces/mixins/reporting.py b/nipype/interfaces/mixins/reporting.py new file mode 100644 index 0000000000..3f4d1b1317 --- /dev/null +++ b/nipype/interfaces/mixins/reporting.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" class mixin and utilities for enabling reports for nipype interfaces """ +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +from abc import abstractmethod + +from ... 
import logging +from ..base import ( + File, BaseInterface, BaseInterfaceInputSpec, TraitedSpec) + +iflogger = logging.getLogger('nipype.interface') + + +class ReportCapableInputSpec(BaseInterfaceInputSpec): + out_report = File('report', usedefault=True, hash_files=False, + desc='filename for the visual report') + + +class ReportCapableOutputSpec(TraitedSpec): + out_report = File(desc='filename for the visual report') + + +class ReportCapableInterface(BaseInterface): + """Mixin to enable reporting for Nipype interfaces""" + _out_report = None + + def __init__(self, generate_report=False, **kwargs): + super(ReportCapableInterface, self).__init__(**kwargs) + self.generate_report = generate_report + + def _post_run_hook(self, runtime): + runtime = super(ReportCapableInterface, self)._post_run_hook(runtime) + + # leave early if there's nothing to do + if not self.generate_report: + return runtime + + self._out_report = self.inputs.out_report + if not os.path.isabs(self._out_report): + self._out_report = os.path.abspath(os.path.join(runtime.cwd, + self._out_report)) + + self._generate_report() + + return runtime + + def _list_outputs(self): + try: + outputs = super(ReportCapableInterface, self)._list_outputs() + except NotImplementedError: + outputs = {} + if self._out_report is not None: + outputs['out_report'] = self._out_report + return outputs + + @abstractmethod + def _generate_report(self): + """ + Saves report to file identified by _out_report instance variable + """ + raise NotImplementedError diff --git a/nipype/interfaces/mixins/tests/__init__.py b/nipype/interfaces/mixins/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/mixins/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py b/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py new file mode 100644 index 0000000000..8985d7069d --- /dev/null +++ b/nipype/interfaces/mixins/tests/test_auto_ReportCapableInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reporting import ReportCapableInterface + + +def test_ReportCapableInterface_inputs(): + input_map = dict() + inputs = ReportCapableInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mne/__init__.py b/nipype/interfaces/mne/__init__.py new file mode 100644 index 0000000000..8bf3db28ed --- /dev/null +++ b/nipype/interfaces/mne/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from .base import WatershedBEM diff --git a/nipype/interfaces/mne/base.py b/nipype/interfaces/mne/base.py new file mode 100644 index 0000000000..7f53071372 --- /dev/null +++ b/nipype/interfaces/mne/base.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, bytes + +import os.path as op +import glob + +from ... 
import logging +from ...utils.filemanip import simplify_list +from ..base import (traits, File, Directory, TraitedSpec, OutputMultiPath) +from ..freesurfer.base import FSCommand, FSTraitedSpec + +iflogger = logging.getLogger('nipype.interface') + + +class WatershedBEMInputSpec(FSTraitedSpec): + subject_id = traits.Str( + argstr='--subject %s', + mandatory=True, + desc='Subject ID (must have a complete Freesurfer directory)') + subjects_dir = Directory( + exists=True, + mandatory=True, + usedefault=True, + desc='Path to Freesurfer subjects directory') + volume = traits.Enum( + 'T1', + 'aparc+aseg', + 'aseg', + 'brain', + 'orig', + 'brainmask', + 'ribbon', + argstr='--volume %s', + usedefault=True, + desc='The volume from the "mri" directory to use (defaults to T1)') + overwrite = traits.Bool( + True, + usedefault=True, + argstr='--overwrite', + desc='Overwrites the existing files') + atlas_mode = traits.Bool( + argstr='--atlas', + desc='Use atlas mode for registration (default: no rigid alignment)') + + +class WatershedBEMOutputSpec(TraitedSpec): + mesh_files = OutputMultiPath( + File(exists=True), + desc=('Paths to the output meshes (brain, inner ' + 'skull, outer skull, outer skin)')) + brain_surface = File( + exists=True, + loc='bem/watershed', + desc='Brain surface (in Freesurfer format)') + inner_skull_surface = File( + exists=True, + loc='bem/watershed', + desc='Inner skull surface (in Freesurfer format)') + outer_skull_surface = File( + exists=True, + loc='bem/watershed', + desc='Outer skull surface (in Freesurfer format)') + outer_skin_surface = File( + exists=True, + loc='bem/watershed', + desc='Outer skin surface (in Freesurfer format)') + fif_file = File( + exists=True, + loc='bem', + altkey='fif', + desc='"fif" format file for EEG processing in MNE') + cor_files = OutputMultiPath( + File(exists=True), + loc='bem/watershed/ws', + altkey='COR', + desc='"COR" format files') + + +class WatershedBEM(FSCommand): + """Uses mne_watershed_bem to get information from dicom directories + + Examples + -------- + + >>> from nipype.interfaces.mne import WatershedBEM + >>> bem = WatershedBEM() + >>> bem.inputs.subject_id = 'subj1' + >>> bem.inputs.subjects_dir = '.' 
+ >>> bem.cmdline + 'mne_watershed_bem --overwrite --subject subj1 --volume T1' + >>> bem.run() # doctest: +SKIP + + """ + + _cmd = 'mne_watershed_bem' + input_spec = WatershedBEMInputSpec + output_spec = WatershedBEMOutputSpec + _additional_metadata = ['loc', 'altkey'] + + def _get_files(self, path, key, dirval, altkey=None): + globsuffix = '*' + globprefix = '*' + keydir = op.join(path, dirval) + if altkey: + key = altkey + globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix))) + return glob.glob(globpattern) + + def _list_outputs(self): + outputs = self.output_spec().get() + subjects_dir = self.inputs.subjects_dir + subject_path = op.join(subjects_dir, self.inputs.subject_id) + output_traits = self._outputs() + mesh_paths = [] + for k in list(outputs.keys()): + if k != 'mesh_files': + val = self._get_files(subject_path, k, + output_traits.traits()[k].loc, + output_traits.traits()[k].altkey) + if val: + value_list = simplify_list(val) + if isinstance(value_list, list): + out_files = [] + for value in value_list: + out_files.append(op.abspath(value)) + elif isinstance(value_list, (str, bytes)): + out_files = op.abspath(value_list) + else: + raise TypeError + outputs[k] = out_files + if not k.rfind('surface') == -1: + mesh_paths.append(out_files) + outputs['mesh_files'] = mesh_paths + return outputs diff --git a/nipype/interfaces/mne/tests/__init__.py b/nipype/interfaces/mne/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/mne/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py new file mode 100644 index 0000000000..4ced8de75a --- /dev/null +++ b/nipype/interfaces/mne/tests/test_auto_WatershedBEM.py @@ -0,0 +1,56 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import WatershedBEM + + +def test_WatershedBEM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atlas_mode=dict(argstr='--atlas', ), + environ=dict( + nohash=True, + usedefault=True, + ), + overwrite=dict( + argstr='--overwrite', + usedefault=True, + ), + subject_id=dict( + argstr='--subject %s', + mandatory=True, + ), + subjects_dir=dict( + mandatory=True, + usedefault=True, + ), + volume=dict( + argstr='--volume %s', + usedefault=True, + ), + ) + inputs = WatershedBEM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_WatershedBEM_outputs(): + output_map = dict( + brain_surface=dict(loc='bem/watershed', ), + cor_files=dict( + altkey='COR', + loc='bem/watershed/ws', + ), + fif_file=dict( + altkey='fif', + loc='bem', + ), + inner_skull_surface=dict(loc='bem/watershed', ), + mesh_files=dict(), + outer_skin_surface=dict(loc='bem/watershed', ), + outer_skull_surface=dict(loc='bem/watershed', ), + ) + outputs = WatershedBEM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/__init__.py b/nipype/interfaces/mrtrix/__init__.py new file mode 100644 index 0000000000..ea066d4cd8 --- /dev/null +++ b/nipype/interfaces/mrtrix/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: 
+from .tracking import (Tracks2Prob, FilterTracks, StreamlineTrack, + DiffusionTensorStreamlineTrack, + SphericallyDeconvolutedStreamlineTrack, + ProbabilisticSphericallyDeconvolutedStreamlineTrack) +from .tensors import (FSL2MRTrix, ConstrainedSphericalDeconvolution, + DWI2SphericalHarmonicsImage, EstimateResponseForSH, + GenerateDirections, FindShPeaks, Directions2Amplitude) +from .preprocess import (MRConvert, MRMultiply, MRTrixViewer, MRTrixInfo, + GenerateWhiteMatterMask, DWI2Tensor, + Tensor2ApparentDiffusion, Tensor2FractionalAnisotropy, + Tensor2Vector, MedianFilter3D, Erode, Threshold) +from .convert import MRTrix2TrackVis diff --git a/nipype/interfaces/mrtrix/convert.py b/nipype/interfaces/mrtrix/convert.py new file mode 100644 index 0000000000..a3a280c895 --- /dev/null +++ b/nipype/interfaces/mrtrix/convert.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from io import open + +import os.path as op +import nibabel as nb +import nibabel.trackvis as trk +import numpy as np +from nibabel.trackvis import HeaderError +from nibabel.volumeutils import native_code +from nibabel.orientations import aff2axcodes + +from ... import logging +from ...utils.filemanip import split_filename +from ...workflows.misc.utils import get_data_dims, get_vox_dims +from ..base import TraitedSpec, File, isdefined +from ..dipy.base import DipyBaseInterface, HAVE_DIPY as have_dipy + +iflogger = logging.getLogger('nipype.interface') + + +def transform_to_affine(streams, header, affine): + from dipy.tracking.utils import move_streamlines + rotation, scale = np.linalg.qr(affine) + streams = move_streamlines(streams, rotation) + scale[0:3, 0:3] = np.dot(scale[0:3, 0:3], + np.diag(1. 
/ header['voxel_size']))
+    scale[0:3, 3] = abs(scale[0:3, 3])
+    streams = move_streamlines(streams, scale)
+    return streams
+
+
+def read_mrtrix_tracks(in_file, as_generator=True):
+    header = read_mrtrix_header(in_file)
+    streamlines = read_mrtrix_streamlines(in_file, header, as_generator)
+    return header, streamlines
+
+
+def read_mrtrix_header(in_file):
+    fileobj = open(in_file, 'rb')
+    header = {}
+    iflogger.info('Reading header data...')
+    for line in fileobj:
+        line = line.decode()
+        if line == 'END\n':
+            iflogger.info('Reached the end of the header!')
+            break
+        elif ': ' in line:
+            line = line.replace('\n', '')
+            line = line.replace("'", "")
+            key = line.split(': ')[0]
+            value = line.split(': ')[1]
+            header[key] = value
+            iflogger.info('...adding "%s" to header for key "%s"', value, key)
+    fileobj.close()
+    header['count'] = int(header['count'].replace('\n', ''))
+    # the 'file' field has the form '. <offset>'; stripping the dot
+    # recovers the byte offset at which the track data start
+    header['offset'] = int(header['file'].replace('.', ''))
+    return header
+
+
+def read_mrtrix_streamlines(in_file, header, as_generator=True):
+    offset = header['offset']
+    stream_count = header['count']
+    fileobj = open(in_file, 'rb')
+    fileobj.seek(offset)
+    endianness = native_code
+    f4dt = np.dtype(endianness + 'f4')
+    pt_cols = 3
+    bytesize = pt_cols * 4
+
+    def points_per_track(offset):
+        track_points = []
+        iflogger.info('Identifying the number of points per tract...')
+        all_str = fileobj.read()
+        num_triplets = int(len(all_str) / bytesize)
+        pts = np.ndarray(
+            shape=(num_triplets, pt_cols), dtype='f4', buffer=all_str)
+        nonfinite_list = np.where(np.invert(np.isfinite(pts[:, 2])))
+        # converts the numpy array to a list and drops the trailing
+        # non-finite separator that terminates the file
+        nonfinite_list = list(nonfinite_list[0])[0:-1]
+        for idx, value in enumerate(nonfinite_list):
+            if idx == 0:
+                track_points.append(nonfinite_list[idx])
+            else:
+                track_points.append(
+                    nonfinite_list[idx] - nonfinite_list[idx - 1] - 1)
+        return track_points, nonfinite_list
+
+    def track_gen(track_points):
+        n_streams = 0
+        iflogger.info('Reading tracks...')
+        while True:
+            try:
+                n_pts = track_points[n_streams]
+            except IndexError:
+                break
+            pts_str = fileobj.read(n_pts * bytesize)
+            nan_str = fileobj.read(bytesize)
+            if len(pts_str) < (n_pts * bytesize):
+                # a short read is only acceptable once every expected
+                # track has been consumed
+                if not n_streams == stream_count:
+                    raise HeaderError('Expecting %s points, found only %s' %
+                                      (stream_count, n_streams))
+                iflogger.error('Expecting %s points, found only %s',
+                               stream_count, n_streams)
+                break
+            pts = np.ndarray(
+                shape=(n_pts, pt_cols), dtype=f4dt, buffer=pts_str)
+            nan_pt = np.ndarray(shape=(1, pt_cols), dtype=f4dt, buffer=nan_str)
+            # each track must be terminated by a non-finite (NaN) triplet
+            if np.isfinite(nan_pt[0][0]):
+                raise ValueError('Expected a NaN separator after each track')
+            xyz = pts[:, :3]
+            yield xyz
+            n_streams += 1
+            if n_streams == stream_count:
+                iflogger.info('100%% : %i tracks read', n_streams)
+                # a plain return ends the generator; raising StopIteration
+                # here would surface as a RuntimeError under PEP 479
+                return
+            try:
+                if n_streams % int(stream_count / 100) == 0:
+                    percent = int(float(n_streams) / float(stream_count) * 100)
+                    iflogger.info('%i%% : %i tracks read', percent, n_streams)
+            except ZeroDivisionError:
+                iflogger.info('%i stream read out of %i', n_streams,
+                              stream_count)
+
+    track_points, nonfinite_list = points_per_track(offset)
+    fileobj.seek(offset)
+    streamlines = track_gen(track_points)
+    if not as_generator:
+        streamlines = list(streamlines)
+    return streamlines
+
+
+class MRTrix2TrackVisInputSpec(TraitedSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc='The input file for the tracks in MRTrix (.tck) format')
+    image_file = File(
+        exists=True, desc='The image the tracks were generated from')
+    matrix_file = File(
+        exists=True,
+        desc=
+        'A 
transformation matrix to apply to the tracts after they have been generated (from FLIRT - affine transformation from image_file to registration_image_file)' + ) + registration_image_file = File( + exists=True, + desc='The final image the tracks should be registered to.') + out_filename = File( + 'converted.trk', + genfile=True, + usedefault=True, + desc='The output filename for the tracks in TrackVis (.trk) format') + + +class MRTrix2TrackVisOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class MRTrix2TrackVis(DipyBaseInterface): + """ + Converts MRtrix (.tck) tract files into TrackVis (.trk) format + using functions from dipy + Example + ------- + >>> import nipype.interfaces.mrtrix as mrt + >>> tck2trk = mrt.MRTrix2TrackVis() + >>> tck2trk.inputs.in_file = 'dwi_CSD_tracked.tck' + >>> tck2trk.inputs.image_file = 'diffusion.nii' + >>> tck2trk.run() # doctest: +SKIP + """ + input_spec = MRTrix2TrackVisInputSpec + output_spec = MRTrix2TrackVisOutputSpec + + def _run_interface(self, runtime): + from dipy.tracking.utils import move_streamlines, \ + affine_from_fsl_mat_file + dx, dy, dz = get_data_dims(self.inputs.image_file) + vx, vy, vz = get_vox_dims(self.inputs.image_file) + image_file = nb.load(self.inputs.image_file) + affine = image_file.affine + out_filename = op.abspath(self.inputs.out_filename) + + # Reads MRTrix tracks + header, streamlines = read_mrtrix_tracks( + self.inputs.in_file, as_generator=True) + iflogger.info('MRTrix Header:') + iflogger.info(header) + # Writes to Trackvis + trk_header = nb.trackvis.empty_header() + trk_header['dim'] = [dx, dy, dz] + trk_header['voxel_size'] = [vx, vy, vz] + trk_header['n_count'] = header['count'] + + if isdefined(self.inputs.matrix_file) and isdefined( + self.inputs.registration_image_file): + iflogger.info('Applying transformation from matrix file %s', + self.inputs.matrix_file) + xfm = np.genfromtxt(self.inputs.matrix_file) + iflogger.info(xfm) + registration_image_file = nb.load( + self.inputs.registration_image_file) + reg_affine = registration_image_file.affine + r_dx, r_dy, r_dz = get_data_dims( + self.inputs.registration_image_file) + r_vx, r_vy, r_vz = get_vox_dims( + self.inputs.registration_image_file) + iflogger.info('Using affine from registration image file %s', + self.inputs.registration_image_file) + iflogger.info(reg_affine) + trk_header['vox_to_ras'] = reg_affine + trk_header['dim'] = [r_dx, r_dy, r_dz] + trk_header['voxel_size'] = [r_vx, r_vy, r_vz] + + affine = np.dot(affine, np.diag(1. 
/ np.array([vx, vy, vz, 1]))) + transformed_streamlines = transform_to_affine( + streamlines, trk_header, affine) + + aff = affine_from_fsl_mat_file(xfm, [vx, vy, vz], + [r_vx, r_vy, r_vz]) + iflogger.info(aff) + + axcode = aff2axcodes(reg_affine) + trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2] + + final_streamlines = move_streamlines(transformed_streamlines, aff) + trk_tracks = ((ii, None, None) for ii in final_streamlines) + trk.write(out_filename, trk_tracks, trk_header) + iflogger.info('Saving transformed Trackvis file as %s', + out_filename) + iflogger.info('New TrackVis Header:') + iflogger.info(trk_header) + else: + iflogger.info( + 'Applying transformation from scanner coordinates to %s', + self.inputs.image_file) + axcode = aff2axcodes(affine) + trk_header['voxel_order'] = axcode[0] + axcode[1] + axcode[2] + trk_header['vox_to_ras'] = affine + transformed_streamlines = transform_to_affine( + streamlines, trk_header, affine) + trk_tracks = ((ii, None, None) for ii in transformed_streamlines) + trk.write(out_filename, trk_tracks, trk_header) + iflogger.info('Saving Trackvis file as %s', out_filename) + iflogger.info('TrackVis Header:') + iflogger.info(trk_header) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = op.abspath(self.inputs.out_filename) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '.trk' diff --git a/nipype/interfaces/mrtrix/defhdr.mat b/nipype/interfaces/mrtrix/defhdr.mat new file mode 100644 index 0000000000000000000000000000000000000000..e27ddf576f24fa7d876bb603c8ed276945dfe30d GIT binary patch literal 533 zcmeZu4DoSvQZUssQ1EpO(M`+DN!3vZ$Vn_o%P-2cQgHY2i*PhE(NS#k5il@%`tma{F!VAqFjUNWJ8iGuVFQ7N zTwyJipsS6Y%Nf*;h4mOPCT(4FEK7H5SxWn*vPTCi816C1HBOv3k-ISZ`py6j;T7t0 z*X%w${dcGEa$k;{)(T5P*DUCnvgvj1t`M&y%k352@0D|lub#5|j+U*$#wA%XAEz1_ z23tPzJDVzF7{)v0n8exG_@0*1sU9z%NA*0uojEUZvgd4V$;5m1tAhfh&+~3yH0gww z`dgP}rGF)6dr7+*O`Ml>_rRTFNeYdDLC=-K*|J($dzvg)CVpY*dUpBNi8W?M>)r0` zJ@G@t@ze<(AePZ&NV{uZ9#9*qu)q1=gcAl^U+8Hp;jMBD`)%~`UqSq~KT#gXZ<~kj z{mgw{p1p?qVL8*h#;*o%%pOT3_wI0=Kigv0v5TtT{~fLQ=E%SJr_IFw%C~o#PG0n1 zZ{_^xN2R9Qi`Z_|-plwaBmeT(AM1PDr|#GNSlwPb{qWz*jITG9-|dKg!hN3cjV=Fq wMzI@TW`EtZbq~+m&+7N5>wd3Z_v3s`-NN2aiRWvDr!)NIa{t8sS|Yn10FWN#ZU6uP literal 0 HcmV?d00001 diff --git a/nipype/interfaces/mrtrix/preprocess.py b/nipype/interfaces/mrtrix/preprocess.py new file mode 100644 index 0000000000..5fc67177a1 --- /dev/null +++ b/nipype/interfaces/mrtrix/preprocess.py @@ -0,0 +1,922 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op + +from ...utils.filemanip import split_filename +from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, + File, InputMultiPath, isdefined) + + +class MRConvertInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='voxel-order data filename') + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output filename') + extract_at_axis = traits.Enum( + 1, + 2, + 3, + argstr='-coord %s', + position=1, + desc= + '"Extract data only at the coordinates specified. This option specifies the Axis. 
Must be used in conjunction with extract_at_coordinate.'
+    )
+    extract_at_coordinate = traits.List(
+        traits.Float,
+        argstr='%s',
+        sep=',',
+        position=2,
+        minlen=1,
+        maxlen=3,
+        desc='Extract data only at the coordinates specified. This option '
+        'specifies the coordinates. Must be used in conjunction with '
+        'extract_at_axis.')
+    voxel_dims = traits.List(
+        traits.Float,
+        argstr='-vox %s',
+        sep=',',
+        position=3,
+        minlen=3,
+        maxlen=3,
+        desc='Three comma-separated numbers giving the size of each voxel in mm.')
+    output_datatype = traits.Enum(
+        "float",
+        "char",
+        "short",
+        "int",
+        "long",
+        "double",
+        argstr='-output %s',
+        position=2,
+        desc='Output datatype. Can be "char", "short", "int", "long", '
+        '"float" or "double"'
+    )  # , usedefault=True)
+    extension = traits.Enum(
+        "mif",
+        "nii",
+        "float",
+        "char",
+        "short",
+        "int",
+        "long",
+        "double",
+        position=2,
+        desc='The file extension to use for the output image ("mif" by default)',
+        usedefault=True)
+    layout = traits.Enum(
+        "nii",
+        "float",
+        "char",
+        "short",
+        "int",
+        "long",
+        "double",
+        argstr='-layout %s',
+        position=2,
+        desc='specify the layout of the data in memory. The actual layout '
+        'produced will depend on whether the output image format can '
+        'support it.')
+    resample = traits.Float(
+        argstr='-scale %f',
+        position=3,
+        desc='Apply scaling to the intensity values.')
+    offset_bias = traits.Float(
+        argstr='-offset %f',
+        position=3,
+        desc='Apply offset to the intensity values.')
+    replace_NaN_with_zero = traits.Bool(
+        argstr='-zero', position=3, desc="Replace all NaN values with zero.")
+    prs = traits.Bool(
+        argstr='-prs',
+        position=3,
+        desc="Assume that the DW gradients are specified in the PRS frame "
+        "(Siemens DICOM only).")
+
+
+class MRConvertOutputSpec(TraitedSpec):
+    converted = File(exists=True, desc='path/name of 4D volume in voxel order')
+
+
+class MRConvert(CommandLine):
+    """
+    Perform conversion between different file types and optionally extract a subset of the input image.
+
+    If used correctly, this program can be a very useful workhorse.
+    In addition to converting images between different formats, it can
+    be used to extract specific studies from a data set, extract a specific
+    region of interest, flip the images, or to scale the intensity of the images.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> mrconvert = mrt.MRConvert()
+    >>> mrconvert.inputs.in_file = 'dwi_FA.mif'
+    >>> mrconvert.inputs.out_filename = 'dwi_FA.nii'
+    >>> mrconvert.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'mrconvert'
+    input_spec = MRConvertInputSpec
+    output_spec = MRConvertOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['converted'] = self.inputs.out_filename
+        if not isdefined(outputs['converted']):
+            outputs['converted'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['converted'] = op.abspath(outputs['converted'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        if isdefined(self.inputs.out_filename):
+            outname = self.inputs.out_filename
+        else:
+            outname = name + '_mrconvert.' + self.inputs.extension
+        return outname
+
+
+class DWI2TensorInputSpec(CommandLineInputSpec):
+    in_file = InputMultiPath(
+        File(exists=True),
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='Diffusion-weighted images')
+    out_filename = File(
+        name_template="%s_tensor.mif",
+        name_source="in_file",
+        output_name="tensor",
+        argstr='%s',
+        desc='Output tensor filename',
+        position=-1)
+    encoding_file = File(
+        argstr='-grad %s',
+        position=2,
+        desc=('Encoding file supplied as a 4xN text file in '
+              'which each line is in the format [ X Y Z b ], '
+              'where [ X Y Z ] describe the direction of the '
+              'applied gradient, and b gives the b-value in '
+              'units (1000 s/mm^2). See FSL2MRTrix()'))
+    ignore_slice_by_volume = traits.List(
+        traits.Int,
+        argstr='-ignoreslices %s',
+        sep=' ',
+        position=2,
+        minlen=2,
+        maxlen=2,
+        desc=('Requires two values (i.e. [34 1] for '
+              '[Slice Volume]). Ignores the image slices '
+              'specified when computing the tensor. Slice '
+              'here means the z coordinate of the slice to '
+              'be ignored.'))
+    ignore_volumes = traits.List(
+        traits.Int,
+        argstr='-ignorevolumes %s',
+        sep=' ',
+        position=2,
+        minlen=1,
+        desc=('Requires one or more values (i.e. [2 5 6]). '
+              'Ignores the image volumes specified when '
+              'computing the tensor.'))
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc=("Do not display information messages or progress "
+              "status."))
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class DWI2TensorOutputSpec(TraitedSpec):
+    tensor = File(
+        exists=True, desc='path/name of output diffusion tensor image')
+
+
+class DWI2Tensor(CommandLine):
+    """
+    Converts diffusion-weighted images to tensor images.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> dwi2tensor = mrt.DWI2Tensor()
+    >>> dwi2tensor.inputs.in_file = 'dwi.mif'
+    >>> dwi2tensor.inputs.encoding_file = 'encoding.txt'
+    >>> dwi2tensor.cmdline
+    'dwi2tensor -grad encoding.txt dwi.mif dwi_tensor.mif'
+    >>> dwi2tensor.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'dwi2tensor'
+    input_spec = DWI2TensorInputSpec
+    output_spec = DWI2TensorOutputSpec
+
+
+class Tensor2VectorInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='Diffusion tensor image')
+    out_filename = File(
+        genfile=True, argstr='%s', position=-1, desc='Output vector filename')
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc="Do not display information messages or progress status.")
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class Tensor2VectorOutputSpec(TraitedSpec):
+    vector = File(
+        exists=True,
+        desc='the output image of the major eigenvectors of the diffusion '
+        'tensor image.')
+
+
+class Tensor2Vector(CommandLine):
+    """
+    Generates a map of the major eigenvectors of the tensors in each voxel.
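+
+    Each voxel of the output holds the x, y and z components of the tensor's
+    principal eigenvector, so the result is typically viewed as a
+    direction-encoded (e.g. RGB) map of the dominant fibre orientation.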
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> tensor2vector = mrt.Tensor2Vector()
+    >>> tensor2vector.inputs.in_file = 'dwi_tensor.mif'
+    >>> tensor2vector.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'tensor2vector'
+    input_spec = Tensor2VectorInputSpec
+    output_spec = Tensor2VectorOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['vector'] = self.inputs.out_filename
+        if not isdefined(outputs['vector']):
+            outputs['vector'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['vector'] = op.abspath(outputs['vector'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_vector.mif'
+
+
+class Tensor2FractionalAnisotropyInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='Diffusion tensor image')
+    out_filename = File(
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        desc='Output Fractional Anisotropy filename')
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc="Do not display information messages or progress status.")
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class Tensor2FractionalAnisotropyOutputSpec(TraitedSpec):
+    FA = File(
+        exists=True,
+        desc='the output image of the fractional anisotropy values of the '
+        'diffusion tensor image.')
+
+
+class Tensor2FractionalAnisotropy(CommandLine):
+    """
+    Generates a map of the fractional anisotropy in each voxel.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> tensor2FA = mrt.Tensor2FractionalAnisotropy()
+    >>> tensor2FA.inputs.in_file = 'dwi_tensor.mif'
+    >>> tensor2FA.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'tensor2FA'
+    input_spec = Tensor2FractionalAnisotropyInputSpec
+    output_spec = Tensor2FractionalAnisotropyOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['FA'] = self.inputs.out_filename
+        if not isdefined(outputs['FA']):
+            outputs['FA'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['FA'] = op.abspath(outputs['FA'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_FA.mif'
+
+
+class Tensor2ApparentDiffusionInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='Diffusion tensor image')
+    out_filename = File(
+        genfile=True,
+        argstr='%s',
+        position=-1,
+        desc='Output Apparent Diffusion Coefficient filename')
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc="Do not display information messages or progress status.")
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class Tensor2ApparentDiffusionOutputSpec(TraitedSpec):
+    ADC = File(
+        exists=True,
+        desc='the output image of the apparent diffusion coefficient (ADC) '
+        'values of the diffusion tensor image.'
+ ) + + +class Tensor2ApparentDiffusion(CommandLine): + """ + Generates a map of the apparent diffusion coefficient (ADC) in each voxel + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> tensor2ADC = mrt.Tensor2ApparentDiffusion() + >>> tensor2ADC.inputs.in_file = 'dwi_tensor.mif' + >>> tensor2ADC.run() # doctest: +SKIP + """ + + _cmd = 'tensor2ADC' + input_spec = Tensor2ApparentDiffusionInputSpec + output_spec = Tensor2ApparentDiffusionOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['ADC'] = self.inputs.out_filename + if not isdefined(outputs['ADC']): + outputs['ADC'] = op.abspath(self._gen_outfilename()) + else: + outputs['ADC'] = op.abspath(outputs['ADC']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_ADC.mif' + + +class MRMultiplyInputSpec(CommandLineInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=-2, + desc='Input images to be multiplied') + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output image filename') + quiet = traits.Bool( + argstr='-quiet', + position=1, + desc="Do not display information messages or progress status.") + debug = traits.Bool( + argstr='-debug', position=1, desc="Display debugging messages.") + + +class MRMultiplyOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output image of the multiplication') + + +class MRMultiply(CommandLine): + """ + Multiplies two images. + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> MRmult = mrt.MRMultiply() + >>> MRmult.inputs.in_files = ['dwi.mif', 'dwi_WMProb.mif'] + >>> MRmult.run() # doctest: +SKIP + """ + + _cmd = 'mrmult' + input_spec = MRMultiplyInputSpec + output_spec = MRMultiplyOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_filename + if not isdefined(outputs['out_file']): + outputs['out_file'] = op.abspath(self._gen_outfilename()) + else: + outputs['out_file'] = op.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_files[0]) + return name + '_MRMult.mif' + + +class MRTrixViewerInputSpec(CommandLineInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=-2, + desc='Input images to be viewed') + quiet = traits.Bool( + argstr='-quiet', + position=1, + desc="Do not display information messages or progress status.") + debug = traits.Bool( + argstr='-debug', position=1, desc="Display debugging messages.") + + +class MRTrixViewerOutputSpec(TraitedSpec): + pass + + +class MRTrixViewer(CommandLine): + """ + Loads the input images in the MRTrix Viewer. 
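+
+    Note that this simply launches the interactive ``mrview`` display; it
+    produces no output files, which is why MRTrixViewerOutputSpec is empty.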
+ + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> MRview = mrt.MRTrixViewer() + >>> MRview.inputs.in_files = 'dwi.mif' + >>> MRview.run() # doctest: +SKIP + """ + + _cmd = 'mrview' + input_spec = MRTrixViewerInputSpec + output_spec = MRTrixViewerOutputSpec + + def _list_outputs(self): + return + + +class MRTrixInfoInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='Input images to be read') + + +class MRTrixInfoOutputSpec(TraitedSpec): + pass + + +class MRTrixInfo(CommandLine): + """ + Prints out relevant header information found in the image specified. + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> MRinfo = mrt.MRTrixInfo() + >>> MRinfo.inputs.in_file = 'dwi.mif' + >>> MRinfo.run() # doctest: +SKIP + """ + + _cmd = 'mrinfo' + input_spec = MRTrixInfoInputSpec + output_spec = MRTrixInfoOutputSpec + + def _list_outputs(self): + return + + +class GenerateWhiteMatterMaskInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-3, + desc='Diffusion-weighted images') + binary_mask = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='Binary brain mask') + out_WMProb_filename = File( + genfile=True, + argstr='%s', + position=-1, + desc='Output WM probability image filename') + encoding_file = File( + exists=True, + argstr='-grad %s', + mandatory=True, + position=1, + desc= + 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + ) + noise_level_margin = traits.Float( + argstr='-margin %s', + desc= + 'Specify the width of the margin on either side of the image to be used to estimate the noise level (default = 10)' + ) + + +class GenerateWhiteMatterMaskOutputSpec(TraitedSpec): + WMprobabilitymap = File(exists=True, desc='WMprobabilitymap') + + +class GenerateWhiteMatterMask(CommandLine): + """ + Generates a white matter probability mask from the DW images. 
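+
+    The output image gives, for each voxel inside the supplied binary brain
+    mask, the estimated probability that the voxel belongs to white matter.
+    It is typically thresholded (see Threshold) to obtain a binary mask for
+    seeding or constraining tractography.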
+ + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> genWM = mrt.GenerateWhiteMatterMask() + >>> genWM.inputs.in_file = 'dwi.mif' + >>> genWM.inputs.encoding_file = 'encoding.txt' + >>> genWM.run() # doctest: +SKIP + """ + + _cmd = 'gen_WM_mask' + input_spec = GenerateWhiteMatterMaskInputSpec + output_spec = GenerateWhiteMatterMaskOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['WMprobabilitymap'] = op.abspath(self._gen_outfilename()) + return outputs + + def _gen_filename(self, name): + if name == 'out_WMProb_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_WMProb.mif' + + +class ErodeInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='Input mask image to be eroded') + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output image filename') + number_of_passes = traits.Int( + argstr='-npass %s', desc='the number of passes (default: 1)') + dilate = traits.Bool( + argstr='-dilate', + position=1, + desc="Perform dilation rather than erosion") + quiet = traits.Bool( + argstr='-quiet', + position=1, + desc="Do not display information messages or progress status.") + debug = traits.Bool( + argstr='-debug', position=1, desc="Display debugging messages.") + + +class ErodeOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output image') + + +class Erode(CommandLine): + """ + Erode (or dilates) a mask (i.e. binary) image + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> erode = mrt.Erode() + >>> erode.inputs.in_file = 'mask.mif' + >>> erode.run() # doctest: +SKIP + """ + _cmd = 'erode' + input_spec = ErodeInputSpec + output_spec = ErodeOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_filename + if not isdefined(outputs['out_file']): + outputs['out_file'] = op.abspath(self._gen_outfilename()) + else: + outputs['out_file'] = op.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_erode.mif' + + +class ThresholdInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='The input image to be thresholded') + out_filename = File( + genfile=True, + argstr='%s', + position=-1, + desc='The output binary image mask.') + absolute_threshold_value = traits.Float( + argstr='-abs %s', + desc='Specify threshold value as absolute intensity.') + percentage_threshold_value = traits.Float( + argstr='-percent %s', + desc= + 'Specify threshold value as a percentage of the peak intensity in the input image.' 
+ ) + invert = traits.Bool( + argstr='-invert', position=1, desc="Invert output binary mask") + replace_zeros_with_NaN = traits.Bool( + argstr='-nan', position=1, desc="Replace all zero values with NaN") + quiet = traits.Bool( + argstr='-quiet', + position=1, + desc="Do not display information messages or progress status.") + debug = traits.Bool( + argstr='-debug', position=1, desc="Display debugging messages.") + + +class ThresholdOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='The output binary image mask.') + + +class Threshold(CommandLine): + """ + Create bitwise image by thresholding image intensity. + + By default, the threshold level is determined using a histogram analysis + to cut out the background. Otherwise, the threshold intensity can be + specified using command line options. + Note that only the first study is used for thresholding. + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> thresh = mrt.Threshold() + >>> thresh.inputs.in_file = 'wm_mask.mif' + >>> thresh.run() # doctest: +SKIP + """ + + _cmd = 'threshold' + input_spec = ThresholdInputSpec + output_spec = ThresholdOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_filename + if not isdefined(outputs['out_file']): + outputs['out_file'] = op.abspath(self._gen_outfilename()) + else: + outputs['out_file'] = op.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_thresh.mif' + + +class MedianFilter3DInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='Input images to be smoothed') + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output image filename') + quiet = traits.Bool( + argstr='-quiet', + position=1, + desc="Do not display information messages or progress status.") + debug = traits.Bool( + argstr='-debug', position=1, desc="Display debugging messages.") + + +class MedianFilter3DOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output image') + + +class MedianFilter3D(CommandLine): + """ + Smooth images using a 3x3x3 median filter. 
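+
+    Each voxel is replaced by the median value of its 3x3x3 neighbourhood
+    (the voxel itself plus its 26 neighbours), which suppresses isolated
+    misclassified voxels in binary masks while preserving edges.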
+ + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> median3d = mrt.MedianFilter3D() + >>> median3d.inputs.in_file = 'mask.mif' + >>> median3d.run() # doctest: +SKIP + """ + + _cmd = 'median3D' + input_spec = MedianFilter3DInputSpec + output_spec = MedianFilter3DOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_filename + if not isdefined(outputs['out_file']): + outputs['out_file'] = op.abspath(self._gen_outfilename()) + else: + outputs['out_file'] = op.abspath(outputs['out_file']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_median3D.mif' + + +class MRTransformInputSpec(CommandLineInputSpec): + in_files = InputMultiPath( + File(exists=True), + argstr='%s', + mandatory=True, + position=-2, + desc='Input images to be transformed') + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output image') + invert = traits.Bool( + argstr='-inverse', + position=1, + desc="Invert the specified transform before using it") + replace_transform = traits.Bool( + argstr='-replace', + position=1, + desc= + "replace the current transform by that specified, rather than applying it to the current transform" + ) + transformation_file = File( + exists=True, + argstr='-transform %s', + position=1, + desc='The transform to apply, in the form of a 4x4 ascii file.') + template_image = File( + exists=True, + argstr='-template %s', + position=1, + desc='Reslice the input image to match the specified template image.') + reference_image = File( + exists=True, + argstr='-reference %s', + position=1, + desc= + 'in case the transform supplied maps from the input image onto a reference image, use this option to specify the reference. Note that this implicitly sets the -replace option.' + ) + flip_x = traits.Bool( + argstr='-flipx', + position=1, + desc= + "assume the transform is supplied assuming a coordinate system with the x-axis reversed relative to the MRtrix convention (i.e. x increases from right to left). This is required to handle transform matrices produced by FSL's FLIRT command. This is only used in conjunction with the -reference option." 
+ )
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc="Do not display information messages or progress status.")
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class MRTransformOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output image of the transformation')
+
+
+class MRTransform(CommandLine):
+    """
+    Apply spatial transformations or reslice images
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> MRxform = mrt.MRTransform()
+    >>> MRxform.inputs.in_files = 'anat_coreg.mif'
+    >>> MRxform.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'mrtransform'
+    input_spec = MRTransformInputSpec
+    output_spec = MRTransformOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = self.inputs.out_filename
+        if not isdefined(outputs['out_file']):
+            outputs['out_file'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['out_file'] = op.abspath(outputs['out_file'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_files[0])
+        return name + '_MRTransform.mif'
diff --git a/nipype/interfaces/mrtrix/tensors.py b/nipype/interfaces/mrtrix/tensors.py
new file mode 100644
index 0000000000..04c901f1f9
--- /dev/null
+++ b/nipype/interfaces/mrtrix/tensors.py
@@ -0,0 +1,612 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os.path as op
+import numpy as np
+
+from ... import logging
+from ...utils.filemanip import split_filename
+from ..base import (CommandLineInputSpec, CommandLine, BaseInterface, traits,
+                    File, TraitedSpec, isdefined)
+iflogger = logging.getLogger('nipype.interface')
+
+
+class DWI2SphericalHarmonicsImageInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='Diffusion-weighted images')
+    out_filename = File(
+        genfile=True, argstr='%s', position=-1, desc='Output filename')
+    encoding_file = File(
+        exists=True,
+        argstr='-grad %s',
+        mandatory=True,
+        position=1,
+        desc='Gradient encoding, supplied as a 4xN text file in which each '
+        'line is in the format [ X Y Z b ], where [ X Y Z ] describe the '
+        'direction of the applied gradient, and b gives the b-value in '
+        'units (1000 s/mm^2). See FSL2MRTrix')
+    maximum_harmonic_order = traits.Float(
+        argstr='-lmax %s',
+        desc='set the maximum harmonic order for the output series. By '
+        'default, the program will use the highest possible lmax given the '
+        'number of diffusion-weighted images.')
+    normalise = traits.Bool(
+        argstr='-normalise',
+        position=3,
+        desc="normalise the DW signal to the b=0 image")
+
+
+class DWI2SphericalHarmonicsImageOutputSpec(TraitedSpec):
+    spherical_harmonics_image = File(
+        exists=True, desc='Spherical harmonics image')
+
+
+class DWI2SphericalHarmonicsImage(CommandLine):
+    """
+    Convert diffusion-weighted images to their spherical harmonic representation.
+
+    This program outputs the spherical harmonic decomposition for the set of
+    measured signal attenuations. The signal attenuations are calculated by
+    identifying the b-zero images from the diffusion encoding supplied
+    (i.e. those with zero as the b-value), and dividing the remaining signals
+    by the mean b-zero signal intensity.
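+    Per voxel, this amounts to computing attenuation = S_dw / mean(S_b=0)
+    for every diffusion-weighted volume.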
+    The spherical harmonic decomposition is then calculated by least-squares linear fitting.
+    Note that this program makes use of implied symmetries in the diffusion profile.
+
+    First, the fact the signal attenuation profile is real implies that it has conjugate symmetry,
+    i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be
+    antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. Therefore,
+    this program only computes the even elements.
+
+    Note that the spherical harmonics equations used here differ slightly from those conventionally used,
+    in that the (-1)^m factor has been omitted. This should be taken into account in all subsequent calculations.
+
+    Each volume in the output image corresponds to a different spherical harmonic component, according to the following convention:
+
+    * [0] Y(0,0)
+    * [1] Im {Y(2,2)}
+    * [2] Im {Y(2,1)}
+    * [3] Y(2,0)
+    * [4] Re {Y(2,1)}
+    * [5] Re {Y(2,2)}
+    * [6] Im {Y(4,4)}
+    * [7] Im {Y(4,3)}
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> dwi2SH = mrt.DWI2SphericalHarmonicsImage()
+    >>> dwi2SH.inputs.in_file = 'diffusion.nii'
+    >>> dwi2SH.inputs.encoding_file = 'encoding.txt'
+    >>> dwi2SH.run()  # doctest: +SKIP
+    """
+    _cmd = 'dwi2SH'
+    input_spec = DWI2SphericalHarmonicsImageInputSpec
+    output_spec = DWI2SphericalHarmonicsImageOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['spherical_harmonics_image'] = self.inputs.out_filename
+        if not isdefined(outputs['spherical_harmonics_image']):
+            outputs['spherical_harmonics_image'] = op.abspath(
+                self._gen_outfilename())
+        else:
+            outputs['spherical_harmonics_image'] = op.abspath(
+                outputs['spherical_harmonics_image'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_SH.mif'
+
+
+class ConstrainedSphericalDeconvolutionInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-3,
+        desc='diffusion-weighted image')
+    response_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='the diffusion-weighted signal response function for a single '
+        'fibre population (see EstimateResponse)')
+    out_filename = File(
+        genfile=True, argstr='%s', position=-1, desc='Output filename')
+    mask_image = File(
+        exists=True,
+        argstr='-mask %s',
+        position=2,
+        desc='only perform computation within the specified binary brain '
+        'mask image')
+    encoding_file = File(
+        exists=True,
+        argstr='-grad %s',
+        position=1,
+        desc='Gradient encoding, supplied as a 4xN text file in which each '
+        'line is in the format [ X Y Z b ], where [ X Y Z ] describe the '
+        'direction of the applied gradient, and b gives the b-value in '
+        'units (1000 s/mm^2). See FSL2MRTrix')
+    filter_file = File(
+        exists=True,
+        argstr='-filter %s',
+        position=-2,
+        desc='a text file containing the filtering coefficients for each '
+        'even harmonic order: the linear frequency filtering parameters '
+        'used for the initial linear spherical deconvolution step '
+        '(default = [ 1 1 1 0 0 ]).')
+
+    lambda_value = traits.Float(
+        argstr='-lambda %s',
+        desc='the regularisation parameter lambda that controls the '
+        'strength of the constraint (default = 1.0).'
+ ) + maximum_harmonic_order = traits.Int( + argstr='-lmax %s', + desc= + 'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.' + ) + threshold_value = traits.Float( + argstr='-threshold %s', + desc= + 'the threshold below which the amplitude of the FOD is assumed to be zero, expressed as a fraction of the mean value of the initial FOD (default = 0.1)' + ) + iterations = traits.Int( + argstr='-niter %s', + desc= + 'the maximum number of iterations to perform for each voxel (default = 50)' + ) + debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') + directions_file = File( + exists=True, + argstr='-directions %s', + position=-2, + desc= + 'a text file containing the [ el az ] pairs for the directions: Specify the directions over which to apply the non-negativity constraint (by default, the built-in 300 direction set is used)' + ) + + normalise = traits.Bool( + argstr='-normalise', + position=3, + desc="normalise the DW signal to the b=0 image") + + +class ConstrainedSphericalDeconvolutionOutputSpec(TraitedSpec): + spherical_harmonics_image = File( + exists=True, desc='Spherical harmonics image') + + +class ConstrainedSphericalDeconvolution(CommandLine): + """ + Perform non-negativity constrained spherical deconvolution. + + Note that this program makes use of implied symmetries in the diffusion profile. + First, the fact the signal attenuation profile is real implies that it has conjugate symmetry, + i.e. Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the diffusion profile should be + antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l components should be zero. + Therefore, this program only computes the even elements. Note that the spherical harmonics equations used here + differ slightly from those conventionally used, in that the (-1)^m factor has been omitted. This should be taken + into account in all subsequent calculations. 
Each volume in the output image corresponds to a different spherical + harmonic component, according to the following convention: + + * [0] Y(0,0) + * [1] Im {Y(2,2)} + * [2] Im {Y(2,1)} + * [3] Y(2,0) + * [4] Re {Y(2,1)} + * [5] Re {Y(2,2)} + * [6] Im {Y(4,4)} + * [7] Im {Y(4,3)} + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> csdeconv = mrt.ConstrainedSphericalDeconvolution() + >>> csdeconv.inputs.in_file = 'dwi.mif' + >>> csdeconv.inputs.encoding_file = 'encoding.txt' + >>> csdeconv.run() # doctest: +SKIP + """ + + _cmd = 'csdeconv' + input_spec = ConstrainedSphericalDeconvolutionInputSpec + output_spec = ConstrainedSphericalDeconvolutionOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['spherical_harmonics_image'] = self.inputs.out_filename + if not isdefined(outputs['spherical_harmonics_image']): + outputs['spherical_harmonics_image'] = op.abspath( + self._gen_outfilename()) + else: + outputs['spherical_harmonics_image'] = op.abspath( + outputs['spherical_harmonics_image']) + return outputs + + def _gen_filename(self, name): + if name == 'out_filename': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, name, _ = split_filename(self.inputs.in_file) + return name + '_CSD.mif' + + +class EstimateResponseForSHInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-3, + desc='Diffusion-weighted images') + mask_image = File( + exists=True, + mandatory=True, + argstr='%s', + position=-2, + desc= + 'only perform computation within the specified binary brain mask image' + ) + out_filename = File( + genfile=True, argstr='%s', position=-1, desc='Output filename') + encoding_file = File( + exists=True, + argstr='-grad %s', + mandatory=True, + position=1, + desc= + 'Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix' + ) + maximum_harmonic_order = traits.Int( + argstr='-lmax %s', + desc= + 'set the maximum harmonic order for the output series. By default, the program will use the highest possible lmax given the number of diffusion-weighted images.' + ) + normalise = traits.Bool( + argstr='-normalise', desc='normalise the DW signal to the b=0 image') + quiet = traits.Bool( + argstr='-quiet', + desc='Do not display information messages or progress status.') + debug = traits.Bool(argstr='-debug', desc='Display debugging messages.') + + +class EstimateResponseForSHOutputSpec(TraitedSpec): + response = File(exists=True, desc='Spherical harmonics image') + + +class EstimateResponseForSH(CommandLine): + """ + Estimates the fibre response function for use in spherical deconvolution. 
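+
+    The estimated response is written as a text file of spherical harmonic
+    coefficients (``<in_file>_ER.txt`` by default) and is what
+    ConstrainedSphericalDeconvolution expects as its ``response_file``.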
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> estresp = mrt.EstimateResponseForSH()
+    >>> estresp.inputs.in_file = 'dwi.mif'
+    >>> estresp.inputs.mask_image = 'dwi_WMProb.mif'
+    >>> estresp.inputs.encoding_file = 'encoding.txt'
+    >>> estresp.run()  # doctest: +SKIP
+    """
+    _cmd = 'estimate_response'
+    input_spec = EstimateResponseForSHInputSpec
+    output_spec = EstimateResponseForSHOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['response'] = self.inputs.out_filename
+        if not isdefined(outputs['response']):
+            outputs['response'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['response'] = op.abspath(outputs['response'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_ER.txt'
+
+
+def concat_files(bvec_file, bval_file, invert_x, invert_y, invert_z):
+    bvecs = np.loadtxt(bvec_file)
+    bvals = np.loadtxt(bval_file)
+    if np.shape(bvecs)[0] > np.shape(bvecs)[1]:
+        bvecs = np.transpose(bvecs)
+    if invert_x:
+        bvecs[0, :] = -bvecs[0, :]
+        iflogger.info('Inverting b-vectors in the x direction')
+    if invert_y:
+        bvecs[1, :] = -bvecs[1, :]
+        iflogger.info('Inverting b-vectors in the y direction')
+    if invert_z:
+        bvecs[2, :] = -bvecs[2, :]
+        iflogger.info('Inverting b-vectors in the z direction')
+    iflogger.info(np.shape(bvecs))
+    iflogger.info(np.shape(bvals))
+    encoding = np.transpose(np.vstack((bvecs, bvals)))
+    _, bvec, _ = split_filename(bvec_file)
+    _, bval, _ = split_filename(bval_file)
+    out_encoding_file = bvec + '_' + bval + '.txt'
+    np.savetxt(out_encoding_file, encoding)
+    return out_encoding_file
+
+
+class FSL2MRTrixInputSpec(TraitedSpec):
+    bvec_file = File(
+        exists=True, mandatory=True, desc='FSL b-vectors file (3xN text file)')
+    bval_file = File(
+        exists=True, mandatory=True, desc='FSL b-values file (1xN text file)')
+    invert_x = traits.Bool(
+        False, usedefault=True, desc='Inverts the b-vectors along the x-axis')
+    invert_y = traits.Bool(
+        False, usedefault=True, desc='Inverts the b-vectors along the y-axis')
+    invert_z = traits.Bool(
+        False, usedefault=True, desc='Inverts the b-vectors along the z-axis')
+    out_encoding_file = File(genfile=True, desc='Output encoding filename')
+
+
+class FSL2MRTrixOutputSpec(TraitedSpec):
+    encoding_file = File(
+        desc='The gradient encoding, supplied as a 4xN text file in which '
+        'each line is in the format [ X Y Z b ], where [ X Y Z ] describe '
+        'the direction of the applied gradient, and b gives the b-value '
+        'in units (1000 s/mm^2).')
+
+
+class FSL2MRTrix(BaseInterface):
+    """
+    Converts separate b-values and b-vectors from text files (FSL style) into a
+    4xN text file in which each line is in the format [ X Y Z b ], where [ X Y Z ]
+    describe the direction of the applied gradient, and b gives the
+    b-value in units (1000 s/mm^2).
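+
+    For illustration, a file describing one b=0 volume and two b=1000
+    volumes might contain (the values here are made up)::
+
+        0 0 0 0
+        1 0 0 1000
+        0 1 0 1000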
+ + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> fsl2mrtrix = mrt.FSL2MRTrix() + >>> fsl2mrtrix.inputs.bvec_file = 'bvecs' + >>> fsl2mrtrix.inputs.bval_file = 'bvals' + >>> fsl2mrtrix.inputs.invert_y = True + >>> fsl2mrtrix.run() # doctest: +SKIP + """ + input_spec = FSL2MRTrixInputSpec + output_spec = FSL2MRTrixOutputSpec + + def _run_interface(self, runtime): + encoding = concat_files(self.inputs.bvec_file, self.inputs.bval_file, + self.inputs.invert_x, self.inputs.invert_y, + self.inputs.invert_z) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['encoding_file'] = op.abspath( + self._gen_filename('out_encoding_file')) + return outputs + + def _gen_filename(self, name): + if name == 'out_encoding_file': + return self._gen_outfilename() + else: + return None + + def _gen_outfilename(self): + _, bvec, _ = split_filename(self.inputs.bvec_file) + _, bval, _ = split_filename(self.inputs.bval_file) + return bvec + '_' + bval + '.txt' + + +class GenerateDirectionsInputSpec(CommandLineInputSpec): + num_dirs = traits.Int( + mandatory=True, + argstr='%s', + position=-2, + desc='the number of directions to generate.') + + power = traits.Float( + argstr='-power %s', + desc='specify exponent to use for repulsion power law.') + niter = traits.Int( + argstr='-niter %s', + desc='specify the maximum number of iterations to perform.') + display_info = traits.Bool( + argstr='-info', desc='Display information messages.') + quiet_display = traits.Bool( + argstr='-quiet', + desc='do not display information messages or progress status.') + display_debug = traits.Bool( + argstr='-debug', desc='Display debugging messages.') + out_file = File( + name_source=['num_dirs'], + name_template='directions_%d.txt', + argstr='%s', + hash_files=False, + position=-1, + desc='the text file to write the directions to, as [ az el ] pairs.') + + +class GenerateDirectionsOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='directions file') + + +class GenerateDirections(CommandLine): + """ + generate a set of directions evenly distributed over a hemisphere. + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> gendir = mrt.GenerateDirections() + >>> gendir.inputs.num_dirs = 300 + >>> gendir.run() # doctest: +SKIP + """ + + _cmd = 'gendir' + input_spec = GenerateDirectionsInputSpec + output_spec = GenerateDirectionsOutputSpec + + +class FindShPeaksInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-3, + desc='the input image of SH coefficients.') + directions_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='the set of directions to use as seeds for the peak finding') + peaks_image = File( + exists=True, + argstr='-peaks %s', + desc= + 'the program will try to find the peaks that most closely match those in the image provided' + ) + num_peaks = traits.Int( + argstr='-num %s', desc='the number of peaks to extract (default is 3)') + peak_directions = traits.List( + traits.Float, + argstr='-direction %s', + sep=' ', + minlen=2, + maxlen=2, + desc= + 'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' + ' phi: the azimuthal angle of the direction (in degrees). 
theta: the elevation angle of the direction (in degrees, from the vertical z-axis)' + ) + peak_threshold = traits.Float( + argstr='-threshold %s', + desc= + 'only peak amplitudes greater than the threshold will be considered') + display_info = traits.Bool( + argstr='-info', desc='Display information messages.') + quiet_display = traits.Bool( + argstr='-quiet', + desc='do not display information messages or progress status.') + display_debug = traits.Bool( + argstr='-debug', desc='Display debugging messages.') + out_file = File( + name_template="%s_peak_dirs.mif", + keep_extension=False, + argstr='%s', + hash_files=False, + position=-1, + desc= + 'the output image. Each volume corresponds to the x, y & z component of each peak direction vector in turn', + name_source=["in_file"]) + + +class FindShPeaksOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Peak directions image') + + +class FindShPeaks(CommandLine): + """ + identify the orientations of the N largest peaks of a SH profile + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> shpeaks = mrt.FindShPeaks() + >>> shpeaks.inputs.in_file = 'csd.mif' + >>> shpeaks.inputs.directions_file = 'dirs.txt' + >>> shpeaks.inputs.num_peaks = 2 + >>> shpeaks.run() # doctest: +SKIP + """ + + _cmd = 'find_SH_peaks' + input_spec = FindShPeaksInputSpec + output_spec = FindShPeaksOutputSpec + + +class Directions2AmplitudeInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc= + 'the input directions image. Each volume corresponds to the x, y & z component of each direction vector in turn.' + ) + peaks_image = File( + exists=True, + argstr='-peaks %s', + desc= + 'the program will try to find the peaks that most closely match those in the image provided' + ) + num_peaks = traits.Int( + argstr='-num %s', desc='the number of peaks to extract (default is 3)') + peak_directions = traits.List( + traits.Float, + argstr='-direction %s', + sep=' ', + minlen=2, + maxlen=2, + desc= + 'phi theta. the direction of a peak to estimate. The algorithm will attempt to find the same number of peaks as have been specified using this option ' + ' phi: the azimuthal angle of the direction (in degrees). 
theta: the elevation angle of the direction (in degrees, from the vertical z-axis)' + ) + display_info = traits.Bool( + argstr='-info', desc='Display information messages.') + quiet_display = traits.Bool( + argstr='-quiet', + desc='do not display information messages or progress status.') + display_debug = traits.Bool( + argstr='-debug', desc='Display debugging messages.') + out_file = File( + name_template="%s_amplitudes.mif", + keep_extension=False, + argstr='%s', + hash_files=False, + position=-1, + desc='the output amplitudes image', + name_source=["in_file"]) + + +class Directions2AmplitudeOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='amplitudes image') + + +class Directions2Amplitude(CommandLine): + """ + convert directions image to amplitudes + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> amplitudes = mrt.Directions2Amplitude() + >>> amplitudes.inputs.in_file = 'peak_directions.mif' + >>> amplitudes.run() # doctest: +SKIP + """ + + _cmd = 'dir2amp' + input_spec = Directions2AmplitudeInputSpec + output_spec = Directions2AmplitudeOutputSpec diff --git a/nipype/interfaces/mrtrix/tests/__init__.py b/nipype/interfaces/mrtrix/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py new file mode 100644 index 0000000000..78a275f6a7 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_ConstrainedSphericalDeconvolution.py @@ -0,0 +1,65 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import ConstrainedSphericalDeconvolution + + +def test_ConstrainedSphericalDeconvolution_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict(argstr='-debug', ), + directions_file=dict( + argstr='-directions %s', + position=-2, + ), + encoding_file=dict( + argstr='-grad %s', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + filter_file=dict( + argstr='-filter %s', + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + iterations=dict(argstr='-niter %s', ), + lambda_value=dict(argstr='-lambda %s', ), + mask_image=dict( + argstr='-mask %s', + position=2, + ), + maximum_harmonic_order=dict(argstr='-lmax %s', ), + normalise=dict( + argstr='-normalise', + position=3, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + response_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + threshold_value=dict(argstr='-threshold %s', ), + ) + inputs = ConstrainedSphericalDeconvolution.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ConstrainedSphericalDeconvolution_outputs(): + output_map = dict(spherical_harmonics_image=dict(), ) + outputs = ConstrainedSphericalDeconvolution.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py new file mode 100644 index 0000000000..dc95cff525 --- /dev/null +++ 
b/nipype/interfaces/mrtrix/tests/test_auto_DWI2SphericalHarmonicsImage.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import DWI2SphericalHarmonicsImage + + +def test_DWI2SphericalHarmonicsImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + encoding_file=dict( + argstr='-grad %s', + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict(argstr='-lmax %s', ), + normalise=dict( + argstr='-normalise', + position=3, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + ) + inputs = DWI2SphericalHarmonicsImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWI2SphericalHarmonicsImage_outputs(): + output_map = dict(spherical_harmonics_image=dict(), ) + outputs = DWI2SphericalHarmonicsImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py new file mode 100644 index 0000000000..86975950bf --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_DWI2Tensor.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DWI2Tensor + + +def test_DWI2Tensor_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + encoding_file=dict( + argstr='-grad %s', + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignore_slice_by_volume=dict( + argstr='-ignoreslices %s', + position=2, + sep=' ', + ), + ignore_volumes=dict( + argstr='-ignorevolumes %s', + position=2, + sep=' ', + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + name_source='in_file', + name_template='%s_tensor.mif', + output_name='tensor', + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = DWI2Tensor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWI2Tensor_outputs(): + output_map = dict(tensor=dict(), ) + outputs = DWI2Tensor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py new file mode 100644 index 0000000000..2c6417fe3a --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_DiffusionTensorStreamlineTrack.py @@ -0,0 +1,125 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import DiffusionTensorStreamlineTrack + + +def test_DiffusionTensorStreamlineTrack_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cutoff_value=dict( + argstr='-cutoff %s', + units='NA', + ), + desired_number_of_tracks=dict(argstr='-number %d', ), + do_not_precompute=dict(argstr='-noprecomputed', ), + 
environ=dict( + nohash=True, + usedefault=True, + ), + exclude_file=dict( + argstr='-exclude %s', + xor=['exclude_file', 'exclude_spec'], + ), + exclude_spec=dict( + argstr='-exclude %s', + position=2, + sep=',', + units='mm', + xor=['exclude_file', 'exclude_spec'], + ), + gradient_encoding_file=dict( + argstr='-grad %s', + mandatory=True, + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + include_file=dict( + argstr='-include %s', + xor=['include_file', 'include_spec'], + ), + include_spec=dict( + argstr='-include %s', + position=2, + sep=',', + units='mm', + xor=['include_file', 'include_spec'], + ), + initial_cutoff_value=dict( + argstr='-initcutoff %s', + units='NA', + ), + initial_direction=dict( + argstr='-initdirection %s', + units='voxels', + ), + inputmodel=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + mask_file=dict( + argstr='-mask %s', + xor=['mask_file', 'mask_spec'], + ), + mask_spec=dict( + argstr='-mask %s', + position=2, + sep=',', + units='mm', + xor=['mask_file', 'mask_spec'], + ), + maximum_number_of_tracks=dict(argstr='-maxnum %d', ), + maximum_tract_length=dict( + argstr='-length %s', + units='mm', + ), + minimum_radius_of_curvature=dict( + argstr='-curvature %s', + units='mm', + ), + minimum_tract_length=dict( + argstr='-minlength %s', + units='mm', + ), + no_mask_interpolation=dict(argstr='-nomaskinterp', ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s_tracked.tck', + output_name='tracked', + position=-1, + ), + seed_file=dict( + argstr='-seed %s', + xor=['seed_file', 'seed_spec'], + ), + seed_spec=dict( + argstr='-seed %s', + position=2, + sep=',', + units='mm', + xor=['seed_file', 'seed_spec'], + ), + step_size=dict( + argstr='-step %s', + units='mm', + ), + stop=dict(argstr='-stop', ), + unidirectional=dict(argstr='-unidirectional', ), + ) + inputs = DiffusionTensorStreamlineTrack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffusionTensorStreamlineTrack_outputs(): + output_map = dict(tracked=dict(), ) + outputs = DiffusionTensorStreamlineTrack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py new file mode 100644 index 0000000000..b5474bd18c --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Directions2Amplitude.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import Directions2Amplitude + + +def test_Directions2Amplitude_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + display_debug=dict(argstr='-debug', ), + display_info=dict(argstr='-info', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + num_peaks=dict(argstr='-num %s', ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s_amplitudes.mif', + position=-1, + ), + peak_directions=dict( + argstr='-direction %s', + sep=' ', + ), + peaks_image=dict(argstr='-peaks %s', ), + quiet_display=dict(argstr='-quiet', ), + ) + inputs = Directions2Amplitude.input_spec() + + for key, metadata 
in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Directions2Amplitude_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Directions2Amplitude.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Erode.py b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py new file mode 100644 index 0000000000..6215593ab0 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Erode.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Erode + + +def test_Erode_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + dilate=dict( + argstr='-dilate', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + number_of_passes=dict(argstr='-npass %s', ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = Erode.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Erode_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Erode.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py new file mode 100644 index 0000000000..983433104e --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_EstimateResponseForSH.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import EstimateResponseForSH + + +def test_EstimateResponseForSH_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict(argstr='-debug', ), + encoding_file=dict( + argstr='-grad %s', + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + mask_image=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + maximum_harmonic_order=dict(argstr='-lmax %s', ), + normalise=dict(argstr='-normalise', ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict(argstr='-quiet', ), + ) + inputs = EstimateResponseForSH.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateResponseForSH_outputs(): + output_map = dict(response=dict(), ) + outputs = EstimateResponseForSH.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py new file mode 100644 index 0000000000..6261b6b0a5 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_FSL2MRTrix.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import FSL2MRTrix + + +def test_FSL2MRTrix_inputs(): + input_map = dict( + bval_file=dict(mandatory=True, ), + bvec_file=dict(mandatory=True, ), + invert_x=dict(usedefault=True, ), + invert_y=dict(usedefault=True, ), + invert_z=dict(usedefault=True, ), + out_encoding_file=dict(genfile=True, ), + ) + inputs = FSL2MRTrix.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FSL2MRTrix_outputs(): + output_map = dict(encoding_file=dict(), ) + outputs = FSL2MRTrix.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py new file mode 100644 index 0000000000..6e0e670d06 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_FilterTracks.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import FilterTracks + + +def test_FilterTracks_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclude_file=dict( + argstr='-exclude %s', + xor=['exclude_file', 'exclude_spec'], + ), + exclude_spec=dict( + argstr='-exclude %s', + position=2, + sep=',', + units='mm', + xor=['exclude_file', 'exclude_spec'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + include_file=dict( + argstr='-include %s', + xor=['include_file', 'include_spec'], + ), + include_spec=dict( + argstr='-include %s', + position=2, + sep=',', + units='mm', + xor=['include_file', 'include_spec'], + ), + invert=dict(argstr='-invert', ), + minimum_tract_length=dict( + argstr='-minlength %s', + units='mm', + ), + no_mask_interpolation=dict(argstr='-nomaskinterp', ), + out_file=dict( + argstr='%s', + hash_files=False, + name_source=['in_file'], + name_template='%s_filt', + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = FilterTracks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FilterTracks_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FilterTracks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py new file mode 100644 index 0000000000..5d9f51739c --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_FindShPeaks.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import FindShPeaks + + +def test_FindShPeaks_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + directions_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + display_debug=dict(argstr='-debug', ), + display_info=dict(argstr='-info', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + 
num_peaks=dict(argstr='-num %s', ), + out_file=dict( + argstr='%s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s_peak_dirs.mif', + position=-1, + ), + peak_directions=dict( + argstr='-direction %s', + sep=' ', + ), + peak_threshold=dict(argstr='-threshold %s', ), + peaks_image=dict(argstr='-peaks %s', ), + quiet_display=dict(argstr='-quiet', ), + ) + inputs = FindShPeaks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FindShPeaks_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FindShPeaks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py new file mode 100644 index 0000000000..2cfd89bb6f --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateDirections.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tensors import GenerateDirections + + +def test_GenerateDirections_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + display_debug=dict(argstr='-debug', ), + display_info=dict(argstr='-info', ), + environ=dict( + nohash=True, + usedefault=True, + ), + niter=dict(argstr='-niter %s', ), + num_dirs=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + hash_files=False, + name_source=['num_dirs'], + name_template='directions_%d.txt', + position=-1, + ), + power=dict(argstr='-power %s', ), + quiet_display=dict(argstr='-quiet', ), + ) + inputs = GenerateDirections.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateDirections_outputs(): + output_map = dict(out_file=dict(), ) + outputs = GenerateDirections.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py new file mode 100644 index 0000000000..6a0305f9e2 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_GenerateWhiteMatterMask.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import GenerateWhiteMatterMask + + +def test_GenerateWhiteMatterMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + binary_mask=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + encoding_file=dict( + argstr='-grad %s', + mandatory=True, + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + noise_level_margin=dict(argstr='-margin %s', ), + out_WMProb_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + ) + inputs = GenerateWhiteMatterMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateWhiteMatterMask_outputs(): + output_map = 
dict(WMprobabilitymap=dict(), ) + outputs = GenerateWhiteMatterMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py new file mode 100644 index 0000000000..5aef0bfd13 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRConvert.py @@ -0,0 +1,79 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRConvert + + +def test_MRConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + extension=dict( + position=2, + usedefault=True, + ), + extract_at_axis=dict( + argstr='-coord %s', + position=1, + ), + extract_at_coordinate=dict( + argstr='%s', + position=2, + sep=',', + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + layout=dict( + argstr='-output %s', + position=2, + ), + offset_bias=dict( + argstr='-scale %d', + position=3, + units='mm', + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr='-output %s', + position=2, + ), + prs=dict( + argstr='-prs', + position=3, + ), + replace_NaN_with_zero=dict( + argstr='-zero', + position=3, + ), + resample=dict( + argstr='-scale %d', + position=3, + units='mm', + ), + voxel_dims=dict( + argstr='-vox %s', + position=3, + sep=',', + ), + ) + inputs = MRConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRConvert_outputs(): + output_map = dict(converted=dict(), ) + outputs = MRConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py new file mode 100644 index 0000000000..a5a864289a --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRMultiply.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRMultiply + + +def test_MRMultiply_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = MRMultiply.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRMultiply_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRMultiply.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py new file mode 100644 index 0000000000..d6a3db0a1d --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTransform.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRTransform + + +def test_MRTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flip_x=dict( + argstr='-flipx', + position=1, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + invert=dict( + argstr='-inverse', + position=1, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + reference_image=dict( + argstr='-reference %s', + position=1, + ), + replace_transform=dict( + argstr='-replace', + position=1, + ), + template_image=dict( + argstr='-template %s', + position=1, + ), + transformation_file=dict( + argstr='-transform %s', + position=1, + ), + ) + inputs = MRTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRTransform_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py new file mode 100644 index 0000000000..b6fefac9a0 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrix2TrackVis.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..convert import MRTrix2TrackVis + + +def test_MRTrix2TrackVis_inputs(): + input_map = dict( + image_file=dict(), + in_file=dict(mandatory=True, ), + matrix_file=dict(), + out_filename=dict( + genfile=True, + usedefault=True, + ), + registration_image_file=dict(), + ) + inputs = MRTrix2TrackVis.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRTrix2TrackVis_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRTrix2TrackVis.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py new file mode 100644 index 0000000000..0022de4d7a --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixInfo.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRTrixInfo + + +def test_MRTrixInfo_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + ) + inputs = MRTrixInfo.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRTrixInfo_outputs(): + output_map = dict() + outputs = MRTrixInfo.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py new file mode 100644 index 0000000000..117258ea17 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MRTrixViewer.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MRTrixViewer + + +def test_MRTrixViewer_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_files=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = MRTrixViewer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRTrixViewer_outputs(): + output_map = dict() + outputs = MRTrixViewer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py new file mode 100644 index 0000000000..0e7daabcaa --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_MedianFilter3D.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import MedianFilter3D + + +def test_MedianFilter3D_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = MedianFilter3D.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedianFilter3D_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MedianFilter3D.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py new file mode 100644 index 0000000000..6d75b1b9a2 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_ProbabilisticSphericallyDeconvolutedStreamlineTrack.py @@ -0,0 +1,121 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import ProbabilisticSphericallyDeconvolutedStreamlineTrack + + +def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cutoff_value=dict( + argstr='-cutoff %s', + units='NA', + ), + desired_number_of_tracks=dict(argstr='-number %d', ), + do_not_precompute=dict(argstr='-noprecomputed', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclude_file=dict( + argstr='-exclude %s', + xor=['exclude_file', 'exclude_spec'], + ), + exclude_spec=dict( + argstr='-exclude %s', + position=2, + sep=',', + units='mm', + xor=['exclude_file', 
'exclude_spec'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + include_file=dict( + argstr='-include %s', + xor=['include_file', 'include_spec'], + ), + include_spec=dict( + argstr='-include %s', + position=2, + sep=',', + units='mm', + xor=['include_file', 'include_spec'], + ), + initial_cutoff_value=dict( + argstr='-initcutoff %s', + units='NA', + ), + initial_direction=dict( + argstr='-initdirection %s', + units='voxels', + ), + inputmodel=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + mask_file=dict( + argstr='-mask %s', + xor=['mask_file', 'mask_spec'], + ), + mask_spec=dict( + argstr='-mask %s', + position=2, + sep=',', + units='mm', + xor=['mask_file', 'mask_spec'], + ), + maximum_number_of_tracks=dict(argstr='-maxnum %d', ), + maximum_number_of_trials=dict(argstr='-trials %s', ), + maximum_tract_length=dict( + argstr='-length %s', + units='mm', + ), + minimum_radius_of_curvature=dict( + argstr='-curvature %s', + units='mm', + ), + minimum_tract_length=dict( + argstr='-minlength %s', + units='mm', + ), + no_mask_interpolation=dict(argstr='-nomaskinterp', ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s_tracked.tck', + output_name='tracked', + position=-1, + ), + seed_file=dict( + argstr='-seed %s', + xor=['seed_file', 'seed_spec'], + ), + seed_spec=dict( + argstr='-seed %s', + position=2, + sep=',', + units='mm', + xor=['seed_file', 'seed_spec'], + ), + step_size=dict( + argstr='-step %s', + units='mm', + ), + stop=dict(argstr='-stop', ), + unidirectional=dict(argstr='-unidirectional', ), + ) + inputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProbabilisticSphericallyDeconvolutedStreamlineTrack_outputs(): + output_map = dict(tracked=dict(), ) + outputs = ProbabilisticSphericallyDeconvolutedStreamlineTrack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py new file mode 100644 index 0000000000..9bed95276d --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_SphericallyDeconvolutedStreamlineTrack.py @@ -0,0 +1,120 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import SphericallyDeconvolutedStreamlineTrack + + +def test_SphericallyDeconvolutedStreamlineTrack_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cutoff_value=dict( + argstr='-cutoff %s', + units='NA', + ), + desired_number_of_tracks=dict(argstr='-number %d', ), + do_not_precompute=dict(argstr='-noprecomputed', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclude_file=dict( + argstr='-exclude %s', + xor=['exclude_file', 'exclude_spec'], + ), + exclude_spec=dict( + argstr='-exclude %s', + position=2, + sep=',', + units='mm', + xor=['exclude_file', 'exclude_spec'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + include_file=dict( + argstr='-include %s', + xor=['include_file', 'include_spec'], + ), + include_spec=dict( + argstr='-include %s', + position=2, + sep=',', + units='mm', + xor=['include_file', 'include_spec'], + ), + 
initial_cutoff_value=dict( + argstr='-initcutoff %s', + units='NA', + ), + initial_direction=dict( + argstr='-initdirection %s', + units='voxels', + ), + inputmodel=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + mask_file=dict( + argstr='-mask %s', + xor=['mask_file', 'mask_spec'], + ), + mask_spec=dict( + argstr='-mask %s', + position=2, + sep=',', + units='mm', + xor=['mask_file', 'mask_spec'], + ), + maximum_number_of_tracks=dict(argstr='-maxnum %d', ), + maximum_tract_length=dict( + argstr='-length %s', + units='mm', + ), + minimum_radius_of_curvature=dict( + argstr='-curvature %s', + units='mm', + ), + minimum_tract_length=dict( + argstr='-minlength %s', + units='mm', + ), + no_mask_interpolation=dict(argstr='-nomaskinterp', ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s_tracked.tck', + output_name='tracked', + position=-1, + ), + seed_file=dict( + argstr='-seed %s', + xor=['seed_file', 'seed_spec'], + ), + seed_spec=dict( + argstr='-seed %s', + position=2, + sep=',', + units='mm', + xor=['seed_file', 'seed_spec'], + ), + step_size=dict( + argstr='-step %s', + units='mm', + ), + stop=dict(argstr='-stop', ), + unidirectional=dict(argstr='-unidirectional', ), + ) + inputs = SphericallyDeconvolutedStreamlineTrack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SphericallyDeconvolutedStreamlineTrack_outputs(): + output_map = dict(tracked=dict(), ) + outputs = SphericallyDeconvolutedStreamlineTrack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py new file mode 100644 index 0000000000..c898a60eb3 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_StreamlineTrack.py @@ -0,0 +1,120 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import StreamlineTrack + + +def test_StreamlineTrack_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cutoff_value=dict( + argstr='-cutoff %s', + units='NA', + ), + desired_number_of_tracks=dict(argstr='-number %d', ), + do_not_precompute=dict(argstr='-noprecomputed', ), + environ=dict( + nohash=True, + usedefault=True, + ), + exclude_file=dict( + argstr='-exclude %s', + xor=['exclude_file', 'exclude_spec'], + ), + exclude_spec=dict( + argstr='-exclude %s', + position=2, + sep=',', + units='mm', + xor=['exclude_file', 'exclude_spec'], + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + include_file=dict( + argstr='-include %s', + xor=['include_file', 'include_spec'], + ), + include_spec=dict( + argstr='-include %s', + position=2, + sep=',', + units='mm', + xor=['include_file', 'include_spec'], + ), + initial_cutoff_value=dict( + argstr='-initcutoff %s', + units='NA', + ), + initial_direction=dict( + argstr='-initdirection %s', + units='voxels', + ), + inputmodel=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + mask_file=dict( + argstr='-mask %s', + xor=['mask_file', 'mask_spec'], + ), + mask_spec=dict( + argstr='-mask %s', + position=2, + sep=',', + units='mm', + xor=['mask_file', 'mask_spec'], + ), + maximum_number_of_tracks=dict(argstr='-maxnum %d', ), + maximum_tract_length=dict( + argstr='-length %s', + 
units='mm', + ), + minimum_radius_of_curvature=dict( + argstr='-curvature %s', + units='mm', + ), + minimum_tract_length=dict( + argstr='-minlength %s', + units='mm', + ), + no_mask_interpolation=dict(argstr='-nomaskinterp', ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s_tracked.tck', + output_name='tracked', + position=-1, + ), + seed_file=dict( + argstr='-seed %s', + xor=['seed_file', 'seed_spec'], + ), + seed_spec=dict( + argstr='-seed %s', + position=2, + sep=',', + units='mm', + xor=['seed_file', 'seed_spec'], + ), + step_size=dict( + argstr='-step %s', + units='mm', + ), + stop=dict(argstr='-stop', ), + unidirectional=dict(argstr='-unidirectional', ), + ) + inputs = StreamlineTrack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_StreamlineTrack_outputs(): + output_map = dict(tracked=dict(), ) + outputs = StreamlineTrack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py new file mode 100644 index 0000000000..e9546d7e90 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2ApparentDiffusion.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Tensor2ApparentDiffusion + + +def test_Tensor2ApparentDiffusion_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = Tensor2ApparentDiffusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tensor2ApparentDiffusion_outputs(): + output_map = dict(ADC=dict(), ) + outputs = Tensor2ApparentDiffusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py new file mode 100644 index 0000000000..d16a907f62 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2FractionalAnisotropy.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Tensor2FractionalAnisotropy + + +def test_Tensor2FractionalAnisotropy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = Tensor2FractionalAnisotropy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(inputs.traits()[key], metakey) == value +def test_Tensor2FractionalAnisotropy_outputs(): + output_map = dict(FA=dict(), ) + outputs = Tensor2FractionalAnisotropy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py new file mode 100644 index 0000000000..c07d0a8db4 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tensor2Vector.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Tensor2Vector + + +def test_Tensor2Vector_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + quiet=dict( + argstr='-quiet', + position=1, + ), + ) + inputs = Tensor2Vector.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tensor2Vector_outputs(): + output_map = dict(vector=dict(), ) + outputs = Tensor2Vector.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py new file mode 100644 index 0000000000..39fbf14d2c --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Threshold.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Threshold + + +def test_Threshold_inputs(): + input_map = dict( + absolute_threshold_value=dict(argstr='-abs %s', ), + args=dict(argstr='%s', ), + debug=dict( + argstr='-debug', + position=1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + invert=dict( + argstr='-invert', + position=1, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + percentage_threshold_value=dict(argstr='-percent %s', ), + quiet=dict( + argstr='-quiet', + position=1, + ), + replace_zeros_with_NaN=dict( + argstr='-nan', + position=1, + ), + ) + inputs = Threshold.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Threshold_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Threshold.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py new file mode 100644 index 0000000000..f984203ab1 --- /dev/null +++ b/nipype/interfaces/mrtrix/tests/test_auto_Tracks2Prob.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import Tracks2Prob + + +def test_Tracks2Prob_inputs(): + input_map = dict( + args=dict(argstr='%s', 
), + colour=dict( + argstr='-colour', + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fraction=dict( + argstr='-fraction', + position=3, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_filename=dict( + argstr='%s', + genfile=True, + position=-1, + ), + output_datatype=dict( + argstr='-datatype %s', + position=2, + ), + resample=dict( + argstr='-resample %d', + position=3, + units='mm', + ), + template_file=dict( + argstr='-template %s', + position=1, + ), + voxel_dims=dict( + argstr='-vox %s', + position=2, + sep=',', + ), + ) + inputs = Tracks2Prob.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tracks2Prob_outputs(): + output_map = dict(tract_image=dict(), ) + outputs = Tracks2Prob.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix/tracking.py b/nipype/interfaces/mrtrix/tracking.py new file mode 100644 index 0000000000..7a7ed995f0 --- /dev/null +++ b/nipype/interfaces/mrtrix/tracking.py @@ -0,0 +1,504 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import os.path as op + +from ...utils.filemanip import split_filename +from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec, + File, isdefined) + + +class FilterTracksInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input tracks to be filtered') + include_xor = ['include_file', 'include_spec'] + include_file = File( + exists=True, + argstr='-include %s', + desc='inclusion file', + xor=include_xor) + include_spec = traits.List( + traits.Float, + desc='inclusion specification in mm and radius (x y z r)', + position=2, + argstr='-include %s', + minlen=4, + maxlen=4, + sep=',', + units='mm', + xor=include_xor) + + exclude_xor = ['exclude_file', 'exclude_spec'] + exclude_file = File( + exists=True, + argstr='-exclude %s', + desc='exclusion file', + xor=exclude_xor) + exclude_spec = traits.List( + traits.Float, + desc='exclusion specification in mm and radius (x y z r)', + position=2, + argstr='-exclude %s', + minlen=4, + maxlen=4, + sep=',', + units='mm', + xor=exclude_xor) + + minimum_tract_length = traits.Float( + argstr='-minlength %s', + units='mm', + desc= + "Sets the minimum length of any track in millimeters (default is 10 mm)." 
+    )
+
+    out_file = File(
+        argstr='%s',
+        position=-1,
+        desc='Output filtered track filename',
+        name_source=['in_file'],
+        hash_files=False,
+        name_template='%s_filt')
+
+    no_mask_interpolation = traits.Bool(
+        argstr='-nomaskinterp',
+        desc="Turns off trilinear interpolation of mask images.")
+    invert = traits.Bool(
+        argstr='-invert',
+        desc="invert the matching process, so that tracks that would "
+        "otherwise have been included are now excluded and vice-versa.")
+
+    quiet = traits.Bool(
+        argstr='-quiet',
+        position=1,
+        desc="Do not display information messages or progress status.")
+    debug = traits.Bool(
+        argstr='-debug', position=1, desc="Display debugging messages.")
+
+
+class FilterTracksOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output filtered tracks')
+
+
+class FilterTracks(CommandLine):
+    """
+    Use regions-of-interest to select a subset of tracks
+    from a given MRtrix track file.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> filt = mrt.FilterTracks()
+    >>> filt.inputs.in_file = 'tracks.tck'
+    >>> filt.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'filter_tracks'
+    input_spec = FilterTracksInputSpec
+    output_spec = FilterTracksOutputSpec
+
+
+class Tracks2ProbInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='tract file')
+    template_file = File(
+        exists=True,
+        argstr='-template %s',
+        position=1,
+        desc='an image file to be used as a template for the output (the '
+        'output image will have the same transform and field of view)')
+    voxel_dims = traits.List(
+        traits.Float,
+        argstr='-vox %s',
+        sep=',',
+        position=2,
+        minlen=3,
+        maxlen=3,
+        desc='Three comma-separated numbers giving the size of each voxel '
+        'in mm.')
+    colour = traits.Bool(
+        argstr='-colour',
+        position=3,
+        desc="add colour to the output image according to the direction of "
+        "the tracks.")
+    fraction = traits.Bool(
+        argstr='-fraction',
+        position=3,
+        desc="produce an image of the fraction of fibres through each voxel "
+        "(as a proportion of the total number in the file), rather than the "
+        "count.")
+    output_datatype = traits.Enum(
+        "Bit",
+        "Int8",
+        "UInt8",
+        "Int16",
+        "UInt16",
+        "Int32",
+        "UInt32",
+        "float32",
+        "float64",
+        argstr='-datatype %s',
+        position=2,
+        desc='output data type; one of "Bit", "Int8", "UInt8", "Int16", '
+        '"UInt16", "Int32", "UInt32", "float32" or "float64"'
+    )  # , usedefault=True)
+    resample = traits.Float(
+        argstr='-resample %d',
+        position=3,
+        units='mm',
+        desc='resample the tracks at regular intervals using Hermite '
+        'interpolation. If omitted, the program will select an appropriate '
+        'interpolation factor automatically.')
+    out_filename = File(
+        genfile=True, argstr='%s', position=-1, desc='output data file')
+
+
+class Tracks2ProbOutputSpec(TraitedSpec):
+    tract_image = File(
+        exists=True, desc='Output tract count or track density image')
+
+
+class Tracks2Prob(CommandLine):
+    """
+    Convert a tract file into a map of the fraction of tracks to enter
+    each voxel - also known as a tract density image (TDI) - in MRtrix's
+    image format (.mif). This can be viewed using MRview or converted to
+    NIfTI using MRconvert.
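+
+    A minimal sketch of the auto-generated output name (filenames are
+    illustrative; when ``out_filename`` is not set, ``_gen_outfilename``
+    below derives it by appending ``_TDI.mif`` to the input basename, and
+    the exact argument ordering of ``cmdline`` may differ):
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> t2p = mrt.Tracks2Prob()
+    >>> t2p.inputs.in_file = 'tracks.tck'
+    >>> t2p.inputs.fraction = True
+    >>> t2p.cmdline  # doctest: +SKIP
+    'tracks2prob -fraction tracks.tck tracks_TDI.mif'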
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> tdi = mrt.Tracks2Prob()
+    >>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck'
+    >>> tdi.inputs.colour = True
+    >>> tdi.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'tracks2prob'
+    input_spec = Tracks2ProbInputSpec
+    output_spec = Tracks2ProbOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['tract_image'] = self.inputs.out_filename
+        if not isdefined(outputs['tract_image']):
+            outputs['tract_image'] = op.abspath(self._gen_outfilename())
+        else:
+            outputs['tract_image'] = os.path.abspath(outputs['tract_image'])
+        return outputs
+
+    def _gen_filename(self, name):
+        if name == 'out_filename':
+            return self._gen_outfilename()
+        else:
+            return None
+
+    def _gen_outfilename(self):
+        # default output name: <input basename>_TDI.mif
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_TDI.mif'
+
+
+class StreamlineTrackInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='the image containing the source data. The type of data '
+        'required depends on the type of tracking as set in the preceding '
+        'argument. For DT methods, the base DWI are needed. For SD methods, '
+        'the SH harmonic coefficients of the FOD are needed.')
+
+    seed_xor = ['seed_file', 'seed_spec']
+    seed_file = File(
+        exists=True, argstr='-seed %s', desc='seed file', xor=seed_xor)
+    seed_spec = traits.List(
+        traits.Float,
+        desc='seed specification in mm and radius (x y z r)',
+        position=2,
+        argstr='-seed %s',
+        minlen=4,
+        maxlen=4,
+        sep=',',
+        units='mm',
+        xor=seed_xor)
+
+    include_xor = ['include_file', 'include_spec']
+    include_file = File(
+        exists=True,
+        argstr='-include %s',
+        desc='inclusion file',
+        xor=include_xor)
+    include_spec = traits.List(
+        traits.Float,
+        desc='inclusion specification in mm and radius (x y z r)',
+        position=2,
+        argstr='-include %s',
+        minlen=4,
+        maxlen=4,
+        sep=',',
+        units='mm',
+        xor=include_xor)
+
+    exclude_xor = ['exclude_file', 'exclude_spec']
+    exclude_file = File(
+        exists=True,
+        argstr='-exclude %s',
+        desc='exclusion file',
+        xor=exclude_xor)
+    exclude_spec = traits.List(
+        traits.Float,
+        desc='exclusion specification in mm and radius (x y z r)',
+        position=2,
+        argstr='-exclude %s',
+        minlen=4,
+        maxlen=4,
+        sep=',',
+        units='mm',
+        xor=exclude_xor)
+
+    mask_xor = ['mask_file', 'mask_spec']
+    mask_file = File(
+        exists=True,
+        argstr='-mask %s',
+        desc='mask file. Only tracks within mask.',
+        xor=mask_xor)
+    mask_spec = traits.List(
+        traits.Float,
+        desc='Mask specification in mm and radius (x y z r). Tracks will be '
+        'terminated when they leave the ROI.',
+        position=2,
+        argstr='-mask %s',
+        minlen=4,
+        maxlen=4,
+        sep=',',
+        units='mm',
+        xor=mask_xor)
+
+    inputmodel = traits.Enum(
+        'DT_STREAM',
+        'SD_PROB',
+        'SD_STREAM',
+        argstr='%s',
+        desc='input model type',
+        usedefault=True,
+        position=-3)
+
+    stop = traits.Bool(
+        argstr='-stop',
+        desc="stop track as soon as it enters any of the include regions.")
+    do_not_precompute = traits.Bool(
+        argstr='-noprecomputed',
+        desc="Turns off precomputation of the Legendre polynomial values. "
+        "Warning: this will slow down the algorithm by a factor of "
+        "approximately 4.")
+    unidirectional = traits.Bool(
+        argstr='-unidirectional',
+        desc="Track from the seed point in one direction only (default is "
+        "to track in both directions).")
+    no_mask_interpolation = traits.Bool(
+        argstr='-nomaskinterp',
+        desc="Turns off trilinear interpolation of mask images.")
+
+    step_size = traits.Float(
+        argstr='-step %s',
+        units='mm',
+        desc="Set the step size of the algorithm in mm (default is 0.2).")
+    minimum_radius_of_curvature = traits.Float(
+        argstr='-curvature %s',
+        units='mm',
+        desc="Set the minimum radius of curvature (default is 2 mm for "
+        "DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)")
+    desired_number_of_tracks = traits.Int(
+        argstr='-number %d',
+        desc='Sets the desired number of tracks. The program will continue '
+        'to generate tracks until this number of tracks have been selected '
+        'and written to the output file (default is 100 for *_STREAM '
+        'methods, 1000 for *_PROB methods).')
+    maximum_number_of_tracks = traits.Int(
+        argstr='-maxnum %d',
+        desc='Sets the maximum number of tracks to generate. The program '
+        "will not generate more tracks than this number, even if the "
+        "desired number of tracks hasn't yet been reached (default is "
+        '100 x number).')
+
+    minimum_tract_length = traits.Float(
+        argstr='-minlength %s',
+        units='mm',
+        desc="Sets the minimum length of any track in millimeters (default "
+        "is 10 mm).")
+    maximum_tract_length = traits.Float(
+        argstr='-length %s',
+        units='mm',
+        desc="Sets the maximum length of any track in millimeters (default "
+        "is 200 mm).")
+
+    cutoff_value = traits.Float(
+        argstr='-cutoff %s',
+        units='NA',
+        desc="Set the FA or FOD amplitude cutoff for terminating tracks "
+        "(default is 0.1).")
+    initial_cutoff_value = traits.Float(
+        argstr='-initcutoff %s',
+        units='NA',
+        desc="Sets the minimum FA or FOD amplitude for initiating tracks "
+        "(default is twice the normal cutoff).")
+
+    initial_direction = traits.List(
+        traits.Int,
+        desc='Specify the initial tracking direction as a vector',
+        argstr='-initdirection %s',
+        minlen=2,
+        maxlen=2,
+        units='voxels')
+    out_file = File(
+        argstr='%s',
+        position=-1,
+        name_source=['in_file'],
+        name_template='%s_tracked.tck',
+        output_name='tracked',
+        desc='output data file')
+
+
+class StreamlineTrackOutputSpec(TraitedSpec):
+    tracked = File(
+        exists=True, desc='output file containing reconstructed tracts')
+
+
+class StreamlineTrack(CommandLine):
+    """
+    Performs tractography using one of the following models:
+    'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream',
+    where 'dt' stands for diffusion tensor, 'sd' stands for spherical
+    deconvolution, and 'prob' stands for probabilistic.
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> strack = mrt.StreamlineTrack()
+    >>> strack.inputs.inputmodel = 'SD_PROB'
+    >>> strack.inputs.in_file = 'data.Bfloat'
+    >>> strack.inputs.seed_file = 'seed_mask.nii'
+    >>> strack.inputs.mask_file = 'mask.nii'
+    >>> strack.cmdline
+    'streamtrack -mask mask.nii -seed seed_mask.nii SD_PROB data.Bfloat data_tracked.tck'
+    >>> strack.run()  # doctest: +SKIP
+    """
+    _cmd = 'streamtrack'
+    input_spec = StreamlineTrackInputSpec
+    output_spec = StreamlineTrackOutputSpec
+
+
+class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec):
+    gradient_encoding_file = File(
+        exists=True,
+        argstr='-grad %s',
+        mandatory=True,
+        position=-2,
+        desc=
+        'Gradient encoding, supplied as a 4xN text file with each line in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). 
See FSL2MRTrix' + ) + + +class DiffusionTensorStreamlineTrack(StreamlineTrack): + """ + Specialized interface to StreamlineTrack. This interface is used for + streamline tracking from diffusion tensor data, and calls the MRtrix + function 'streamtrack' with the option 'DT_STREAM' + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> dtstrack = mrt.DiffusionTensorStreamlineTrack() + >>> dtstrack.inputs.in_file = 'data.Bfloat' + >>> dtstrack.inputs.seed_file = 'seed_mask.nii' + >>> dtstrack.run() # doctest: +SKIP + """ + + input_spec = DiffusionTensorStreamlineTrackInputSpec + + def __init__(self, command=None, **inputs): + inputs["inputmodel"] = "DT_STREAM" + return super(DiffusionTensorStreamlineTrack, self).__init__( + command, **inputs) + + +class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec( + StreamlineTrackInputSpec): + maximum_number_of_trials = traits.Int( + argstr='-trials %s', + desc= + "Set the maximum number of sampling trials at each point (only used for probabilistic tracking)." + ) + + +class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack): + """ + Performs probabilistic tracking using spherically deconvolved data + + Specialized interface to StreamlineTrack. This interface is used for + probabilistic tracking from spherically deconvolved data, and calls + the MRtrix function 'streamtrack' with the option 'SD_PROB' + + Example + ------- + + >>> import nipype.interfaces.mrtrix as mrt + >>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack() + >>> sdprobtrack.inputs.in_file = 'data.Bfloat' + >>> sdprobtrack.inputs.seed_file = 'seed_mask.nii' + >>> sdprobtrack.run() # doctest: +SKIP + """ + input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec + + def __init__(self, command=None, **inputs): + inputs["inputmodel"] = "SD_PROB" + return super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, + self).__init__(command, **inputs) + + +class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack): + """ + Performs streamline tracking using spherically deconvolved data + + Specialized interface to StreamlineTrack. 
This interface is used for
+    streamline tracking from spherically deconvolved data, and calls
+    the MRtrix function 'streamtrack' with the option 'SD_STREAM'
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix as mrt
+    >>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack()
+    >>> sdtrack.inputs.in_file = 'data.Bfloat'
+    >>> sdtrack.inputs.seed_file = 'seed_mask.nii'
+    >>> sdtrack.run()  # doctest: +SKIP
+    """
+    input_spec = StreamlineTrackInputSpec
+
+    def __init__(self, command=None, **inputs):
+        inputs["inputmodel"] = "SD_STREAM"
+        return super(SphericallyDeconvolutedStreamlineTrack, self).__init__(
+            command, **inputs)
diff --git a/nipype/interfaces/mrtrix3/__init__.py b/nipype/interfaces/mrtrix3/__init__.py
new file mode 100644
index 0000000000..507380c30e
--- /dev/null
+++ b/nipype/interfaces/mrtrix3/__init__.py
@@ -0,0 +1,12 @@
+from __future__ import absolute_import
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# -*- coding: utf-8 -*-
+
+from .utils import (Mesh2PVE, Generate5tt, BrainMask, TensorMetrics,
+                    ComputeTDI, TCK2VTK, MRMath, MRConvert, DWIExtract)
+from .preprocess import (ResponseSD, ACTPrepareFSL, ReplaceFSwithFIRST,
+                         DWIDenoise)
+from .tracking import Tractography
+from .reconst import FitTensor, EstimateFOD
+from .connectivity import LabelConfig, LabelConvert, BuildConnectome
diff --git a/nipype/interfaces/mrtrix3/base.py b/nipype/interfaces/mrtrix3/base.py
new file mode 100644
index 0000000000..55ca385153
--- /dev/null
+++ b/nipype/interfaces/mrtrix3/base.py
@@ -0,0 +1,80 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from ... import logging
+from ..base import (CommandLineInputSpec, CommandLine, traits, File, isdefined)
+iflogger = logging.getLogger('nipype.interface')
+
+
+class MRTrix3BaseInputSpec(CommandLineInputSpec):
+    nthreads = traits.Int(
+        argstr='-nthreads %d',
+        desc='number of threads. If zero, the number'
+        ' of available cpus will be used',
+        nohash=True)
+    # DW gradient table import options
+    grad_file = File(
+        exists=True,
+        argstr='-grad %s',
+        desc='dw gradient scheme (MRTrix format)')
+    grad_fsl = traits.Tuple(
+        File(exists=True),
+        File(exists=True),
+        argstr='-fslgrad %s %s',
+        desc='(bvecs, bvals) dw gradient scheme (FSL format)')
+    bval_scale = traits.Enum(
+        'yes',
+        'no',
+        argstr='-bvalue_scaling %s',
+        desc='specifies whether the b-values should be scaled by the square'
+        ' of the corresponding DW gradient norm, as often required for '
+        'multishell or DSI DW acquisition schemes. The default action '
+        'can also be set in the MRtrix config file, under the '
+        'BValueScaling entry. Valid choices are yes / no, true / '
+        'false, 0 / 1 (default: true).')
+
+    in_bvec = File(
+        exists=True, argstr='-fslgrad %s %s', desc='bvecs file in FSL format')
+    in_bval = File(exists=True, desc='bvals file in FSL format')
+
+
+class MRTrix3Base(CommandLine):
+    def _format_arg(self, name, trait_spec, value):
+        if name == 'nthreads' and value == 0:
+            # expand nthreads=0 to the number of available CPUs
+            value = 1
+            try:
+                from multiprocessing import cpu_count
+                value = cpu_count()
+            except Exception:
+                iflogger.warning('Number of threads could not be computed')
+            return trait_spec.argstr % value
+
+        if name == 'in_bvec':
+            # the in_bvec argstr formats both the bvecs and bvals files
+            return trait_spec.argstr % (value, self.inputs.in_bval)
+
+        return super(MRTrix3Base, self)._format_arg(name, trait_spec, value)
+
+    def _parse_inputs(self, skip=None):
+        if skip is None:
+            skip = []
+
+        try:
+            # an explicit gradient scheme takes precedence over bvec/bval
+            if (isdefined(self.inputs.grad_file)
+                    or isdefined(self.inputs.grad_fsl)):
+                skip += ['in_bvec', 'in_bval']
+
+            is_bvec = isdefined(self.inputs.in_bvec)
+            is_bval = isdefined(self.inputs.in_bval)
+            if is_bvec or is_bval:
+                if not is_bvec or not is_bval:
+                    raise RuntimeError('If using bvecs and bvals inputs, both'
+                                       ' should be defined')
+                # in_bvec emits both files, so in_bval is skipped here
+                skip += ['in_bval']
+        except AttributeError:
+            pass
+
+        return super(MRTrix3Base, self)._parse_inputs(skip=skip)
diff --git a/nipype/interfaces/mrtrix3/connectivity.py b/nipype/interfaces/mrtrix3/connectivity.py
new file mode 100644
index 0000000000..0e246c796b
--- /dev/null
+++ b/nipype/interfaces/mrtrix3/connectivity.py
@@ -0,0 +1,309 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+import os.path as op
+
+from ..base import (CommandLineInputSpec, traits, TraitedSpec, File, isdefined)
+from .base import MRTrix3Base
+
+
+class BuildConnectomeInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-3,
+        desc='input tractography')
+    in_parc = File(
+        exists=True, argstr='%s', position=-2, desc='parcellation file')
+    out_file = File(
+        'connectome.csv',
+        argstr='%s',
+        mandatory=True,
+        position=-1,
+        usedefault=True,
+        desc='output file after processing')
+
+    nthreads = traits.Int(
+        argstr='-nthreads %d',
+        desc='number of threads. If zero, the number'
+        ' of available cpus will be used',
+        nohash=True)
+
+    vox_lookup = traits.Bool(
+        argstr='-assignment_voxel_lookup',
+        desc='use a simple voxel lookup value at each streamline endpoint')
+    search_radius = traits.Float(
+        argstr='-assignment_radial_search %f',
+        desc='perform a radial search from each streamline endpoint to locate '
+        'the nearest node. Argument is the maximum radius in mm; if no node is'
+        ' found within this radius, the streamline endpoint is not assigned to'
+        ' any node.')
+    search_reverse = traits.Float(
+        argstr='-assignment_reverse_search %f',
+        desc='traverse from each streamline endpoint inwards along the '
+        'streamline, in search of the last node traversed by the streamline. '
+        'Argument is the maximum traversal length in mm (set to 0 to allow '
+        'search to continue to the streamline midpoint).')
+    search_forward = traits.Float(
+        argstr='-assignment_forward_search %f',
+        desc='project the streamline forwards from the endpoint in search of '
+        'a parcellation node voxel. Argument is the maximum traversal length '
+        'in mm.')
+
+    metric = traits.Enum(
+        'count',
+        'meanlength',
+        'invlength',
+        'invnodevolume',
+        'mean_scalar',
+        'invlength_invnodevolume',
+        argstr='-metric %s',
+        desc='specify the edge'
+        ' weight metric')
+
+    in_scalar = File(
+        exists=True,
+        argstr='-image %s',
+        desc='provide the associated image '
+        'for the mean_scalar metric')
+
+    in_weights = File(
+        exists=True,
+        argstr='-tck_weights_in %s',
+        desc='specify a text scalar '
+        'file containing the streamline weights')
+
+    keep_unassigned = traits.Bool(
+        argstr='-keep_unassigned',
+        desc='By default, the program discards the'
+        ' information regarding those streamlines that are not successfully '
+        'assigned to a node pair. Set this option to keep these values (will '
+        'be the first row/column in the output matrix)')
+    zero_diagonal = traits.Bool(
+        argstr='-zero_diagonal',
+        desc='set all diagonal entries in the matrix '
+        'to zero (these represent streamlines that connect to the same node at'
+        ' both ends)')
+
+
+class BuildConnectomeOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output connectome file')
+
+
+class BuildConnectome(MRTrix3Base):
+    """
+    Generate a connectome matrix from a streamlines file and a node
+    parcellation image
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> mat = mrt.BuildConnectome()
+    >>> mat.inputs.in_file = 'tracks.tck'
+    >>> mat.inputs.in_parc = 'aparc+aseg.nii'
+    >>> mat.cmdline  # doctest: +ELLIPSIS
+    'tck2connectome tracks.tck aparc+aseg.nii connectome.csv'
+    >>> mat.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'tck2connectome'
+    input_spec = BuildConnectomeInputSpec
+    output_spec = BuildConnectomeOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
+
+
+class LabelConfigInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-3,
+        desc='input anatomical image')
+    in_config = File(
+        exists=True,
+        argstr='%s',
+        position=-2,
+        desc='connectome configuration file')
+    out_file = File(
+        'parcellation.mif',
+        argstr='%s',
+        mandatory=True,
+        position=-1,
+        usedefault=True,
+        desc='output file after processing')
+
+    lut_basic = File(
+        argstr='-lut_basic %s',
+        desc='get information from '
+        'a basic lookup table consisting of index / name pairs')
+    lut_fs = File(
+        argstr='-lut_freesurfer %s',
+        desc='get information from '
+        'a FreeSurfer lookup table (typically "FreeSurferColorLUT'
+        '.txt")')
+    lut_aal = File(
+        argstr='-lut_aal %s',
+        desc='get information from the AAL '
+        'lookup table (typically "ROI_MNI_V4.txt")')
+    lut_itksnap = File(
+        argstr='-lut_itksnap %s',
+        desc='get information from an'
+        ' ITK-SNAP lookup table (this includes the IIT atlas '
+        'file "LUT_GM.txt")')
+    spine = File(
+        argstr='-spine %s',
+        desc='provide a manually-defined '
+        'segmentation of the base of the spine where the streamlines'
+        ' terminate, so that this can become a node in the connection'
+        ' matrix.')
+    nthreads = traits.Int(
+        argstr='-nthreads %d',
+        desc='number of threads. If zero, the number'
+        ' of available cpus will be used',
+        nohash=True)
+
+
+class LabelConfigOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output parcellation file')
+
+
+class LabelConfig(MRTrix3Base):
+    """
+    Re-configure parcellation to be incrementally defined.
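+
+    If ``in_config`` is left undefined, ``_parse_inputs`` below falls back
+    to the ``fs_default.txt`` configuration shipped with MRtrix3. A rough
+    sketch of the resolved path (the ``/opt/mrtrix3`` prefix is only the
+    fallback default used when the ``labelconfig`` executable cannot be
+    located, so the result shown is illustrative):
+
+    >>> import os.path as op
+    >>> op.join('/opt/mrtrix3', 'src/dwi/tractography/connectomics/'
+    ...         'example_configs/fs_default.txt')  # doctest: +SKIP
+    '/opt/mrtrix3/src/dwi/tractography/connectomics/example_configs/fs_default.txt'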
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> labels = mrt.LabelConfig()
+    >>> labels.inputs.in_file = 'aparc+aseg.nii'
+    >>> labels.inputs.in_config = 'mrtrix3_labelconfig.txt'
+    >>> labels.cmdline  # doctest: +ELLIPSIS
+    'labelconfig aparc+aseg.nii mrtrix3_labelconfig.txt parcellation.mif'
+    >>> labels.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'labelconfig'
+    input_spec = LabelConfigInputSpec
+    output_spec = LabelConfigOutputSpec
+
+    def _parse_inputs(self, skip=None):
+        if skip is None:
+            skip = []
+
+        if not isdefined(self.inputs.in_config):
+            from distutils.spawn import find_executable
+            path = find_executable(self._cmd)
+            if path is None:
+                path = os.getenv('MRTRIX3_HOME', '/opt/mrtrix3')
+            else:
+                path = op.dirname(op.dirname(path))
+
+            self.inputs.in_config = op.join(
+                path, 'src/dwi/tractography/connectomics/'
+                'example_configs/fs_default.txt')
+
+        return super(LabelConfig, self)._parse_inputs(skip=skip)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
+
+
+class LabelConvertInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-4,
+        desc='input anatomical image')
+    in_lut = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-3,
+        desc='get information from '
+        'a basic lookup table consisting of index / name pairs')
+    in_config = File(
+        exists=True,
+        argstr='%s',
+        position=-2,
+        desc='connectome configuration file')
+    out_file = File(
+        'parcellation.mif',
+        argstr='%s',
+        mandatory=True,
+        position=-1,
+        usedefault=True,
+        desc='output file after processing')
+    spine = File(
+        argstr='-spine %s',
+        desc='provide a manually-defined '
+        'segmentation of the base of the spine where the streamlines'
+        ' terminate, so that this can become a node in the connection'
+        ' matrix.')
+    num_threads = traits.Int(
+        argstr='-nthreads %d',
+        desc='number of threads. If zero, the number'
+        ' of available cpus will be used',
+        nohash=True)
+
+
+class LabelConvertOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output parcellation file')
+
+
+class LabelConvert(MRTrix3Base):
+    """
+    Re-configure parcellation to be incrementally defined.
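+
+    Unlike ``LabelConfig``, when ``in_config`` is left undefined this
+    interface locates the executable with ``nipype.utils.filemanip.which``
+    before falling back to ``$MRTRIX3_HOME`` (see ``_parse_inputs`` below).
+    A hedged sketch of that lookup; the returned path is illustrative and
+    assumes ``labelconvert`` is on ``$PATH``:
+
+    >>> from nipype.utils.filemanip import which
+    >>> which('labelconvert')  # doctest: +SKIP
+    '/opt/mrtrix3/bin/labelconvert'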
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> labels = mrt.LabelConvert()
+    >>> labels.inputs.in_file = 'aparc+aseg.nii'
+    >>> labels.inputs.in_config = 'mrtrix3_labelconfig.txt'
+    >>> labels.inputs.in_lut = 'FreeSurferColorLUT.txt'
+    >>> labels.cmdline
+    'labelconvert aparc+aseg.nii FreeSurferColorLUT.txt mrtrix3_labelconfig.txt parcellation.mif'
+    >>> labels.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'labelconvert'
+    input_spec = LabelConvertInputSpec
+    output_spec = LabelConvertOutputSpec
+
+    def _parse_inputs(self, skip=None):
+        if skip is None:
+            skip = []
+
+        if not isdefined(self.inputs.in_config):
+            from nipype.utils.filemanip import which
+            path = which(self._cmd)
+            if path is None:
+                # fall back to the default install prefix when the
+                # executable cannot be located
+                path = os.getenv('MRTRIX3_HOME', '/opt/mrtrix3')
+            else:
+                path = op.dirname(op.dirname(path))
+
+            self.inputs.in_config = op.join(
+                path, 'src/dwi/tractography/connectomics/'
+                'example_configs/fs_default.txt')
+
+        return super(LabelConvert, self)._parse_inputs(skip=skip)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
diff --git a/nipype/interfaces/mrtrix3/preprocess.py b/nipype/interfaces/mrtrix3/preprocess.py
new file mode 100644
index 0000000000..fc3559c918
--- /dev/null
+++ b/nipype/interfaces/mrtrix3/preprocess.py
@@ -0,0 +1,264 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os.path as op
+
+from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
+                    File, isdefined, Undefined, InputMultiObject)
+from .base import MRTrix3BaseInputSpec, MRTrix3Base
+
+
+class DWIDenoiseInputSpec(MRTrix3BaseInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        position=-2,
+        mandatory=True,
+        desc='input DWI image')
+    mask = File(
+        exists=True,
+        argstr='-mask %s',
+        position=1,
+        desc='mask image')
+    extent = traits.Tuple(
+        (traits.Int, traits.Int, traits.Int),
+        argstr='-extent %d,%d,%d',
+        desc='set the window size of the denoising filter (default = 5,5,5)')
+    noise = File(
+        argstr='-noise %s',
+        desc='the output noise map')
+    out_file = File(
+        name_template='%s_denoised',
+        name_source='in_file',
+        keep_extension=True,
+        argstr="%s",
+        position=-1,
+        desc="the output denoised DWI image")
+
+
+class DWIDenoiseOutputSpec(TraitedSpec):
+    out_file = File(desc="the output denoised DWI image", exists=True)
+
+
+class DWIDenoise(MRTrix3Base):
+    """
+    Denoise DWI data and estimate the noise level based on the optimal
+    threshold for PCA.
+
+    DWI data denoising and noise map estimation by exploiting data
+    redundancy in the PCA domain, using the prior knowledge that the
+    eigenspectrum of random covariance matrices is described by the
+    universal Marchenko-Pastur distribution.
+
+    Important note: image denoising must be performed as the first step of
+    the image processing pipeline. The routine will fail if interpolation
+    or smoothing has been applied to the data prior to denoising.
+
+    Note that this function does not correct for non-Gaussian noise biases.
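+
+    A minimal usage sketch of additionally saving the estimated noise map
+    through the ``noise`` input defined above (the output filename here is
+    illustrative):
+
+    >>> import nipype.interfaces.mrtrix3 as mrt  # doctest: +SKIP
+    >>> denoise = mrt.DWIDenoise()  # doctest: +SKIP
+    >>> denoise.inputs.in_file = 'dwi.mif'  # doctest: +SKIP
+    >>> denoise.inputs.noise = 'noise.mif'  # doctest: +SKIP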
+ + For more information, see + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> denoise = mrt.DWIDenoise() + >>> denoise.inputs.in_file = 'dwi.mif' + >>> denoise.inputs.mask = 'mask.mif' + >>> denoise.cmdline # doctest: +ELLIPSIS + 'dwidenoise -mask mask.mif dwi.mif dwi_denoised.mif' + >>> denoise.run() # doctest: +SKIP + """ + + _cmd = 'dwidenoise' + input_spec = DWIDenoiseInputSpec + output_spec = DWIDenoiseOutputSpec + + +class ResponseSDInputSpec(MRTrix3BaseInputSpec): + algorithm = traits.Enum( + 'msmt_5tt', + 'dhollander', + 'tournier', + 'tax', + argstr='%s', + position=1, + mandatory=True, + desc='response estimation algorithm (multi-tissue)') + in_file = File( + exists=True, + argstr='%s', + position=-5, + mandatory=True, + desc='input DWI image') + mtt_file = File(argstr='%s', position=-4, desc='input 5tt image') + wm_file = File( + 'wm.txt', + argstr='%s', + position=-3, + usedefault=True, + desc='output WM response text file') + gm_file = File( + argstr='%s', position=-2, desc='output GM response text file') + csf_file = File( + argstr='%s', position=-1, desc='output CSF response text file') + in_mask = File( + exists=True, argstr='-mask %s', desc='provide initial mask image') + max_sh = InputMultiObject( + traits.Int, + value=[8], + usedefault=True, + argstr='-lmax %s', + sep=',', + desc=('maximum harmonic degree of response function - single value for ' + 'single-shell response, list for multi-shell response')) + + +class ResponseSDOutputSpec(TraitedSpec): + wm_file = File(argstr='%s', desc='output WM response text file') + gm_file = File(argstr='%s', desc='output GM response text file') + csf_file = File(argstr='%s', desc='output CSF response text file') + + +class ResponseSD(MRTrix3Base): + """ + Estimate response function(s) for spherical deconvolution using the specified algorithm. + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> resp = mrt.ResponseSD() + >>> resp.inputs.in_file = 'dwi.mif' + >>> resp.inputs.algorithm = 'tournier' + >>> resp.inputs.grad_fsl = ('bvecs', 'bvals') + >>> resp.cmdline # doctest: +ELLIPSIS + 'dwi2response tournier -fslgrad bvecs bvals -lmax 8 dwi.mif wm.txt' + >>> resp.run() # doctest: +SKIP + + # We can also pass in multiple harmonic degrees in the case of multi-shell + >>> resp.inputs.max_sh = [6,8,10] + >>> resp.cmdline + 'dwi2response tournier -fslgrad bvecs bvals -lmax 6,8,10 dwi.mif wm.txt' + """ + + _cmd = 'dwi2response' + input_spec = ResponseSDInputSpec + output_spec = ResponseSDOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['wm_file'] = op.abspath(self.inputs.wm_file) + if self.inputs.gm_file != Undefined: + outputs['gm_file'] = op.abspath(self.inputs.gm_file) + if self.inputs.csf_file != Undefined: + outputs['csf_file'] = op.abspath(self.inputs.csf_file) + return outputs + + +class ACTPrepareFSLInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input anatomical image') + + out_file = File( + 'act_5tt.mif', + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + desc='output file after processing') + + +class ACTPrepareFSLOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output response file') + + +class ACTPrepareFSL(CommandLine): + """ + Generate anatomical information necessary for Anatomically + Constrained Tractography (ACT). 
+ + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> prep = mrt.ACTPrepareFSL() + >>> prep.inputs.in_file = 'T1.nii.gz' + >>> prep.cmdline # doctest: +ELLIPSIS + 'act_anat_prepare_fsl T1.nii.gz act_5tt.mif' + >>> prep.run() # doctest: +SKIP + """ + + _cmd = 'act_anat_prepare_fsl' + input_spec = ACTPrepareFSLInputSpec + output_spec = ACTPrepareFSLOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class ReplaceFSwithFIRSTInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-4, + desc='input anatomical image') + in_t1w = File( + exists=True, + argstr='%s', + mandatory=True, + position=-3, + desc='input T1 image') + in_config = File( + exists=True, + argstr='%s', + position=-2, + desc='connectome configuration file') + + out_file = File( + 'aparc+first.mif', + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + desc='output file after processing') + + +class ReplaceFSwithFIRSTOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output response file') + + +class ReplaceFSwithFIRST(CommandLine): + """ + Replace deep gray matter structures segmented with FSL FIRST in a + FreeSurfer parcellation. + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> prep = mrt.ReplaceFSwithFIRST() + >>> prep.inputs.in_file = 'aparc+aseg.nii' + >>> prep.inputs.in_t1w = 'T1.nii.gz' + >>> prep.inputs.in_config = 'mrtrix3_labelconfig.txt' + >>> prep.cmdline # doctest: +ELLIPSIS + 'fs_parc_replace_sgm_first aparc+aseg.nii T1.nii.gz \ +mrtrix3_labelconfig.txt aparc+first.mif' + >>> prep.run() # doctest: +SKIP + """ + + _cmd = 'fs_parc_replace_sgm_first' + input_spec = ReplaceFSwithFIRSTInputSpec + output_spec = ReplaceFSwithFIRSTOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs diff --git a/nipype/interfaces/mrtrix3/reconst.py b/nipype/interfaces/mrtrix3/reconst.py new file mode 100644 index 0000000000..6583c58996 --- /dev/null +++ b/nipype/interfaces/mrtrix3/reconst.py @@ -0,0 +1,167 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op + +from ..base import traits, TraitedSpec, File, Undefined +from .base import MRTrix3BaseInputSpec, MRTrix3Base + + +class FitTensorInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input diffusion weighted images') + out_file = File( + 'dti.mif', + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + desc='the output diffusion tensor image') + + # General options + in_mask = File( + exists=True, + argstr='-mask %s', + desc=('only perform computation within the specified ' + 'binary brain mask image')) + method = traits.Enum( + 'nonlinear', + 'loglinear', + 'sech', + 'rician', + argstr='-method %s', + desc=('select method used to perform the fitting')) + reg_term = traits.Float( + 5.e3, usedefault=True, + argstr='-regularisation %f', + desc=('specify the strength of the regularisation term on the ' + 'magnitude of the tensor elements (default = 5000). 
This ' + 'only applies to the non-linear methods')) + + +class FitTensorOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output DTI file') + + +class FitTensor(MRTrix3Base): + """ + Convert diffusion-weighted images to tensor images + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> tsr = mrt.FitTensor() + >>> tsr.inputs.in_file = 'dwi.mif' + >>> tsr.inputs.in_mask = 'mask.nii.gz' + >>> tsr.inputs.grad_fsl = ('bvecs', 'bvals') + >>> tsr.cmdline # doctest: +ELLIPSIS + 'dwi2tensor -fslgrad bvecs bvals -mask mask.nii.gz \ +-regularisation 5000.000000 dwi.mif dti.mif' + >>> tsr.run() # doctest: +SKIP + """ + + _cmd = 'dwi2tensor' + input_spec = FitTensorInputSpec + output_spec = FitTensorOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class EstimateFODInputSpec(MRTrix3BaseInputSpec): + algorithm = traits.Enum( + 'csd', + 'msmt_csd', + argstr='%s', + position=-8, + mandatory=True, + desc='FOD algorithm') + in_file = File( + exists=True, + argstr='%s', + position=-7, + mandatory=True, + desc='input DWI image') + wm_txt = File( + argstr='%s', position=-6, mandatory=True, desc='WM response text file') + wm_odf = File( + 'wm.mif', + argstr='%s', + position=-5, + usedefault=True, + mandatory=True, + desc='output WM ODF') + gm_txt = File(argstr='%s', position=-4, desc='GM response text file') + gm_odf = File('gm.mif', usedefault=True, argstr='%s', + position=-3, desc='output GM ODF') + csf_txt = File(argstr='%s', position=-2, desc='CSF response text file') + csf_odf = File('csf.mif', usedefault=True, argstr='%s', + position=-1, desc='output CSF ODF') + mask_file = File(exists=True, argstr='-mask %s', desc='mask image') + + # DW Shell selection options + shell = traits.List( + traits.Float, + sep=',', + argstr='-shell %s', + desc='specify one or more dw gradient shells') + max_sh = traits.Int( + 8, usedefault=True, + argstr='-lmax %d', + desc='maximum harmonic degree of response function') + in_dirs = File( + exists=True, + argstr='-directions %s', + desc=('specify the directions over which to apply the non-negativity ' + 'constraint (by default, the built-in 300 direction set is ' + 'used). 
These should be supplied as a text file containing the ' + '[ az el ] pairs for the directions.')) + + +class EstimateFODOutputSpec(TraitedSpec): + wm_odf = File(argstr='%s', desc='output WM ODF') + gm_odf = File(argstr='%s', desc='output GM ODF') + csf_odf = File(argstr='%s', desc='output CSF ODF') + + +class EstimateFOD(MRTrix3Base): + """ + Estimate fibre orientation distributions from diffusion data using spherical deconvolution + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> fod = mrt.EstimateFOD() + >>> fod.inputs.algorithm = 'csd' + >>> fod.inputs.in_file = 'dwi.mif' + >>> fod.inputs.wm_txt = 'wm.txt' + >>> fod.inputs.grad_fsl = ('bvecs', 'bvals') + >>> fod.cmdline # doctest: +ELLIPSIS + 'dwi2fod -fslgrad bvecs bvals -lmax 8 csd dwi.mif wm.txt wm.mif gm.mif csf.mif' + >>> fod.run() # doctest: +SKIP + """ + + _cmd = 'dwi2fod' + input_spec = EstimateFODInputSpec + output_spec = EstimateFODOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['wm_odf'] = op.abspath(self.inputs.wm_odf) + if self.inputs.gm_odf != Undefined: + outputs['gm_odf'] = op.abspath(self.inputs.gm_odf) + if self.inputs.csf_odf != Undefined: + outputs['csf_odf'] = op.abspath(self.inputs.csf_odf) + return outputs diff --git a/nipype/interfaces/mrtrix3/tests/__init__.py b/nipype/interfaces/mrtrix3/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py new file mode 100644 index 0000000000..abb3ba6831 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ACTPrepareFSL.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ACTPrepareFSL + + +def test_ACTPrepareFSL_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + ) + inputs = ACTPrepareFSL.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ACTPrepareFSL_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ACTPrepareFSL.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py new file mode 100644 index 0000000000..c318be0bfd --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BrainMask.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import BrainMask + + +def test_BrainMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr='-nthreads %d', + 
nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + ) + inputs = BrainMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BrainMask_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BrainMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py new file mode 100644 index 0000000000..95e5797d06 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_BuildConnectome.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..connectivity import BuildConnectome + + +def test_BuildConnectome_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + in_parc=dict( + argstr='%s', + position=-2, + ), + in_scalar=dict(argstr='-image %s', ), + in_weights=dict(argstr='-tck_weights_in %s', ), + keep_unassigned=dict(argstr='-keep_unassigned', ), + metric=dict(argstr='-metric %s', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + search_forward=dict(argstr='-assignment_forward_search %f', ), + search_radius=dict(argstr='-assignment_radial_search %f', ), + search_reverse=dict(argstr='-assignment_reverse_search %f', ), + vox_lookup=dict(argstr='-assignment_voxel_lookup', ), + zero_diagonal=dict(argstr='-zero_diagonal', ), + ) + inputs = BuildConnectome.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BuildConnectome_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BuildConnectome.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py new file mode 100644 index 0000000000..38369e8fe5 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ComputeTDI.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ComputeTDI + + +def test_ComputeTDI_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + contrast=dict(argstr='-constrast %s', ), + data_type=dict(argstr='-datatype %s', ), + dixel=dict(argstr='-dixel %s', ), + ends_only=dict(argstr='-ends_only', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_tck=dict(argstr='-fwhm_tck %f', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_map=dict(argstr='-image %s', ), + map_zero=dict(argstr='-map_zero', ), + max_tod=dict(argstr='-tod %d', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + precise=dict(argstr='-precise', ), + reference=dict(argstr='-template %s', ), + stat_tck=dict(argstr='-stat_tck %s', ), + 
stat_vox=dict(argstr='-stat_vox %s', ), + tck_weights=dict(argstr='-tck_weights_in %s', ), + upsample=dict(argstr='-upsample %d', ), + use_dec=dict(argstr='-dec', ), + vox_size=dict( + argstr='-vox %s', + sep=',', + ), + ) + inputs = ComputeTDI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeTDI_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ComputeTDI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py new file mode 100644 index 0000000000..769ccb34a9 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIDenoise.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DWIDenoise + + +def test_DWIDenoise_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + extent=dict(argstr='-extent %d,%d,%d', ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + mask=dict( + argstr='-mask %s', + position=1, + ), + noise=dict(argstr='-noise %s', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source='in_file', + name_template='%s_denoised', + position=-1, + ), + ) + inputs = DWIDenoise.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIDenoise_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DWIDenoise.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py new file mode 100644 index 0000000000..0114c5efe0 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_DWIExtract.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import DWIExtract + + +def test_DWIExtract_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + bzero=dict(argstr='-bzero', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nobzero=dict(argstr='-no_bzero', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + shell=dict( + argstr='-shell %s', + sep=',', + ), + singleshell=dict(argstr='-singleshell', ), + ) + inputs = DWIExtract.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == 
value +def test_DWIExtract_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DWIExtract.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py new file mode 100644 index 0000000000..a5c152e928 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_EstimateFOD.py @@ -0,0 +1,87 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reconst import EstimateFOD + + +def test_EstimateFOD_inputs(): + input_map = dict( + algorithm=dict( + argstr='%s', + mandatory=True, + position=-8, + ), + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + csf_odf=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + csf_txt=dict( + argstr='%s', + position=-2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_odf=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + gm_txt=dict( + argstr='%s', + position=-4, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_dirs=dict(argstr='-directions %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-7, + ), + mask_file=dict(argstr='-mask %s', ), + max_sh=dict( + argstr='-lmax %d', + usedefault=True, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + shell=dict( + argstr='-shell %s', + sep=',', + ), + wm_odf=dict( + argstr='%s', + mandatory=True, + position=-5, + usedefault=True, + ), + wm_txt=dict( + argstr='%s', + mandatory=True, + position=-6, + ), + ) + inputs = EstimateFOD.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateFOD_outputs(): + output_map = dict( + csf_odf=dict(argstr='%s', ), + gm_odf=dict(argstr='%s', ), + wm_odf=dict(argstr='%s', ), + ) + outputs = EstimateFOD.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py new file mode 100644 index 0000000000..40b7baa931 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_FitTensor.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reconst import FitTensor + + +def test_FitTensor_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + in_mask=dict(argstr='-mask %s', ), + method=dict(argstr='-method %s', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + reg_term=dict( + argstr='-regularisation %f', + usedefault=True, + ), + ) + inputs = FitTensor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitTensor_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FitTensor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py new file mode 100644 index 0000000000..dcfbe1fc6f --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Generate5tt.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Generate5tt + + +def test_Generate5tt_inputs(): + input_map = dict( + algorithm=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + ) + inputs = Generate5tt.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Generate5tt_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Generate5tt.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py new file mode 100644 index 0000000000..2de0e6c115 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConfig.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..connectivity import LabelConfig + + +def test_LabelConfig_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr='%s', + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + lut_aal=dict(argstr='-lut_aal %s', ), + lut_basic=dict(argstr='-lut_basic %s', ), + lut_fs=dict(argstr='-lut_freesurfer %s', ), + lut_itksnap=dict(argstr='-lut_itksnap %s', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict(argstr='-spine %s', ), + ) + inputs = LabelConfig.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LabelConfig_outputs(): + output_map = dict(out_file=dict(), ) + outputs = LabelConfig.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py new file mode 100644 index 0000000000..1e93ae82d8 --- /dev/null +++ 
b/nipype/interfaces/mrtrix3/tests/test_auto_LabelConvert.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..connectivity import LabelConvert + + +def test_LabelConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr='%s', + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + in_lut=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + num_threads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + spine=dict(argstr='-spine %s', ), + ) + inputs = LabelConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LabelConvert_outputs(): + output_map = dict(out_file=dict(), ) + outputs = LabelConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py new file mode 100644 index 0000000000..ece24e1d47 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRConvert.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRConvert + + +def test_MRConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + axes=dict( + argstr='-axes %s', + sep=',', + ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + coord=dict( + argstr='-coord %s', + sep=' ', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + scaling=dict( + argstr='-scaling %s', + sep=',', + ), + vox=dict( + argstr='-vox %s', + sep=',', + ), + ) + inputs = MRConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRConvert_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py new file mode 100644 index 0000000000..eec06b6276 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRMath.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import MRMath + + +def test_MRMath_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + axis=dict(argstr='-axis %d', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + 
in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + operation=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + ) + inputs = MRMath.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MRMath_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MRMath.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py new file mode 100644 index 0000000000..453baa053a --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_MRTrix3Base.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import MRTrix3Base + + +def test_MRTrix3Base_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = MRTrix3Base.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py new file mode 100644 index 0000000000..9aa29acbe8 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Mesh2PVE.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Mesh2PVE + + +def test_Mesh2PVE_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + in_first=dict(argstr='-first %s', ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + reference=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + ) + inputs = Mesh2PVE.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Mesh2PVE_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Mesh2PVE.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py new file mode 100644 index 0000000000..502a0f7fb1 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ReplaceFSwithFIRST.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ReplaceFSwithFIRST + + +def test_ReplaceFSwithFIRST_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_config=dict( + argstr='%s', + position=-2, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-4, + ), + in_t1w=dict( + argstr='%s', + mandatory=True, + position=-3, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, 
+ ), + ) + inputs = ReplaceFSwithFIRST.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ReplaceFSwithFIRST_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ReplaceFSwithFIRST.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py new file mode 100644 index 0000000000..027527ec85 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_ResponseSD.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ResponseSD + + +def test_ResponseSD_inputs(): + input_map = dict( + algorithm=dict( + argstr='%s', + mandatory=True, + position=1, + ), + args=dict(argstr='%s', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + csf_file=dict( + argstr='%s', + position=-1, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + gm_file=dict( + argstr='%s', + position=-2, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-5, + ), + in_mask=dict(argstr='-mask %s', ), + max_sh=dict( + argstr='-lmax %s', + sep=',', + usedefault=True, + ), + mtt_file=dict( + argstr='%s', + position=-4, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + wm_file=dict( + argstr='%s', + position=-3, + usedefault=True, + ), + ) + inputs = ResponseSD.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ResponseSD_outputs(): + output_map = dict( + csf_file=dict(argstr='%s', ), + gm_file=dict(argstr='%s', ), + wm_file=dict(argstr='%s', ), + ) + outputs = ResponseSD.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py new file mode 100644 index 0000000000..93a39484f8 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TCK2VTK.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TCK2VTK + + +def test_TCK2VTK_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + position=-1, + usedefault=True, + ), + reference=dict(argstr='-image %s', ), + voxel=dict(argstr='-image %s', ), + ) + inputs = TCK2VTK.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TCK2VTK_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TCK2VTK.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff 
--git a/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py new file mode 100644 index 0000000000..ae532c9910 --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_TensorMetrics.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import TensorMetrics + + +def test_TensorMetrics_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + component=dict( + argstr='-num %s', + sep=',', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + in_mask=dict(argstr='-mask %s', ), + modulate=dict(argstr='-modulate %s', ), + out_adc=dict(argstr='-adc %s', ), + out_eval=dict(argstr='-value %s', ), + out_evec=dict(argstr='-vector %s', ), + out_fa=dict(argstr='-fa %s', ), + ) + inputs = TensorMetrics.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TensorMetrics_outputs(): + output_map = dict( + out_adc=dict(), + out_eval=dict(), + out_evec=dict(), + out_fa=dict(), + ) + outputs = TensorMetrics.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py new file mode 100644 index 0000000000..dff5c783ee --- /dev/null +++ b/nipype/interfaces/mrtrix3/tests/test_auto_Tractography.py @@ -0,0 +1,100 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..tracking import Tractography + + +def test_Tractography_inputs(): + input_map = dict( + act_file=dict(argstr='-act %s', ), + algorithm=dict( + argstr='-algorithm %s', + usedefault=True, + ), + angle=dict(argstr='-angle %f', ), + args=dict(argstr='%s', ), + backtrack=dict(argstr='-backtrack', ), + bval_scale=dict(argstr='-bvalue_scaling %s', ), + crop_at_gmwmi=dict(argstr='-crop_at_gmwmi', ), + cutoff=dict(argstr='-cutoff %f', ), + cutoff_init=dict(argstr='-initcutoff %f', ), + downsample=dict(argstr='-downsample %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + grad_file=dict(argstr='-grad %s', ), + grad_fsl=dict(argstr='-fslgrad %s %s', ), + in_bval=dict(), + in_bvec=dict(argstr='-fslgrad %s %s', ), + in_file=dict( + argstr='%s', + mandatory=True, + position=-2, + ), + init_dir=dict(argstr='-initdirection %f,%f,%f', ), + max_length=dict(argstr='-maxlength %f', ), + max_seed_attempts=dict(argstr='-max_seed_attempts %d', ), + max_tracks=dict(argstr='-maxnum %d', ), + min_length=dict(argstr='-minlength %f', ), + n_samples=dict( + argstr='-samples %d', + usedefault=True, + ), + n_tracks=dict(argstr='-number %d', ), + n_trials=dict(argstr='-trials %d', ), + noprecompt=dict(argstr='-noprecomputed', ), + nthreads=dict( + argstr='-nthreads %d', + nohash=True, + ), + out_file=dict( + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + ), + out_seeds=dict( + argstr='-output_seeds %s', + usedefault=True, + ), + power=dict(argstr='-power %d', ), + roi_excl=dict(argstr='-exclude %s', ), + roi_incl=dict(argstr='-include %s', ), + roi_mask=dict(argstr='-mask %s', ), + seed_dynamic=dict(argstr='-seed_dynamic %s', ), + seed_gmwmi=dict( + argstr='-seed_gmwmi %s', + requires=['act_file'], 
+ ), + seed_grid_voxel=dict( + argstr='-seed_grid_per_voxel %s %d', + xor=['seed_image', 'seed_rnd_voxel'], + ), + seed_image=dict(argstr='-seed_image %s', ), + seed_rejection=dict(argstr='-seed_rejection %s', ), + seed_rnd_voxel=dict( + argstr='-seed_random_per_voxel %s %d', + xor=['seed_image', 'seed_grid_voxel'], + ), + seed_sphere=dict(argstr='-seed_sphere %f,%f,%f,%f', ), + sph_trait=dict(argstr='%f,%f,%f,%f', ), + step_size=dict(argstr='-step %f', ), + stop=dict(argstr='-stop', ), + unidirectional=dict(argstr='-unidirectional', ), + use_rk4=dict(argstr='-rk4', ), + ) + inputs = Tractography.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Tractography_outputs(): + output_map = dict( + out_file=dict(), + out_seeds=dict(), + ) + outputs = Tractography.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/mrtrix3/tracking.py b/nipype/interfaces/mrtrix3/tracking.py new file mode 100644 index 0000000000..e8bf15a33f --- /dev/null +++ b/nipype/interfaces/mrtrix3/tracking.py @@ -0,0 +1,285 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os.path as op + +from ..base import traits, TraitedSpec, File +from .base import MRTrix3BaseInputSpec, MRTrix3Base + + +class TractographyInputSpec(MRTrix3BaseInputSpec): + sph_trait = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + traits.Float, + argstr='%f,%f,%f,%f') + + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input file to be processed') + + out_file = File( + 'tracked.tck', + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + desc='output file containing tracks') + + algorithm = traits.Enum( + 'iFOD2', + 'FACT', + 'iFOD1', + 'Nulldist', + 'SD_Stream', + 'Tensor_Det', + 'Tensor_Prob', + usedefault=True, + argstr='-algorithm %s', + desc='tractography algorithm to be used') + + # ROIs processing options + roi_incl = traits.Either( + File(exists=True), + sph_trait, + argstr='-include %s', + desc=('specify an inclusion region of interest, streamlines must' + ' traverse ALL inclusion regions to be accepted')) + roi_excl = traits.Either( + File(exists=True), + sph_trait, + argstr='-exclude %s', + desc=('specify an exclusion region of interest, streamlines that' + ' enter ANY exclude region will be discarded')) + roi_mask = traits.Either( + File(exists=True), + sph_trait, + argstr='-mask %s', + desc=('specify a masking region of interest. If defined,' + 'streamlines exiting the mask will be truncated')) + + # Streamlines tractography options + step_size = traits.Float( + argstr='-step %f', + desc=('set the step size of the algorithm in mm (default is 0.1' + ' x voxelsize; for iFOD2: 0.5 x voxelsize)')) + angle = traits.Float( + argstr='-angle %f', + desc=('set the maximum angle between successive steps (default ' + 'is 90deg x stepsize / voxelsize)')) + n_tracks = traits.Int( + argstr='-number %d', + desc=('set the desired number of tracks. 
The program will continue' + ' to generate tracks until this number of tracks have been ' + 'selected and written to the output file')) + max_tracks = traits.Int( + argstr='-maxnum %d', + desc=('set the maximum number of tracks to generate. The program ' + 'will not generate more tracks than this number, even if ' + 'the desired number of tracks hasn\'t yet been reached ' + '(default is 100 x number)')) + max_length = traits.Float( + argstr='-maxlength %f', + desc=('set the maximum length of any track in mm (default is ' + '100 x voxelsize)')) + min_length = traits.Float( + argstr='-minlength %f', + desc=('set the minimum length of any track in mm (default is ' + '5 x voxelsize)')) + cutoff = traits.Float( + argstr='-cutoff %f', + desc=('set the FA or FOD amplitude cutoff for terminating ' + 'tracks (default is 0.1)')) + cutoff_init = traits.Float( + argstr='-initcutoff %f', + desc=('set the minimum FA or FOD amplitude for initiating ' + 'tracks (default is the same as the normal cutoff)')) + n_trials = traits.Int( + argstr='-trials %d', + desc=('set the maximum number of sampling trials at each point' + ' (only used for probabilistic tracking)')) + unidirectional = traits.Bool( + argstr='-unidirectional', + desc=('track from the seed point in one direction only ' + '(default is to track in both directions)')) + init_dir = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + argstr='-initdirection %f,%f,%f', + desc=('specify an initial direction for the tracking (this ' + 'should be supplied as a vector of 3 comma-separated values')) + noprecompt = traits.Bool( + argstr='-noprecomputed', + desc=('do NOT pre-compute legendre polynomial values. Warning: this ' + 'will slow down the algorithm by a factor of approximately 4')) + power = traits.Int( + argstr='-power %d', + desc=('raise the FOD to the power specified (default is 1/nsamples)')) + n_samples = traits.Int( + 4, usedefault=True, + argstr='-samples %d', + desc=('set the number of FOD samples to take per step for the 2nd ' + 'order (iFOD2) method')) + use_rk4 = traits.Bool( + argstr='-rk4', + desc=('use 4th-order Runge-Kutta integration (slower, but eliminates' + ' curvature overshoot in 1st-order deterministic methods)')) + stop = traits.Bool( + argstr='-stop', + desc=('stop propagating a streamline once it has traversed all ' + 'include regions')) + downsample = traits.Float( + argstr='-downsample %f', + desc='downsample the generated streamlines to reduce output file size') + + # Anatomically-Constrained Tractography options + act_file = File( + exists=True, + argstr='-act %s', + desc=('use the Anatomically-Constrained Tractography framework during' + ' tracking; provided image must be in the 5TT ' + '(five - tissue - type) format')) + backtrack = traits.Bool( + argstr='-backtrack', desc='allow tracks to be truncated') + + crop_at_gmwmi = traits.Bool( + argstr='-crop_at_gmwmi', + desc=('crop streamline endpoints more ' + 'precisely as they cross the GM-WM interface')) + + # Tractography seeding options + seed_sphere = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + traits.Float, + argstr='-seed_sphere %f,%f,%f,%f', + desc='spherical seed') + seed_image = File( + exists=True, + argstr='-seed_image %s', + desc='seed streamlines entirely at random within mask') + seed_rnd_voxel = traits.Tuple( + File(exists=True), + traits.Int(), + argstr='-seed_random_per_voxel %s %d', + xor=['seed_image', 'seed_grid_voxel'], + desc=('seed a fixed number of streamlines per voxel in a mask ' + 'image; random placement of 
seeds in each voxel')) + seed_grid_voxel = traits.Tuple( + File(exists=True), + traits.Int(), + argstr='-seed_grid_per_voxel %s %d', + xor=['seed_image', 'seed_rnd_voxel'], + desc=('seed a fixed number of streamlines per voxel in a mask ' + 'image; place seeds on a 3D mesh grid (grid_size argument ' + 'is per axis; so a grid_size of 3 results in 27 seeds per' + ' voxel)')) + seed_rejection = File( + exists=True, + argstr='-seed_rejection %s', + desc=('seed from an image using rejection sampling (higher ' + 'values = more probable to seed from')) + seed_gmwmi = File( + exists=True, + argstr='-seed_gmwmi %s', + requires=['act_file'], + desc=('seed from the grey matter - white matter interface (only ' + 'valid if using ACT framework)')) + seed_dynamic = File( + exists=True, + argstr='-seed_dynamic %s', + desc=('determine seed points dynamically using the SIFT model ' + '(must not provide any other seeding mechanism). Note that' + ' while this seeding mechanism improves the distribution of' + ' reconstructed streamlines density, it should NOT be used ' + 'as a substitute for the SIFT method itself.')) + max_seed_attempts = traits.Int( + argstr='-max_seed_attempts %d', + desc=('set the maximum number of times that the tracking ' + 'algorithm should attempt to find an appropriate tracking' + ' direction from a given seed point')) + out_seeds = File( + 'out_seeds.nii.gz', usedefault=True, + argstr='-output_seeds %s', + desc=('output the seed location of all successful streamlines to' + ' a file')) + + +class TractographyOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='the output filtered tracks') + out_seeds = File( + desc=('output the seed location of all successful' + ' streamlines to a file')) + + +class Tractography(MRTrix3Base): + """ + Performs streamlines tractography after selecting the appropriate + algorithm. + + .. [FACT] Mori, S.; Crain, B. J.; Chacko, V. P. & van Zijl, + P. C. M. Three-dimensional tracking of axonal projections in the + brain by magnetic resonance imaging. Annals of Neurology, 1999, + 45, 265-269 + + .. [iFOD1] Tournier, J.-D.; Calamante, F. & Connelly, A. MRtrix: + Diffusion tractography in crossing fiber regions. Int. J. Imaging + Syst. Technol., 2012, 22, 53-66 + + .. [iFOD2] Tournier, J.-D.; Calamante, F. & Connelly, A. Improved + probabilistic streamlines tractography by 2nd order integration + over fibre orientation distributions. Proceedings of the + International Society for Magnetic Resonance in Medicine, 2010, 1670 + + .. [Nulldist] Morris, D. M.; Embleton, K. V. & Parker, G. J. + Probabilistic fibre tracking: Differentiation of connections from + chance events. NeuroImage, 2008, 42, 1329-1339 + + .. [Tensor_Det] Basser, P. J.; Pajevic, S.; Pierpaoli, C.; Duda, J. + and Aldroubi, A. In vivo fiber tractography using DT-MRI data. + Magnetic Resonance in Medicine, 2000, 44, 625-632 + + .. [Tensor_Prob] Jones, D. Tractography Gone Wild: Probabilistic Fibre + Tracking Using the Wild Bootstrap With Diffusion Tensor MRI. 
IEEE
+    Transactions on Medical Imaging, 2008, 27, 1268-1274
+
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> tk = mrt.Tractography()
+    >>> tk.inputs.in_file = 'fods.mif'
+    >>> tk.inputs.roi_mask = 'mask.nii.gz'
+    >>> tk.inputs.seed_sphere = (80, 100, 70, 10)
+    >>> tk.cmdline  # doctest: +ELLIPSIS
+    'tckgen -algorithm iFOD2 -samples 4 -output_seeds out_seeds.nii.gz \
+-mask mask.nii.gz -seed_sphere \
+80.000000,100.000000,70.000000,10.000000 fods.mif tracked.tck'
+    >>> tk.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'tckgen'
+    input_spec = TractographyInputSpec
+    output_spec = TractographyOutputSpec
+
+    def _format_arg(self, name, trait_spec, value):
+        if 'roi_' in name and isinstance(value, tuple):
+            value = ['%f' % v for v in value]
+            return trait_spec.argstr % ','.join(value)
+
+        return super(Tractography, self)._format_arg(name, trait_spec, value)
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
diff --git a/nipype/interfaces/mrtrix3/utils.py b/nipype/interfaces/mrtrix3/utils.py
new file mode 100644
index 0000000000..a667c716f4
--- /dev/null
+++ b/nipype/interfaces/mrtrix3/utils.py
@@ -0,0 +1,679 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os.path as op
+
+from ..base import (CommandLineInputSpec, CommandLine, traits, TraitedSpec,
+                    File, InputMultiPath, isdefined)
+from .base import MRTrix3BaseInputSpec, MRTrix3Base
+
+
+class BrainMaskInputSpec(MRTrix3BaseInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='input diffusion weighted images')
+    out_file = File(
+        'brainmask.mif',
+        argstr='%s',
+        mandatory=True,
+        position=-1,
+        usedefault=True,
+        desc='output brain mask')
+
+
+class BrainMaskOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output brain mask')
+
+
+class BrainMask(CommandLine):
+    """
+    Generate a whole-brain mask from a DWI image
+
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> bmsk = mrt.BrainMask()
+    >>> bmsk.inputs.in_file = 'dwi.mif'
+    >>> bmsk.cmdline  # doctest: +ELLIPSIS
+    'dwi2mask dwi.mif brainmask.mif'
+    >>> bmsk.run()  # doctest: +SKIP
+    """
+
+    _cmd = 'dwi2mask'
+    input_spec = BrainMaskInputSpec
+    output_spec = BrainMaskOutputSpec
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        outputs['out_file'] = op.abspath(self.inputs.out_file)
+        return outputs
+
+
+class Mesh2PVEInputSpec(CommandLineInputSpec):
+    in_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-3,
+        desc='input mesh')
+    reference = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=-2,
+        desc='input reference image')
+    in_first = File(
+        exists=True,
+        argstr='-first %s',
+        desc='indicates that the mesh file is provided by FSL FIRST')
+
+    out_file = File(
+        'mesh2volume.nii.gz',
+        argstr='%s',
+        mandatory=True,
+        position=-1,
+        usedefault=True,
+        desc='output file containing the partial volume image')
+
+
+class Mesh2PVEOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='the output partial volume image')
+
+
+class Mesh2PVE(CommandLine):
+    """
+    Convert a mesh surface to a partial volume estimation image
+
+
+    Example
+    -------
+
+    >>> import nipype.interfaces.mrtrix3 as mrt
+    >>> m2p = mrt.Mesh2PVE()
+    >>> m2p.inputs.in_file =
'surf1.vtk' + >>> m2p.inputs.reference = 'dwi.mif' + >>> m2p.inputs.in_first = 'T1.nii.gz' + >>> m2p.cmdline # doctest: +ELLIPSIS + 'mesh2pve -first T1.nii.gz surf1.vtk dwi.mif mesh2volume.nii.gz' + >>> m2p.run() # doctest: +SKIP + """ + + _cmd = 'mesh2pve' + input_spec = Mesh2PVEInputSpec + output_spec = Mesh2PVEOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class Generate5ttInputSpec(MRTrix3BaseInputSpec): + algorithm = traits.Enum( + 'fsl', + 'gif', + 'freesurfer', + argstr='%s', + position=-3, + mandatory=True, + desc='tissue segmentation algorithm') + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input image') + out_file = File( + argstr='%s', mandatory=True, position=-1, desc='output image') + + +class Generate5ttOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class Generate5tt(MRTrix3Base): + """ + Generate a 5TT image suitable for ACT using the selected algorithm + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> gen5tt = mrt.Generate5tt() + >>> gen5tt.inputs.in_file = 'T1.nii.gz' + >>> gen5tt.inputs.algorithm = 'fsl' + >>> gen5tt.inputs.out_file = '5tt.mif' + >>> gen5tt.cmdline # doctest: +ELLIPSIS + '5ttgen fsl T1.nii.gz 5tt.mif' + >>> gen5tt.run() # doctest: +SKIP + """ + + _cmd = '5ttgen' + input_spec = Generate5ttInputSpec + output_spec = Generate5ttOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class TensorMetricsInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-1, + desc='input DTI image') + + out_fa = File(argstr='-fa %s', desc='output FA file') + out_adc = File(argstr='-adc %s', desc='output ADC file') + out_evec = File( + argstr='-vector %s', desc='output selected eigenvector(s) file') + out_eval = File( + argstr='-value %s', desc='output selected eigenvalue(s) file') + component = traits.List( + [1], + usedefault=True, + argstr='-num %s', + sep=',', + desc=('specify the desired eigenvalue/eigenvector(s). 
Note that ' + 'several eigenvalues can be specified as a number sequence')) + in_mask = File( + exists=True, + argstr='-mask %s', + desc=('only perform computation within the specified binary' + ' brain mask image')) + modulate = traits.Enum( + 'FA', + 'none', + 'eval', + argstr='-modulate %s', + desc=('how to modulate the magnitude of the' + ' eigenvectors')) + + +class TensorMetricsOutputSpec(TraitedSpec): + out_fa = File(desc='output FA file') + out_adc = File(desc='output ADC file') + out_evec = File(desc='output selected eigenvector(s) file') + out_eval = File(desc='output selected eigenvalue(s) file') + + +class TensorMetrics(CommandLine): + """ + Compute metrics from tensors + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> comp = mrt.TensorMetrics() + >>> comp.inputs.in_file = 'dti.mif' + >>> comp.inputs.out_fa = 'fa.mif' + >>> comp.cmdline # doctest: +ELLIPSIS + 'tensor2metric -num 1 -fa fa.mif dti.mif' + >>> comp.run() # doctest: +SKIP + """ + + _cmd = 'tensor2metric' + input_spec = TensorMetricsInputSpec + output_spec = TensorMetricsOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + + for k in list(outputs.keys()): + if isdefined(getattr(self.inputs, k)): + outputs[k] = op.abspath(getattr(self.inputs, k)) + + return outputs + + +class ComputeTDIInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input tractography') + out_file = File( + 'tdi.mif', + argstr='%s', + usedefault=True, + position=-1, + desc='output TDI file') + reference = File( + exists=True, + argstr='-template %s', + desc='a reference ' + 'image to be used as template') + vox_size = traits.List( + traits.Int, argstr='-vox %s', sep=',', desc='voxel dimensions') + data_type = traits.Enum( + 'float', + 'unsigned int', + argstr='-datatype %s', + desc='specify output image data type') + use_dec = traits.Bool(argstr='-dec', desc='perform mapping in DEC space') + dixel = File( + argstr='-dixel %s', + desc='map streamlines to ' + 'dixels within each voxel.
Directions are stored as ' + 'azimuth elevation pairs.') + max_tod = traits.Int( + argstr='-tod %d', + desc='generate a Track Orientation ' + 'Distribution (TOD) in each voxel.') + + contrast = traits.Enum( + 'tdi', + 'length', + 'invlength', + 'scalar_map', + 'scalar_map_count', + 'fod_amp', + 'curvature', + argstr='-contrast %s', + desc='define the desired ' + 'form of contrast for the output image') + in_map = File( + exists=True, + argstr='-image %s', + desc='provide the ' + 'scalar image map for generating images with ' + '\'scalar_map\' contrasts, or the SHs image for fod_amp') + + stat_vox = traits.Enum( + 'sum', + 'min', + 'mean', + 'max', + argstr='-stat_vox %s', + desc='define the statistic for choosing the final ' + 'voxel intensities for a given contrast') + stat_tck = traits.Enum( + 'mean', + 'sum', + 'min', + 'max', + 'median', + 'mean_nonzero', + 'gaussian', + 'ends_min', + 'ends_mean', + 'ends_max', + 'ends_prod', + argstr='-stat_tck %s', + desc='define the statistic for choosing ' + 'the contribution to be made by each streamline as a function of' + ' the samples taken along their lengths.') + + fwhm_tck = traits.Float( + argstr='-fwhm_tck %f', + desc='define the FWHM (in mm) of the Gaussian smoothing kernel' + ' applied along the track when the gaussian statistic is used' + ' for stat_tck') + + map_zero = traits.Bool( + argstr='-map_zero', + desc='if a streamline has zero contribution based ' + 'on the contrast & statistic, typically it is not mapped; use this ' + 'option to still contribute to the map even if this is the case ' + '(these non-contributing voxels can then influence the mean value in ' + 'each voxel of the map)') + + upsample = traits.Int( + argstr='-upsample %d', + desc='upsample the tracks by' + ' some ratio using Hermite interpolation before ' + 'mapping') + + precise = traits.Bool( + argstr='-precise', + desc='use a more precise streamline mapping ' + 'strategy, that accurately quantifies the length through each voxel ' + '(these lengths are then taken into account during TWI calculation)') + ends_only = traits.Bool( + argstr='-ends_only', + desc='only map the streamline' + ' endpoints to the image') + + tck_weights = File( + exists=True, + argstr='-tck_weights_in %s', + desc='specify' + ' a text scalar file containing the streamline weights') + nthreads = traits.Int( + argstr='-nthreads %d', + desc='number of threads. if zero, the number' + ' of available cpus will be used', + nohash=True) + + +class ComputeTDIOutputSpec(TraitedSpec): + out_file = File(desc='output TDI file') + + +class ComputeTDI(MRTrix3Base): + """ + Use track data as a form of contrast for producing a high-resolution + image. + + .. admonition:: References + + * For TDI or DEC TDI: Calamante, F.; Tournier, J.-D.; Jackson, G. D. & + Connelly, A. Track-density imaging (TDI): Super-resolution white + matter imaging using whole-brain track-density mapping. NeuroImage, + 2010, 53, 1233-1243 + + * If using -contrast length and -stat_vox mean: Pannek, K.; Mathias, + J. L.; Bigler, E. D.; Brown, G.; Taylor, J. D. & Rose, S. E. The + average pathlength map: A diffusion MRI tractography-derived index + for studying brain pathology. NeuroImage, 2011, 55, 133-141 + + * If using -dixel option with TDI contrast only: Smith, R.E., Tournier, + J-D., Calamante, F., Connelly, A. A novel paradigm for automated + segmentation of very large whole-brain probabilistic tractography + data sets. In proc.
ISMRM, 2011, 19, 673 + + * If using -dixel option with any other contrast: Pannek, K., Raffelt, + D., Salvado, O., Rose, S. Incorporating directional information in + diffusion tractography derived maps: angular track imaging (ATI). + In Proc. ISMRM, 2012, 20, 1912 + + * If using -tod option: Dhollander, T., Emsell, L., Van Hecke, W., Maes, + F., Sunaert, S., Suetens, P. Track Orientation Density Imaging (TODI) + and Track Orientation Distribution (TOD) based tractography. + NeuroImage, 2014, 94, 312-336 + + * If using other contrasts / statistics: Calamante, F.; Tournier, J.-D.; + Smith, R. E. & Connelly, A. A generalised framework for + super-resolution track-weighted imaging. NeuroImage, 2012, 59, + 2494-2503 + + * If using -precise mapping option: Smith, R. E.; Tournier, J.-D.; + Calamante, F. & Connelly, A. SIFT: Spherical-deconvolution informed + filtering of tractograms. NeuroImage, 2013, 67, 298-312 (Appendix 3) + + + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> tdi = mrt.ComputeTDI() + >>> tdi.inputs.in_file = 'dti.mif' + >>> tdi.cmdline # doctest: +ELLIPSIS + 'tckmap dti.mif tdi.mif' + >>> tdi.run() # doctest: +SKIP + """ + + _cmd = 'tckmap' + input_spec = ComputeTDIInputSpec + output_spec = ComputeTDIOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class TCK2VTKInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input tractography') + out_file = File( + 'tracks.vtk', + argstr='%s', + usedefault=True, + position=-1, + desc='output VTK file') + reference = File( + exists=True, + argstr='-image %s', + desc='if specified, the properties of' + ' this image will be used to convert track point positions from real ' + '(scanner) coordinates into image coordinates (in mm).') + voxel = File( + exists=True, + argstr='-voxel %s', + desc='if specified, the properties of' + ' this image will be used to convert track point positions from real ' + '(scanner) coordinates into image coordinates.') + + nthreads = traits.Int( + argstr='-nthreads %d', + desc='number of threads.
if zero, the number' + ' of available cpus will be used', + nohash=True) + + +class TCK2VTKOutputSpec(TraitedSpec): + out_file = File(desc='output VTK file') + + +class TCK2VTK(MRTrix3Base): + """ + Convert a track file to VTK format. Note that the coordinates in the + output file are in XYZ (scanner) space, not in reference image space + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> vtk = mrt.TCK2VTK() + >>> vtk.inputs.in_file = 'tracks.tck' + >>> vtk.inputs.reference = 'b0.nii' + >>> vtk.cmdline # doctest: +ELLIPSIS + 'tck2vtk -image b0.nii tracks.tck tracks.vtk' + >>> vtk.run() # doctest: +SKIP + """ + + _cmd = 'tck2vtk' + input_spec = TCK2VTKInputSpec + output_spec = TCK2VTKOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class DWIExtractInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input image') + out_file = File( + argstr='%s', mandatory=True, position=-1, desc='output image') + bzero = traits.Bool(argstr='-bzero', desc='extract b=0 volumes') + nobzero = traits.Bool(argstr='-no_bzero', desc='extract non b=0 volumes') + singleshell = traits.Bool( + argstr='-singleshell', desc='extract volumes with a specific shell') + shell = traits.List( + traits.Float, + sep=',', + argstr='-shell %s', + desc='specify one or more gradient shells') + + +class DWIExtractOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class DWIExtract(MRTrix3Base): + """ + Extract diffusion-weighted volumes, b=0 volumes, or certain shells from a + DWI dataset + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> dwiextract = mrt.DWIExtract() + >>> dwiextract.inputs.in_file = 'dwi.mif' + >>> dwiextract.inputs.bzero = True + >>> dwiextract.inputs.out_file = 'b0vols.mif' + >>> dwiextract.inputs.grad_fsl = ('bvecs', 'bvals') + >>> dwiextract.cmdline # doctest: +ELLIPSIS + 'dwiextract -bzero -fslgrad bvecs bvals dwi.mif b0vols.mif' + >>> dwiextract.run() # doctest: +SKIP + """ + + _cmd = 'dwiextract' + input_spec = DWIExtractInputSpec + output_spec = DWIExtractOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class MRConvertInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-2, + desc='input image') + out_file = File( + 'dwi.mif', + argstr='%s', + mandatory=True, + position=-1, + usedefault=True, + desc='output image') + coord = traits.List( + traits.Float, + sep=' ', + argstr='-coord %s', + desc='extract data at the specified coordinates') + vox = traits.List( + traits.Float, + sep=',', + argstr='-vox %s', + desc='change the voxel dimensions') + axes = traits.List( + traits.Int, + sep=',', + argstr='-axes %s', + desc='specify the axes that will be used') + scaling = traits.List( + traits.Float, + sep=',', + argstr='-scaling %s', + desc='specify the data scaling parameter') + + +class MRConvertOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class MRConvert(MRTrix3Base): + """ + Perform conversion between different file types and optionally extract a + subset of the input image + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> mrconvert = mrt.MRConvert() + >>> mrconvert.inputs.in_file = 'dwi.nii.gz' + >>> mrconvert.inputs.grad_fsl = ('bvecs', 'bvals') + >>> mrconvert.cmdline # doctest:
+ELLIPSIS + 'mrconvert -fslgrad bvecs bvals dwi.nii.gz dwi.mif' + >>> mrconvert.run() # doctest: +SKIP + """ + + _cmd = 'mrconvert' + input_spec = MRConvertInputSpec + output_spec = MRConvertOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs + + +class MRMathInputSpec(MRTrix3BaseInputSpec): + in_file = File( + exists=True, + argstr='%s', + mandatory=True, + position=-3, + desc='input image') + out_file = File( + argstr='%s', mandatory=True, position=-1, desc='output image') + operation = traits.Enum( + 'mean', + 'median', + 'sum', + 'product', + 'rms', + 'norm', + 'var', + 'std', + 'min', + 'max', + 'absmax', + 'magmax', + argstr='%s', + position=-2, + mandatory=True, + desc='operation to compute along a specified axis') + axis = traits.Int( + 0, + argstr='-axis %d', + desc='specified axis to perform the operation along') + + +class MRMathOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='output image') + + +class MRMath(MRTrix3Base): + """ + Compute summary statistic on image intensities + along a specified axis of a single image + + Example + ------- + + >>> import nipype.interfaces.mrtrix3 as mrt + >>> mrmath = mrt.MRMath() + >>> mrmath.inputs.in_file = 'dwi.mif' + >>> mrmath.inputs.operation = 'mean' + >>> mrmath.inputs.axis = 3 + >>> mrmath.inputs.out_file = 'dwi_mean.mif' + >>> mrmath.inputs.grad_fsl = ('bvecs', 'bvals') + >>> mrmath.cmdline # doctest: +ELLIPSIS + 'mrmath -axis 3 -fslgrad bvecs bvals dwi.mif mean dwi_mean.mif' + >>> mrmath.run() # doctest: +SKIP + """ + + _cmd = 'mrmath' + input_spec = MRMathInputSpec + output_spec = MRMathOutputSpec + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = op.abspath(self.inputs.out_file) + return outputs diff --git a/nipype/interfaces/niftyfit/__init__.py b/nipype/interfaces/niftyfit/__init__.py new file mode 100644 index 0000000000..b9d4725496 --- /dev/null +++ b/nipype/interfaces/niftyfit/__init__.py @@ -0,0 +1,12 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The niftyfit module provides classes for interfacing with the NiftyFit +command line tools. + +Top-level namespace for niftyfit. +""" + +from .asl import FitAsl +from .dwi import FitDwi, DwiTool +from .qt1 import FitQt1 diff --git a/nipype/interfaces/niftyfit/asl.py b/nipype/interfaces/niftyfit/asl.py new file mode 100644 index 0000000000..c4920dc195 --- /dev/null +++ b/nipype/interfaces/niftyfit/asl.py @@ -0,0 +1,164 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The ASL module of niftyfit, which wraps the fitting methods in NiftyFit. +""" + +from ..base import TraitedSpec, traits, CommandLineInputSpec +from .base import NiftyFitCommand +from ..niftyreg.base import get_custom_path + + +class FitAslInputSpec(CommandLineInputSpec): +    """ Input Spec for FitAsl. """ + desc = 'Filename of the 4D ASL (control/label) source image (mandatory).' + source_file = traits.File( + position=1, + exists=True, + argstr='-source %s', + mandatory=True, + desc=desc) + pasl = traits.Bool(desc='Fit PASL ASL data [default]', argstr='-pasl') + pcasl = traits.Bool(desc='Fit PCASL ASL data', argstr='-pcasl') + + # *** Output options: + desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).'
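+ # NOTE: the output traits below rely on nipype's name_source/ + # name_template mechanism: when a filename is not set explicitly, it + # is derived from source_file (e.g. asl.nii.gz yields asl_cbf.nii.gz).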
+ cbf_file = traits.File( + name_source=['source_file'], + name_template='%s_cbf.nii.gz', + argstr='-cbf %s', + desc=desc) + error_file = traits.File( + name_source=['source_file'], + name_template='%s_error.nii.gz', + argstr='-error %s', + desc='Filename of the CBF error map.') + syn_file = traits.File( + name_source=['source_file'], + name_template='%s_syn.nii.gz', + argstr='-syn %s', + desc='Filename of the synthetic ASL data.') + + # *** Input options (see also fit_qt1 for generic T1 fitting): + desc = 'Filename of the estimated input T1 map (in ms).' + t1map = traits.File(exists=True, argstr='-t1map %s', desc=desc) + desc = 'Filename of the estimated input M0 map.' + m0map = traits.File(exists=True, argstr='-m0map %s', desc=desc) + desc = 'Filename of the estimated input M0 map error.' + m0mape = traits.File(exists=True, argstr='-m0mape %s', desc=desc) + desc = 'Filename of a [1,2,5]s Inversion Recovery volume (T1/M0 fitting \ +carried out internally).' + + ir_volume = traits.File(exists=True, argstr='-IRvolume %s', desc=desc) + desc = 'Output of [1,2,5]s Inversion Recovery fitting.' + ir_output = traits.File(exists=True, argstr='-IRoutput %s', desc=desc) + + # *** Experimental options (Choose those suitable for the model!): + mask = traits.File( + position=2, + exists=True, + desc='Filename of image mask.', + argstr='-mask %s') + t1_art_cmp = traits.Float( + desc='T1 of arterial component [1650ms].', argstr='-T1a %f') + desc = 'Single plasma/tissue partition coefficient [0.9ml/g].' + plasma_coeff = traits.Float(desc=desc, argstr='-L %f') + desc = 'Labelling efficiency [0.99 (pasl), 0.85 (pcasl)], ensure any \ +background suppression pulses are included in -eff' + + eff = traits.Float(desc=desc, argstr='-eff %f') + desc = 'Outlier rejection for multi CL volumes (enter z-score threshold \ +(e.g. 2.5)) [off].' + + out = traits.Float(desc=desc, argstr='-out %f') + + # *** PCASL options (Choose those suitable for the model!): + pld = traits.Float(desc='Post Labelling Delay [2000ms].', argstr='-PLD %f') + ldd = traits.Float(desc='Labelling Duration [1800ms].', argstr='-LDD %f') + desc = 'Difference in labelling delay per slice [0.0 ms/slice].' + dpld = traits.Float(desc=desc, argstr='-dPLD %f') + + # *** PASL options (Choose those suitable for the model!): + t_inv1 = traits.Float( + desc='Saturation pulse time [800ms].', argstr='-Tinv1 %f') + t_inv2 = traits.Float(desc='Inversion time [2000ms].', argstr='-Tinv2 %f') + desc = 'Difference in inversion time per slice [0ms/slice].' + dt_inv2 = traits.Float(desc=desc, argstr='-dTinv2 %f') + + # *** Other experimental assumptions: + + # Not programmed yet + # desc = 'Slope and intercept for Arterial Transit Time.' + # ATT = traits.Float(desc=desc, argstr='-ATT %f') + + gm_t1 = traits.Float(desc='T1 of GM [1150ms].', argstr='-gmT1 %f') + gm_plasma = traits.Float( + desc='Plasma/GM water partition [0.95ml/g].', argstr='-gmL %f') + gm_ttt = traits.Float(desc='Time to GM [ATT+0ms].', argstr='-gmTTT %f') + wm_t1 = traits.Float(desc='T1 of WM [800ms].', argstr='-wmT1 %f') + wm_plasma = traits.Float( + desc='Plasma/WM water partition [0.82ml/g].', argstr='-wmL %f') + wm_ttt = traits.Float(desc='Time to WM [ATT+0ms].', argstr='-wmTTT %f') + + # *** Segmentation options: + desc = 'Filename of the 4D segmentation (in ASL space) for L/T1 \ +estimation and PV correction {WM,GM,CSF}.' + + seg = traits.File(exists=True, argstr='-seg %s', desc=desc) + desc = 'Use sigmoid to estimate L from T1: L(T1|gmL,wmL) [Off].'
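+ # The remaining options in this group tune the partial volume (PV) + # correction and per-tissue modelling applied when a segmentation + # image is supplied via -seg above.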
+ sig = traits.Bool(desc=desc, argstr='-sig') + desc = 'Simple PV correction (CBF=vg*CBFg + vw*CBFw, with CBFw=f*CBFg) \ +[0.25].' + + pv0 = traits.Int(desc=desc, argstr='-pv0 %d') + pv2 = traits.Int(desc='In plane PV kernel size [3x3].', argstr='-pv2 %d') + pv3 = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + desc='3D kernel size [3x3x1].', + argstr='-pv3 %d %d %d') + desc = 'Multiply CBF by this value (e.g. if CL are mislabelled use -1.0).' + mul = traits.Float(desc=desc, argstr='-mul %f') + mulgm = traits.Bool( + desc='Multiply CBF by segmentation [Off].', argstr='-sig') + desc = 'Set PV threshold for switching off LSQR [0.05].' + pv_threshold = traits.Bool(desc=desc, argstr='-pvthreshold') + segstyle = traits.Bool( + desc='Set CBF as [gm,wm] not [wm,gm].', argstr='-segstyle') + + +class FitAslOutputSpec(TraitedSpec): +    """ Output Spec for FitAsl. """ + desc = 'Filename of the Cerebral Blood Flow map (in ml/100g/min).' + cbf_file = traits.File(exists=True, desc=desc) + desc = 'Filename of the CBF error map.' + error_file = traits.File(exists=True, desc=desc) + desc = 'Filename of the synthetic ASL data.' + syn_file = traits.File(exists=True, desc=desc) + + +class FitAsl(NiftyFitCommand): + """Interface for executable fit_asl from Niftyfit platform. + + Use NiftyFit to perform ASL fitting. + + ASL fitting routines (following EU Cost Action White Paper recommendations) + Fits Cerebral Blood Flow maps in the first instance. + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyfit + >>> node = niftyfit.FitAsl() + >>> node.inputs.source_file = 'asl.nii.gz' + >>> node.cmdline + 'fit_asl -source asl.nii.gz -cbf asl_cbf.nii.gz -error asl_error.nii.gz \ +-syn asl_syn.nii.gz' + + """ + _cmd = get_custom_path('fit_asl', env_dir='NIFTYFITDIR') + input_spec = FitAslInputSpec + output_spec = FitAslOutputSpec + _suffix = '_fit_asl' diff --git a/nipype/interfaces/niftyfit/base.py b/nipype/interfaces/niftyfit/base.py new file mode 100644 index 0000000000..87c1d63825 --- /dev/null +++ b/nipype/interfaces/niftyfit/base.py @@ -0,0 +1,48 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The niftyfit module provides an interface to the NiftyFit software +developed at TIG, UCL. + +Software available at: +https://cmiclab.cs.ucl.ac.uk/CMIC/NiftyFit-Release + +Version used for this version of the interfaces (git): + +commit c6232e4c4223c3d19f7a32906409da5af36299a2 +Date: Fri Jan 6 13:34:02 2017 +0000 + +Examples +-------- +See the docstrings of the individual classes for examples. +""" + +import os + +from ..base import CommandLine +from ...utils.filemanip import split_filename + + +class NiftyFitCommand(CommandLine): + """ + Base support interface for NiftyFit commands. + """ + _suffix = '_nf' + + def __init__(self, **inputs): + """ Init method calling super. No version to be checked.""" + super(NiftyFitCommand, self).__init__(**inputs) + + def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!'
+ raise ValueError(msg) + _, final_bn, final_ext = split_filename(basename) + if out_dir is None: + out_dir = os.getcwd() + if ext is not None: + final_ext = ext + if suffix is not None: + final_bn = ''.join((final_bn, suffix)) + return os.path.abspath(os.path.join(out_dir, final_bn + final_ext)) diff --git a/nipype/interfaces/niftyfit/dwi.py b/nipype/interfaces/niftyfit/dwi.py new file mode 100644 index 0000000000..23b73aea90 --- /dev/null +++ b/nipype/interfaces/niftyfit/dwi.py @@ -0,0 +1,497 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The dwi module of niftyfit, which wraps the fitting methods in NiftyFit. +""" + +from ..base import TraitedSpec, traits, isdefined, CommandLineInputSpec +from .base import NiftyFitCommand +from ..niftyreg.base import get_custom_path + + +class FitDwiInputSpec(CommandLineInputSpec): + """ Input Spec for FitDwi. """ + # Inputs options + source_file = traits.File( + position=1, + exists=True, + argstr='-source %s', + mandatory=True, + desc='The source image containing the dwi data.') + desc = 'The file containing the bvalues of the source DWI.' + bval_file = traits.File( + position=2, exists=True, argstr='-bval %s', mandatory=True, desc=desc) + desc = 'The file containing the bvectors of the source DWI.' + bvec_file = traits.File( + position=3, exists=True, argstr='-bvec %s', mandatory=True, desc=desc) + te_file = traits.File( + exists=True, + argstr='-TE %s', + desc='Filename of TEs (ms).', + xor=['te_file']) + te_value = traits.File( + exists=True, + argstr='-TE %s', + desc='Value of TEs (ms).', + xor=['te_file']) + mask_file = traits.File( + exists=True, desc='The image mask', argstr='-mask %s') + desc = 'Filename of parameter priors for -ball and -nod.' + prior_file = traits.File(exists=True, argstr='-prior %s', desc=desc) + desc = 'Rotate the output tensors according to the q/s form of the image \ +(resulting tensors will be in mm coordinates, default: 0).' 
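+ # -rotsform takes a numeric flag (0 by default, per the desc above), + # which is why it is modelled as traits.Int rather than traits.Bool.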
+ + rot_sform_flag = traits.Int(desc=desc, argstr='-rotsform %d') + + # generic output options: + error_file = traits.File( + name_source=['source_file'], + name_template='%s_error.nii.gz', + desc='Filename of parameter error maps.', + argstr='-error %s') + res_file = traits.File( + name_source=['source_file'], + name_template='%s_resmap.nii.gz', + desc='Filename of model residual map.', + argstr='-res %s') + syn_file = traits.File( + name_source=['source_file'], + name_template='%s_syn.nii.gz', + desc='Filename of synthetic image.', + argstr='-syn %s') + nodiff_file = traits.File( + name_source=['source_file'], + name_template='%s_no_diff.nii.gz', + desc='Filename of average no diffusion image.', + argstr='-nodiff %s') + + # Output options, with templated output names based on the source image + mcmap_file = traits.File( + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + desc='Filename of multi-compartment model parameter map ' + '(-ivim,-ball,-nod)', + argstr='-mcmap %s', + requires=['nodv_flag']) + + # Model Specific Output options: + mdmap_file = traits.File( + name_source=['source_file'], + name_template='%s_mdmap.nii.gz', + desc='Filename of MD map/ADC', + argstr='-mdmap %s') + famap_file = traits.File( + name_source=['source_file'], + name_template='%s_famap.nii.gz', + desc='Filename of FA map', + argstr='-famap %s') + v1map_file = traits.File( + name_source=['source_file'], + name_template='%s_v1map.nii.gz', + desc='Filename of PDD map [x,y,z]', + argstr='-v1map %s') + rgbmap_file = traits.File( + name_source=['source_file'], + name_template='%s_rgbmap.nii.gz', + desc='Filename of colour-coded FA map', + argstr='-rgbmap %s', + requires=['dti_flag']) + + desc = 'Use lower triangular (tenmap2) or diagonal, off-diagonal tensor \ +format' + + ten_type = traits.Enum( + 'lower-tri', 'diag-off-diag', desc=desc, usedefault=True) + + tenmap_file = traits.File( + name_source=['source_file'], + name_template='%s_tenmap.nii.gz', + desc='Filename of tensor map [diag,offdiag].', + argstr='-tenmap %s', + requires=['dti_flag']) + tenmap2_file = traits.File( + name_source=['source_file'], + name_template='%s_tenmap2.nii.gz', + desc='Filename of tensor map [lower tri]', + argstr='-tenmap2 %s', + requires=['dti_flag']) + + # Methods options + desc = 'Fit single exponential to non-directional data [default with \ +no b-vectors]' + + mono_flag = traits.Bool( + desc=desc, + argstr='-mono', + position=4, + xor=[ + 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ]) + ivim_flag = traits.Bool( + desc='Fit IVIM model to non-directional data.', + argstr='-ivim', + position=4, + xor=[ + 'mono_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ]) + desc = 'Fit the tensor model [default with b-vectors].' + dti_flag = traits.Bool( + desc=desc, + argstr='-dti', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'ball_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ]) + ball_flag = traits.Bool( + desc='Fit the ball and stick model.', + argstr='-ball', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ]) + desc = 'Fit the ball and stick model with optimised PDD.' 
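+ # As with the flags above, the remaining model selectors are mutually + # exclusive: the xor lists guarantee that at most one of -mono/-ivim/ + # -dti/-ball/-ballv/-nod/-nodv is passed to fit_dwi.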
+ ballv_flag = traits.Bool( + desc=desc, + argstr='-ballv', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'nod_flag', + 'nodv_flag' + ]) + nod_flag = traits.Bool( + desc='Fit the NODDI model', + argstr='-nod', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', + 'nodv_flag' + ]) + nodv_flag = traits.Bool( + desc='Fit the NODDI model with optimised PDD', + argstr='-nodv', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', + 'nod_flag' + ]) + + # Experimental options + desc = 'Maximum number of non-linear LSQR iterations [100x2 passes].' + maxit_val = traits.Int(desc=desc, argstr='-maxit %d', requires=['gn_flag']) + desc = 'LM parameters (initial value, decrease rate) [100,1.2].' + lm_vals = traits.Tuple( + traits.Float, + traits.Float, + argstr='-lm %f %f', + requires=['gn_flag'], + desc=desc) + desc = 'Use Gauss-Newton algorithm [Levenberg-Marquardt].' + gn_flag = traits.Bool(desc=desc, argstr='-gn', xor=['wls_flag']) + desc = 'Use Variational Bayes fitting with known prior (currently \ +identity covariance...).' + + vb_flag = traits.Bool(desc=desc, argstr='-vb') + cov_file = traits.File( + exists=True, + desc='Filename of the nc*nc covariance matrix [I]', + argstr='-cov %s') + desc = 'Use weighted least squares fitting.' + wls_flag = traits.Bool(desc=desc, argstr='-wls', xor=['gn_flag']) + desc = 'Use location-weighted least squares for DTI fitting [3x3 Gaussian]' + swls_val = traits.Float(desc=desc, argstr='-swls %f') + slice_no = traits.Int( + desc='Fit to single slice number.', argstr='-slice %d') + voxel = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + desc='Fit to single voxel only.', + argstr='-voxel %d %d %d') + diso_val = traits.Float( + desc='Isotropic diffusivity for -nod [3e-3]', argstr='-diso %f') + dpr_val = traits.Float( + desc='Parallel diffusivity for -nod [1.7e-3].', argstr='-dpr %f') + wm_t2_val = traits.Float( + desc='White matter T2 value [80ms].', argstr='-wmT2 %f') + csf_t2_val = traits.Float(desc='CSF T2 value [400ms].', argstr='-csfT2 %f') + desc = 'Threshold for perfusion/diffusion effects [100].' + perf_thr = traits.Float(desc=desc, argstr='-perfthreshold %f') + + # MCMC options: + mcout = traits.File( + name_source=['source_file'], + name_template='%s_mcout.txt', + desc='Filename of mc samples (ascii text file)', + argstr='-mcout %s') + mcsamples = traits.Int( + desc='Number of samples to keep [100].', argstr='-mcsamples %d') + mcmaxit = traits.Int( + desc='Number of iterations to run [10,000].', argstr='-mcmaxit %d') + acceptance = traits.Float( + desc='Fraction of iterations to accept [0.23].', + argstr='-accpetance %f') + + +class FitDwiOutputSpec(TraitedSpec): +    """ Output Spec for FitDwi.
""" + + error_file = traits.File(desc='Filename of parameter error maps') + res_file = traits.File(desc='Filename of model residual map') + syn_file = traits.File(desc='Filename of synthetic image') + nodiff_file = traits.File(desc='Filename of average no diffusion image.') + mdmap_file = traits.File(desc='Filename of MD map/ADC') + famap_file = traits.File(desc='Filename of FA map') + v1map_file = traits.File(desc='Filename of PDD map [x,y,z]') + rgbmap_file = traits.File(desc='Filename of colour FA map') + tenmap_file = traits.File(desc='Filename of tensor map') + tenmap2_file = traits.File(desc='Filename of tensor map [lower tri]') + + mcmap_file = traits.File(desc='Filename of multi-compartment model ' + 'parameter map (-ivim,-ball,-nod).') + mcout = traits.File(desc='Filename of mc samples (ascii text file)') + + +class FitDwi(NiftyFitCommand): + """Interface for executable fit_dwi from Niftyfit platform. + + Use NiftyFit to perform diffusion model fitting. + + Diffusion-weighted MR Fitting. + Fits DWI parameter maps to multi-shell, multi-directional data. + + `Source code `_ + + Examples + -------- + + >>> from nipype.interfaces import niftyfit + >>> fit_dwi = niftyfit.FitDwi(dti_flag=True) + >>> fit_dwi.inputs.source_file = 'dwi.nii.gz' + >>> fit_dwi.inputs.bvec_file = 'bvecs' + >>> fit_dwi.inputs.bval_file = 'bvals' + >>> fit_dwi.inputs.rgbmap_file = 'rgb.nii.gz' + >>> fit_dwi.cmdline + 'fit_dwi -source dwi.nii.gz -bval bvals -bvec bvecs -dti \ +-error dwi_error.nii.gz -famap dwi_famap.nii.gz -mcout dwi_mcout.txt \ +-mdmap dwi_mdmap.nii.gz -nodiff dwi_no_diff.nii.gz -res dwi_resmap.nii.gz \ +-rgbmap rgb.nii.gz -syn dwi_syn.nii.gz -tenmap2 dwi_tenmap2.nii.gz \ +-v1map dwi_v1map.nii.gz' + + """ + _cmd = get_custom_path('fit_dwi', env_dir='NIFTYFITDIR') + input_spec = FitDwiInputSpec + output_spec = FitDwiOutputSpec + _suffix = '_fit_dwi' + + def _format_arg(self, name, trait_spec, value): + if name == 'tenmap_file' and self.inputs.ten_type != 'diag-off-diag': + return '' + if name == 'tenmap2_file' and self.inputs.ten_type != 'lower-tri': + return '' + return super(FitDwi, self)._format_arg(name, trait_spec, value) + + +class DwiToolInputSpec(CommandLineInputSpec): + """ Input Spec for DwiTool. """ + desc = 'The source image containing the fitted model.' + source_file = traits.File( + position=1, + exists=True, + desc=desc, + argstr='-source %s', + mandatory=True) + desc = 'The file containing the bvalues of the source DWI.' + bval_file = traits.File( + position=2, exists=True, desc=desc, argstr='-bval %s', mandatory=True) + desc = 'The file containing the bvectors of the source DWI.' + bvec_file = traits.File( + position=3, exists=True, desc=desc, argstr='-bvec %s') + b0_file = traits.File( + position=4, + exists=True, + desc='The B0 image corresponding to the source DWI', + argstr='-b0 %s') + mask_file = traits.File( + position=5, exists=True, desc='The image mask', argstr='-mask %s') + + # Output options, with templated output names based on the source image + desc = 'Filename of multi-compartment model parameter map \ +(-ivim,-ball,-nod)' + + mcmap_file = traits.File( + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + desc=desc, + argstr='-mcmap %s') + desc = 'Filename of synthetic image. Requires: bvec_file/b0_file.' 
+ syn_file = traits.File( + name_source=['source_file'], + name_template='%s_syn.nii.gz', + desc=desc, + argstr='-syn %s', + requires=['bvec_file', 'b0_file']) + mdmap_file = traits.File( + name_source=['source_file'], + name_template='%s_mdmap.nii.gz', + desc='Filename of MD map/ADC', + argstr='-mdmap %s') + famap_file = traits.File( + name_source=['source_file'], + name_template='%s_famap.nii.gz', + desc='Filename of FA map', + argstr='-famap %s') + v1map_file = traits.File( + name_source=['source_file'], + name_template='%s_v1map.nii.gz', + desc='Filename of PDD map [x,y,z]', + argstr='-v1map %s') + rgbmap_file = traits.File( + name_source=['source_file'], + name_template='%s_rgbmap.nii.gz', + desc='Filename of colour FA map.', + argstr='-rgbmap %s') + logdti_file = traits.File( + name_source=['source_file'], + name_template='%s_logdti2.nii.gz', + desc='Filename of output logdti map.', + argstr='-logdti2 %s') + + # Methods options + desc = 'Input is a single exponential to non-directional data \ +[default with no b-vectors]' + + mono_flag = traits.Bool( + desc=desc, + position=6, + argstr='-mono', + xor=[ + 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ]) + desc = 'Input is an IVIM model to non-directional data.' + ivim_flag = traits.Bool( + desc=desc, + position=6, + argstr='-ivim', + xor=[ + 'mono_flag', 'dti_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ]) + dti_flag = traits.Bool( + desc='Input is a tensor model diag/off-diag.', + position=6, + argstr='-dti', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag2', 'ball_flag', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ]) + dti_flag2 = traits.Bool( + desc='Input is a tensor model lower triangular', + position=6, + argstr='-dti2', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ]) + ball_flag = traits.Bool( + desc='Input is a ball and stick model.', + position=6, + argstr='-ball', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ]) + desc = 'Input is a ball and stick model with optimised PDD.' + ballv_flag = traits.Bool( + desc=desc, + position=6, + argstr='-ballv', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'nod_flag', 'nodv_flag' + ]) + nod_flag = traits.Bool( + desc='Input is a NODDI model', + position=6, + argstr='-nod', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nodv_flag' + ]) + nodv_flag = traits.Bool( + desc='Input is a NODDI model with optimised PDD', + position=6, + argstr='-nodv', + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nod_flag' + ]) + + # Experimental options + diso_val = traits.Float( + desc='Isotropic diffusivity for -nod [3e-3]', argstr='-diso %f') + dpr_val = traits.Float( + desc='Parallel diffusivity for -nod [1.7e-3].', argstr='-dpr %f') + + +class DwiToolOutputSpec(TraitedSpec): +    """ Output Spec for DwiTool.
""" + desc = 'Filename of multi-compartment model parameter map \ +(-ivim,-ball,-nod)' + + mcmap_file = traits.File(desc=desc) + syn_file = traits.File(desc='Filename of synthetic image') + mdmap_file = traits.File(desc='Filename of MD map/ADC') + famap_file = traits.File(desc='Filename of FA map') + v1map_file = traits.File(desc='Filename of PDD map [x,y,z]') + rgbmap_file = traits.File(desc='Filename of colour FA map') + logdti_file = traits.File(desc='Filename of output logdti map') + + +class DwiTool(NiftyFitCommand): + """Interface for executable dwi_tool from Niftyfit platform. + + Use DwiTool. + + Diffusion-Weighted MR Prediction. + Predicts DWI from previously fitted models and calculates model derived + maps. + + `Source code `_ + + Examples + -------- + + >>> from nipype.interfaces import niftyfit + >>> dwi_tool = niftyfit.DwiTool(dti_flag=True) + >>> dwi_tool.inputs.source_file = 'dwi.nii.gz' + >>> dwi_tool.inputs.bvec_file = 'bvecs' + >>> dwi_tool.inputs.bval_file = 'bvals' + >>> dwi_tool.inputs.mask_file = 'mask.nii.gz' + >>> dwi_tool.inputs.b0_file = 'b0.nii.gz' + >>> dwi_tool.inputs.rgbmap_file = 'rgb_map.nii.gz' + >>> dwi_tool.cmdline + 'dwi_tool -source dwi.nii.gz -bval bvals -bvec bvecs -b0 b0.nii.gz \ +-mask mask.nii.gz -dti -famap dwi_famap.nii.gz -logdti2 dwi_logdti2.nii.gz \ +-mcmap dwi_mcmap.nii.gz -mdmap dwi_mdmap.nii.gz -rgbmap rgb_map.nii.gz \ +-syn dwi_syn.nii.gz -v1map dwi_v1map.nii.gz' + + """ + _cmd = get_custom_path('dwi_tool', env_dir='NIFTYFITDIR') + input_spec = DwiToolInputSpec + output_spec = DwiToolOutputSpec + _suffix = '_dwi_tool' + + def _format_arg(self, name, trait_spec, value): + if name == 'syn_file': + if not isdefined(self.inputs.bvec_file) or \ + not isdefined(self.inputs.b0_file): + return '' + if name in ['logdti_file', 'rgbmap_file']: + if not isdefined(self.inputs.dti_flag) and \ + not isdefined(self.inputs.dti_flag2): + return '' + return super(DwiTool, self)._format_arg(name, trait_spec, value) diff --git a/nipype/interfaces/niftyfit/qt1.py b/nipype/interfaces/niftyfit/qt1.py new file mode 100644 index 0000000000..ceefbae281 --- /dev/null +++ b/nipype/interfaces/niftyfit/qt1.py @@ -0,0 +1,186 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The QT1 module of niftyfit, which wraps the Multi-Echo T1 fitting methods +in NiftyFit. +""" + +from ..base import TraitedSpec, File, traits, CommandLineInputSpec +from .base import NiftyFitCommand +from ..niftyreg.base import get_custom_path + + +class FitQt1InputSpec(CommandLineInputSpec): + """ Input Spec for FitQt1. """ + desc = 'Filename of the 4D Multi-Echo T1 source image.' + source_file = File( + position=1, + exists=True, + desc=desc, + argstr='-source %s', + mandatory=True) + + # Output options: + t1map_file = File( + name_source=['source_file'], + name_template='%s_t1map.nii.gz', + argstr='-t1map %s', + desc='Filename of the estimated output T1 map (in ms).') + m0map_file = File( + name_source=['source_file'], + name_template='%s_m0map.nii.gz', + argstr='-m0map %s', + desc='Filename of the estimated input M0 map.') + desc = 'Filename of the estimated output multi-parameter map.' 
+ mcmap_file = File( + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + argstr='-mcmap %s', + desc=desc) + comp_file = File( + name_source=['source_file'], + name_template='%s_comp.nii.gz', + argstr='-comp %s', + desc='Filename of the estimated multi-component T1 map.') + desc = 'Filename of the error map (symmetric matrix, [Diag,OffDiag]).' + error_file = File( + name_source=['source_file'], + name_template='%s_error.nii.gz', + argstr='-error %s', + desc=desc) + syn_file = File( + name_source=['source_file'], + name_template='%s_syn.nii.gz', + argstr='-syn %s', + desc='Filename of the synthetic data.') + res_file = File( + name_source=['source_file'], + name_template='%s_res.nii.gz', + argstr='-res %s', + desc='Filename of the model fit residuals') + + # Other options: + mask = File( + position=2, + exists=True, + desc='Filename of image mask.', + argstr='-mask %s') + prior = File( + position=3, + exists=True, + desc='Filename of parameter prior.', + argstr='-prior %s') + te_value = traits.Float( + desc='TE Echo Time [0ms!].', argstr='-TE %f', position=4) + tr_value = traits.Float( + desc='TR Repetition Time [10s!].', argstr='-TR %f', position=5) + desc = 'Number of components to fit [1] (currently IR/SR only)' + # set position to be ahead of TIs + nb_comp = traits.Int(desc=desc, position=6, argstr='-nc %d') + desc = 'Set LM parameters (initial value, decrease rate) [100,1.2].' + lm_val = traits.Tuple( + traits.Float, traits.Float, desc=desc, argstr='-lm %f %f', position=7) + desc = 'Use Gauss-Newton algorithm [Levenberg-Marquardt].' + gn_flag = traits.Bool(desc=desc, argstr='-gn', position=8) + slice_no = traits.Int( + desc='Fit to single slice number.', argstr='-slice %d', position=9) + voxel = traits.Tuple( + traits.Int, + traits.Int, + traits.Int, + desc='Fit to single voxel only.', + argstr='-voxel %d %d %d', + position=10) + maxit = traits.Int( + desc='NLSQR iterations [100].', argstr='-maxit %d', position=11) + + # IR options: + sr_flag = traits.Bool( + desc='Saturation Recovery fitting [default].', + argstr='-SR', + position=12) + ir_flag = traits.Bool( + desc='Inversion Recovery fitting.', + argstr='-IR', + position=13) + tis = traits.List( + traits.Float, + position=14, + desc='Inversion times for T1 data [1s,2s,5s].', + argstr='-TIs %s', + sep=' ') + tis_list = traits.File( + exists=True, + argstr='-TIlist %s', + desc='Filename of list of pre-defined TIs.') + t1_list = traits.File( + exists=True, + argstr='-T1list %s', + desc='Filename of list of pre-defined T1s') + t1min = traits.Float( + desc='Minimum tissue T1 value [400ms].', argstr='-T1min %f') + t1max = traits.Float( + desc='Maximum tissue T1 value [4000ms].', argstr='-T1max %f') + + # SPGR options + spgr = traits.Bool(desc='Spoiled Gradient Echo fitting', argstr='-SPGR') + flips = traits.List( + traits.Float, desc='Flip angles', argstr='-flips %s', sep=' ') + desc = 'Filename of list of pre-defined flip angles (deg).' + flips_list = traits.File(exists=True, argstr='-fliplist %s', desc=desc) + desc = 'Filename of B1 estimate for fitting (or include in prior).'
+ b1map = traits.File(exists=True, argstr='-b1map %s', desc=desc) + + # MCMC options: + mcout = traits.File( + exists=True, + desc='Filename of mc samples (ascii text file)', + argstr='-mcout %s') + mcsamples = traits.Int( + desc='Number of samples to keep [100].', argstr='-mcsamples %d') + mcmaxit = traits.Int( + desc='Number of iterations to run [10,000].', argstr='-mcmaxit %d') + acceptance = traits.Float( + desc='Fraction of iterations to accept [0.23].', + argstr='-acceptance %f') + + +class FitQt1OutputSpec(TraitedSpec): +    """ Output Spec for FitQt1. """ + t1map_file = File(desc='Filename of the estimated output T1 map (in ms)') + m0map_file = File(desc='Filename of the m0 map') + desc = 'Filename of the estimated output multi-parameter map' + mcmap_file = File(desc=desc) + comp_file = File(desc='Filename of the estimated multi-component T1 map.') + desc = 'Filename of the error map (symmetric matrix, [Diag,OffDiag])' + error_file = File(desc=desc) + syn_file = File(desc='Filename of the synthetic data') + res_file = File(desc='Filename of the model fit residuals') + + +class FitQt1(NiftyFitCommand): + """Interface for executable fit_qt1 from Niftyfit platform. + + Use NiftyFit to perform Qt1 fitting. + + T1 Fitting Routine (To inversion recovery or spgr data). + Fits single component T1 maps in the first instance. + + `Source code `_ + + Examples + -------- + + >>> from nipype.interfaces.niftyfit import FitQt1 + >>> fit_qt1 = FitQt1() + >>> fit_qt1.inputs.source_file = 'TI4D.nii.gz' + >>> fit_qt1.cmdline + 'fit_qt1 -source TI4D.nii.gz -comp TI4D_comp.nii.gz \ +-error TI4D_error.nii.gz -m0map TI4D_m0map.nii.gz -mcmap TI4D_mcmap.nii.gz \ +-res TI4D_res.nii.gz -syn TI4D_syn.nii.gz -t1map TI4D_t1map.nii.gz' + + """ + _cmd = get_custom_path('fit_qt1', env_dir='NIFTYFITDIR') + input_spec = FitQt1InputSpec + output_spec = FitQt1OutputSpec + _suffix = '_fit_qt1' diff --git a/nipype/interfaces/niftyfit/tests/__init__.py b/nipype/interfaces/niftyfit/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/interfaces/niftyfit/tests/test_asl.py b/nipype/interfaces/niftyfit/tests/test_asl.py new file mode 100644 index 0000000000..f703555c16 --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_asl.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path + +from ..asl import FitAsl +from ...niftyreg.tests.test_regutils import no_nifty_tool + + +@pytest.mark.skipif( + no_nifty_tool(cmd='fit_asl'), reason="niftyfit is not installed") +def test_fit_asl(): +    """ Testing FitAsl interface.""" + # Create the test node + fit_asl = FitAsl() + + # Check if the command is properly defined + cmd = get_custom_path('fit_asl', env_dir='NIFTYFITDIR') + assert fit_asl.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + fit_asl.run() + + # Tests on the interface: + # Runs cbf fitting assuming all tissue is GM!
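+ # With only source_file set, the expected command contains just the + # three name_source-generated outputs (-cbf, -error and -syn); all + # other options are left undefined.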
+ in_file = example_data('asl.nii.gz') + fit_asl.inputs.source_file = in_file + + cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} -syn {syn}' + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + cbf='asl_cbf.nii.gz', + error='asl_error.nii.gz', + syn='asl_syn.nii.gz', + ) + + assert fit_asl.cmdline == expected_cmd + + # Runs cbf fitting using IR/SR T1 data to estimate the local T1 and uses + # the segmentation data to fit tissue specific blood flow parameters + # (lambda,transit times,T1) + fit_asl2 = FitAsl(sig=True) + in_file = example_data('asl.nii.gz') + t1map = example_data('T1map.nii.gz') + seg = example_data('segmentation0.nii.gz') + fit_asl2.inputs.source_file = in_file + fit_asl2.inputs.t1map = t1map + fit_asl2.inputs.seg = seg + + cmd_tmp = '{cmd} -source {in_file} -cbf {cbf} -error {error} \ +-seg {seg} -sig -syn {syn} -t1map {t1map}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + t1map=t1map, + seg=seg, + cbf='asl_cbf.nii.gz', + error='asl_error.nii.gz', + syn='asl_syn.nii.gz', + ) + + assert fit_asl2.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py new file mode 100644 index 0000000000..20995e806e --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_auto_DwiTool.py @@ -0,0 +1,157 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dwi import DwiTool + + +def test_DwiTool_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + b0_file=dict( + argstr='-b0 %s', + position=4, + ), + ball_flag=dict( + argstr='-ball', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', + 'ballv_flag', 'nod_flag', 'nodv_flag' + ], + ), + ballv_flag=dict( + argstr='-ballv', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'nod_flag', 'nodv_flag' + ], + ), + bval_file=dict( + argstr='-bval %s', + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr='-bvec %s', + position=3, + ), + diso_val=dict(argstr='-diso %f', ), + dpr_val=dict(argstr='-dpr %f', ), + dti_flag=dict( + argstr='-dti', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nod_flag', 'nodv_flag' + ], + ), + dti_flag2=dict( + argstr='-dti2', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', + 'ballv_flag', 'nod_flag', 'nodv_flag' + ], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + famap_file=dict( + argstr='-famap %s', + name_source=['source_file'], + name_template='%s_famap.nii.gz', + ), + ivim_flag=dict( + argstr='-ivim', + position=6, + xor=[ + 'mono_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nod_flag', 'nodv_flag' + ], + ), + logdti_file=dict( + argstr='-logdti2 %s', + name_source=['source_file'], + name_template='%s_logdti2.nii.gz', + ), + mask_file=dict( + argstr='-mask %s', + position=5, + ), + mcmap_file=dict( + argstr='-mcmap %s', + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + ), + mdmap_file=dict( + argstr='-mdmap %s', + name_source=['source_file'], + name_template='%s_mdmap.nii.gz', + ), + mono_flag=dict( + argstr='-mono', + position=6, + xor=[ + 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nod_flag', 'nodv_flag' + ], + ), + nod_flag=dict( + argstr='-nod', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nodv_flag' + ], + ), + nodv_flag=dict( + 
argstr='-nodv', + position=6, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'dti_flag2', 'ball_flag', + 'ballv_flag', 'nod_flag' + ], + ), + rgbmap_file=dict( + argstr='-rgbmap %s', + name_source=['source_file'], + name_template='%s_rgbmap.nii.gz', + ), + source_file=dict( + argstr='-source %s', + mandatory=True, + position=1, + ), + syn_file=dict( + argstr='-syn %s', + name_source=['source_file'], + name_template='%s_syn.nii.gz', + requires=['bvec_file', 'b0_file'], + ), + v1map_file=dict( + argstr='-v1map %s', + name_source=['source_file'], + name_template='%s_v1map.nii.gz', + ), + ) + inputs = DwiTool.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DwiTool_outputs(): + output_map = dict( + famap_file=dict(), + logdti_file=dict(), + mcmap_file=dict(), + mdmap_file=dict(), + rgbmap_file=dict(), + syn_file=dict(), + v1map_file=dict(), + ) + outputs = DwiTool.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py new file mode 100644 index 0000000000..b2e1bef961 --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitAsl.py @@ -0,0 +1,85 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..asl import FitAsl + + +def test_FitAsl_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cbf_file=dict( + argstr='-cbf %s', + name_source=['source_file'], + name_template='%s_cbf.nii.gz', + ), + dpld=dict(argstr='-dPLD %f', ), + dt_inv2=dict(argstr='-dTinv2 %f', ), + eff=dict(argstr='-eff %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + error_file=dict( + argstr='-error %s', + name_source=['source_file'], + name_template='%s_error.nii.gz', + ), + gm_plasma=dict(argstr='-gmL %f', ), + gm_t1=dict(argstr='-gmT1 %f', ), + gm_ttt=dict(argstr='-gmTTT %f', ), + ir_output=dict(argstr='-IRoutput %s', ), + ir_volume=dict(argstr='-IRvolume %s', ), + ldd=dict(argstr='-LDD %f', ), + m0map=dict(argstr='-m0map %s', ), + m0mape=dict(argstr='-m0mape %s', ), + mask=dict( + argstr='-mask %s', + position=2, + ), + mul=dict(argstr='-mul %f', ), + mulgm=dict(argstr='-sig', ), + out=dict(argstr='-out %f', ), + pasl=dict(argstr='-pasl', ), + pcasl=dict(argstr='-pcasl', ), + plasma_coeff=dict(argstr='-L %f', ), + pld=dict(argstr='-PLD %f', ), + pv0=dict(argstr='-pv0 %d', ), + pv2=dict(argstr='-pv2 %d', ), + pv3=dict(argstr='-pv3 %d %d %d', ), + pv_threshold=dict(argstr='-pvthreshold', ), + seg=dict(argstr='-seg %s', ), + segstyle=dict(argstr='-segstyle', ), + sig=dict(argstr='-sig', ), + source_file=dict( + argstr='-source %s', + mandatory=True, + position=1, + ), + syn_file=dict( + argstr='-syn %s', + name_source=['source_file'], + name_template='%s_syn.nii.gz', + ), + t1_art_cmp=dict(argstr='-T1a %f', ), + t1map=dict(argstr='-t1map %s', ), + t_inv1=dict(argstr='-Tinv1 %f', ), + t_inv2=dict(argstr='-Tinv2 %f', ), + wm_plasma=dict(argstr='-wmL %f', ), + wm_t1=dict(argstr='-wmT1 %f', ), + wm_ttt=dict(argstr='-wmTTT %f', ), + ) + inputs = FitAsl.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitAsl_outputs(): + output_map = dict( + cbf_file=dict(), + 
error_file=dict(), + syn_file=dict(), + ) + outputs = FitAsl.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py new file mode 100644 index 0000000000..700d9a31c4 --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitDwi.py @@ -0,0 +1,214 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dwi import FitDwi + + +def test_FitDwi_inputs(): + input_map = dict( + acceptance=dict(argstr='-accpetance %f', ), + args=dict(argstr='%s', ), + ball_flag=dict( + argstr='-ball', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ], + ), + ballv_flag=dict( + argstr='-ballv', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', 'nod_flag', + 'nodv_flag' + ], + ), + bval_file=dict( + argstr='-bval %s', + mandatory=True, + position=2, + ), + bvec_file=dict( + argstr='-bvec %s', + mandatory=True, + position=3, + ), + cov_file=dict(argstr='-cov %s', ), + csf_t2_val=dict(argstr='-csfT2 %f', ), + diso_val=dict(argstr='-diso %f', ), + dpr_val=dict(argstr='-dpr %f', ), + dti_flag=dict( + argstr='-dti', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'ball_flag', 'ballv_flag', + 'nod_flag', 'nodv_flag' + ], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + error_file=dict( + argstr='-error %s', + name_source=['source_file'], + name_template='%s_error.nii.gz', + ), + famap_file=dict( + argstr='-famap %s', + name_source=['source_file'], + name_template='%s_famap.nii.gz', + ), + gn_flag=dict( + argstr='-gn', + xor=['wls_flag'], + ), + ivim_flag=dict( + argstr='-ivim', + position=4, + xor=[ + 'mono_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ], + ), + lm_vals=dict( + argstr='-lm %f %f', + requires=['gn_flag'], + ), + mask_file=dict(argstr='-mask %s', ), + maxit_val=dict( + argstr='-maxit %d', + requires=['gn_flag'], + ), + mcmap_file=dict( + argstr='-mcmap %s', + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + requires=['nodv_flag'], + ), + mcmaxit=dict(argstr='-mcmaxit %d', ), + mcout=dict( + argstr='-mcout %s', + name_source=['source_file'], + name_template='%s_mcout.txt', + ), + mcsamples=dict(argstr='-mcsamples %d', ), + mdmap_file=dict( + argstr='-mdmap %s', + name_source=['source_file'], + name_template='%s_mdmap.nii.gz', + ), + mono_flag=dict( + argstr='-mono', + position=4, + xor=[ + 'ivim_flag', 'dti_flag', 'ball_flag', 'ballv_flag', 'nod_flag', + 'nodv_flag' + ], + ), + nod_flag=dict( + argstr='-nod', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', + 'ballv_flag', 'nodv_flag' + ], + ), + nodiff_file=dict( + argstr='-nodiff %s', + name_source=['source_file'], + name_template='%s_no_diff.nii.gz', + ), + nodv_flag=dict( + argstr='-nodv', + position=4, + xor=[ + 'mono_flag', 'ivim_flag', 'dti_flag', 'ball_flag', + 'ballv_flag', 'nod_flag' + ], + ), + perf_thr=dict(argstr='-perfthreshold %f', ), + prior_file=dict(argstr='-prior %s', ), + res_file=dict( + argstr='-res %s', + name_source=['source_file'], + name_template='%s_resmap.nii.gz', + ), + rgbmap_file=dict( + argstr='-rgbmap %s', + name_source=['source_file'], + name_template='%s_rgbmap.nii.gz', + requires=['dti_flag'], + ), + rot_sform_flag=dict(argstr='-rotsform %d', ), + 
slice_no=dict(argstr='-slice %d', ), + source_file=dict( + argstr='-source %s', + mandatory=True, + position=1, + ), + swls_val=dict(argstr='-swls %f', ), + syn_file=dict( + argstr='-syn %s', + name_source=['source_file'], + name_template='%s_syn.nii.gz', + ), + te_file=dict( + argstr='-TE %s', + xor=['te_file'], + ), + te_value=dict( + argstr='-TE %s', + xor=['te_file'], + ), + ten_type=dict(usedefault=True, ), + tenmap2_file=dict( + argstr='-tenmap2 %s', + name_source=['source_file'], + name_template='%s_tenmap2.nii.gz', + requires=['dti_flag'], + ), + tenmap_file=dict( + argstr='-tenmap %s', + name_source=['source_file'], + name_template='%s_tenmap.nii.gz', + requires=['dti_flag'], + ), + v1map_file=dict( + argstr='-v1map %s', + name_source=['source_file'], + name_template='%s_v1map.nii.gz', + ), + vb_flag=dict(argstr='-vb', ), + voxel=dict(argstr='-voxel %d %d %d', ), + wls_flag=dict( + argstr='-wls', + xor=['gn_flag'], + ), + wm_t2_val=dict(argstr='-wmT2 %f', ), + ) + inputs = FitDwi.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitDwi_outputs(): + output_map = dict( + error_file=dict(), + famap_file=dict(), + mcmap_file=dict(), + mcout=dict(), + mdmap_file=dict(), + nodiff_file=dict(), + res_file=dict(), + rgbmap_file=dict(), + syn_file=dict(), + tenmap2_file=dict(), + tenmap_file=dict(), + v1map_file=dict(), + ) + outputs = FitDwi.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py new file mode 100644 index 0000000000..0505ee514c --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_auto_FitQt1.py @@ -0,0 +1,141 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..qt1 import FitQt1 + + +def test_FitQt1_inputs(): + input_map = dict( + acceptance=dict(argstr='-acceptance %f', ), + args=dict(argstr='%s', ), + b1map=dict(argstr='-b1map %s', ), + comp_file=dict( + argstr='-comp %s', + name_source=['source_file'], + name_template='%s_comp.nii.gz', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + error_file=dict( + argstr='-error %s', + name_source=['source_file'], + name_template='%s_error.nii.gz', + ), + flips=dict( + argstr='-flips %s', + sep=' ', + ), + flips_list=dict(argstr='-fliplist %s', ), + gn_flag=dict( + argstr='-gn', + position=8, + ), + ir_flag=dict( + argstr='-IR', + position=13, + ), + lm_val=dict( + argstr='-lm %f %f', + position=7, + ), + m0map_file=dict( + argstr='-m0map %s', + name_source=['source_file'], + name_template='%s_m0map.nii.gz', + ), + mask=dict( + argstr='-mask %s', + position=2, + ), + maxit=dict( + argstr='-maxit %d', + position=11, + ), + mcmap_file=dict( + argstr='-mcmap %s', + name_source=['source_file'], + name_template='%s_mcmap.nii.gz', + ), + mcmaxit=dict(argstr='-mcmaxit %d', ), + mcout=dict(argstr='-mcout %s', ), + mcsamples=dict(argstr='-mcsamples %d', ), + nb_comp=dict( + argstr='-nc %d', + position=6, + ), + prior=dict( + argstr='-prior %s', + position=3, + ), + res_file=dict( + argstr='-res %s', + name_source=['source_file'], + name_template='%s_res.nii.gz', + ), + slice_no=dict( + argstr='-slice %d', + position=9, + ), + source_file=dict( + argstr='-source %s', + mandatory=True, + position=1, + ), + 
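+        # Most FitQt1 options carry an explicit `position` (source_file=1,
+        # mask=2, ..., tis=14), fixing where each argument appears on the
+        # generated command line.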
spgr=dict(argstr='-SPGR', ), + sr_flag=dict( + argstr='-SR', + position=12, + ), + syn_file=dict( + argstr='-syn %s', + name_source=['source_file'], + name_template='%s_syn.nii.gz', + ), + t1_list=dict(argstr='-T1list %s', ), + t1map_file=dict( + argstr='-t1map %s', + name_source=['source_file'], + name_template='%s_t1map.nii.gz', + ), + t1max=dict(argstr='-T1max %f', ), + t1min=dict(argstr='-T1min %f', ), + te_value=dict( + argstr='-TE %f', + position=4, + ), + tis=dict( + argstr='-TIs %s', + position=14, + sep=' ', + ), + tis_list=dict(argstr='-TIlist %s', ), + tr_value=dict( + argstr='-TR %f', + position=5, + ), + voxel=dict( + argstr='-voxel %d %d %d', + position=10, + ), + ) + inputs = FitQt1.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitQt1_outputs(): + output_map = dict( + comp_file=dict(), + error_file=dict(), + m0map_file=dict(), + mcmap_file=dict(), + res_file=dict(), + syn_file=dict(), + t1map_file=dict(), + ) + outputs = FitQt1.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py new file mode 100644 index 0000000000..6bfeb3c9d3 --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_auto_NiftyFitCommand.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import NiftyFitCommand + + +def test_NiftyFitCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = NiftyFitCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyfit/tests/test_dwi.py b/nipype/interfaces/niftyfit/tests/test_dwi.py new file mode 100644 index 0000000000..270d9c666a --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_dwi.py @@ -0,0 +1,108 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path + +from ..dwi import FitDwi, DwiTool +from ...niftyreg.tests.test_regutils import no_nifty_tool + + +@pytest.mark.skipif( + no_nifty_tool(cmd='fit_dwi'), reason="niftyfit is not installed") +def test_fit_dwi(): + """ Testing FitDwi interface.""" + # Create a node object + fit_dwi = FitDwi() + + # Check if the command is properly defined + cmd = get_custom_path('fit_dwi', env_dir='NIFTYFITDIR') + assert fit_dwi.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + fit_dwi.run() + + # Assign some input data + in_file = example_data('dwi.nii.gz') + bval_file = example_data('bvals') + bvec_file = example_data('bvecs') + fit_dwi.inputs.source_file = in_file + fit_dwi.inputs.bval_file = bval_file + fit_dwi.inputs.bvec_file = bvec_file + fit_dwi.inputs.dti_flag = True + + cmd_tmp = '{cmd} -source {in_file} -bval {bval} -bvec {bvec} -dti \ +-error {error} -famap {fa} -mcmap {mc} -mcout {mcout} -mdmap {md} -nodiff \ +{nodiff} -res {res} -rgbmap {rgb} -syn {syn} -tenmap2 {ten2} -v1map {v1}' + + expected_cmd = cmd_tmp.format( + 
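+        # The expected filenames below follow each output trait's
+        # name_template, filled with the source file's basename ('dwi'),
+        # e.g. '%s_famap.nii.gz' -> 'dwi_famap.nii.gz'.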
cmd=cmd, + in_file=in_file, + bval=bval_file, + bvec=bvec_file, + error='dwi_error.nii.gz', + fa='dwi_famap.nii.gz', + mc='dwi_mcmap.nii.gz', + md='dwi_mdmap.nii.gz', + nodiff='dwi_no_diff.nii.gz', + res='dwi_resmap.nii.gz', + rgb='dwi_rgbmap.nii.gz', + syn='dwi_syn.nii.gz', + ten2='dwi_tenmap2.nii.gz', + v1='dwi_v1map.nii.gz', + mcout='dwi_mcout.txt') + + assert fit_dwi.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='dwi_tool'), reason="niftyfit is not installed") +def test_dwi_tool(): + """ Testing DwiTool interface.""" + # Create a node object + dwi_tool = DwiTool() + + # Check if the command is properly defined + cmd = get_custom_path('dwi_tool', env_dir='NIFTYFITDIR') + assert dwi_tool.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + dwi_tool.run() + + # Assign some input data + in_file = example_data('dwi.nii.gz') + bval_file = example_data('bvals') + bvec_file = example_data('bvecs') + b0_file = example_data('b0.nii') + mask_file = example_data('mask.nii.gz') + dwi_tool.inputs.source_file = in_file + dwi_tool.inputs.mask_file = mask_file + dwi_tool.inputs.bval_file = bval_file + dwi_tool.inputs.bvec_file = bvec_file + dwi_tool.inputs.b0_file = b0_file + dwi_tool.inputs.dti_flag = True + + cmd_tmp = '{cmd} -source {in_file} -bval {bval} -bvec {bvec} -b0 {b0} \ +-mask {mask} -dti -famap {fa} -logdti2 {log} -mcmap {mc} -mdmap {md} \ +-rgbmap {rgb} -syn {syn} -v1map {v1}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + bval=bval_file, + bvec=bvec_file, + b0=b0_file, + mask=mask_file, + fa='dwi_famap.nii.gz', + log='dwi_logdti2.nii.gz', + mc='dwi_mcmap.nii.gz', + md='dwi_mdmap.nii.gz', + rgb='dwi_rgbmap.nii.gz', + syn='dwi_syn.nii.gz', + v1='dwi_v1map.nii.gz') + + assert dwi_tool.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyfit/tests/test_qt1.py b/nipype/interfaces/niftyfit/tests/test_qt1.py new file mode 100644 index 0000000000..894017e654 --- /dev/null +++ b/nipype/interfaces/niftyfit/tests/test_qt1.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from ..qt1 import FitQt1 + + +@pytest.mark.skipif( + no_nifty_tool(cmd='fit_qt1'), reason="niftyfit is not installed") +def test_fit_qt1(): + """ Testing FitQt1 interface.""" + # Create a node object + fit_qt1 = FitQt1() + + # Check if the command is properly defined + cmd = get_custom_path('fit_qt1', env_dir='NIFTYFITDIR') + assert fit_qt1.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + fit_qt1.run() + + # Regular test: + in_file = example_data('TI4D.nii.gz') + fit_qt1.inputs.source_file = in_file + + cmd_tmp = '{cmd} -source {in_file} -comp {comp} -error {error} -m0map \ +{map0} -mcmap {cmap} -res {res} -syn {syn} -t1map {t1map}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + comp='TI4D_comp.nii.gz', + map0='TI4D_m0map.nii.gz', + error='TI4D_error.nii.gz', + cmap='TI4D_mcmap.nii.gz', + res='TI4D_res.nii.gz', + t1map='TI4D_t1map.nii.gz', + syn='TI4D_syn.nii.gz', + ) + + assert fit_qt1.cmdline == expected_cmd + + # Runs T1 fitting to inversion and saturation recovery data (NLSQR) + fit_qt1_2 = FitQt1(tis=[1, 2, 5], ir_flag=True) + in_file = example_data('TI4D.nii.gz') + fit_qt1_2.inputs.source_file = 
in_file + + cmd_tmp = '{cmd} -source {in_file} -IR -TIs 1.0 2.0 5.0 \ +-comp {comp} -error {error} -m0map {map0} -mcmap {cmap} -res {res} \ +-syn {syn} -t1map {t1map}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + comp='TI4D_comp.nii.gz', + map0='TI4D_m0map.nii.gz', + error='TI4D_error.nii.gz', + cmap='TI4D_mcmap.nii.gz', + res='TI4D_res.nii.gz', + t1map='TI4D_t1map.nii.gz', + syn='TI4D_syn.nii.gz', + ) + + assert fit_qt1_2.cmdline == expected_cmd + + # Runs T1 fitting to spoiled gradient echo (SPGR) data (NLSQR) + fit_qt1_3 = FitQt1(flips=[2, 4, 8], spgr=True) + in_file = example_data('TI4D.nii.gz') + fit_qt1_3.inputs.source_file = in_file + + cmd_tmp = '{cmd} -source {in_file} -comp {comp} -error {error} \ +-flips 2.0 4.0 8.0 -m0map {map0} -mcmap {cmap} -res {res} -SPGR -syn {syn} \ +-t1map {t1map}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + comp='TI4D_comp.nii.gz', + map0='TI4D_m0map.nii.gz', + error='TI4D_error.nii.gz', + cmap='TI4D_mcmap.nii.gz', + res='TI4D_res.nii.gz', + t1map='TI4D_t1map.nii.gz', + syn='TI4D_syn.nii.gz', + ) + + assert fit_qt1_3.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyreg/__init__.py b/nipype/interfaces/niftyreg/__init__.py new file mode 100644 index 0000000000..9854ebaea3 --- /dev/null +++ b/nipype/interfaces/niftyreg/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The niftyreg module provides classes for interfacing with the `NiftyReg +`_ command line tools. + +Top-level namespace for niftyreg. +""" + +from .base import get_custom_path +from .reg import RegAladin, RegF3D +from .regutils import (RegResample, RegJacobian, RegAverage, RegTools, + RegTransform, RegMeasure) diff --git a/nipype/interfaces/niftyreg/base.py b/nipype/interfaces/niftyreg/base.py new file mode 100644 index 0000000000..bd8a280aa5 --- /dev/null +++ b/nipype/interfaces/niftyreg/base.py @@ -0,0 +1,134 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The niftyreg module provides classes for interfacing with `niftyreg +`_ command line tools. + +These are the base tools for working with niftyreg. + +Registration tools are found in niftyreg/reg.py +Every other tool is found in niftyreg/regutils.py + +Examples +-------- +See the docstrings of the individual classes for examples. + +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import property, super +from distutils.version import StrictVersion +import os + +from ... import logging +from ..base import CommandLine, CommandLineInputSpec, traits, Undefined +from ...utils.filemanip import split_filename + +iflogger = logging.getLogger('nipype.interface') + + +def get_custom_path(command, env_dir='NIFTYREGDIR'): + return os.path.join(os.getenv(env_dir, ''), command) + + +class NiftyRegCommandInputSpec(CommandLineInputSpec): + """Input Spec for niftyreg interfaces.""" + # Set the number of omp thread to use + omp_core_val = traits.Int( + int(os.environ.get('OMP_NUM_THREADS', '1')), + desc='Number of openmp thread to use', + argstr='-omp %i', + usedefault=True) + + +class NiftyRegCommand(CommandLine): + """ + Base support interface for NiftyReg commands. 
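+
+    Checks the detected version against a minimum supported release
+    (warning below 1.5.30 at construction time, and raising from
+    ``check_version()``), and keeps the ``OMP_NUM_THREADS`` environment
+    variable synchronised with the ``omp_core_val`` input.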
+ """ + _suffix = '_nr' + _min_version = '1.5.30' + + input_spec = NiftyRegCommandInputSpec + + def __init__(self, required_version=None, **inputs): + self.num_threads = 1 + super(NiftyRegCommand, self).__init__(**inputs) + self.required_version = required_version + _version = self.version_from_command() + if _version: + _version = _version.decode("utf-8") + if self._min_version is not None and \ + StrictVersion(_version) < StrictVersion(self._min_version): + msg = 'A later version of Niftyreg is required (%s < %s)' + iflogger.warning(msg, _version, self._min_version) + if required_version is not None: + if StrictVersion(_version) != StrictVersion(required_version): + msg = 'The version of NiftyReg differs from the required' + msg += '(%s != %s)' + iflogger.warning(msg, _version, self.required_version) + self.inputs.on_trait_change(self._omp_update, 'omp_core_val') + self.inputs.on_trait_change(self._environ_update, 'environ') + self._omp_update() + + def _omp_update(self): + if self.inputs.omp_core_val: + self.inputs.environ['OMP_NUM_THREADS'] = \ + str(self.inputs.omp_core_val) + self.num_threads = self.inputs.omp_core_val + else: + if 'OMP_NUM_THREADS' in self.inputs.environ: + del self.inputs.environ['OMP_NUM_THREADS'] + self.num_threads = 1 + + def _environ_update(self): + if self.inputs.environ: + if 'OMP_NUM_THREADS' in self.inputs.environ: + self.inputs.omp_core_val = \ + int(self.inputs.environ['OMP_NUM_THREADS']) + else: + self.inputs.omp_core_val = Undefined + else: + self.inputs.omp_core_val = Undefined + + def check_version(self): + _version = self.version_from_command() + if not _version: + raise Exception('Niftyreg not found') + # Decoding to string: + _version = _version.decode("utf-8") + if StrictVersion(_version) < StrictVersion(self._min_version): + err = 'A later version of Niftyreg is required (%s < %s)' + raise ValueError(err % (_version, self._min_version)) + if self.required_version: + if StrictVersion(_version) != StrictVersion(self.required_version): + err = 'The version of NiftyReg differs from the required' + err += '(%s != %s)' + raise ValueError(err % (_version, self.required_version)) + + @property + def version(self): + return self.version_from_command() + + def exists(self): + return self.version_from_command() is not None + + def _format_arg(self, name, spec, value): + if name == 'omp_core_val': + self.numthreads = value + return super(NiftyRegCommand, self)._format_arg(name, spec, value) + + def _gen_fname(self, basename, out_dir=None, suffix=None, ext=None): + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + _, final_bn, final_ext = split_filename(basename) + if out_dir is None: + out_dir = os.getcwd() + if ext is not None: + final_ext = ext + if suffix is not None: + final_bn = ''.join((final_bn, suffix)) + return os.path.abspath(os.path.join(out_dir, final_bn + final_ext)) diff --git a/nipype/interfaces/niftyreg/reg.py b/nipype/interfaces/niftyreg/reg.py new file mode 100644 index 0000000000..f36752b872 --- /dev/null +++ b/nipype/interfaces/niftyreg/reg.py @@ -0,0 +1,400 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The reg module provides classes for interfacing with the `niftyreg +`_ registration command line tools. 
+ +The interfaces were written to work with niftyreg version 1.5.10 +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import staticmethod +import os + +from ..base import TraitedSpec, File, traits, isdefined +from .base import get_custom_path, NiftyRegCommand, NiftyRegCommandInputSpec +from ...utils.filemanip import split_filename + + +class RegAladinInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegAladin. """ + # Input reference file + ref_file = File( + exists=True, + desc='The input reference/target image', + argstr='-ref %s', + mandatory=True) + # Input floating file + flo_file = File( + exists=True, + desc='The input floating/source image', + argstr='-flo %s', + mandatory=True) + # No symmetric flag + nosym_flag = traits.Bool( + argstr='-noSym', desc='Turn off symmetric registration') + # Rigid only registration + rig_only_flag = traits.Bool( + argstr='-rigOnly', desc='Do only a rigid registration') + # Directly optimise affine flag + desc = 'Directly optimise the affine parameters' + aff_direct_flag = traits.Bool(argstr='-affDirect', desc=desc) + # Input affine + in_aff_file = File( + exists=True, + desc='The input affine transformation', + argstr='-inaff %s') + # Input reference mask + rmask_file = File( + exists=True, desc='The input reference mask', argstr='-rmask %s') + # Input floating mask + fmask_file = File( + exists=True, desc='The input floating mask', argstr='-fmask %s') + # Maximum number of iterations + maxit_val = traits.Range( + desc='Maximum number of iterations', argstr='-maxit %d', low=0) + # Multiresolution levels + ln_val = traits.Range( + desc='Number of resolution levels to create', argstr='-ln %d', low=0) + # Number of resolution levels to process + lp_val = traits.Range( + desc='Number of resolution levels to perform', argstr='-lp %d', low=0) + # Smoothing to apply on reference image + desc = 'Amount of smoothing to apply to reference image' + smoo_r_val = traits.Float(desc=desc, argstr='-smooR %f') + # Smoothing to apply on floating image + desc = 'Amount of smoothing to apply to floating image' + smoo_f_val = traits.Float(desc=desc, argstr='-smooF %f') + # Use nifti header to initialise transformation + desc = 'Use nifti header to initialise transformation' + nac_flag = traits.Bool(desc=desc, argstr='-nac') + # Use the input masks centre of mass to initialise the transformation + desc = 'Use the masks centre of mass to initialise the transformation' + cog_flag = traits.Bool(desc=desc, argstr='-cog') + # Percent of blocks that are considered active. 
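+    # (block matching presumably retains only this percentage of the
+    # blocks, chosen as the most informative ones)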
+ v_val = traits.Range( + desc='Percent of blocks that are active', argstr='-pv %d', low=0) + # Percent of inlier blocks + i_val = traits.Range( + desc='Percent of inlier blocks', argstr='-pi %d', low=0) + # Lower threshold on reference image + ref_low_val = traits.Float( + desc='Lower threshold value on reference image', + argstr='-refLowThr %f') + # Upper threshold on reference image + ref_up_val = traits.Float( + desc='Upper threshold value on reference image', argstr='-refUpThr %f') + # Lower threshold on floating image + flo_low_val = traits.Float( + desc='Lower threshold value on floating image', argstr='-floLowThr %f') + # Upper threshold on floating image + flo_up_val = traits.Float( + desc='Upper threshold value on floating image', argstr='-floUpThr %f') + # Platform to use + platform_val = traits.Int(desc='Platform index', argstr='-platf %i') + # Platform to use + gpuid_val = traits.Int(desc='Device to use id', argstr='-gpuid %i') + # Verbosity off + verbosity_off_flag = traits.Bool( + argstr='-voff', desc='Turn off verbose output') + + # Affine output transformation matrix file + aff_file = File( + name_source=['flo_file'], + name_template='%s_aff.txt', + desc='The output affine matrix file', + argstr='-aff %s') + # Result warped image file + res_file = File( + name_source=['flo_file'], + name_template='%s_res.nii.gz', + desc='The affine transformed floating image', + argstr='-res %s') + + +class RegAladinOutputSpec(TraitedSpec): + """ Output Spec for RegAladin. """ + aff_file = File(desc='The output affine file') + res_file = File(desc='The output transformed image') + desc = 'Output string in the format for reg_average' + avg_output = traits.String(desc=desc) + + +class RegAladin(NiftyRegCommand): + """Interface for executable reg_aladin from NiftyReg platform. + + Block Matching algorithm for symmetric global registration. + Based on Modat et al., "Global image registration using + asymmetric block-matching approach" + J. Med. Img. 1(2) 024003, 2014, doi: 10.1117/1.JMI.1.2.024003 + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegAladin() + >>> node.inputs.ref_file = 'im1.nii' + >>> node.inputs.flo_file = 'im2.nii' + >>> node.inputs.rmask_file = 'mask.nii' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_aladin -aff im2_aff.txt -flo im2.nii -omp 4 -ref im1.nii \ +-res im2_res.nii.gz -rmask mask.nii' + + """ + _cmd = get_custom_path('reg_aladin') + input_spec = RegAladinInputSpec + output_spec = RegAladinOutputSpec + + def _list_outputs(self): + outputs = super(RegAladin, self)._list_outputs() + + # Make a list of the linear transformation file and the input image + aff = os.path.abspath(outputs['aff_file']) + flo = os.path.abspath(self.inputs.flo_file) + outputs['avg_output'] = '%s %s' % (aff, flo) + return outputs + + +class RegF3DInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegF3D. 
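+
+    Groups the similarity options (NMI, LNCC, SSD, KLD), the
+    regularisation terms (bending energy, linear elasticity, log-Jacobian
+    penalty) and the multi-resolution optimisation settings of reg_f3d.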
""" + # Input reference file + ref_file = File( + exists=True, + desc='The input reference/target image', + argstr='-ref %s', + mandatory=True) + # Input floating file + flo_file = File( + exists=True, + desc='The input floating/source image', + argstr='-flo %s', + mandatory=True) + + # Input Affine file + aff_file = File( + exists=True, + desc='The input affine transformation file', + argstr='-aff %s') + + # Input cpp file + incpp_file = File( + exists=True, + desc='The input cpp transformation file', + argstr='-incpp %s') + + # Reference mask + rmask_file = File( + exists=True, desc='Reference image mask', argstr='-rmask %s') + + # Smoothing kernel for reference + desc = 'Smoothing kernel width for reference image' + ref_smooth_val = traits.Float(desc=desc, argstr='-smooR %f') + # Smoothing kernel for floating + desc = 'Smoothing kernel width for floating image' + flo_smooth_val = traits.Float(desc=desc, argstr='-smooF %f') + + # Lower threshold for reference image + rlwth_thr_val = traits.Float( + desc='Lower threshold for reference image', argstr='--rLwTh %f') + # Upper threshold for reference image + rupth_thr_val = traits.Float( + desc='Upper threshold for reference image', argstr='--rUpTh %f') + # Lower threshold for reference image + flwth_thr_val = traits.Float( + desc='Lower threshold for floating image', argstr='--fLwTh %f') + # Upper threshold for reference image + fupth_thr_val = traits.Float( + desc='Upper threshold for floating image', argstr='--fUpTh %f') + + # Lower threshold for reference image + desc = 'Lower threshold for reference image at the specified time point' + rlwth2_thr_val = traits.Tuple( + traits.Range(low=0), traits.Float, desc=desc, argstr='-rLwTh %d %f') + # Upper threshold for reference image + desc = 'Upper threshold for reference image at the specified time point' + rupth2_thr_val = traits.Tuple( + traits.Range(low=0), traits.Float, desc=desc, argstr='-rUpTh %d %f') + # Lower threshold for reference image + desc = 'Lower threshold for floating image at the specified time point' + flwth2_thr_val = traits.Tuple( + traits.Range(low=0), traits.Float, desc=desc, argstr='-fLwTh %d %f') + # Upper threshold for reference image + desc = 'Upper threshold for floating image at the specified time point' + fupth2_thr_val = traits.Tuple( + traits.Range(low=0), traits.Float, desc=desc, argstr='-fUpTh %d %f') + + # Final grid spacing along the 3 axes + sx_val = traits.Float( + desc='Final grid spacing along the x axes', argstr='-sx %f') + sy_val = traits.Float( + desc='Final grid spacing along the y axes', argstr='-sy %f') + sz_val = traits.Float( + desc='Final grid spacing along the z axes', argstr='-sz %f') + + # Regularisation options + be_val = traits.Float(desc='Bending energy value', argstr='-be %f') + le_val = traits.Float( + desc='Linear elasticity penalty term', argstr='-le %f') + jl_val = traits.Float( + desc='Log of jacobian of deformation penalty value', argstr='-jl %f') + desc = 'Do not approximate the log of jacobian penalty at control points \ +only' + + no_app_jl_flag = traits.Bool(argstr='-noAppJL', desc=desc) + + # Similarity measure options + desc = 'use NMI even when other options are specified' + nmi_flag = traits.Bool(argstr='--nmi', desc=desc) + desc = 'Number of bins in the histogram for reference image' + rbn_val = traits.Range(low=0, desc=desc, argstr='--rbn %d') + desc = 'Number of bins in the histogram for reference image' + fbn_val = traits.Range(low=0, desc=desc, argstr='--fbn %d') + desc = 'Number of bins in the histogram for reference 
image for given \ +time point' + + rbn2_val = traits.Tuple( + traits.Range(low=0), + traits.Range(low=0), + desc=desc, + argstr='-rbn %d %d') + + desc = 'Number of bins in the histogram for reference image for given \ +time point' + + fbn2_val = traits.Tuple( + traits.Range(low=0), + traits.Range(low=0), + desc=desc, + argstr='-fbn %d %d') + + lncc_val = traits.Float( + desc='SD of the Gaussian for computing LNCC', argstr='--lncc %f') + desc = 'SD of the Gaussian for computing LNCC for a given time point' + lncc2_val = traits.Tuple( + traits.Range(low=0), traits.Float, desc=desc, argstr='-lncc %d %f') + + ssd_flag = traits.Bool( + desc='Use SSD as the similarity measure', argstr='--ssd') + desc = 'Use SSD as the similarity measure for a given time point' + ssd2_flag = traits.Range(low=0, desc=desc, argstr='-ssd %d') + kld_flag = traits.Bool( + desc='Use KL divergence as the similarity measure', argstr='--kld') + desc = 'Use KL divergence as the similarity measure for a given time point' + kld2_flag = traits.Range(low=0, desc=desc, argstr='-kld %d') + amc_flag = traits.Bool(desc='Use additive NMI', argstr='-amc') + + nox_flag = traits.Bool(desc="Don't optimise in x direction", argstr='-nox') + noy_flag = traits.Bool(desc="Don't optimise in y direction", argstr='-noy') + noz_flag = traits.Bool(desc="Don't optimise in z direction", argstr='-noz') + + # Optimization options + maxit_val = traits.Range( + low=0, + argstr='-maxit %d', + desc='Maximum number of iterations per level') + ln_val = traits.Range( + low=0, argstr='-ln %d', desc='Number of resolution levels to create') + lp_val = traits.Range( + low=0, argstr='-lp %d', desc='Number of resolution levels to perform') + nopy_flag = traits.Bool( + desc='Do not use the multiresolution approach', argstr='-nopy') + noconj_flag = traits.Bool( + desc='Use simple GD optimization', argstr='-noConj') + desc = 'Add perturbation steps after each optimization step' + pert_val = traits.Range(low=0, desc=desc, argstr='-pert %d') + + # F3d2 options + vel_flag = traits.Bool( + desc='Use velocity field integration', argstr='-vel') + fmask_file = File( + exists=True, desc='Floating image mask', argstr='-fmask %s') + + # Other options + desc = 'Kernel width for smoothing the metric gradient' + smooth_grad_val = traits.Float(desc=desc, argstr='-smoothGrad %f') + # Padding value + pad_val = traits.Float(desc='Padding value', argstr='-pad %f') + # verbosity off + verbosity_off_flag = traits.Bool( + argstr='-voff', desc='Turn off verbose output') + + # Output CPP image file + cpp_file = File( + name_source=['flo_file'], + name_template='%s_cpp.nii.gz', + desc='The output CPP file', + argstr='-cpp %s') + # Output warped image file + res_file = File( + name_source=['flo_file'], + name_template='%s_res.nii.gz', + desc='The output resampled image', + argstr='-res %s') + + +class RegF3DOutputSpec(TraitedSpec): + """ Output Spec for RegF3D. """ + cpp_file = File(desc='The output CPP file') + res_file = File(desc='The output resampled image') + invcpp_file = File(desc='The output inverse CPP file') + invres_file = File(desc='The output inverse res file') + desc = 'Output string in the format for reg_average' + avg_output = traits.String(desc=desc) + + +class RegF3D(NiftyRegCommand): + """Interface for executable reg_f3d from NiftyReg platform. + + Fast Free-Form Deformation (F3D) algorithm for non-rigid registration. 
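+
+    The transformation is parameterised by a grid of control points
+    written to ``cpp_file``; when ``vel_flag`` is set, a velocity-field
+    parameterisation is used instead and backward (inverse)
+    ``invcpp_file``/``invres_file`` outputs are produced as well.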
+ Initially based on Modat et al., "Fast Free-Form Deformation using + graphics processing units", CMPB, 2010 + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegF3D() + >>> node.inputs.ref_file = 'im1.nii' + >>> node.inputs.flo_file = 'im2.nii' + >>> node.inputs.rmask_file = 'mask.nii' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_f3d -cpp im2_cpp.nii.gz -flo im2.nii -omp 4 -ref im1.nii \ +-res im2_res.nii.gz -rmask mask.nii' + + """ + _cmd = get_custom_path('reg_f3d') + input_spec = RegF3DInputSpec + output_spec = RegF3DOutputSpec + + @staticmethod + def _remove_extension(in_file): + dn, bn, _ = split_filename(in_file) + return os.path.join(dn, bn) + + def _list_outputs(self): + outputs = super(RegF3D, self)._list_outputs() + + if self.inputs.vel_flag is True: + res_name = self._remove_extension(outputs['res_file']) + cpp_name = self._remove_extension(outputs['cpp_file']) + outputs['invres_file'] = '%s_backward.nii.gz' % res_name + outputs['invcpp_file'] = '%s_backward.nii.gz' % cpp_name + + # Make a list of the linear transformation file and the input image + if self.inputs.vel_flag is True and isdefined(self.inputs.aff_file): + cpp_file = os.path.abspath(outputs['cpp_file']) + flo_file = os.path.abspath(self.inputs.flo_file) + outputs['avg_output'] = '%s %s %s' % (self.inputs.aff_file, + cpp_file, flo_file) + else: + cpp_file = os.path.abspath(outputs['cpp_file']) + flo_file = os.path.abspath(self.inputs.flo_file) + outputs['avg_output'] = '%s %s' % (cpp_file, flo_file) + + return outputs diff --git a/nipype/interfaces/niftyreg/regutils.py b/nipype/interfaces/niftyreg/regutils.py new file mode 100644 index 0000000000..c90aa53bed --- /dev/null +++ b/nipype/interfaces/niftyreg/regutils.py @@ -0,0 +1,831 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The regutils module provides classes for interfacing with the `niftyreg +`_ utility command line tools. + +The interfaces were written to work with niftyreg version 1.5.10 +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import len, open, property, super +import os + +from ..base import TraitedSpec, File, traits, isdefined +from .base import get_custom_path, NiftyRegCommand, NiftyRegCommandInputSpec +from ...utils.filemanip import split_filename + + +class RegResampleInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegResample. 
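+
+    The ``type`` trait selects the kind of output written ('res' for the
+    resampled floating image, 'blank' presumably for a deformed grid
+    image); the chosen value also becomes the output filename suffix.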
""" + # Input reference file + ref_file = File( + exists=True, + desc='The input reference/target image', + argstr='-ref %s', + mandatory=True) + # Input floating file + flo_file = File( + exists=True, + desc='The input floating/source image', + argstr='-flo %s', + mandatory=True) + # Input deformation field + trans_file = File( + exists=True, desc='The input transformation file', argstr='-trans %s') + + type = traits.Enum( + 'res', + 'blank', + argstr='-%s', + position=-2, + usedefault=True, + desc='Type of output') + + # Output file name + out_file = File( + name_source=['flo_file'], + name_template='%s', + argstr='%s', + position=-1, + desc='The output filename of the transformed image') + + # Interpolation type + inter_val = traits.Enum( + 'NN', + 'LIN', + 'CUB', + 'SINC', + desc='Interpolation type', + argstr='-inter %d') + + # Padding value + pad_val = traits.Float(desc='Padding value', argstr='-pad %f') + + # Tensor flag + tensor_flag = traits.Bool(desc='Resample Tensor Map', argstr='-tensor ') + + # Verbosity off + verbosity_off_flag = traits.Bool( + argstr='-voff', desc='Turn off verbose output') + # PSF flag + desc = 'Perform the resampling in two steps to resample an image to a \ +lower resolution' + + psf_flag = traits.Bool(argstr='-psf', desc=desc) + desc = 'Minimise the matrix metric (0) or the determinant (1) when \ +estimating the PSF [0]' + + psf_alg = traits.Enum(0, 1, argstr='-psf_alg %d', desc=desc) + + +class RegResampleOutputSpec(TraitedSpec): + """ Output Spec for RegResample. """ + out_file = File(desc='The output filename of the transformed image') + + +class RegResample(NiftyRegCommand): + """Interface for executable reg_resample from NiftyReg platform. + + Tool to resample floating image in the space of a defined reference image + given a transformation parametrisation generated by reg_aladin, reg_f3d or + reg_transform + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegResample() + >>> node.inputs.ref_file = 'im1.nii' + >>> node.inputs.flo_file = 'im2.nii' + >>> node.inputs.trans_file = 'warpfield.nii' + >>> node.inputs.inter_val = 'LIN' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_resample -flo im2.nii -inter 1 -omp 4 -ref im1.nii -trans \ +warpfield.nii -res im2_res.nii.gz' + + """ + _cmd = get_custom_path('reg_resample') + input_spec = RegResampleInputSpec + output_spec = RegResampleOutputSpec + + # Need this overload to properly constraint the interpolation type input + def _format_arg(self, name, spec, value): + if name == 'inter_val': + inter_val = {'NN': 0, 'LIN': 1, 'CUB': 3, 'SINC': 4} + return spec.argstr % inter_val[value] + else: + return super(RegResample, self)._format_arg(name, spec, value) + + def _overload_extension(self, value, name=None): + path, base, _ = split_filename(value) + suffix = self.inputs.type + return os.path.join(path, '{0}_{1}.nii.gz'.format(base, suffix)) + + +class RegJacobianInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegJacobian. """ + # Reference file name + desc = 'Reference/target file (required if specifying CPP transformations.' 
+ ref_file = File(exists=True, desc=desc, argstr='-ref %s') + # Input transformation file + trans_file = File( + exists=True, + desc='The input non-rigid transformation', + argstr='-trans %s', + mandatory=True) + type = traits.Enum( + 'jac', + 'jacL', + 'jacM', + usedefault=True, + argstr='-%s', + position=-2, + desc='Type of jacobian outcome') + out_file = File( + name_source=['trans_file'], + name_template='%s', + desc='The output jacobian determinant file name', + argstr='%s', + position=-1) + + +class RegJacobianOutputSpec(TraitedSpec): + """ Output Spec for RegJacobian. """ + out_file = File(desc='The output file') + + +class RegJacobian(NiftyRegCommand): + """Interface for executable reg_resample from NiftyReg platform. + + Tool to generate Jacobian determinant maps from transformation + parametrisation generated by reg_f3d + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegJacobian() + >>> node.inputs.ref_file = 'im1.nii' + >>> node.inputs.trans_file = 'warpfield.nii' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_jacobian -omp 4 -ref im1.nii -trans warpfield.nii -jac \ +warpfield_jac.nii.gz' + + """ + _cmd = get_custom_path('reg_jacobian') + input_spec = RegJacobianInputSpec + output_spec = RegJacobianOutputSpec + + def _overload_extension(self, value, name=None): + path, base, _ = split_filename(value) + suffix = self.inputs.type + return os.path.join(path, '{0}_{1}.nii.gz'.format(base, suffix)) + + +class RegToolsInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegTools. """ + # Input image file + in_file = File( + exists=True, + desc='The input image file path', + argstr='-in %s', + mandatory=True) + + # Output file path + out_file = File( + name_source=['in_file'], + name_template='%s_tools.nii.gz', + desc='The output file name', + argstr='-out %s') + + # Make the output image isotropic + iso_flag = traits.Bool(argstr='-iso', desc='Make output image isotropic') + + # Set scale, slope to 0 and 1. + noscl_flag = traits.Bool( + argstr='-noscl', desc='Set scale, slope to 0 and 1') + + # Values outside the mask are set to NaN + mask_file = File( + exists=True, + desc='Values outside the mask are set to NaN', + argstr='-nan %s') + + # Threshold the input image + desc = 'Binarise the input image with the given threshold' + thr_val = traits.Float(desc=desc, argstr='-thr %f') + + # Binarise the input image + bin_flag = traits.Bool(argstr='-bin', desc='Binarise the input image') + + # Compute the mean RMS between the two images + rms_val = File( + exists=True, + desc='Compute the mean RMS between the images', + argstr='-rms %s') + + # Perform division by image or value + div_val = traits.Either( + traits.Float, + File(exists=True), + desc='Divide the input by image or value', + argstr='-div %s') + + # Perform multiplication by image or value + mul_val = traits.Either( + traits.Float, + File(exists=True), + desc='Multiply the input by image or value', + argstr='-mul %s') + + # Perform addition by image or value + add_val = traits.Either( + traits.Float, + File(exists=True), + desc='Add to the input image or value', + argstr='-add %s') + + # Perform subtraction by image or value + sub_val = traits.Either( + traits.Float, + File(exists=True), + desc='Add to the input image or value', + argstr='-sub %s') + + # Downsample the image by a factor of 2. 
+ down_flag = traits.Bool( + desc='Downsample the image by a factor of 2', argstr='-down') + + # Smoothing using spline kernel + desc = 'Smooth the input image using a cubic spline kernel' + smo_s_val = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + desc=desc, + argstr='-smoS %f %f %f') + + # Change the resolution of the input image + chg_res_val = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + desc='Change the resolution of the input image', + argstr='-chgres %f %f %f') + + # Smoothing using Gaussian kernel + desc = 'Smooth the input image using a Gaussian kernel' + smo_g_val = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + desc=desc, + argstr='-smoG %f %f %f') + + # Interpolation type + inter_val = traits.Enum( + 'NN', + 'LIN', + 'CUB', + 'SINC', + desc='Interpolation order to use to warp the floating image', + argstr='-interp %d') + + +class RegToolsOutputSpec(TraitedSpec): + """ Output Spec for RegTools. """ + out_file = File(desc='The output file', exists=True) + + +class RegTools(NiftyRegCommand): + """Interface for executable reg_tools from NiftyReg platform. + + Tool delivering various actions related to registration such as + resampling the input image to a chosen resolution or remove the nan and + inf in the input image by a specified value. + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegTools() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.mul_val = 4 + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_tools -in im1.nii -mul 4.0 -omp 4 -out im1_tools.nii.gz' + + """ + _cmd = get_custom_path('reg_tools') + input_spec = RegToolsInputSpec + output_spec = RegToolsOutputSpec + _suffix = '_tools' + + # Need this overload to properly constraint the interpolation type input + def _format_arg(self, name, spec, value): + if name == 'inter_val': + inter_val = {'NN': 0, 'LIN': 1, 'CUB': 3, 'SINC': 4} + return spec.argstr % inter_val[value] + else: + return super(RegTools, self)._format_arg(name, spec, value) + + +class RegAverageInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegAverage. """ + avg_files = traits.List( + File(exist=True), + position=1, + argstr='-avg %s', + sep=' ', + xor=[ + 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', + 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + ], + desc='Averaging of images/affine transformations') + + desc = 'Robust average of affine transformations' + avg_lts_files = traits.List( + File(exist=True), + position=1, + argstr='-avg_lts %s', + sep=' ', + xor=[ + 'avg_files', 'avg_ref_file', 'demean1_ref_file', + 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + ], + desc=desc) + + desc = 'All input images are resampled into the space of \ + and averaged. 
A cubic spline interpolation scheme is used for resampling'

+    avg_ref_file = File(
+        position=1,
+        argstr='-avg_tran %s',
+        xor=[
+            'avg_files', 'avg_lts_files', 'demean1_ref_file',
+            'demean2_ref_file', 'demean3_ref_file'
+        ],
+        requires=['warp_files'],
+        desc=desc)
+
+    desc = 'Average images and demean average image that have affine \
+transformations to a common space'
+
+    demean1_ref_file = File(
+        position=1,
+        argstr='-demean1 %s',
+        xor=[
+            'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean2_ref_file',
+            'demean3_ref_file'
+        ],
+        requires=['warp_files'],
+        desc=desc)
+
+    desc = 'Average images and demean average image that have non-rigid \
+transformations to a common space'
+
+    demean2_ref_file = File(
+        position=1,
+        argstr='-demean2 %s',
+        xor=[
+            'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file',
+            'demean3_ref_file'
+        ],
+        requires=['warp_files'],
+        desc=desc)
+
+    desc = 'Average images and demean average image that have linear and \
+non-rigid transformations to a common space'
+
+    demean3_ref_file = File(
+        position=1,
+        argstr='-demean3 %s',
+        xor=[
+            'avg_files', 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file',
+            'demean2_ref_file'
+        ],
+        requires=['warp_files'],
+        desc=desc)
+
+    desc = 'transformation files and floating image pairs/triplets to the \
+reference space'
+
+    warp_files = traits.List(
+        File(exist=True),
+        position=-1,
+        argstr='%s',
+        sep=' ',
+        xor=['avg_files', 'avg_lts_files'],
+        desc=desc)
+
+    out_file = File(
+        genfile=True, position=0, desc='Output file name', argstr='%s')
+
+
+class RegAverageOutputSpec(TraitedSpec):
+    """ Output Spec for RegAverage. """
+    out_file = File(desc='Output file name')
+
+
+class RegAverage(NiftyRegCommand):
+    """Interface for executable reg_average from NiftyReg platform.
+
+    Compute an average matrix or image from a list of matrices or images.
+    The tool can be used to resample images given an input transformation
+    parametrisation, as well as to demean transformations in Euclidean or
+    log-Euclidean space.
+
+    This interface is different from the others in that the options
+    will be written in a command file that is given as a parameter.
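+
+    At run time the assembled options are written to a text file named
+    ``reg_average_cmd`` in the current working directory, and the tool is
+    invoked as ``reg_average --cmd_file <path to reg_average_cmd>``.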
+ + `Source code `_ + + Examples + -------- + + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegAverage() + >>> one_file = 'im1.nii' + >>> two_file = 'im2.nii' + >>> three_file = 'im3.nii' + >>> node.inputs.avg_files = [one_file, two_file, three_file] + >>> node.cmdline # doctest: +ELLIPSIS + 'reg_average --cmd_file .../reg_average_cmd' + """ + _cmd = get_custom_path('reg_average') + input_spec = RegAverageInputSpec + output_spec = RegAverageOutputSpec + _suffix = 'avg_out' + + def _gen_filename(self, name): + if name == 'out_file': + if isdefined(self.inputs.avg_lts_files): + return self._gen_fname(self._suffix, ext='.txt') + elif isdefined(self.inputs.avg_files): + _, _, _ext = split_filename(self.inputs.avg_files[0]) + if _ext not in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']: + return self._gen_fname(self._suffix, ext=_ext) + return self._gen_fname(self._suffix, ext='.nii.gz') + + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + + if isdefined(self.inputs.out_file): + outputs['out_file'] = self.inputs.out_file + else: + outputs['out_file'] = self._gen_filename('out_file') + + return outputs + + @property + def cmdline(self): + """ Rewrite the cmdline to write options in text_file.""" + argv = super(RegAverage, self).cmdline + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'w') as f: + f.write(argv) + return '%s --cmd_file %s' % (self.cmd, reg_average_cmd) + + +class RegTransformInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegTransform. """ + ref1_file = File( + exists=True, + desc='The input reference/target image', + argstr='-ref %s', + position=0) + + ref2_file = File( + exists=True, + desc='The input second reference/target image', + argstr='-ref2 %s', + position=1, + requires=['ref1_file']) + + def_input = File( + exists=True, + argstr='-def %s', + position=-2, + desc='Compute deformation field from transformation', + xor=[ + 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + disp_input = File( + exists=True, + argstr='-disp %s', + position=-2, + desc='Compute displacement field from transformation', + xor=[ + 'def_input', 'flow_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + flow_input = File( + exists=True, + argstr='-flow %s', + position=-2, + desc='Compute flow field from spline SVF', + xor=[ + 'def_input', 'disp_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + comp_input = File( + exists=True, + argstr='-comp %s', + position=-3, + desc='compose two transformations', + xor=[ + 'def_input', 'disp_input', 'flow_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ], + requires=['comp_input2']) + + comp_input2 = File( + exists=True, + argstr='%s', + position=-2, + desc='compose two transformations') + + desc = 'Update s-form using the affine transformation' + upd_s_form_input = File( + exists=True, + argstr='-updSform %s', + position=-3, + desc=desc, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', 'make_aff_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ], + 
requires=['upd_s_form_input2']) + + desc = 'Update s-form using the affine transformation' + upd_s_form_input2 = File( + exists=True, + argstr='%s', + position=-2, + desc=desc, + requires=['upd_s_form_input']) + + inv_aff_input = File( + exists=True, + argstr='-invAff %s', + position=-2, + desc='Invert an affine transformation', + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + inv_nrr_input = traits.Tuple( + File(exists=True), + File(exists=True), + desc='Invert a non-linear transformation', + argstr='-invNrr %s %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + half_input = File( + exists=True, + argstr='-half %s', + position=-2, + desc='Half way to the input transformation', + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + argstr_tmp = '-makeAff %f %f %f %f %f %f %f %f %f %f %f %f' + make_aff_input = traits.Tuple( + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + traits.Float, + argstr=argstr_tmp, + position=-2, + desc='Make an affine transformation matrix', + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'aff_2_rig_input', 'flirt_2_nr_input' + ]) + + desc = 'Extract the rigid component from affine transformation' + aff_2_rig_input = File( + exists=True, + argstr='-aff2rig %s', + position=-2, + desc=desc, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'flirt_2_nr_input' + ]) + + desc = 'Convert a FLIRT affine transformation to niftyreg affine \ +transformation' + + flirt_2_nr_input = traits.Tuple( + File(exists=True), + File(exists=True), + File(exists=True), + argstr='-flirtAff2NR %s %s %s', + position=-2, + desc=desc, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input' + ]) + + out_file = File( + genfile=True, + position=-1, + argstr='%s', + desc='transformation file to write') + + +class RegTransformOutputSpec(TraitedSpec): + """ Output Spec for RegTransform. """ + out_file = File(desc='Output File (transformation in any format)') + + +class RegTransform(NiftyRegCommand): + """Interface for executable reg_transform from NiftyReg platform. + + Tools to convert transformation parametrisation from one type to another + as well as to compose, inverse or half transformations. 
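+
+    The operation inputs (``def_input``, ``disp_input``, ``flow_input``,
+    ``comp_input``, ...) exclude one another through ``xor`` metadata, so
+    only one operation can be requested at a time; the output filename is
+    derived from the chosen input, with a ``_trans`` suffix.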
+ + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegTransform() + >>> node.inputs.def_input = 'warpfield.nii' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline # doctest: +ELLIPSIS + 'reg_transform -omp 4 -def warpfield.nii .../warpfield_trans.nii.gz' + + """ + _cmd = get_custom_path('reg_transform') + input_spec = RegTransformInputSpec + output_spec = RegTransformOutputSpec + _suffix = '_trans' + + def _find_input(self): + inputs = [ + self.inputs.def_input, self.inputs.disp_input, + self.inputs.flow_input, self.inputs.comp_input, + self.inputs.comp_input2, self.inputs.upd_s_form_input, + self.inputs.inv_aff_input, self.inputs.inv_nrr_input, + self.inputs.half_input, self.inputs.make_aff_input, + self.inputs.aff_2_rig_input, self.inputs.flirt_2_nr_input + ] + entries = [] + for entry in inputs: + if isdefined(entry): + entries.append(entry) + _, _, ext = split_filename(entry) + if ext == '.nii' or ext == '.nii.gz' or ext == '.hdr': + return entry + if len(entries): + return entries[0] + return None + + def _gen_filename(self, name): + if name == 'out_file': + if isdefined(self.inputs.make_aff_input): + return self._gen_fname( + 'matrix', suffix=self._suffix, ext='.txt') + + if isdefined(self.inputs.comp_input) and \ + isdefined(self.inputs.comp_input2): + _, bn1, ext1 = split_filename(self.inputs.comp_input) + _, _, ext2 = split_filename(self.inputs.comp_input2) + if ext1 in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz'] or \ + ext2 in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']: + return self._gen_fname( + bn1, suffix=self._suffix, ext='.nii.gz') + else: + return self._gen_fname(bn1, suffix=self._suffix, ext=ext1) + + if isdefined(self.inputs.flirt_2_nr_input): + return self._gen_fname( + self.inputs.flirt_2_nr_input[0], + suffix=self._suffix, + ext='.txt') + + input_to_use = self._find_input() + _, _, ext = split_filename(input_to_use) + if ext not in ['.nii', '.nii.gz', '.hdr', '.img', '.img.gz']: + return self._gen_fname( + input_to_use, suffix=self._suffix, ext=ext) + else: + return self._gen_fname( + input_to_use, suffix=self._suffix, ext='.nii.gz') + + return None + + def _list_outputs(self): + outputs = self.output_spec().get() + + if isdefined(self.inputs.out_file): + outputs['out_file'] = self.inputs.out_file + else: + outputs['out_file'] = self._gen_filename('out_file') + + return outputs + + +class RegMeasureInputSpec(NiftyRegCommandInputSpec): + """ Input Spec for RegMeasure. """ + # Input reference file + ref_file = File( + exists=True, + desc='The input reference/target image', + argstr='-ref %s', + mandatory=True) + # Input floating file + flo_file = File( + exists=True, + desc='The input floating/source image', + argstr='-flo %s', + mandatory=True) + measure_type = traits.Enum( + 'ncc', + 'lncc', + 'nmi', + 'ssd', + mandatory=True, + argstr='-%s', + desc='Measure of similarity to compute') + out_file = File( + name_source=['flo_file'], + name_template='%s', + argstr='-out %s', + desc='The output text file containing the measure') + + +class RegMeasureOutputSpec(TraitedSpec): + """ Output Spec for RegMeasure. """ + out_file = File(desc='The output text file containing the measure') + + +class RegMeasure(NiftyRegCommand): + """Interface for executable reg_measure from NiftyReg platform. 
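+
+    The measure is chosen via ``measure_type`` (ncc, lncc, nmi or ssd),
+    which also becomes the suffix of the output text file
+    (e.g. ``im2_lncc.txt``).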
+ + Given two input images, compute the specified measure(s) of similarity + + `Source code `_ + + Examples + -------- + >>> from nipype.interfaces import niftyreg + >>> node = niftyreg.RegMeasure() + >>> node.inputs.ref_file = 'im1.nii' + >>> node.inputs.flo_file = 'im2.nii' + >>> node.inputs.measure_type = 'lncc' + >>> node.inputs.omp_core_val = 4 + >>> node.cmdline + 'reg_measure -flo im2.nii -lncc -omp 4 -out im2_lncc.txt -ref im1.nii' + + """ + _cmd = get_custom_path('reg_measure') + input_spec = RegMeasureInputSpec + output_spec = RegMeasureOutputSpec + + def _overload_extension(self, value, name=None): + path, base, _ = split_filename(value) + suffix = self.inputs.measure_type + return os.path.join(path, '{0}_{1}.txt'.format(base, suffix)) diff --git a/nipype/interfaces/niftyreg/tests/__init__.py b/nipype/interfaces/niftyreg/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py new file mode 100644 index 0000000000..89615b50d7 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_NiftyRegCommand.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import NiftyRegCommand + + +def test_NiftyRegCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + ) + inputs = NiftyRegCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py new file mode 100644 index 0000000000..9507f53fa9 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAladin.py @@ -0,0 +1,73 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reg import RegAladin + + +def test_RegAladin_inputs(): + input_map = dict( + aff_direct_flag=dict(argstr='-affDirect', ), + aff_file=dict( + argstr='-aff %s', + name_source=['flo_file'], + name_template='%s_aff.txt', + ), + args=dict(argstr='%s', ), + cog_flag=dict(argstr='-cog', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr='-flo %s', + mandatory=True, + ), + flo_low_val=dict(argstr='-floLowThr %f', ), + flo_up_val=dict(argstr='-floUpThr %f', ), + fmask_file=dict(argstr='-fmask %s', ), + gpuid_val=dict(argstr='-gpuid %i', ), + i_val=dict(argstr='-pi %d', ), + in_aff_file=dict(argstr='-inaff %s', ), + ln_val=dict(argstr='-ln %d', ), + lp_val=dict(argstr='-lp %d', ), + maxit_val=dict(argstr='-maxit %d', ), + nac_flag=dict(argstr='-nac', ), + nosym_flag=dict(argstr='-noSym', ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + platform_val=dict(argstr='-platf %i', ), + ref_file=dict( + argstr='-ref %s', + mandatory=True, + ), + ref_low_val=dict(argstr='-refLowThr %f', ), + ref_up_val=dict(argstr='-refUpThr %f', ), + res_file=dict( + argstr='-res %s', + name_source=['flo_file'], + name_template='%s_res.nii.gz', + ), + rig_only_flag=dict(argstr='-rigOnly', ), + rmask_file=dict(argstr='-rmask %s', ), + smoo_f_val=dict(argstr='-smooF %f', ), + 
smoo_r_val=dict(argstr='-smooR %f', ), + v_val=dict(argstr='-pv %d', ), + verbosity_off_flag=dict(argstr='-voff', ), + ) + inputs = RegAladin.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegAladin_outputs(): + output_map = dict( + aff_file=dict(), + avg_output=dict(), + res_file=dict(), + ) + outputs = RegAladin.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py new file mode 100644 index 0000000000..2510a46eb8 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegAverage.py @@ -0,0 +1,94 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegAverage + + +def test_RegAverage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + avg_files=dict( + argstr='-avg %s', + position=1, + sep=' ', + xor=[ + 'avg_lts_files', 'avg_ref_file', 'demean1_ref_file', + 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + ], + ), + avg_lts_files=dict( + argstr='-avg_lts %s', + position=1, + sep=' ', + xor=[ + 'avg_files', 'avg_ref_file', 'demean1_ref_file', + 'demean2_ref_file', 'demean3_ref_file', 'warp_files' + ], + ), + avg_ref_file=dict( + argstr='-avg_tran %s', + position=1, + requires=['warp_files'], + xor=[ + 'avg_files', 'avg_lts_files', 'demean1_ref_file', + 'demean2_ref_file', 'demean3_ref_file' + ], + ), + demean1_ref_file=dict( + argstr='-demean1 %s', + position=1, + requires=['warp_files'], + xor=[ + 'avg_files', 'avg_lts_files', 'avg_ref_file', + 'demean2_ref_file', 'demean3_ref_file' + ], + ), + demean2_ref_file=dict( + argstr='-demean2 %s', + position=1, + requires=['warp_files'], + xor=[ + 'avg_files', 'avg_lts_files', 'avg_ref_file', + 'demean1_ref_file', 'demean3_ref_file' + ], + ), + demean3_ref_file=dict( + argstr='-demean3 %s', + position=1, + requires=['warp_files'], + xor=[ + 'avg_files', 'avg_lts_files', 'avg_ref_file', + 'demean1_ref_file', 'demean2_ref_file' + ], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='%s', + genfile=True, + position=0, + ), + warp_files=dict( + argstr='%s', + position=-1, + sep=' ', + xor=['avg_files', 'avg_lts_files'], + ), + ) + inputs = RegAverage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegAverage_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegAverage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py new file mode 100644 index 0000000000..e9bdab82c0 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegF3D.py @@ -0,0 +1,99 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..reg import RegF3D + + +def test_RegF3D_inputs(): + input_map = dict( + aff_file=dict(argstr='-aff %s', ), + amc_flag=dict(argstr='-amc', ), + 
args=dict(argstr='%s', ), + be_val=dict(argstr='-be %f', ), + cpp_file=dict( + argstr='-cpp %s', + name_source=['flo_file'], + name_template='%s_cpp.nii.gz', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fbn2_val=dict(argstr='-fbn %d %d', ), + fbn_val=dict(argstr='--fbn %d', ), + flo_file=dict( + argstr='-flo %s', + mandatory=True, + ), + flo_smooth_val=dict(argstr='-smooF %f', ), + flwth2_thr_val=dict(argstr='-fLwTh %d %f', ), + flwth_thr_val=dict(argstr='--fLwTh %f', ), + fmask_file=dict(argstr='-fmask %s', ), + fupth2_thr_val=dict(argstr='-fUpTh %d %f', ), + fupth_thr_val=dict(argstr='--fUpTh %f', ), + incpp_file=dict(argstr='-incpp %s', ), + jl_val=dict(argstr='-jl %f', ), + kld2_flag=dict(argstr='-kld %d', ), + kld_flag=dict(argstr='--kld', ), + le_val=dict(argstr='-le %f', ), + ln_val=dict(argstr='-ln %d', ), + lncc2_val=dict(argstr='-lncc %d %f', ), + lncc_val=dict(argstr='--lncc %f', ), + lp_val=dict(argstr='-lp %d', ), + maxit_val=dict(argstr='-maxit %d', ), + nmi_flag=dict(argstr='--nmi', ), + no_app_jl_flag=dict(argstr='-noAppJL', ), + noconj_flag=dict(argstr='-noConj', ), + nopy_flag=dict(argstr='-nopy', ), + nox_flag=dict(argstr='-nox', ), + noy_flag=dict(argstr='-noy', ), + noz_flag=dict(argstr='-noz', ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + pad_val=dict(argstr='-pad %f', ), + pert_val=dict(argstr='-pert %d', ), + rbn2_val=dict(argstr='-rbn %d %d', ), + rbn_val=dict(argstr='--rbn %d', ), + ref_file=dict( + argstr='-ref %s', + mandatory=True, + ), + ref_smooth_val=dict(argstr='-smooR %f', ), + res_file=dict( + argstr='-res %s', + name_source=['flo_file'], + name_template='%s_res.nii.gz', + ), + rlwth2_thr_val=dict(argstr='-rLwTh %d %f', ), + rlwth_thr_val=dict(argstr='--rLwTh %f', ), + rmask_file=dict(argstr='-rmask %s', ), + rupth2_thr_val=dict(argstr='-rUpTh %d %f', ), + rupth_thr_val=dict(argstr='--rUpTh %f', ), + smooth_grad_val=dict(argstr='-smoothGrad %f', ), + ssd2_flag=dict(argstr='-ssd %d', ), + ssd_flag=dict(argstr='--ssd', ), + sx_val=dict(argstr='-sx %f', ), + sy_val=dict(argstr='-sy %f', ), + sz_val=dict(argstr='-sz %f', ), + vel_flag=dict(argstr='-vel', ), + verbosity_off_flag=dict(argstr='-voff', ), + ) + inputs = RegF3D.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegF3D_outputs(): + output_map = dict( + avg_output=dict(), + cpp_file=dict(), + invcpp_file=dict(), + invres_file=dict(), + res_file=dict(), + ) + outputs = RegF3D.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py new file mode 100644 index 0000000000..63f917c683 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegJacobian.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegJacobian + + +def test_RegJacobian_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='%s', + name_source=['trans_file'], + name_template='%s', + position=-1, + ), + ref_file=dict(argstr='-ref %s', ), + trans_file=dict( + argstr='-trans %s', + 
mandatory=True, + ), + type=dict( + argstr='-%s', + position=-2, + usedefault=True, + ), + ) + inputs = RegJacobian.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegJacobian_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegJacobian.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py new file mode 100644 index 0000000000..3321d87afc --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegMeasure.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegMeasure + + +def test_RegMeasure_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr='-flo %s', + mandatory=True, + ), + measure_type=dict( + argstr='-%s', + mandatory=True, + ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + name_source=['flo_file'], + name_template='%s', + ), + ref_file=dict( + argstr='-ref %s', + mandatory=True, + ), + ) + inputs = RegMeasure.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegMeasure_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegMeasure.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py new file mode 100644 index 0000000000..06b2b48401 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegResample.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegResample + + +def test_RegResample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + flo_file=dict( + argstr='-flo %s', + mandatory=True, + ), + inter_val=dict(argstr='-inter %d', ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='%s', + name_source=['flo_file'], + name_template='%s', + position=-1, + ), + pad_val=dict(argstr='-pad %f', ), + psf_alg=dict(argstr='-psf_alg %d', ), + psf_flag=dict(argstr='-psf', ), + ref_file=dict( + argstr='-ref %s', + mandatory=True, + ), + tensor_flag=dict(argstr='-tensor ', ), + trans_file=dict(argstr='-trans %s', ), + type=dict( + argstr='-%s', + position=-2, + usedefault=True, + ), + verbosity_off_flag=dict(argstr='-voff', ), + ) + inputs = RegResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegResample_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py new file mode 100644 index 0000000000..5deb4206e6 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTools.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegTools + + +def test_RegTools_inputs(): + input_map = dict( + add_val=dict(argstr='-add %s', ), + args=dict(argstr='%s', ), + bin_flag=dict(argstr='-bin', ), + chg_res_val=dict(argstr='-chgres %f %f %f', ), + div_val=dict(argstr='-div %s', ), + down_flag=dict(argstr='-down', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + ), + inter_val=dict(argstr='-interp %d', ), + iso_flag=dict(argstr='-iso', ), + mask_file=dict(argstr='-nan %s', ), + mul_val=dict(argstr='-mul %s', ), + noscl_flag=dict(argstr='-noscl', ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='-out %s', + name_source=['in_file'], + name_template='%s_tools.nii.gz', + ), + rms_val=dict(argstr='-rms %s', ), + smo_g_val=dict(argstr='-smoG %f %f %f', ), + smo_s_val=dict(argstr='-smoS %f %f %f', ), + sub_val=dict(argstr='-sub %s', ), + thr_val=dict(argstr='-thr %f', ), + ) + inputs = RegTools.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegTools_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegTools.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py new file mode 100644 index 0000000000..57c1b0ad86 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_auto_RegTransform.py @@ -0,0 +1,153 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..regutils import RegTransform + + +def test_RegTransform_inputs(): + input_map = dict( + aff_2_rig_input=dict( + argstr='-aff2rig %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', + 'half_input', 'make_aff_input', 'flirt_2_nr_input' + ], + ), + args=dict(argstr='%s', ), + comp_input=dict( + argstr='-comp %s', + position=-3, + requires=['comp_input2'], + xor=[ + 'def_input', 'disp_input', 'flow_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + comp_input2=dict( + argstr='%s', + position=-2, + ), + def_input=dict( + argstr='-def %s', + position=-2, + xor=[ + 'disp_input', 'flow_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + disp_input=dict( + argstr='-disp %s', + position=-2, + xor=[ + 'def_input', 'flow_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + environ=dict( + nohash=True, + usedefault=True, + ), + flirt_2_nr_input=dict( + argstr='-flirtAff2NR %s %s %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 
'inv_aff_input', 'inv_nrr_input', + 'half_input', 'make_aff_input', 'aff_2_rig_input' + ], + ), + flow_input=dict( + argstr='-flow %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'comp_input', 'upd_s_form_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + half_input=dict( + argstr='-half %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + inv_aff_input=dict( + argstr='-invAff %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + inv_nrr_input=dict( + argstr='-invNrr %s %s', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + make_aff_input=dict( + argstr='-makeAff %f %f %f %f %f %f %f %f %f %f %f %f', + position=-2, + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'upd_s_form_input', 'inv_aff_input', 'inv_nrr_input', + 'half_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + omp_core_val=dict( + argstr='-omp %i', + usedefault=True, + ), + out_file=dict( + argstr='%s', + genfile=True, + position=-1, + ), + ref1_file=dict( + argstr='-ref %s', + position=0, + ), + ref2_file=dict( + argstr='-ref2 %s', + position=1, + requires=['ref1_file'], + ), + upd_s_form_input=dict( + argstr='-updSform %s', + position=-3, + requires=['upd_s_form_input2'], + xor=[ + 'def_input', 'disp_input', 'flow_input', 'comp_input', + 'inv_aff_input', 'inv_nrr_input', 'half_input', + 'make_aff_input', 'aff_2_rig_input', 'flirt_2_nr_input' + ], + ), + upd_s_form_input2=dict( + argstr='%s', + position=-2, + requires=['upd_s_form_input'], + ), + ) + inputs = RegTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RegTransform_outputs(): + output_map = dict(out_file=dict(), ) + outputs = RegTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyreg/tests/test_reg.py b/nipype/interfaces/niftyreg/tests/test_reg.py new file mode 100644 index 0000000000..862760139e --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_reg.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from .. import (get_custom_path, RegAladin, RegF3D) +from .test_regutils import no_nifty_tool + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_aladin'), + reason="niftyreg is not installed. 
reg_aladin not found.") +def test_reg_aladin(): + """ tests for reg_aladin interface""" + # Create a reg_aladin object + nr_aladin = RegAladin() + + # Check if the command is properly defined + assert nr_aladin.cmd == get_custom_path('reg_aladin') + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + nr_aladin.run() + + # Assign some input data + ref_file = example_data('im1.nii') + flo_file = example_data('im2.nii') + rmask_file = example_data('mask.nii') + nr_aladin.inputs.ref_file = ref_file + nr_aladin.inputs.flo_file = flo_file + nr_aladin.inputs.rmask_file = rmask_file + nr_aladin.inputs.omp_core_val = 4 + + cmd_tmp = '{cmd} -aff {aff} -flo {flo} -omp 4 -ref {ref} -res {res} \ +-rmask {rmask}' + + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_aladin'), + aff='im2_aff.txt', + flo=flo_file, + ref=ref_file, + res='im2_res.nii.gz', + rmask=rmask_file, + ) + + assert nr_aladin.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_f3d'), + reason="niftyreg is not installed. reg_f3d not found.") +def test_reg_f3d(): + """ tests for reg_f3d interface""" + # Create a reg_f3d object + nr_f3d = RegF3D() + + # Check if the command is properly defined + assert nr_f3d.cmd == get_custom_path('reg_f3d') + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + nr_f3d.run() + + # Assign some input data + ref_file = example_data('im1.nii') + flo_file = example_data('im2.nii') + rmask_file = example_data('mask.nii') + nr_f3d.inputs.ref_file = ref_file + nr_f3d.inputs.flo_file = flo_file + nr_f3d.inputs.rmask_file = rmask_file + nr_f3d.inputs.omp_core_val = 4 + nr_f3d.inputs.vel_flag = True + nr_f3d.inputs.be_val = 0.1 + nr_f3d.inputs.le_val = 0.1 + + cmd_tmp = '{cmd} -be 0.100000 -cpp {cpp} -flo {flo} -le 0.100000 -omp 4 \ +-ref {ref} -res {res} -rmask {rmask} -vel' + + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_f3d'), + cpp='im2_cpp.nii.gz', + flo=flo_file, + ref=ref_file, + res='im2_res.nii.gz', + rmask=rmask_file, + ) + + assert nr_f3d.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyreg/tests/test_regutils.py b/nipype/interfaces/niftyreg/tests/test_regutils.py new file mode 100644 index 0000000000..918d556ab2 --- /dev/null +++ b/nipype/interfaces/niftyreg/tests/test_regutils.py @@ -0,0 +1,469 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os +import pytest + +from ....utils.filemanip import which +from ....testing import example_data +from .. import (get_custom_path, RegAverage, RegResample, RegJacobian, + RegTools, RegMeasure, RegTransform) + + +def no_nifty_tool(cmd=None): + return which(cmd) is None + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_resample'), + reason="niftyreg is not installed. 
reg_resample not found.") +def test_reg_resample_res(): + """ tests for reg_resample interface """ + # Create a reg_resample object + nr_resample = RegResample() + + # Check if the command is properly defined + assert nr_resample.cmd == get_custom_path('reg_resample') + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + nr_resample.run() + + # Resample res + ref_file = example_data('im1.nii') + flo_file = example_data('im2.nii') + trans_file = example_data('warpfield.nii') + nr_resample.inputs.ref_file = ref_file + nr_resample.inputs.flo_file = flo_file + nr_resample.inputs.trans_file = trans_file + nr_resample.inputs.inter_val = 'LIN' + nr_resample.inputs.omp_core_val = 4 + + cmd_tmp = '{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ +-res {res}' + + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_resample'), + flo=flo_file, + ref=ref_file, + trans=trans_file, + res='im2_res.nii.gz') + + assert nr_resample.cmdline == expected_cmd + + # test_reg_resample_blank() + nr_resample_2 = RegResample(type='blank', inter_val='LIN', omp_core_val=4) + ref_file = example_data('im1.nii') + flo_file = example_data('im2.nii') + trans_file = example_data('warpfield.nii') + nr_resample_2.inputs.ref_file = ref_file + nr_resample_2.inputs.flo_file = flo_file + nr_resample_2.inputs.trans_file = trans_file + + cmd_tmp = '{cmd} -flo {flo} -inter 1 -omp 4 -ref {ref} -trans {trans} \ +-blank {blank}' + + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_resample'), + flo=flo_file, + ref=ref_file, + trans=trans_file, + blank='im2_blank.nii.gz') + + assert nr_resample_2.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_jacobian'), + reason="niftyreg is not installed. reg_jacobian not found.") +def test_reg_jacobian_jac(): + """ Test interface for RegJacobian """ + # Create a reg_jacobian object + nr_jacobian = RegJacobian() + + # Check if the command is properly defined + assert nr_jacobian.cmd == get_custom_path('reg_jacobian') + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + nr_jacobian.run() + + # Test Reg Jacobian: jac + ref_file = example_data('im1.nii') + trans_file = example_data('warpfield.nii') + nr_jacobian.inputs.ref_file = ref_file + nr_jacobian.inputs.trans_file = trans_file + nr_jacobian.inputs.omp_core_val = 4 + + cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jac {jac}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_jacobian'), + ref=ref_file, + trans=trans_file, + jac='warpfield_jac.nii.gz') + + assert nr_jacobian.cmdline == expected_cmd + + # Test Reg Jacobian: jac m + nr_jacobian_2 = RegJacobian(type='jacM', omp_core_val=4) + ref_file = example_data('im1.nii') + trans_file = example_data('warpfield.nii') + nr_jacobian_2.inputs.ref_file = ref_file + nr_jacobian_2.inputs.trans_file = trans_file + + cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jacM {jac}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_jacobian'), + ref=ref_file, + trans=trans_file, + jac='warpfield_jacM.nii.gz') + + assert nr_jacobian_2.cmdline == expected_cmd + + # Test Reg Jacobian: jac l + nr_jacobian_3 = RegJacobian(type='jacL', omp_core_val=4) + ref_file = example_data('im1.nii') + trans_file = example_data('warpfield.nii') + nr_jacobian_3.inputs.ref_file = ref_file + nr_jacobian_3.inputs.trans_file = trans_file + + cmd_tmp = '{cmd} -omp 4 -ref {ref} -trans {trans} -jacL {jac}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_jacobian'), + ref=ref_file, + 
trans=trans_file, + jac='warpfield_jacL.nii.gz') + + assert nr_jacobian_3.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_tools'), + reason="niftyreg is not installed. reg_tools not found.") +def test_reg_tools_mul(): + """ tests for reg_tools interface """ + # Create a reg_tools object + nr_tools = RegTools() + + # Check if the command is properly defined + assert nr_tools.cmd == get_custom_path('reg_tools') + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + nr_tools.run() + + # Test reg_tools: mul + in_file = example_data('im1.nii') + nr_tools.inputs.in_file = in_file + nr_tools.inputs.mul_val = 4 + nr_tools.inputs.omp_core_val = 4 + + cmd_tmp = '{cmd} -in {in_file} -mul 4.0 -omp 4 -out {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_tools'), + in_file=in_file, + out_file='im1_tools.nii.gz') + + assert nr_tools.cmdline == expected_cmd + + # Test reg_tools: iso + nr_tools_2 = RegTools(iso_flag=True, omp_core_val=4) + in_file = example_data('im1.nii') + nr_tools_2.inputs.in_file = in_file + + cmd_tmp = '{cmd} -in {in_file} -iso -omp 4 -out {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_tools'), + in_file=in_file, + out_file='im1_tools.nii.gz') + + assert nr_tools_2.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_average'), + reason="niftyreg is not installed. reg_average not found.") +def test_reg_average(): + """ tests for reg_average interface """ + # Create a reg_average object + nr_average = RegAverage() + + # Check if the command is properly defined + assert nr_average.cmd == get_custom_path('reg_average') + + # Average niis + one_file = example_data('im1.nii') + two_file = example_data('im2.nii') + three_file = example_data('im3.nii') + nr_average.inputs.avg_files = [one_file, two_file, three_file] + nr_average.inputs.omp_core_val = 1 + generated_cmd = nr_average.cmdline + + # Read the reg_average_cmd + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'rb') as f_obj: + argv = f_obj.read() + os.remove(reg_average_cmd) + + expected_argv = '%s %s -avg %s %s %s -omp 1' % ( + get_custom_path('reg_average'), + os.path.join(os.getcwd(), 'avg_out.nii.gz'), one_file, two_file, + three_file) + + assert argv.decode('utf-8') == expected_argv + + # Test command line with text file + expected_cmd = ('%s --cmd_file %s' % (get_custom_path('reg_average'), + reg_average_cmd)) + + assert generated_cmd == expected_cmd + + # Test Reg Average: average txt + nr_average_2 = RegAverage() + one_file = example_data('TransformParameters.0.txt') + two_file = example_data('ants_Affine.txt') + three_file = example_data('elastix.txt') + nr_average_2.inputs.avg_files = [one_file, two_file, three_file] + nr_average_2.inputs.omp_core_val = 1 + generated_cmd = nr_average_2.cmdline + + # Read the reg_average_cmd + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'rb') as f_obj: + argv = f_obj.read() + os.remove(reg_average_cmd) + + expected_argv = '%s %s -avg %s %s %s -omp 1' % ( + get_custom_path('reg_average'), + os.path.join(os.getcwd(), 'avg_out.txt'), one_file, two_file, + three_file) + + assert argv.decode('utf-8') == expected_argv + + # Test Reg Average: average list + nr_average_3 = RegAverage() + one_file = example_data('TransformParameters.0.txt') + two_file = example_data('ants_Affine.txt') + three_file = example_data('elastix.txt') + nr_average_3.inputs.avg_lts_files = [one_file, 
two_file, three_file] + nr_average_3.inputs.omp_core_val = 1 + generated_cmd = nr_average_3.cmdline + + # Read the reg_average_cmd + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'rb') as f_obj: + argv = f_obj.read() + os.remove(reg_average_cmd) + + expected_argv = ('%s %s -avg_lts %s %s %s -omp 1' % + (get_custom_path('reg_average'), + os.path.join(os.getcwd(), 'avg_out.txt'), one_file, + two_file, three_file)) + + assert argv.decode('utf-8') == expected_argv + + # Test Reg Average: average ref + nr_average_4 = RegAverage() + ref_file = example_data('anatomical.nii') + one_file = example_data('im1.nii') + two_file = example_data('im2.nii') + three_file = example_data('im3.nii') + trans1_file = example_data('roi01.nii') + trans2_file = example_data('roi02.nii') + trans3_file = example_data('roi03.nii') + nr_average_4.inputs.warp_files = [ + trans1_file, one_file, trans2_file, two_file, trans3_file, three_file + ] + nr_average_4.inputs.avg_ref_file = ref_file + nr_average_4.inputs.omp_core_val = 1 + generated_cmd = nr_average_4.cmdline + + # Read the reg_average_cmd + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'rb') as f_obj: + argv = f_obj.read() + os.remove(reg_average_cmd) + + expected_argv = ('%s %s -avg_tran %s -omp 1 %s %s %s %s %s %s' % + (get_custom_path('reg_average'), + os.path.join(os.getcwd(), 'avg_out.nii.gz'), ref_file, + trans1_file, one_file, trans2_file, two_file, + trans3_file, three_file)) + + assert argv.decode('utf-8') == expected_argv + + # Test Reg Average: demean3 + nr_average_5 = RegAverage() + ref_file = example_data('anatomical.nii') + one_file = example_data('im1.nii') + two_file = example_data('im2.nii') + three_file = example_data('im3.nii') + aff1_file = example_data('TransformParameters.0.txt') + aff2_file = example_data('ants_Affine.txt') + aff3_file = example_data('elastix.txt') + trans1_file = example_data('roi01.nii') + trans2_file = example_data('roi02.nii') + trans3_file = example_data('roi03.nii') + nr_average_5.inputs.warp_files = [ + aff1_file, trans1_file, one_file, aff2_file, trans2_file, two_file, + aff3_file, trans3_file, three_file + ] + nr_average_5.inputs.demean3_ref_file = ref_file + nr_average_5.inputs.omp_core_val = 1 + generated_cmd = nr_average_5.cmdline + + # Read the reg_average_cmd + reg_average_cmd = os.path.join(os.getcwd(), 'reg_average_cmd') + with open(reg_average_cmd, 'rb') as f_obj: + argv = f_obj.read() + os.remove(reg_average_cmd) + + expected_argv = ('%s %s -demean3 %s -omp 1 %s %s %s %s %s %s %s %s %s' % + (get_custom_path('reg_average'), + os.path.join(os.getcwd(), 'avg_out.nii.gz'), ref_file, + aff1_file, trans1_file, one_file, aff2_file, trans2_file, + two_file, aff3_file, trans3_file, three_file)) + + assert argv.decode('utf-8') == expected_argv + + +@pytest.mark.skipif( + no_nifty_tool(cmd='reg_transform'), + reason="niftyreg is not installed. 
reg_transform not found.") +def test_reg_transform_def(): + """ tests for reg_transform interface """ + # Create a reg_transform object + nr_transform = RegTransform() + + # Check if the command is properly defined + assert nr_transform.cmd == get_custom_path('reg_transform') + + # Assign some input data + trans_file = example_data('warpfield.nii') + nr_transform.inputs.def_input = trans_file + nr_transform.inputs.omp_core_val = 4 + + cmd_tmp = '{cmd} -omp 4 -def {trans_file} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + trans_file=trans_file, + out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + + assert nr_transform.cmdline == expected_cmd + + # Test reg_transform: def ref + nr_transform_2 = RegTransform(omp_core_val=4) + ref_file = example_data('im1.nii') + trans_file = example_data('warpfield.nii') + nr_transform_2.inputs.ref1_file = ref_file + nr_transform_2.inputs.def_input = trans_file + + cmd_tmp = '{cmd} -ref {ref_file} -omp 4 -def {trans_file} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + ref_file=ref_file, + trans_file=trans_file, + out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + + assert nr_transform_2.cmdline == expected_cmd + + # Test reg_transform: comp nii + nr_transform_3 = RegTransform(omp_core_val=4) + ref_file = example_data('im1.nii') + trans_file = example_data('warpfield.nii') + trans2_file = example_data('anatomical.nii') + nr_transform_3.inputs.ref1_file = ref_file + nr_transform_3.inputs.comp_input2 = trans2_file + nr_transform_3.inputs.comp_input = trans_file + + cmd_tmp = '{cmd} -ref {ref_file} -omp 4 -comp {trans1} {trans2} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + ref_file=ref_file, + trans1=trans_file, + trans2=trans2_file, + out_file=os.path.join(os.getcwd(), 'warpfield_trans.nii.gz')) + + assert nr_transform_3.cmdline == expected_cmd + + # Test reg_transform: comp txt + nr_transform_4 = RegTransform(omp_core_val=4) + aff1_file = example_data('ants_Affine.txt') + aff2_file = example_data('elastix.txt') + nr_transform_4.inputs.comp_input2 = aff2_file + nr_transform_4.inputs.comp_input = aff1_file + + cmd_tmp = '{cmd} -omp 4 -comp {aff1} {aff2} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + aff1=aff1_file, + aff2=aff2_file, + out_file=os.path.join(os.getcwd(), 'ants_Affine_trans.txt')) + + assert nr_transform_4.cmdline == expected_cmd + + # Test reg_transform: comp + nr_transform_5 = RegTransform(omp_core_val=4) + trans_file = example_data('warpfield.nii') + aff_file = example_data('elastix.txt') + nr_transform_5.inputs.comp_input2 = trans_file + nr_transform_5.inputs.comp_input = aff_file + + cmd_tmp = '{cmd} -omp 4 -comp {aff} {trans} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + aff=aff_file, + trans=trans_file, + out_file=os.path.join(os.getcwd(), 'elastix_trans.nii.gz')) + + assert nr_transform_5.cmdline == expected_cmd + + # Test reg_transform: flirt + nr_transform_6 = RegTransform(omp_core_val=4) + aff_file = example_data('elastix.txt') + ref_file = example_data('im1.nii') + in_file = example_data('im2.nii') + nr_transform_6.inputs.flirt_2_nr_input = (aff_file, ref_file, in_file) + + cmd_tmp = '{cmd} -omp 4 -flirtAff2NR {aff} {ref} {in_file} {out_file}' + expected_cmd = cmd_tmp.format( + cmd=get_custom_path('reg_transform'), + aff=aff_file, + ref=ref_file, + in_file=in_file, + out_file=os.path.join(os.getcwd(), 
'elastix_trans.txt'))
+
+    assert nr_transform_6.cmdline == expected_cmd
+
+
+@pytest.mark.skipif(
+    no_nifty_tool(cmd='reg_measure'),
+    reason="niftyreg is not installed. reg_measure not found.")
+def test_reg_measure():
+    """ tests for reg_measure interface """
+    # Create a reg_measure object
+    nr_measure = RegMeasure()
+
+    # Check if the command is properly defined
+    assert nr_measure.cmd == get_custom_path('reg_measure')
+
+    # test raising error with mandatory args absent
+    with pytest.raises(ValueError):
+        nr_measure.run()
+
+    # Assign some input data
+    ref_file = example_data('im1.nii')
+    flo_file = example_data('im2.nii')
+    nr_measure.inputs.ref_file = ref_file
+    nr_measure.inputs.flo_file = flo_file
+    nr_measure.inputs.measure_type = 'lncc'
+    nr_measure.inputs.omp_core_val = 4
+
+    cmd_tmp = '{cmd} -flo {flo} -lncc -omp 4 -out {out} -ref {ref}'
+    expected_cmd = cmd_tmp.format(
+        cmd=get_custom_path('reg_measure'),
+        flo=flo_file,
+        out='im2_lncc.txt',
+        ref=ref_file)
+
+    assert nr_measure.cmdline == expected_cmd
diff --git a/nipype/interfaces/niftyseg/__init__.py b/nipype/interfaces/niftyseg/__init__.py
new file mode 100644
index 0000000000..14b391edd5
--- /dev/null
+++ b/nipype/interfaces/niftyseg/__init__.py
@@ -0,0 +1,16 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+The niftyseg module provides classes for interfacing with the `NIFTYSEG
+`_ command line tools.
+
+Top-level namespace for niftyseg.
+"""
+
+from .em import EM
+from .label_fusion import LabelFusion, CalcTopNCC
+from .lesions import FillLesions
+from .maths import (UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths,
+                    Merge)
+from .patchmatch import PatchMatch
+from .stats import UnaryStats, BinaryStats
diff --git a/nipype/interfaces/niftyseg/base.py b/nipype/interfaces/niftyseg/base.py
new file mode 100644
index 0000000000..d68bbcc73b
--- /dev/null
+++ b/nipype/interfaces/niftyseg/base.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+The niftyseg module provides classes for interfacing with `niftyseg
+`_ command line tools.
+These are the base tools for working with niftyseg.
+EM Statistical Segmentation tool is found in niftyseg/em.py
+Fill lesions tool is found in niftyseg/lesions.py
+Mathematical operation tool is found in niftyseg/maths.py
+Patch Match tool is found in niftyseg/patchmatch.py
+Statistical operation tool is found in niftyseg/stats.py
+Label Fusion and CalcTopNCC tools are found in niftyseg/label_fusion.py
+Examples
+--------
+See the docstrings of the individual classes for examples.
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from ..niftyfit.base import NiftyFitCommand
+
+
+class NiftySegCommand(NiftyFitCommand):
+    """
+    Base support interface for NiftySeg commands.
+    """
+    _suffix = '_ns'
+    _min_version = None
+
+    def __init__(self, **inputs):
+        super(NiftySegCommand, self).__init__(**inputs)
+
+    def get_version(self):
+        return super(NiftySegCommand, self).version_from_command(
+            cmd='seg_EM', flag='--version')
diff --git a/nipype/interfaces/niftyseg/em.py b/nipype/interfaces/niftyseg/em.py
new file mode 100644
index 0000000000..e9c749c282
--- /dev/null
+++ b/nipype/interfaces/niftyseg/em.py
@@ -0,0 +1,161 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Nipype interface for seg_EM.
+
+The em module provides higher-level interfaces to some of the operations
+that can be performed with the seg_EM command-line program.
+
+Examples
+--------
+See the docstrings of the individual classes for examples.
+"""
+
+from ..base import (TraitedSpec, File, traits, CommandLineInputSpec,
+                    InputMultiPath)
+from .base import NiftySegCommand
+from ..niftyreg.base import get_custom_path
+
+
+class EMInputSpec(CommandLineInputSpec):
+    """Input Spec for EM."""
+    in_file = File(
+        argstr='-in %s',
+        exists=True,
+        mandatory=True,
+        desc='Input image to segment',
+        position=4)
+
+    mask_file = File(
+        argstr='-mask %s',
+        exists=True,
+        desc='Filename of the ROI for label fusion')
+
+    # Priors
+    no_prior = traits.Int(
+        argstr='-nopriors %s',
+        mandatory=True,
+        desc='Number of classes to use without prior',
+        xor=['prior_4D', 'priors'])
+
+    prior_4D = File(
+        argstr='-prior4D %s',
+        exists=True,
+        mandatory=True,
+        desc='4D file containing the priors',
+        xor=['no_prior', 'priors'])
+
+    priors = InputMultiPath(
+        argstr='%s',
+        mandatory=True,
+        desc='List of priors filepaths.',
+        xor=['no_prior', 'prior_4D'])
+
+    # iterations
+    max_iter = traits.Int(
+        argstr='-max_iter %s',
+        default_value=100,
+        usedefault=True,
+        desc='Maximum number of iterations')
+
+    min_iter = traits.Int(
+        argstr='-min_iter %s',
+        default_value=0,
+        usedefault=True,
+        desc='Minimum number of iterations')
+
+    # other options
+    bc_order_val = traits.Int(
+        argstr='-bc_order %s',
+        default_value=3,
+        usedefault=True,
+        desc='Polynomial order for the bias field')
+
+    mrf_beta_val = traits.Float(
+        argstr='-mrf_beta %s', desc='Weight of the Markov Random Field')
+
+    desc = 'Bias field correction will run only if the ratio of improvement \
+is below bc_thresh. (default=0 [OFF])'
+
+    bc_thresh_val = traits.Float(
+        argstr='-bc_thresh %s',
+        default_value=0,
+        usedefault=True,
+        desc=desc)
+
+    desc = 'Amount of regularization over the diagonal of the covariance \
+matrix [above 1]'
+
+    reg_val = traits.Float(argstr='-reg %s', desc=desc)
+
+    desc = 'Outlier detection as in (Van Leemput TMI 2003). <fl1> is the \
+Mahalanobis threshold [recommended between 3 and 7], <fl2> is a convergence \
+ratio below which the outlier detection is going to be done [recommended 0.01]'
+
+    outlier_val = traits.Tuple(
+        traits.Float(), traits.Float(), argstr='-outlier %s %s', desc=desc)
+
+    desc = 'Relax Priors [relaxation factor: 0<rf<1 (recommended=0.5), \
+gaussian regularization: gstd>0 (recommended=2.0)] /only 3D/'
+
+    relax_priors = traits.Tuple(
+        traits.Float(), traits.Float(), argstr='-rf %s %s', desc=desc)
+
+    # outputs
+    out_file = File(
+        name_source=['in_file'],
+        name_template='%s_em.nii.gz',
+        argstr='-out %s',
+        desc='Output segmentation')
+    out_bc_file = File(
+        name_source=['in_file'],
+        name_template='%s_bc_em.nii.gz',
+        argstr='-bc_out %s',
+        desc='Output bias corrected image')
+    out_outlier_file = File(
+        name_source=['in_file'],
+        name_template='%s_outlier_em.nii.gz',
+        argstr='-out_outlier %s',
+        desc='Output outlierness image')
+
+
+class EMOutputSpec(TraitedSpec):
+    """Output Spec for EM."""
+    out_file = File(desc="Output segmentation")
+    out_bc_file = File(desc="Output bias corrected image")
+    out_outlier_file = File(desc='Output outlierness image')
+
+
+class EM(NiftySegCommand):
+    """Interface for executable seg_EM from NiftySeg platform.
+
+    seg_EM is a general purpose intensity-based image segmentation tool. In
+    its simplest form, it takes in one 2D or 3D image and segments it in n
+    classes.
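+
+    Exactly one of ``no_prior``, ``prior_4D`` or ``priors`` must be set; the
+    input spec marks the three as mutually exclusive. A minimal sketch of the
+    list-of-priors variant, assuming hypothetical prior maps ``wm.nii`` and
+    ``gm.nii`` (``_format_arg`` below expands the list into
+    ``-priors 2 wm.nii gm.nii``):
+
+    >>> from nipype.interfaces import niftyseg
+    >>> em = niftyseg.EM(in_file='im1.nii',
+    ...                  priors=['wm.nii', 'gm.nii'])  # doctest: +SKIP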
+ + `Source code `_ | + `Documentation `_ + + Examples + -------- + >>> from nipype.interfaces import niftyseg + >>> node = niftyseg.EM() + >>> node.inputs.in_file = 'im1.nii' + >>> node.inputs.no_prior = 4 + >>> node.cmdline + 'seg_EM -in im1.nii -bc_order 3 -bc_thresh 0 -max_iter 100 -min_iter 0 -nopriors 4 \ +-bc_out im1_bc_em.nii.gz -out im1_em.nii.gz -out_outlier im1_outlier_em.nii.gz' + + """ + _cmd = get_custom_path('seg_EM', env_dir='NIFTYSEGDIR') + _suffix = '_em' + input_spec = EMInputSpec + output_spec = EMOutputSpec + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for seg_EM.""" + if opt == 'priors': + _nb_priors = len(self.inputs.priors) + return '-priors %d %s' % (_nb_priors, ' '.join(self.inputs.priors)) + else: + return super(EM, self)._format_arg(opt, spec, val) diff --git a/nipype/interfaces/niftyseg/label_fusion.py b/nipype/interfaces/niftyseg/label_fusion.py new file mode 100644 index 0000000000..1b0237d37c --- /dev/null +++ b/nipype/interfaces/niftyseg/label_fusion.py @@ -0,0 +1,339 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The fusion module provides higher-level interfaces to some of the operations +that can be performed with the seg_LabFusion command-line program. +""" + +from builtins import str +import os +import warnings + +from ..base import (TraitedSpec, File, traits, isdefined, CommandLineInputSpec, + NipypeInterfaceError) +from .base import NiftySegCommand +from ..niftyreg.base import get_custom_path +from ...utils.filemanip import load_json, save_json, split_filename + +warn = warnings.warn +warnings.filterwarnings('always', category=UserWarning) + + +class LabelFusionInput(CommandLineInputSpec): + """Input Spec for LabelFusion.""" + in_file = File( + argstr='-in %s', + exists=True, + mandatory=True, + position=1, + desc='Filename of the 4D integer label image.') + + template_file = File(exists=True, desc='Registered templates (4D Image)') + + file_to_seg = File( + exists=True, + mandatory=True, + desc='Original image to segment (3D Image)') + + mask_file = File( + argstr='-mask %s', + exists=True, + desc='Filename of the ROI for label fusion') + + out_file = File( + argstr='-out %s', + name_source=['in_file'], + name_template='%s', + desc='Output consensus segmentation') + + prob_flag = traits.Bool( + desc='Probabilistic/Fuzzy segmented image', argstr='-outProb') + + desc = 'Verbose level [0 = off, 1 = on, 2 = debug] (default = 0)' + verbose = traits.Enum('0', '1', '2', desc=desc, argstr='-v %s') + + desc = 'Only consider non-consensus voxels to calculate statistics' + unc = traits.Bool(desc=desc, argstr='-unc') + + classifier_type = traits.Enum( + 'STEPS', + 'STAPLE', + 'MV', + 'SBA', + argstr='-%s', + mandatory=True, + position=2, + desc='Type of Classifier Fusion.') + + desc = "Gaussian kernel size in mm to compute the local similarity" + kernel_size = traits.Float(desc=desc) + + template_num = traits.Int(desc='Number of labels to use') + + # STAPLE and MV options + sm_ranking = traits.Enum( + 'ALL', + 'GNCC', + 'ROINCC', + 'LNCC', + argstr='-%s', + usedefault=True, + position=3, + desc='Ranking for STAPLE and MV') + + dilation_roi = traits.Int(desc='Dilation of the ROI ( d>=1 )') + + # STAPLE and STEPS options + desc = 'Proportion of the label (only for single labels).' 
+    proportion = traits.Float(argstr='-prop %s', desc=desc)
+
+    desc = 'Update label proportions at each iteration'
+    prob_update_flag = traits.Bool(desc=desc, argstr='-prop_update')
+
+    desc = 'Value of P and Q [ 0 < (P,Q) < 1 ] (default = 0.99 0.99)'
+    set_pq = traits.Tuple(
+        traits.Float, traits.Float, argstr='-setPQ %f %f', desc=desc)
+
+    mrf_value = traits.Float(
+        argstr='-MRF_beta %f', desc='MRF prior strength (between 0 and 5)')
+
+    desc = 'Maximum number of iterations (default = 15).'
+    max_iter = traits.Int(argstr='-max_iter %d', desc=desc)
+
+    desc = 'If <float> percent of labels agree, then area is not uncertain.'
+    unc_thresh = traits.Float(argstr='-uncthres %f', desc=desc)
+
+    desc = 'Ratio for convergence (default epsilon = 10^-5).'
+    conv = traits.Float(argstr='-conv %f', desc=desc)
+
+
+class LabelFusionOutput(TraitedSpec):
+    """Output Spec for LabelFusion."""
+    out_file = File(exists=True, desc='image written after calculations')
+
+
+class LabelFusion(NiftySegCommand):
+    """Interface for executable seg_LabFusion from NiftySeg platform using
+    type STEPS as classifier fusion.
+
+    This executable implements 4 fusion strategies (-STEPS, -STAPLE, -MV or
+    -SBA), all of them using either a global (-GNCC), ROI-based (-ROINCC),
+    local (-LNCC) or no image similarity (-ALL). Combinations of fusion
+    algorithms and similarity metrics give rise to different variants of known
+    algorithms. As an example, using LNCC and MV as options will run a locally
+    weighted voting strategy with LNCC derived weights, while using STAPLE and
+    LNCC is equivalent to running STEPS as per its original formulation.
+    A few other options pertain to the use of an MRF (-MRF beta), the initial
+    sensitivity and specificity estimates and the use of only non-consensus
+    voxels (-unc) for the STAPLE and STEPS algorithms. All processing can be
+    masked (-mask), greatly reducing memory consumption.
+
+    As an example, the command to use STEPS should be:
+    seg_LabFusion -in 4D_Propagated_Labels_to_fuse.nii -out \
+    FusedSegmentation.nii -STEPS 2 15 TargetImage.nii \
+    4D_Propagated_Intensities.nii
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> from nipype.interfaces import niftyseg
+    >>> node = niftyseg.LabelFusion()
+    >>> node.inputs.in_file = 'im1.nii'
+    >>> node.inputs.kernel_size = 2.0
+    >>> node.inputs.file_to_seg = 'im2.nii'
+    >>> node.inputs.template_file = 'im3.nii'
+    >>> node.inputs.template_num = 2
+    >>> node.inputs.classifier_type = 'STEPS'
+    >>> node.cmdline
+    'seg_LabFusion -in im1.nii -STEPS 2.000000 2 im2.nii im3.nii -out im1_steps.nii'
+
+    """
+    _cmd = get_custom_path('seg_LabFusion', env_dir='NIFTYSEGDIR')
+    input_spec = LabelFusionInput
+    output_spec = LabelFusionOutput
+    _suffix = '_label_fused'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for seg_LabFusion."""
+        # Remove options if the fusion type is not STAPLE or STEPS:
+        if opt in ['proportion', 'prob_update_flag', 'set_pq', 'mrf_value',
+                   'max_iter', 'unc_thresh', 'conv'] and\
+           self.inputs.classifier_type not in ['STAPLE', 'STEPS']:
+            return ''
+
+        if opt == 'sm_ranking':
+            return self.get_staple_args(val)
+
+        # Return options string if STEPS:
+        if opt == 'classifier_type' and val == 'STEPS':
+            return self.get_steps_args()
+
+        return super(LabelFusion, self)._format_arg(opt, spec, val)
+
+    def get_steps_args(self):
+        if not isdefined(self.inputs.template_file):
+            err = "LabelFusion requires a value for input 'template_file' \
+when 'classifier_type' is set to 'STEPS'."
+
+            raise NipypeInterfaceError(err)
+        if not isdefined(self.inputs.kernel_size):
+            err = "LabelFusion requires a value for input 'kernel_size' when \
+'classifier_type' is set to 'STEPS'."
+
+            raise NipypeInterfaceError(err)
+        if not isdefined(self.inputs.template_num):
+            err = "LabelFusion requires a value for input 'template_num' when \
+'classifier_type' is set to 'STEPS'."
+
+            raise NipypeInterfaceError(err)
+        return "-STEPS %f %d %s %s" % (self.inputs.kernel_size,
+                                       self.inputs.template_num,
+                                       self.inputs.file_to_seg,
+                                       self.inputs.template_file)
+
+    def get_staple_args(self, ranking):
+        classtype = self.inputs.classifier_type
+        if classtype not in ['STAPLE', 'MV']:
+            return None
+
+        if ranking == 'ALL':
+            return '-ALL'
+
+        if not isdefined(self.inputs.template_file):
+            err = "LabelFusion requires a value for input 'template_file' \
+when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'."
+
+            raise NipypeInterfaceError(err % (classtype, ranking))
+        if not isdefined(self.inputs.template_num):
+            err = "LabelFusion requires a value for input 'template_num' when \
+'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'."
+
+            raise NipypeInterfaceError(err % (classtype, ranking))
+
+        if ranking == 'GNCC':
+            if not isdefined(self.inputs.template_num):
+                err = "LabelFusion requires a value for input 'template_num' \
+when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'."
+
+                raise NipypeInterfaceError(err % (classtype, ranking))
+
+            return "-%s %d %s %s" % (ranking, self.inputs.template_num,
+                                     self.inputs.file_to_seg,
+                                     self.inputs.template_file)
+
+        elif ranking == 'ROINCC':
+            if not isdefined(self.inputs.dilation_roi):
+                err = "LabelFusion requires a value for input 'dilation_roi' \
+when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'."
+
+                raise NipypeInterfaceError(err % (classtype, ranking))
+
+            elif self.inputs.dilation_roi < 1:
+                err = "The 'dilation_roi' trait of a LabelFusionInput \
+instance must be an integer >= 1, but a value of '%s' was specified."
+
+                raise NipypeInterfaceError(err % self.inputs.dilation_roi)
+
+            return "-%s %d %d %s %s" % (ranking, self.inputs.dilation_roi,
+                                        self.inputs.template_num,
+                                        self.inputs.file_to_seg,
+                                        self.inputs.template_file)
+        elif ranking == 'LNCC':
+            if not isdefined(self.inputs.kernel_size):
+                err = "LabelFusion requires a value for input 'kernel_size' \
+when 'classifier_type' is set to '%s' and 'sm_ranking' is set to '%s'."
+
+                raise NipypeInterfaceError(err % (classtype, ranking))
+
+            return "-%s %f %d %s %s" % (ranking, self.inputs.kernel_size,
+                                        self.inputs.template_num,
+                                        self.inputs.file_to_seg,
+                                        self.inputs.template_file)
+
+    def _overload_extension(self, value, name=None):
+        path, base, _ = split_filename(value)
+        _, _, ext = split_filename(self.inputs.in_file)
+        suffix = self.inputs.classifier_type.lower()
+        return os.path.join(path, '{0}_{1}{2}'.format(base, suffix, ext))
+
+
+class CalcTopNCCInputSpec(CommandLineInputSpec):
+    """Input Spec for CalcTopNCC."""
+    in_file = File(
+        argstr='-target %s',
+        exists=True,
+        mandatory=True,
+        desc='Target file',
+        position=1)
+
+    num_templates = traits.Int(
+        argstr='-templates %s',
+        mandatory=True,
+        position=2,
+        desc='Number of Templates')
+
+    in_templates = traits.List(
+        File(exists=True), argstr="%s", position=3, mandatory=True)
+
+    top_templates = traits.Int(
+        argstr='-n %s',
+        mandatory=True,
+        position=4,
+        desc='Number of Top Templates')
+
+    mask_file = File(
+        argstr='-mask %s',
+        exists=True,
+        desc='Filename of the ROI for label fusion')
+
+
+class CalcTopNCCOutputSpec(TraitedSpec):
+    """Output Spec for CalcTopNCC."""
+    out_files = traits.Any(File(exists=True))
+
+
+class CalcTopNCC(NiftySegCommand):
+    """Interface for executable seg_CalcTopNCC from NiftySeg platform.
+
+    Examples
+    --------
+    >>> from nipype.interfaces import niftyseg
+    >>> node = niftyseg.CalcTopNCC()
+    >>> node.inputs.in_file = 'im1.nii'
+    >>> node.inputs.num_templates = 2
+    >>> node.inputs.in_templates = ['im2.nii', 'im3.nii']
+    >>> node.inputs.top_templates = 1
+    >>> node.cmdline
+    'seg_CalcTopNCC -target im1.nii -templates 2 im2.nii im3.nii -n 1'
+
+    """
+    _cmd = get_custom_path('seg_CalcTopNCC', env_dir='NIFTYSEGDIR')
+    _suffix = '_topNCC'
+    input_spec = CalcTopNCCInputSpec
+    output_spec = CalcTopNCCOutputSpec
+
+    def aggregate_outputs(self, runtime=None, needed_outputs=None):
+        outputs = self._outputs()
+        # local caching for backward compatibility
+        outfile = os.path.join(os.getcwd(), 'CalcTopNCC.json')
+        if runtime is None or not runtime.stdout:
+            try:
+                out_files = load_json(outfile)['files']
+            except IOError:
+                return self.run().outputs
+        else:
+            out_files = []
+            for line in runtime.stdout.split('\n'):
+                if line:
+                    values = line.split()
+                    if len(values) > 1:
+                        out_files.append([str(val) for val in values])
+                    else:
+                        out_files.extend([str(val) for val in values])
+            if len(out_files) == 1:
+                out_files = out_files[0]
+            save_json(outfile, dict(files=out_files))
+        outputs.out_files = out_files
+        return outputs
diff --git a/nipype/interfaces/niftyseg/lesions.py b/nipype/interfaces/niftyseg/lesions.py
new file mode 100644
index 0000000000..14d7f23c6b
--- /dev/null
+++ b/nipype/interfaces/niftyseg/lesions.py
@@ -0,0 +1,123 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Nipype interface for seg_FillLesions.
+
+The lesions module provides higher-level interfaces to some of the operations
+that can be performed with the seg_FillLesions command-line program.
+
+Examples
+--------
+See the docstrings of the individual classes for examples.
+""" + +import warnings + +from ..base import TraitedSpec, File, traits, CommandLineInputSpec +from .base import NiftySegCommand +from ..niftyreg.base import get_custom_path + +warn = warnings.warn +warnings.filterwarnings('always', category=UserWarning) + + +class FillLesionsInputSpec(CommandLineInputSpec): + """Input Spec for FillLesions.""" + # Mandatory input arguments + in_file = File( + argstr='-i %s', + exists=True, + mandatory=True, + desc='Input image to fill lesions', + position=1) + + lesion_mask = File( + argstr='-l %s', + exists=True, + mandatory=True, + desc='Lesion mask', + position=2) + + # Output file name + out_file = File( + name_source=['in_file'], + name_template='%s_lesions_filled.nii.gz', + desc='The output filename of the fill lesions results', + argstr='-o %s', + position=3) + + # Optional arguments + desc = "Dilate the mask times (in voxels, by default 0)" + in_dilation = traits.Int(desc=desc, argstr='-dil %d') + + desc = 'Percentage of minimum number of voxels between patches \ +(by default 0.5).' + + match = traits.Float(desc=desc, argstr='-match %f') + + desc = 'Minimum percentage of valid voxels in target patch \ +(by default 0).' + + search = traits.Float(desc=desc, argstr='-search %f') + + desc = 'Smoothing by (in minimal 6-neighbourhood voxels \ +(by default 0.1)).' + + smooth = traits.Float(desc=desc, argstr='-smo %f') + + desc = 'Search regions size respect biggest patch size (by default 4).' + size = traits.Int(desc=desc, argstr='-size %d') + + desc = 'Patch cardinality weighting factor (by default 2).' + cwf = traits.Float(desc=desc, argstr='-cwf %f') + + desc = 'Give a binary mask with the valid search areas.' + bin_mask = File(desc=desc, argstr='-mask %s') + + desc = "Guizard et al. (FIN 2015) method, it doesn't include the \ +multiresolution/hierarchical inpainting part, this part needs to be done \ +with some external software such as reg_tools and reg_resample from NiftyReg. \ +By default it uses the method presented in Prados et al. (Neuroimage 2016)." + + other = traits.Bool(desc=desc, argstr='-other') + + use_2d = traits.Bool( + desc='Uses 2D patches in the Z axis, by default 3D.', argstr='-2D') + + debug = traits.Bool( + desc='Save all intermidium files (by default OFF).', argstr='-debug') + + desc = 'Set output (char, short, int, uchar, ushort, uint, \ +float, double).' + + out_datatype = traits.String(desc=desc, argstr='-odt %s') + + verbose = traits.Bool(desc='Verbose (by default OFF).', argstr='-v') + + +class FillLesionsOutputSpec(TraitedSpec): + """Output Spec for FillLesions.""" + out_file = File(desc="Output segmentation") + + +class FillLesions(NiftySegCommand): + """Interface for executable seg_FillLesions from NiftySeg platform. + + Fill all the masked lesions with WM intensity average. 
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> from nipype.interfaces import niftyseg
+    >>> node = niftyseg.FillLesions()
+    >>> node.inputs.in_file = 'im1.nii'
+    >>> node.inputs.lesion_mask = 'im2.nii'
+    >>> node.cmdline
+    'seg_FillLesions -i im1.nii -l im2.nii -o im1_lesions_filled.nii.gz'
+
+    """
+    _cmd = get_custom_path('seg_FillLesions', env_dir='NIFTYSEGDIR')
+    input_spec = FillLesionsInputSpec
+    output_spec = FillLesionsOutputSpec
diff --git a/nipype/interfaces/niftyseg/maths.py b/nipype/interfaces/niftyseg/maths.py
new file mode 100644
index 0000000000..d4773f86e8
--- /dev/null
+++ b/nipype/interfaces/niftyseg/maths.py
@@ -0,0 +1,645 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Nipype interface for seg_maths.
+
+The maths module provides higher-level interfaces to some of the operations
+that can be performed with the niftyseg maths (seg_maths) command-line program.
+
+Examples
+--------
+See the docstrings of the individual classes for examples.
+"""
+
+import os
+
+from ..base import (TraitedSpec, File, traits, isdefined, CommandLineInputSpec,
+                    NipypeInterfaceError)
+from .base import NiftySegCommand
+from ..niftyreg.base import get_custom_path
+from ...utils.filemanip import split_filename
+
+
+class MathsInput(CommandLineInputSpec):
+    """Input Spec for seg_maths interfaces."""
+    in_file = File(
+        position=2,
+        argstr='%s',
+        exists=True,
+        mandatory=True,
+        desc='image to operate on')
+
+    out_file = File(
+        name_source=['in_file'],
+        name_template='%s',
+        position=-2,
+        argstr='%s',
+        desc='image to write')
+
+    desc = 'datatype to use for output (default uses input type)'
+    output_datatype = traits.Enum(
+        'float',
+        'char',
+        'int',
+        'short',
+        'double',
+        'input',
+        position=-3,
+        argstr='-odt %s',
+        desc=desc)
+
+
+class MathsOutput(TraitedSpec):
+    """Output Spec for seg_maths interfaces."""
+    out_file = File(desc='image written after calculations')
+
+
+class MathsCommand(NiftySegCommand):
+    """
+    Base Command Interface for seg_maths interfaces.
+
+    The executable seg_maths enables the sequential execution of arithmetic
+    operations, like multiplication (-mul), division (-div) or addition
+    (-add), binarisation (-bin) or thresholding (-thr) operations and
+    convolution by a Gaussian kernel (-smo). It also allows mathematical
+    morphology based operations like dilation (-dil), erosion (-ero),
+    connected components (-lconcomp) and hole filling (-fill), Euclidean
+    (-euc) and geodesic (-geo) distance transforms, local image similarity
+    metric calculation (-lncc and -lssd). Finally, it allows multiple
+    operations over the dimensionality of the image, from merging 3D images
+    together as a 4D image (-merge) or splitting (-split or -tp) 4D images
+    into several 3D images, to estimating the maximum, minimum and average
+    over all time-points, etc.
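+
+    Each interface instance applies a single operation; sequences of
+    operations are expressed at the workflow level. A minimal sketch chaining
+    a threshold into a smoothing step (node and workflow names are
+    illustrative):
+
+    >>> import nipype.pipeline.engine as pe
+    >>> from nipype.interfaces import niftyseg
+    >>> thr = pe.Node(niftyseg.BinaryMaths(operation='thr',
+    ...                                    operand_value=0.5), name='thr')
+    >>> smo = pe.Node(niftyseg.BinaryMaths(operation='smo',
+    ...                                    operand_value=2.0), name='smo')
+    >>> wf = pe.Workflow(name='maths_wf')  # doctest: +SKIP
+    >>> wf.connect(thr, 'out_file', smo, 'in_file')  # doctest: +SKIP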
+ """ + _cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + input_spec = MathsInput + output_spec = MathsOutput + _suffix = '_maths' + + def _overload_extension(self, value, name=None): + path, base, _ = split_filename(value) + _, _, ext = split_filename(self.inputs.in_file) + + suffix = self._suffix + if suffix != '_merged' and isdefined(self.inputs.operation): + suffix = '_' + self.inputs.operation + + return os.path.join(path, '{0}{1}{2}'.format(base, suffix, ext)) + + +class UnaryMathsInput(MathsInput): + """Input Spec for seg_maths Unary operations.""" + operation = traits.Enum( + 'sqrt', + 'exp', + 'log', + 'recip', + 'abs', + 'bin', + 'otsu', + 'lconcomp', + 'concomp6', + 'concomp26', + 'fill', + 'euc', + 'tpmax', + 'tmean', + 'tmax', + 'tmin', + 'splitlab', + 'removenan', + 'isnan', + 'subsamp2', + 'scl', + '4to5', + 'range', + argstr='-%s', + position=4, + mandatory=True, + desc='operation to perform') + + +class UnaryMaths(MathsCommand): + """Interface for executable seg_maths from NiftySeg platform. + + Interface to use any unary mathematical operations that can be performed + + with the seg_maths command-line program. + + See below for those operations:: + + sqrt - Square root of the image). + + exp - Exponential root of the image. + + log - Log of the image. + + recip - Reciprocal (1/I) of the image. + + abs - Absolute value of the image. + + bin - Binarise the image. + + otsu - Otsu thresholding of the current image. + + lconcomp - Take the largest connected component + + concomp6 - Label the different connected components with a 6NN kernel + + concomp26 - Label the different connected components with a 26NN kernel + + fill - Fill holes in binary object (e.g. fill ventricle in brain mask). + + euc - Euclidean distance trasnform + + tpmax - Get the time point with the highest value (binarise 4D \ +probabilities) + + tmean - Mean value of all time points. + + tmax - Max value of all time points. + + tmin - Mean value of all time points. + + splitlab - Split the integer labels into multiple timepoints + + removenan - Remove all NaNs and replace then with 0 + + isnan - Binary image equal to 1 if the value is NaN and 0 otherwise + + subsamp2 - Subsample the image by 2 using NN sampling (qform and sform \ +scaled) + + scl - Reset scale and slope info. + + 4to5 - Flip the 4th and 5th dimension. + + range - Reset the image range to the min max. 
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> import copy
+    >>> from nipype.interfaces import niftyseg
+    >>> unary = niftyseg.UnaryMaths()
+    >>> unary.inputs.output_datatype = 'float'
+    >>> unary.inputs.in_file = 'im1.nii'
+    >>> # Test sqrt operation
+    >>> unary_sqrt = copy.deepcopy(unary)
+    >>> unary_sqrt.inputs.operation = 'sqrt'
+    >>> unary_sqrt.cmdline
+    'seg_maths im1.nii -sqrt -odt float im1_sqrt.nii'
+    >>> unary_sqrt.run()  # doctest: +SKIP
+    >>> # Test abs operation
+    >>> unary_abs = copy.deepcopy(unary)
+    >>> unary_abs.inputs.operation = 'abs'
+    >>> unary_abs.cmdline
+    'seg_maths im1.nii -abs -odt float im1_abs.nii'
+    >>> unary_abs.run()  # doctest: +SKIP
+    >>> # Test bin operation
+    >>> unary_bin = copy.deepcopy(unary)
+    >>> unary_bin.inputs.operation = 'bin'
+    >>> unary_bin.cmdline
+    'seg_maths im1.nii -bin -odt float im1_bin.nii'
+    >>> unary_bin.run()  # doctest: +SKIP
+    >>> # Test otsu operation
+    >>> unary_otsu = copy.deepcopy(unary)
+    >>> unary_otsu.inputs.operation = 'otsu'
+    >>> unary_otsu.cmdline
+    'seg_maths im1.nii -otsu -odt float im1_otsu.nii'
+    >>> unary_otsu.run()  # doctest: +SKIP
+    >>> # Test isnan operation
+    >>> unary_isnan = copy.deepcopy(unary)
+    >>> unary_isnan.inputs.operation = 'isnan'
+    >>> unary_isnan.cmdline
+    'seg_maths im1.nii -isnan -odt float im1_isnan.nii'
+    >>> unary_isnan.run()  # doctest: +SKIP
+
+    """
+    input_spec = UnaryMathsInput
+
+
+class BinaryMathsInput(MathsInput):
+    """Input Spec for seg_maths Binary operations."""
+    operation = traits.Enum(
+        'mul',
+        'div',
+        'add',
+        'sub',
+        'pow',
+        'thr',
+        'uthr',
+        'smo',
+        'edge',
+        'sobel3',
+        'sobel5',
+        'min',
+        'smol',
+        'geo',
+        'llsnorm',
+        'masknan',
+        'hdr_copy',
+        'splitinter',
+        mandatory=True,
+        argstr='-%s',
+        position=4,
+        desc='operation to perform')
+
+    operand_file = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=5,
+        xor=['operand_value', 'operand_str'],
+        desc='second image to perform operation with')
+
+    operand_value = traits.Float(
+        argstr='%.8f',
+        mandatory=True,
+        position=5,
+        xor=['operand_file', 'operand_str'],
+        desc='float value to perform operation with')
+
+    desc = 'string value to perform the splitinter operation with'
+    operand_str = traits.Enum(
+        'x',
+        'y',
+        'z',
+        argstr='%s',
+        mandatory=True,
+        position=5,
+        xor=['operand_value', 'operand_file'],
+        desc=desc)
+
+
+class BinaryMaths(MathsCommand):
+    """Interface for executable seg_maths from NiftySeg platform.
+
+    Interface to use any binary mathematical operation that can be
+    performed with the seg_maths command-line program.
+
+    See below for those operations::
+
+        mul - <float/file> - Multiply image <float> value or by other image.
+
+        div - <float/file> - Divide image by <float> or by other image.
+
+        add - <float/file> - Add image by <float> or by other image.
+
+        sub - <float/file> - Subtract image by <float> or by other image.
+
+        pow - <float> - Image to the power of <float>.
+
+        thr - <float> - Threshold the image below <float>.
+
+        uthr - <float> - Threshold image above <float>.
+
+        smo - <float> - Gaussian smoothing by std <float> (in voxels and \
+up to 4-D).
+
+        edge - <float> - Calculate the edges of the image using a \
+threshold <float>.
+
+        sobel3 - <float> - Calculate the edges of all timepoints using a \
+Sobel filter with a 3x3x3 kernel and applying <float> gaussian smoothing.
+
+        sobel5 - <float> - Calculate the edges of all timepoints using a \
+Sobel filter with a 5x5x5 kernel and applying <float> gaussian smoothing.
+
+        min - <file> - Get the min per voxel between <current> and <file>.
+
+        smol - <float> - Gaussian smoothing of a 3D label image.
+
+        geo - <float/file> - Geodesic distance according to the speed \
+function <float/file>.
+
+        llsnorm <file_norm> - Linear LS normalisation between current \
+and <file_norm>.
+
+        masknan <file_norm> - Assign everything outside the mask \
+(mask==0) with NaNs.
+
+        hdr_copy <file> - Copy header from working image to <file> and \
+save in <output>.
+
+        splitinter <x/y/z> - Split interleaved slices in direction \
+<x/y/z> into separate time points.
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> import copy
+    >>> from nipype.interfaces import niftyseg
+    >>> binary = niftyseg.BinaryMaths()
+    >>> binary.inputs.in_file = 'im1.nii'
+    >>> binary.inputs.output_datatype = 'float'
+    >>> # Test sub operation
+    >>> binary_sub = copy.deepcopy(binary)
+    >>> binary_sub.inputs.operation = 'sub'
+    >>> binary_sub.inputs.operand_file = 'im2.nii'
+    >>> binary_sub.cmdline
+    'seg_maths im1.nii -sub im2.nii -odt float im1_sub.nii'
+    >>> binary_sub.run()  # doctest: +SKIP
+    >>> # Test mul operation
+    >>> binary_mul = copy.deepcopy(binary)
+    >>> binary_mul.inputs.operation = 'mul'
+    >>> binary_mul.inputs.operand_value = 2.0
+    >>> binary_mul.cmdline
+    'seg_maths im1.nii -mul 2.00000000 -odt float im1_mul.nii'
+    >>> binary_mul.run()  # doctest: +SKIP
+    >>> # Test llsnorm operation
+    >>> binary_llsnorm = copy.deepcopy(binary)
+    >>> binary_llsnorm.inputs.operation = 'llsnorm'
+    >>> binary_llsnorm.inputs.operand_file = 'im2.nii'
+    >>> binary_llsnorm.cmdline
+    'seg_maths im1.nii -llsnorm im2.nii -odt float im1_llsnorm.nii'
+    >>> binary_llsnorm.run()  # doctest: +SKIP
+    >>> # Test splitinter operation
+    >>> binary_splitinter = copy.deepcopy(binary)
+    >>> binary_splitinter.inputs.operation = 'splitinter'
+    >>> binary_splitinter.inputs.operand_str = 'z'
+    >>> binary_splitinter.cmdline
+    'seg_maths im1.nii -splitinter z -odt float im1_splitinter.nii'
+    >>> binary_splitinter.run()  # doctest: +SKIP
+
+    """
+    input_spec = BinaryMathsInput
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for seg_maths."""
+        if opt == 'operand_str' and self.inputs.operation != 'splitinter':
+            err = 'operand_str set but the operation is not "splitinter"'
+            raise NipypeInterfaceError(err)
+
+        if opt == 'operation':
+            # Operations that require a float operand
+            if val in [
+                    'pow', 'thr', 'uthr', 'smo', 'edge', 'sobel3', 'sobel5',
+                    'smol'
+            ]:
+                if not isdefined(self.inputs.operand_value):
+                    err = 'operand_value not set for {0}.'.format(val)
+                    raise NipypeInterfaceError(err)
+            # Operations that require a file operand
+            elif val in ['min', 'llsnorm', 'masknan', 'hdr_copy']:
+                if not isdefined(self.inputs.operand_file):
+                    err = 'operand_file not set for {0}.'.format(val)
+                    raise NipypeInterfaceError(err)
+            # splitinter requires a string operand (x, y or z)
+            elif val == 'splitinter':
+                if not isdefined(self.inputs.operand_str):
+                    err = 'operand_str not set for splitinter.'
+                    raise NipypeInterfaceError(err)
+
+        if opt == 'operand_value' and float(val) == 0.0:
+            return '0'
+
+        return super(BinaryMaths, self)._format_arg(opt, spec, val)
+
+    def _overload_extension(self, value, name=None):
+        if self.inputs.operation == 'hdr_copy':
+            path, base, _ = split_filename(value)
+            _, base, ext = split_filename(self.inputs.operand_file)
+            suffix = self.inputs.operation
+            return os.path.join(path, '{0}{1}{2}'.format(base, suffix, ext))
+        else:
+            return super(BinaryMaths, self)._overload_extension(value, name)
+
+
+class BinaryMathsInputInteger(MathsInput):
+    """Input Spec for seg_maths Binary operations that require an integer."""
+    operation = traits.Enum(
+        'dil',
+        'ero',
+        'tp',
+        'equal',
+        'pad',
+        'crop',
+        mandatory=True,
+        argstr='-%s',
+        position=4,
+        desc='operation to perform')
+
+    operand_value = traits.Int(
+        argstr='%d',
+        mandatory=True,
+        position=5,
+        desc='int value to perform operation with')
+
+
+class BinaryMathsInteger(MathsCommand):
+    """Interface for executable seg_maths from NiftySeg platform.
+
+    Interface to use any integer mathematical operation that can be
+    performed with the seg_maths command-line program.
+
+    See below for those operations (all requiring an integer value)::
+
+        equal - <int> - Get voxels equal to <int>.
+
+        dil - <int> - Dilate the image <int> times (in voxels).
+
+        ero - <int> - Erode the image <int> times (in voxels).
+
+        tp - <int> - Extract time point <int>.
+
+        crop - <int> - Crop <int> voxels around each 3D volume.
+
+        pad - <int> - Pad <int> voxels with NaN value around each 3D volume.
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> import copy
+    >>> from nipype.interfaces.niftyseg import BinaryMathsInteger
+    >>> binaryi = BinaryMathsInteger()
+    >>> binaryi.inputs.in_file = 'im1.nii'
+    >>> binaryi.inputs.output_datatype = 'float'
+    >>> # Test dil operation
+    >>> binaryi_dil = copy.deepcopy(binaryi)
+    >>> binaryi_dil.inputs.operation = 'dil'
+    >>> binaryi_dil.inputs.operand_value = 2
+    >>> binaryi_dil.cmdline
+    'seg_maths im1.nii -dil 2 -odt float im1_dil.nii'
+    >>> binaryi_dil.run()  # doctest: +SKIP
+    >>> # Test ero operation
+    >>> binaryi_ero = copy.deepcopy(binaryi)
+    >>> binaryi_ero.inputs.operation = 'ero'
+    >>> binaryi_ero.inputs.operand_value = 1
+    >>> binaryi_ero.cmdline
+    'seg_maths im1.nii -ero 1 -odt float im1_ero.nii'
+    >>> binaryi_ero.run()  # doctest: +SKIP
+    >>> # Test pad operation
+    >>> binaryi_pad = copy.deepcopy(binaryi)
+    >>> binaryi_pad.inputs.operation = 'pad'
+    >>> binaryi_pad.inputs.operand_value = 4
+    >>> binaryi_pad.cmdline
+    'seg_maths im1.nii -pad 4 -odt float im1_pad.nii'
+    >>> binaryi_pad.run()  # doctest: +SKIP
+
+    """
+    input_spec = BinaryMathsInputInteger
+
+
+class TupleMathsInput(MathsInput):
+    """Input Spec for seg_maths Tuple operations."""
+    operation = traits.Enum(
+        'lncc',
+        'lssd',
+        'lltsnorm',
+        mandatory=True,
+        argstr='-%s',
+        position=4,
+        desc='operation to perform')
+
+    operand_file1 = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=5,
+        xor=['operand_value1'],
+        desc='image to perform operation 1 with')
+
+    desc = 'float value to perform operation 1 with'
+    operand_value1 = traits.Float(
+        argstr='%.8f',
+        mandatory=True,
+        position=5,
+        xor=['operand_file1'],
+        desc=desc)
+
+    operand_file2 = File(
+        exists=True,
+        argstr='%s',
+        mandatory=True,
+        position=6,
+        xor=['operand_value2'],
+        desc='image to perform operation 2 with')
+
+    desc = 'float value to perform operation 2 with'
+    operand_value2 = traits.Float(
+        argstr='%.8f',
+        mandatory=True,
+        position=6,
+        xor=['operand_file2'],
+        desc=desc)
+
+
+class TupleMaths(MathsCommand):
+    """Interface for executable seg_maths from NiftySeg platform.
+
+    Interface to use any tuple mathematical operation that can be
+    performed with the seg_maths command-line program.
+
+    See below for those operations::
+
+        lncc <file> <std> Local CC between current image and <file> on a \
+kernel with <std>.
+
+        lssd <file> <std> Local SSD between current image and <file> on a \
+kernel with <std>.
+
+        lltsnorm <file_norm> <float> Linear LTS normalisation assuming \
+<float> percent outliers.
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> import copy
+    >>> from nipype.interfaces import niftyseg
+    >>> tuple_maths = niftyseg.TupleMaths()
+    >>> tuple_maths.inputs.in_file = 'im1.nii'
+    >>> tuple_maths.inputs.output_datatype = 'float'
+
+    >>> # Test lncc operation
+    >>> tuple_lncc = copy.deepcopy(tuple_maths)
+    >>> tuple_lncc.inputs.operation = 'lncc'
+    >>> tuple_lncc.inputs.operand_file1 = 'im2.nii'
+    >>> tuple_lncc.inputs.operand_value2 = 2.0
+    >>> tuple_lncc.cmdline
+    'seg_maths im1.nii -lncc im2.nii 2.00000000 -odt float im1_lncc.nii'
+    >>> tuple_lncc.run()  # doctest: +SKIP
+
+    >>> # Test lssd operation
+    >>> tuple_lssd = copy.deepcopy(tuple_maths)
+    >>> tuple_lssd.inputs.operation = 'lssd'
+    >>> tuple_lssd.inputs.operand_file1 = 'im2.nii'
+    >>> tuple_lssd.inputs.operand_value2 = 1.0
+    >>> tuple_lssd.cmdline
+    'seg_maths im1.nii -lssd im2.nii 1.00000000 -odt float im1_lssd.nii'
+    >>> tuple_lssd.run()  # doctest: +SKIP
+
+    >>> # Test lltsnorm operation
+    >>> tuple_lltsnorm = copy.deepcopy(tuple_maths)
+    >>> tuple_lltsnorm.inputs.operation = 'lltsnorm'
+    >>> tuple_lltsnorm.inputs.operand_file1 = 'im2.nii'
+    >>> tuple_lltsnorm.inputs.operand_value2 = 0.01
+    >>> tuple_lltsnorm.cmdline
+    'seg_maths im1.nii -lltsnorm im2.nii 0.01000000 -odt float \
+im1_lltsnorm.nii'
+    >>> tuple_lltsnorm.run()  # doctest: +SKIP
+    """
+    input_spec = TupleMathsInput
+
+
+class MergeInput(MathsInput):
+    """Input Spec for seg_maths merge operation."""
+    dimension = traits.Int(
+        mandatory=True, desc='Dimension to merge the images.')
+
+    desc = 'List of images to merge with the working image.'
+    merge_files = traits.List(
+        File(exists=True), argstr='%s', mandatory=True, position=4, desc=desc)
+
+
+class Merge(MathsCommand):
+    """Interface for executable seg_maths from NiftySeg platform.
+
+    Interface to use the merge operation that can be performed with the
+    seg_maths command-line program.
+
+    See below for this option::
+
+        merge <i> <d> <files> - Merge <i> images and the working image \
+in the <d> dimension.
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> from nipype.interfaces import niftyseg
+    >>> node = niftyseg.Merge()
+    >>> node.inputs.in_file = 'im1.nii'
+    >>> files = ['im2.nii', 'im3.nii']
+    >>> node.inputs.merge_files = files
+    >>> node.inputs.dimension = 2
+    >>> node.inputs.output_datatype = 'float'
+    >>> node.cmdline
+    'seg_maths im1.nii -merge 2 2 im2.nii im3.nii -odt float im1_merged.nii'
+
+    """
+    input_spec = MergeInput
+    _suffix = '_merged'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for seg_maths."""
+        if opt == 'merge_files':
+            return "-merge %d %d %s" % (len(val), self.inputs.dimension,
+                                        ' '.join(val))
+
+        return super(Merge, self)._format_arg(opt, spec, val)
diff --git a/nipype/interfaces/niftyseg/patchmatch.py b/nipype/interfaces/niftyseg/patchmatch.py
new file mode 100644
index 0000000000..5732b1ba17
--- /dev/null
+++ b/nipype/interfaces/niftyseg/patchmatch.py
@@ -0,0 +1,105 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+The patchmatch module provides higher-level interfaces to some of the
+operations that can be performed with the seg_PatchMatch command-line
+program.
+"""
+
+import warnings
+
+from ..base import TraitedSpec, File, traits, CommandLineInputSpec
+from .base import NiftySegCommand
+from ..niftyreg.base import get_custom_path
+
+warn = warnings.warn
+warnings.filterwarnings('always', category=UserWarning)
+
+
+class PatchMatchInputSpec(CommandLineInputSpec):
+    """Input Spec for PatchMatch."""
+    # Mandatory input arguments
+    in_file = File(
+        argstr='-i %s',
+        exists=True,
+        mandatory=True,
+        desc='Input image to segment',
+        position=1)
+
+    mask_file = File(
+        argstr='-m %s',
+        exists=True,
+        mandatory=True,
+        desc='Input mask for the area where PatchMatch is applied',
+        position=2)
+
+    database_file = File(
+        argstr='-db %s',
+        exists=True,
+        mandatory=True,
+        desc='Database with the segmentations',
+        position=3)
+
+    # Output file name
+    out_file = File(
+        name_source=['in_file'],
+        name_template='%s_pm.nii.gz',
+        desc='The output filename of the patchmatch results',
+        argstr='-o %s',
+        position=4)
+
+    # Optional arguments
+    patch_size = traits.Int(desc="Patch size, #voxels", argstr='-size %i')
+
+    desc = "Constrained search area size, number of times bigger than the \
+patch size"
+
+    cs_size = traits.Int(desc=desc, argstr='-cs %i')
+
+    match_num = traits.Int(
+        desc="Number of best matching patches", argstr='-match %i')
+
+    pm_num = traits.Int(
+        desc="Number of patchmatch executions", argstr='-pm %i')
+
+    desc = "Number of iterations for the patchmatch algorithm"
+    it_num = traits.Int(desc=desc, argstr='-it %i')
+
+
+class PatchMatchOutputSpec(TraitedSpec):
+    """OutputSpec for PatchMatch."""
+    out_file = File(desc="Output segmentation")
+
+
+class PatchMatch(NiftySegCommand):
+    """Interface for executable seg_PatchMatch from NiftySeg platform.
+
+    The database file is a text file; each line lists a template image, a
+    mask with the search region to consider, and a file with the labels to
+    propagate.
+
+    Input image, input mask, template images from database and masks from
+    database must have the same 4D resolution (same number of XxYxZ voxels,
+    modalities and/or time-points).
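+
+    For illustration, each line of the database file described above could
+    list a template image, its search-region mask, and the label image to
+    propagate (file names hypothetical; shown whitespace-separated, the
+    exact format is defined by seg_PatchMatch)::
+
+        template01.nii mask01.nii labels01.nii
+        template02.nii mask02.nii labels02.nii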
+
+    Label files from database must have the same 3D resolution
+    (XxYxZ voxels) as the input image, but can have a different number of
+    volumes than the input image, allowing multiple labels to be
+    propagated in the same execution.
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> from nipype.interfaces import niftyseg
+    >>> node = niftyseg.PatchMatch()
+    >>> node.inputs.in_file = 'im1.nii'
+    >>> node.inputs.mask_file = 'im2.nii'
+    >>> node.inputs.database_file = 'db.xml'
+    >>> node.cmdline
+    'seg_PatchMatch -i im1.nii -m im2.nii -db db.xml -o im1_pm.nii.gz'
+
+    """
+    _cmd = get_custom_path('seg_PatchMatch', env_dir='NIFTYSEGDIR')
+    input_spec = PatchMatchInputSpec
+    output_spec = PatchMatchOutputSpec
+    _suffix = '_pm'
diff --git a/nipype/interfaces/niftyseg/stats.py b/nipype/interfaces/niftyseg/stats.py
new file mode 100644
index 0000000000..796e07410c
--- /dev/null
+++ b/nipype/interfaces/niftyseg/stats.py
@@ -0,0 +1,284 @@
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+The stats module provides higher-level interfaces to some of the operations
+that can be performed with the niftyseg stats (seg_stats) command-line
+program.
+"""
+from __future__ import print_function
+import numpy as np
+
+from ..base import TraitedSpec, File, traits, CommandLineInputSpec
+from .base import NiftySegCommand
+from ..niftyreg.base import get_custom_path
+
+
+class StatsInput(CommandLineInputSpec):
+    """Input Spec for seg_stats interfaces."""
+    in_file = File(
+        position=2,
+        argstr='%s',
+        exists=True,
+        mandatory=True,
+        desc='image to operate on')
+
+    # Constraints
+    mask_file = File(
+        exists=True,
+        position=-2,
+        argstr='-m %s',
+        desc='statistics within the masked area')
+
+    desc = 'Only estimate statistics if voxel is larger than <float>'
+    larger_voxel = traits.Float(argstr='-t %f', position=-3, desc=desc)
+
+
+class StatsOutput(TraitedSpec):
+    """Output Spec for seg_stats interfaces."""
+    output = traits.Array(desc='Output array from seg_stats')
+
+
+class StatsCommand(NiftySegCommand):
+    """
+    Base Command Interface for seg_stats interfaces.
+
+    The executable seg_stats enables the estimation of image statistics on
+    continuous voxel intensities (average, standard deviation, min/max,
+    robust range, percentiles, sum, probabilistic volume, entropy, etc.)
+    either over the full image or on a per-slice basis (the slice axis can
+    be specified), statistics over voxel coordinates (location of max, min
+    and centre of mass, bounding box, etc.) and statistics over categorical
+    images (e.g. per-region volume, count, average, Dice scores, etc.).
+    These statistics are robust to the presence of NaNs, and can be
+    constrained by a mask and/or thresholded at a certain level.
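+
+    Standard output from seg_stats is parsed into a numpy array exposed on
+    the ``output`` field; a sketch of the parsing, with hypothetical
+    stdout::
+
+        >>> stats = StatsCommand()  # doctest: +SKIP
+        >>> stats._parse_stdout('10.5 20.5\n30.5 40.5')  # doctest: +SKIP
+        array([[10.5, 20.5],
+               [30.5, 40.5]])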
+ """ + _cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + input_spec = StatsInput + output_spec = StatsOutput + + def _parse_stdout(self, stdout): + out = [] + for string_line in stdout.split("\n"): + if string_line.startswith('#'): + continue + if len(string_line) <= 1: + continue + line = [float(s) for s in string_line.split()] + out.append(line) + return np.array(out).squeeze() + + def _run_interface(self, runtime): + new_runtime = super(StatsCommand, self)._run_interface(runtime) + self.output = self._parse_stdout(new_runtime.stdout) + return new_runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['output'] = self.output + return outputs + + +class UnaryStatsInput(StatsInput): + """Input Spec for seg_stats unary operations.""" + operation = traits.Enum( + 'r', + 'R', + 'a', + 's', + 'v', + 'vl', + 'vp', + 'n', + 'np', + 'e', + 'ne', + 'x', + 'X', + 'c', + 'B', + 'xvox', + 'xdim', + argstr='-%s', + position=4, + mandatory=True, + desc='operation to perform') + + +class UnaryStats(StatsCommand): + """ + Interface for executable seg_stats from NiftySeg platform. + + Interface to use any unary statistical operations that can be performed + + with the seg_stats command-line program. + + See below for those operations:: + + r - The range of all voxels. + + R - The robust range (assuming 2% outliers on both sides) of all voxels + + a - Average of all voxels + + s - Standard deviation of all voxels + + v - Volume of all voxels above 0 (<# voxels> * ) + + vl - Volume of each integer label (<# voxels per label> * \ +) + + vp - Volume of all probabilsitic voxels (sum() * ) + + n - Count of all voxels above 0 (<# voxels>) + + np - Sum of all fuzzy voxels (sum()) + + e - Entropy of all voxels + + ne - Normalized entropy of all voxels + + x - Location (i j k x y z) of the smallest value in the image + + X - Location (i j k x y z) of the largest value in the image + + c - Location (i j k x y z) of the centre of mass of the object + + B - Bounding box of all nonzero voxels [ xmin xsize ymin ysize zmin zsize ] + + xvox - Output the number of voxels in the x direction. Replace x with \ +y/z for other directions. + + xdim - Output the voxel dimention in the x direction. Replace x with \ +y/z for other directions. 
+
+    `Source code `_ |
+    `Documentation `_
+
+    Examples
+    --------
+    >>> import copy
+    >>> from nipype.interfaces import niftyseg
+    >>> unary = niftyseg.UnaryStats()
+    >>> unary.inputs.in_file = 'im1.nii'
+    >>> # Test v operation
+    >>> unary_v = copy.deepcopy(unary)
+    >>> unary_v.inputs.operation = 'v'
+    >>> unary_v.cmdline
+    'seg_stats im1.nii -v'
+    >>> unary_v.run()  # doctest: +SKIP
+    >>> # Test vl operation
+    >>> unary_vl = copy.deepcopy(unary)
+    >>> unary_vl.inputs.operation = 'vl'
+    >>> unary_vl.cmdline
+    'seg_stats im1.nii -vl'
+    >>> unary_vl.run()  # doctest: +SKIP
+    >>> # Test x operation
+    >>> unary_x = copy.deepcopy(unary)
+    >>> unary_x.inputs.operation = 'x'
+    >>> unary_x.cmdline
+    'seg_stats im1.nii -x'
+    >>> unary_x.run()  # doctest: +SKIP
+
+    """
+    input_spec = UnaryStatsInput
+
+
+class BinaryStatsInput(StatsInput):
+    """Input Spec for seg_stats Binary operations."""
+    operation = traits.Enum(
+        'p',
+        'sa',
+        'ss',
+        'svp',
+        'al',
+        'd',
+        'ncc',
+        'nmi',
+        'Vl',
+        'Nl',
+        mandatory=True,
+        argstr='-%s',
+        position=4,
+        desc='operation to perform')
+
+    operand_file = File(
+        exists=True,
+        argstr="%s",
+        mandatory=True,
+        position=5,
+        xor=["operand_value"],
+        desc="second image to perform operation with")
+
+    operand_value = traits.Float(
+        argstr='%.8f',
+        mandatory=True,
+        position=5,
+        xor=["operand_file"],
+        desc='value to perform operation with')
+
+
+class BinaryStats(StatsCommand):
+    """
+    Interface for executable seg_stats from NiftySeg platform.
+
+    Interface to use any binary statistical operation that can be performed
+    with the seg_stats command-line program.
+
+    See below for those operations::
+
+        p - <float> - The <float>th percentile of all voxels intensity \
+(float=[0,100])
+
+        sa - <ax> - Average of all voxels
+
+        ss - <ax> - Standard deviation of all voxels
+
+        svp - <ax> - Volume of all probabilistic voxels (sum(<in>) * \
+<volume per voxel>)
+
+        al - <in2> - Average value in <in> for each label in <in2>
+
+        d - <in2> - Calculate the Dice score between all classes in <in> \
+and <in2>
+
+        ncc - <in2> - Normalized cross correlation between <in> and <in2>
+
+        nmi - <in2> - Normalized Mutual Information between <in> and <in2>
+
+        Vl - <csv> - Volume of each integer label <in>. Save to <csv> file.
+
+        Nl - <csv> - Count of each label <in>. Save to <csv> file.
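+
+    A Dice overlap between two label images follows the same pattern as
+    the ``ncc`` example below; a sketch::
+
+        >>> binary_d = niftyseg.BinaryStats()  # doctest: +SKIP
+        >>> binary_d.inputs.in_file = 'im1.nii'  # doctest: +SKIP
+        >>> binary_d.inputs.operation = 'd'  # doctest: +SKIP
+        >>> binary_d.inputs.operand_file = 'im2.nii'  # doctest: +SKIP
+        >>> binary_d.cmdline  # doctest: +SKIP
+        'seg_stats im1.nii -d im2.nii'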
+ + `Source code `_ | + `Documentation `_ + + Examples + -------- + >>> import copy + >>> from nipype.interfaces import niftyseg + >>> binary = niftyseg.BinaryStats() + >>> binary.inputs.in_file = 'im1.nii' + >>> # Test sa operation + >>> binary_sa = copy.deepcopy(binary) + >>> binary_sa.inputs.operation = 'sa' + >>> binary_sa.inputs.operand_value = 2.0 + >>> binary_sa.cmdline + 'seg_stats im1.nii -sa 2.00000000' + >>> binary_sa.run() # doctest: +SKIP + >>> # Test ncc operation + >>> binary_ncc = copy.deepcopy(binary) + >>> binary_ncc.inputs.operation = 'ncc' + >>> binary_ncc.inputs.operand_file = 'im2.nii' + >>> binary_ncc.cmdline + 'seg_stats im1.nii -ncc im2.nii' + >>> binary_ncc.run() # doctest: +SKIP + >>> # Test Nl operation + >>> binary_nl = copy.deepcopy(binary) + >>> binary_nl.inputs.operation = 'Nl' + >>> binary_nl.inputs.operand_file = 'output.csv' + >>> binary_nl.cmdline + 'seg_stats im1.nii -Nl output.csv' + >>> binary_nl.run() # doctest: +SKIP + + """ + input_spec = BinaryStatsInput diff --git a/nipype/interfaces/niftyseg/tests/__init__.py b/nipype/interfaces/niftyseg/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py new file mode 100644 index 0000000000..cc7bc896c6 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMaths.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import BinaryMaths + + +def test_BinaryMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + operand_file=dict( + argstr='%s', + mandatory=True, + position=5, + xor=['operand_value', 'operand_str'], + ), + operand_str=dict( + argstr='%s', + mandatory=True, + position=5, + xor=['operand_value', 'operand_file'], + ), + operand_value=dict( + argstr='%.8f', + mandatory=True, + position=5, + xor=['operand_file', 'operand_str'], + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = BinaryMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinaryMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BinaryMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py new file mode 100644 index 0000000000..464ed6f6c4 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryMathsInteger.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import BinaryMathsInteger + + +def test_BinaryMathsInteger_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + operand_value=dict( + argstr='%d', + mandatory=True, + position=5, + ), + 
operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = BinaryMathsInteger.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinaryMathsInteger_outputs(): + output_map = dict(out_file=dict(), ) + outputs = BinaryMathsInteger.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py new file mode 100644 index 0000000000..0771d72f5c --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_BinaryStats.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..stats import BinaryStats + + +def test_BinaryStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr='-t %f', + position=-3, + ), + mask_file=dict( + argstr='-m %s', + position=-2, + ), + operand_file=dict( + argstr='%s', + mandatory=True, + position=5, + xor=['operand_value'], + ), + operand_value=dict( + argstr='%.8f', + mandatory=True, + position=5, + xor=['operand_file'], + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + ) + inputs = BinaryStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinaryStats_outputs(): + output_map = dict(output=dict(), ) + outputs = BinaryStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py new file mode 100644 index 0000000000..dceea837c0 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_CalcTopNCC.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..label_fusion import CalcTopNCC + + +def test_CalcTopNCC_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-target %s', + mandatory=True, + position=1, + ), + in_templates=dict( + argstr='%s', + mandatory=True, + position=3, + ), + mask_file=dict(argstr='-mask %s', ), + num_templates=dict( + argstr='-templates %s', + mandatory=True, + position=2, + ), + top_templates=dict( + argstr='-n %s', + mandatory=True, + position=4, + ), + ) + inputs = CalcTopNCC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CalcTopNCC_outputs(): + output_map = dict(out_files=dict(), ) + outputs = CalcTopNCC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/niftyseg/tests/test_auto_EM.py b/nipype/interfaces/niftyseg/tests/test_auto_EM.py new file mode 100644 index 0000000000..fc270ed22f --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_EM.py @@ -0,0 +1,85 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..em import EM + + +def test_EM_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bc_order_val=dict( + argstr='-bc_order %s', + usedefault=True, + ), + bc_thresh_val=dict( + argstr='-bc_thresh %s', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=4, + ), + mask_file=dict(argstr='-mask %s', ), + max_iter=dict( + argstr='-max_iter %s', + usedefault=True, + ), + min_iter=dict( + argstr='-min_iter %s', + usedefault=True, + ), + mrf_beta_val=dict(argstr='-mrf_beta %s', ), + no_prior=dict( + argstr='-nopriors %s', + mandatory=True, + xor=['prior_4D', 'priors'], + ), + out_bc_file=dict( + argstr='-bc_out %s', + name_source=['in_file'], + name_template='%s_bc_em.nii.gz', + ), + out_file=dict( + argstr='-out %s', + name_source=['in_file'], + name_template='%s_em.nii.gz', + ), + out_outlier_file=dict( + argstr='-out_outlier %s', + name_source=['in_file'], + name_template='%s_outlier_em.nii.gz', + ), + outlier_val=dict(argstr='-outlier %s %s', ), + prior_4D=dict( + argstr='-prior4D %s', + mandatory=True, + xor=['no_prior', 'priors'], + ), + priors=dict( + argstr='%s', + mandatory=True, + xor=['no_prior', 'prior_4D'], + ), + reg_val=dict(argstr='-reg %s', ), + relax_priors=dict(argstr='-rf %s %s', ), + ) + inputs = EM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EM_outputs(): + output_map = dict( + out_bc_file=dict(), + out_file=dict(), + out_outlier_file=dict(), + ) + outputs = EM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py new file mode 100644 index 0000000000..7cbfe58c6c --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_FillLesions.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..lesions import FillLesions + + +def test_FillLesions_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bin_mask=dict(argstr='-mask %s', ), + cwf=dict(argstr='-cwf %f', ), + debug=dict(argstr='-debug', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_dilation=dict(argstr='-dil %d', ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + lesion_mask=dict( + argstr='-l %s', + mandatory=True, + position=2, + ), + match=dict(argstr='-match %f', ), + other=dict(argstr='-other', ), + out_datatype=dict(argstr='-odt %s', ), + out_file=dict( + argstr='-o %s', + name_source=['in_file'], + name_template='%s_lesions_filled.nii.gz', + position=3, + ), + search=dict(argstr='-search %f', ), + size=dict(argstr='-size %d', ), + smooth=dict(argstr='-smo %f', ), + use_2d=dict(argstr='-2D', ), + verbose=dict(argstr='-v', ), + ) + inputs = FillLesions.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(inputs.traits()[key], metakey) == value +def test_FillLesions_outputs(): + output_map = dict(out_file=dict(), ) + outputs = FillLesions.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py new file mode 100644 index 0000000000..eea345e1df --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_LabelFusion.py @@ -0,0 +1,61 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..label_fusion import LabelFusion + + +def test_LabelFusion_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + classifier_type=dict( + argstr='-%s', + mandatory=True, + position=2, + ), + conv=dict(argstr='-conv %f', ), + dilation_roi=dict(), + environ=dict( + nohash=True, + usedefault=True, + ), + file_to_seg=dict(mandatory=True, ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=1, + ), + kernel_size=dict(), + mask_file=dict(argstr='-mask %s', ), + max_iter=dict(argstr='-max_iter %d', ), + mrf_value=dict(argstr='-MRF_beta %f', ), + out_file=dict( + argstr='-out %s', + name_source=['in_file'], + name_template='%s', + ), + prob_flag=dict(argstr='-outProb', ), + prob_update_flag=dict(argstr='-prop_update', ), + proportion=dict(argstr='-prop %s', ), + set_pq=dict(argstr='-setPQ %f %f', ), + sm_ranking=dict( + argstr='-%s', + position=3, + usedefault=True, + ), + template_file=dict(), + template_num=dict(), + unc=dict(argstr='-unc', ), + unc_thresh=dict(argstr='-uncthres %f', ), + verbose=dict(argstr='-v %s', ), + ) + inputs = LabelFusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LabelFusion_outputs(): + output_map = dict(out_file=dict(), ) + outputs = LabelFusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py new file mode 100644 index 0000000000..247dc9773e --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_MathsCommand.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import MathsCommand + + +def test_MathsCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = MathsCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MathsCommand_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MathsCommand.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_Merge.py 
b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py new file mode 100644 index 0000000000..c53e4edf40 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_Merge.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import Merge + + +def test_Merge_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + dimension=dict(mandatory=True, ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + merge_files=dict( + argstr='%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = Merge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Merge_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Merge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py new file mode 100644 index 0000000000..d840a40b2a --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_NiftySegCommand.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import NiftySegCommand + + +def test_NiftySegCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = NiftySegCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py new file mode 100644 index 0000000000..bfeded6eef --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_PatchMatch.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..patchmatch import PatchMatch + + +def test_PatchMatch_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cs_size=dict(argstr='-cs %i', ), + database_file=dict( + argstr='-db %s', + mandatory=True, + position=3, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + position=1, + ), + it_num=dict(argstr='-it %i', ), + mask_file=dict( + argstr='-m %s', + mandatory=True, + position=2, + ), + match_num=dict(argstr='-match %i', ), + out_file=dict( + argstr='-o %s', + name_source=['in_file'], + name_template='%s_pm.nii.gz', + position=4, + ), + patch_size=dict(argstr='-size %i', ), + pm_num=dict(argstr='-pm %i', ), + ) + inputs = PatchMatch.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PatchMatch_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PatchMatch.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py new file mode 100644 index 0000000000..e845e18952 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_StatsCommand.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..stats import StatsCommand + + +def test_StatsCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr='-t %f', + position=-3, + ), + mask_file=dict( + argstr='-m %s', + position=-2, + ), + ) + inputs = StatsCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_StatsCommand_outputs(): + output_map = dict(output=dict(), ) + outputs = StatsCommand.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py new file mode 100644 index 0000000000..7241264eee --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_TupleMaths.py @@ -0,0 +1,69 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import TupleMaths + + +def test_TupleMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + operand_file1=dict( + argstr='%s', + mandatory=True, + position=5, + xor=['operand_value1'], + ), + operand_file2=dict( + argstr='%s', + mandatory=True, + position=6, + xor=['operand_value2'], + ), + operand_value1=dict( + argstr='%.8f', + mandatory=True, + position=5, + xor=['operand_file1'], + ), + operand_value2=dict( + argstr='%.8f', + mandatory=True, + position=6, + xor=['operand_file2'], + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = TupleMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TupleMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = TupleMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py new file mode 100644 index 0000000000..9b1cd7d194 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryMaths.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maths import UnaryMaths + + +def test_UnaryMaths_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + operation=dict( + argstr='-%s', + mandatory=True, + 
position=4, + ), + out_file=dict( + argstr='%s', + name_source=['in_file'], + name_template='%s', + position=-2, + ), + output_datatype=dict( + argstr='-odt %s', + position=-3, + ), + ) + inputs = UnaryMaths.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_UnaryMaths_outputs(): + output_map = dict(out_file=dict(), ) + outputs = UnaryMaths.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py new file mode 100644 index 0000000000..34edf1a190 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_auto_UnaryStats.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..stats import UnaryStats + + +def test_UnaryStats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + larger_voxel=dict( + argstr='-t %f', + position=-3, + ), + mask_file=dict( + argstr='-m %s', + position=-2, + ), + operation=dict( + argstr='-%s', + mandatory=True, + position=4, + ), + ) + inputs = UnaryStats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_UnaryStats_outputs(): + output_map = dict(output=dict(), ) + outputs = UnaryStats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/niftyseg/tests/test_em_interfaces.py b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py new file mode 100644 index 0000000000..f4c56da2fe --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_em_interfaces.py @@ -0,0 +1,43 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import EM + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_EM'), reason="niftyseg is not installed") +def test_seg_em(): + + # Create a node object + seg_em = EM() + + # Check if the command is properly defined + cmd = get_custom_path('seg_EM', env_dir='NIFTYSEGDIR') + assert seg_em.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + seg_em.run() + + # Assign some input data + in_file = example_data('im1.nii') + seg_em.inputs.in_file = in_file + seg_em.inputs.no_prior = 4 + + cmd_tmp = '{cmd} -in {in_file} -nopriors 4 -bc_out {bc_out} -out \ +{out_file} -out_outlier {out_outlier}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + out_file='im1_em.nii.gz', + bc_out='im1_bc_em.nii.gz', + out_outlier='im1_outlier_em.nii.gz', + ) + + assert seg_em.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py b/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py new file mode 100644 index 0000000000..9fe82ac544 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_extra_PatchMatch.py @@ -0,0 +1,44 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import PatchMatch + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_PatchMatch'), reason="niftyseg is not installed") +def test_seg_patchmatch(): + + # Create a node object + seg_patchmatch = PatchMatch() + + # Check if the command is properly defined + cmd = get_custom_path('seg_PatchMatch', env_dir='NIFTYSEGDIR') + assert seg_patchmatch.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + seg_patchmatch.run() + + # Assign some input data + in_file = example_data('im1.nii') + mask_file = example_data('im2.nii') + db_file = example_data('db.xml') + seg_patchmatch.inputs.in_file = in_file + seg_patchmatch.inputs.mask_file = mask_file + seg_patchmatch.inputs.database_file = db_file + + cmd_tmp = '{cmd} -i {in_file} -m {mask_file} -db {db} -o {out_file}' + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + mask_file=mask_file, + db=db_file, + out_file='im1_pm.nii.gz', + ) + + assert seg_patchmatch.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_label_fusion.py b/nipype/interfaces/niftyseg/tests/test_label_fusion.py new file mode 100644 index 0000000000..fdc186d6c2 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_label_fusion.py @@ -0,0 +1,131 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import LabelFusion, CalcTopNCC + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_LabFusion'), reason="niftyseg is not installed") +def test_seg_lab_fusion(): + """ Test interfaces for seg_labfusion""" + # Create a node object + steps = LabelFusion() + + # Check if the command is properly defined + cmd = get_custom_path('seg_LabFusion', env_dir='NIFTYSEGDIR') + assert steps.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + steps.run() + + # Assign some input data + in_file = example_data('im1.nii') + file_to_seg = example_data('im2.nii') + template_file = example_data('im3.nii') + steps.inputs.in_file = in_file + steps.inputs.kernel_size = 2.0 + steps.inputs.file_to_seg = file_to_seg + steps.inputs.template_file = template_file + steps.inputs.template_num = 2 + steps.inputs.classifier_type = 'STEPS' + + cmd_tmp = '{cmd} -in {in_file} -STEPS 2.000000 2 {file_to_seg} \ +{template_file} -out {out_file}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + file_to_seg=file_to_seg, + template_file=template_file, + out_file='im1_steps.nii', + ) + + assert steps.cmdline == expected_cmd + + # Staple + staple = LabelFusion( + kernel_size=2.0, template_num=2, classifier_type='STAPLE') + in_file = example_data('im1.nii') + file_to_seg = example_data('im2.nii') + template_file = example_data('im3.nii') + staple.inputs.in_file = in_file + staple.inputs.file_to_seg = file_to_seg + staple.inputs.template_file = template_file + + cmd_tmp = '{cmd} -in {in_file} -STAPLE -ALL -out {out_file}' + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + file_to_seg=file_to_seg, + template_file=template_file, + out_file='im1_staple.nii', + ) + + assert staple.cmdline == expected_cmd + + # Assign some input data + mv_node = LabelFusion( + template_num=2, + classifier_type='MV', + sm_ranking='ROINCC', + dilation_roi=2) + in_file = example_data('im1.nii') + file_to_seg = example_data('im2.nii') + template_file = example_data('im3.nii') + mv_node.inputs.in_file = in_file + mv_node.inputs.file_to_seg = file_to_seg + mv_node.inputs.template_file = template_file + + cmd_tmp = '{cmd} -in {in_file} -MV -ROINCC 2 2 {file_to_seg} \ +{template_file} -out {out_file}' + + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + file_to_seg=file_to_seg, + template_file=template_file, + out_file='im1_mv.nii', + ) + + assert mv_node.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_CalcTopNCC'), reason="niftyseg is not installed") +def test_seg_calctopncc(): + """ Test interfaces for seg_CalctoNCC""" + # Create a node object + calctopncc = CalcTopNCC() + + # Check if the command is properly defined + cmd = get_custom_path('seg_CalcTopNCC', env_dir='NIFTYSEGDIR') + assert calctopncc.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + calctopncc.run() + + # Assign some input data + in_file = example_data('im1.nii') + file1 = example_data('im2.nii') + file2 = example_data('im3.nii') + calctopncc.inputs.in_file = in_file + calctopncc.inputs.num_templates = 2 + calctopncc.inputs.in_templates = [file1, file2] + calctopncc.inputs.top_templates = 1 + + cmd_tmp = '{cmd} -target {in_file} -templates 2 {file1} {file2} -n 1' + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + file1=file1, + file2=file2, + ) + + assert calctopncc.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_lesions.py b/nipype/interfaces/niftyseg/tests/test_lesions.py new file mode 100644 
index 0000000000..24b7e82cd7 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_lesions.py @@ -0,0 +1,40 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import FillLesions + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_FillLesions'), reason="niftyseg is not installed") +def test_seg_filllesions(): + + # Create a node object + seg_fill = FillLesions() + + # Check if the command is properly defined + cmd = get_custom_path('seg_FillLesions', env_dir='NIFTYSEGDIR') + assert seg_fill.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + seg_fill.run() + + # Assign some input data + in_file = example_data('im1.nii') + lesion_mask = example_data('im2.nii') + seg_fill.inputs.in_file = in_file + seg_fill.inputs.lesion_mask = lesion_mask + + expected_cmd = '{cmd} -i {in_file} -l {lesion_mask} -o {out_file}'.format( + cmd=cmd, + in_file=in_file, + lesion_mask=lesion_mask, + out_file='im1_lesions_filled.nii.gz', + ) + + assert seg_fill.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_maths.py b/nipype/interfaces/niftyseg/tests/test_maths.py new file mode 100644 index 0000000000..0680a8a481 --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_maths.py @@ -0,0 +1,159 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. 
import (UnaryMaths, BinaryMaths, BinaryMathsInteger, TupleMaths, Merge) + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +def test_unary_maths(): + + # Create a node object + unarym = UnaryMaths() + + # Check if the command is properly defined + cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + assert unarym.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + unarym.run() + + # Assign some input data + in_file = example_data('im1.nii') + unarym.inputs.in_file = in_file + unarym.inputs.operation = 'otsu' + unarym.inputs.output_datatype = 'float' + + expected_cmd = '{cmd} {in_file} -otsu -odt float {out_file}'.format( + cmd=cmd, in_file=in_file, out_file='im1_otsu.nii') + + assert unarym.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +def test_binary_maths(): + + # Create a node object + binarym = BinaryMaths() + + # Check if the command is properly defined + cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + assert binarym.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + binarym.run() + + # Assign some input data + in_file = example_data('im1.nii') + binarym.inputs.in_file = in_file + binarym.inputs.operand_value = 2.0 + binarym.inputs.operation = 'sub' + binarym.inputs.output_datatype = 'float' + + cmd_tmp = '{cmd} {in_file} -sub 2.00000000 -odt float {out_file}' + expected_cmd = cmd_tmp.format( + cmd=cmd, in_file=in_file, out_file='im1_sub.nii') + + assert binarym.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +def test_int_binary_maths(): + + # Create a node object + ibinarym = BinaryMathsInteger() + + # Check if the command is properly defined + cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + assert ibinarym.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + ibinarym.run() + + # Assign some input data + in_file = example_data('im1.nii') + ibinarym.inputs.in_file = in_file + ibinarym.inputs.operand_value = 2 + ibinarym.inputs.operation = 'dil' + ibinarym.inputs.output_datatype = 'float' + + expected_cmd = '{cmd} {in_file} -dil 2 -odt float {out_file}'.format( + cmd=cmd, in_file=in_file, out_file='im1_dil.nii') + + assert ibinarym.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +def test_tuple_maths(): + + # Create a node object + tuplem = TupleMaths() + + # Check if the command is properly defined + cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + assert tuplem.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + tuplem.run() + + # Assign some input data + in_file = example_data('im1.nii') + op_file = example_data('im2.nii') + tuplem.inputs.in_file = in_file + tuplem.inputs.operation = 'lncc' + tuplem.inputs.operand_file1 = op_file + tuplem.inputs.operand_value2 = 2.0 + tuplem.inputs.output_datatype = 'float' + + cmd_tmp = '{cmd} {in_file} -lncc {op} 2.00000000 -odt float {out_file}' + expected_cmd = cmd_tmp.format( + cmd=cmd, in_file=in_file, op=op_file, out_file='im1_lncc.nii') + + assert tuplem.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_maths'), reason="niftyseg is not installed") +def test_merge(): + + # Create a node object + merge = Merge() + + # Check if the 
command is properly defined + cmd = get_custom_path('seg_maths', env_dir='NIFTYSEGDIR') + assert merge.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + merge.run() + + # Assign some input data + in_file = example_data('im1.nii') + file1 = example_data('im2.nii') + file2 = example_data('im3.nii') + merge.inputs.in_file = in_file + merge.inputs.merge_files = [file1, file2] + merge.inputs.dimension = 2 + merge.inputs.output_datatype = 'float' + + cmd_tmp = '{cmd} {in_file} -merge 2 2 {f1} {f2} -odt float {out_file}' + expected_cmd = cmd_tmp.format( + cmd=cmd, + in_file=in_file, + f1=file1, + f2=file2, + out_file='im1_merged.nii') + + assert merge.cmdline == expected_cmd diff --git a/nipype/interfaces/niftyseg/tests/test_stats.py b/nipype/interfaces/niftyseg/tests/test_stats.py new file mode 100644 index 0000000000..91c234e98d --- /dev/null +++ b/nipype/interfaces/niftyseg/tests/test_stats.py @@ -0,0 +1,61 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest + +from ....testing import example_data +from ...niftyreg import get_custom_path +from ...niftyreg.tests.test_regutils import no_nifty_tool +from .. import UnaryStats, BinaryStats + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") +def test_unary_stats(): + """ Test for the seg_stats interfaces """ + # Create a node object + unarys = UnaryStats() + + # Check if the command is properly defined + cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + assert unarys.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + unarys.run() + + # Assign some input data + in_file = example_data('im1.nii') + unarys.inputs.in_file = in_file + unarys.inputs.operation = 'a' + + expected_cmd = '{cmd} {in_file} -a'.format(cmd=cmd, in_file=in_file) + + assert unarys.cmdline == expected_cmd + + +@pytest.mark.skipif( + no_nifty_tool(cmd='seg_stats'), reason="niftyseg is not installed") +def test_binary_stats(): + """ Test for the seg_stats interfaces """ + # Create a node object + binarys = BinaryStats() + + # Check if the command is properly defined + cmd = get_custom_path('seg_stats', env_dir='NIFTYSEGDIR') + assert binarys.cmd == cmd + + # test raising error with mandatory args absent + with pytest.raises(ValueError): + binarys.run() + + # Assign some input data + in_file = example_data('im1.nii') + binarys.inputs.in_file = in_file + binarys.inputs.operand_value = 2 + binarys.inputs.operation = 'sa' + + expected_cmd = '{cmd} {in_file} -sa 2.00000000'.format( + cmd=cmd, in_file=in_file) + + assert binarys.cmdline == expected_cmd diff --git a/nipype/interfaces/nilearn.py b/nipype/interfaces/nilearn.py new file mode 100644 index 0000000000..699b7dc8d3 --- /dev/null +++ b/nipype/interfaces/nilearn.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +''' +Algorithms to compute statistics on :abbr:`fMRI (functional MRI)` +''' +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os + +import numpy as np +import nibabel as nb + +from ..interfaces.base import (traits, TraitedSpec, LibraryBaseInterface, + SimpleInterface, BaseInterfaceInputSpec, File, + InputMultiPath) + + +class NilearnBaseInterface(LibraryBaseInterface): + _pkg = 'nilearn' + + +class 
SignalExtractionInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file') + label_files = InputMultiPath( + File(exists=True), + mandatory=True, + desc='a 3-D label image, with 0 denoting ' + 'background, or a list of 3-D probability ' + 'maps (one per label) or the equivalent 4D ' + 'file.') + class_labels = traits.List( + mandatory=True, + desc='Human-readable labels for each segment ' + 'in the label file, in order. The length of ' + 'class_labels must be equal to the number of ' + 'segments (background excluded). This list ' + 'corresponds to the class labels in label_file ' + 'in ascending order') + out_file = File( + 'signals.tsv', + usedefault=True, + exists=False, + desc='The name of the file to output to. ' + 'signals.tsv by default') + incl_shared_variance = traits.Bool( + True, + usedefault=True, + desc='By default ' + '(True), returns simple time series calculated from each ' + 'region independently (e.g., for noise regression). If ' + 'False, returns unique signals for each region, discarding ' + 'shared variance (e.g., for connectivity. Only has effect ' + 'with 4D probability maps.') + include_global = traits.Bool( + False, + usedefault=True, + desc='If True, include an extra column ' + 'labeled "GlobalSignal", with values calculated from the entire brain ' + '(instead of just regions).') + detrend = traits.Bool( + False, + usedefault=True, + desc='If True, perform detrending using nilearn.') + + +class SignalExtractionOutputSpec(TraitedSpec): + out_file = File( + exists=True, + desc='tsv file containing the computed ' + 'signals, with as many columns as there are labels and as ' + 'many rows as there are timepoints in in_file, plus a ' + 'header row with values from class_labels') + + +class SignalExtraction(NilearnBaseInterface, SimpleInterface): + ''' + Extracts signals over tissue classes or brain regions + + >>> seinterface = SignalExtraction() + >>> seinterface.inputs.in_file = 'functional.nii' + >>> seinterface.inputs.label_files = 'segmentation0.nii.gz' + >>> seinterface.inputs.out_file = 'means.tsv' + >>> segments = ['CSF', 'GrayMatter', 'WhiteMatter'] + >>> seinterface.inputs.class_labels = segments + >>> seinterface.inputs.detrend = True + >>> seinterface.inputs.include_global = True + ''' + input_spec = SignalExtractionInputSpec + output_spec = SignalExtractionOutputSpec + + def _run_interface(self, runtime): + maskers = self._process_inputs() + + signals = [] + for masker in maskers: + signals.append(masker.fit_transform(self.inputs.in_file)) + region_signals = np.hstack(signals) + + output = np.vstack((self.inputs.class_labels, + region_signals.astype(str))) + + # save output + self._results['out_file'] = os.path.join(runtime.cwd, + self.inputs.out_file) + np.savetxt( + self._results['out_file'], output, fmt=b'%s', delimiter='\t') + return runtime + + def _process_inputs(self): + ''' validate and process inputs into useful form. 
+ Returns a list of nilearn maskers and the list of corresponding label + names.''' + import nilearn.input_data as nl + import nilearn.image as nli + + label_data = nli.concat_imgs(self.inputs.label_files) + maskers = [] + + # determine form of label files, choose appropriate nilearn masker + if np.amax(label_data.get_data()) > 1: # 3d label file + n_labels = np.amax(label_data.get_data()) + maskers.append(nl.NiftiLabelsMasker(label_data)) + else: # 4d labels + n_labels = label_data.get_data().shape[3] + if self.inputs.incl_shared_variance: # independent computation + for img in nli.iter_img(label_data): + maskers.append( + nl.NiftiMapsMasker( + self._4d(img.get_data(), img.affine))) + else: # one computation fitting all + maskers.append(nl.NiftiMapsMasker(label_data)) + + # check label list size + if not np.isclose(int(n_labels), n_labels): + raise ValueError( + 'The label files {} contain invalid value {}. Check input.' + .format(self.inputs.label_files, n_labels)) + + if len(self.inputs.class_labels) != n_labels: + raise ValueError('The length of class_labels {} does not ' + 'match the number of regions {} found in ' + 'label_files {}'.format(self.inputs.class_labels, + n_labels, + self.inputs.label_files)) + + if self.inputs.include_global: + global_label_data = label_data.get_data().sum( + axis=3) # sum across all regions + global_label_data = np.rint(global_label_data).astype(int).clip( + 0, 1) # binarize + global_label_data = self._4d(global_label_data, label_data.affine) + global_masker = nl.NiftiLabelsMasker( + global_label_data, detrend=self.inputs.detrend) + maskers.insert(0, global_masker) + self.inputs.class_labels.insert(0, 'GlobalSignal') + + for masker in maskers: + masker.set_params(detrend=self.inputs.detrend) + + return maskers + + def _4d(self, array, affine): + ''' takes a 3-dimensional numpy array and an affine, + returns the equivalent 4th dimensional nifti file ''' + return nb.Nifti1Image(array[:, :, :, np.newaxis], affine) diff --git a/nipype/interfaces/nipy/__init__.py b/nipype/interfaces/nipy/__init__.py new file mode 100644 index 0000000000..19d030b61a --- /dev/null +++ b/nipype/interfaces/nipy/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +from .model import FitGLM, EstimateContrast +from .preprocess import ComputeMask, SpaceTimeRealigner +from .utils import Similarity diff --git a/nipype/interfaces/nipy/base.py b/nipype/interfaces/nipy/base.py new file mode 100644 index 0000000000..077499c52a --- /dev/null +++ b/nipype/interfaces/nipy/base.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Base interface for nipy """ + +from ..base import LibraryBaseInterface +from ...utils.misc import package_check + +# Originally set in model, preprocess and utils +# Set here to be imported, in case anybody depends on its presence +# Remove in 2.0 +have_nipy = True +try: + package_check('nipy') +except ImportError: + have_nipy = False + + +class NipyBaseInterface(LibraryBaseInterface): + _pkg = 'nipy' diff --git a/nipype/interfaces/nipy/model.py b/nipype/interfaces/nipy/model.py new file mode 100644 index 0000000000..8746728990 --- /dev/null +++ b/nipype/interfaces/nipy/model.py @@ -0,0 +1,343 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, str, bytes + +import os + +import nibabel as nb +import numpy as np + +from ...utils import NUMPY_MMAP + +from .base 
import NipyBaseInterface, have_nipy +from ..base import (TraitedSpec, traits, File, OutputMultiPath, + BaseInterfaceInputSpec, isdefined) + + +class FitGLMInputSpec(BaseInterfaceInputSpec): + session_info = traits.List( + minlen=1, + maxlen=1, + mandatory=True, + desc=('Session specific information generated by' + ' ``modelgen.SpecifyModel``, FitGLM does ' + 'not support multiple runs unless they are ' + 'concatenated (see SpecifyModel options)')) + hrf_model = traits.Enum( + 'Canonical', + 'Canonical With Derivative', + 'FIR', + desc=("string that specifies the hemodynamic response " + "function; it can be 'Canonical', 'Canonical " + "With Derivative' or 'FIR'"), + usedefault=True) + drift_model = traits.Enum( + "Cosine", + "Polynomial", + "Blank", + desc=("string that specifies the desired drift " + "model, to be chosen among 'Polynomial', " + "'Cosine', 'Blank'"), + usedefault=True) + TR = traits.Float(mandatory=True) + model = traits.Enum( + "ar1", + "spherical", + desc=("the autoregressive model is available only for " + "the kalman method"), + usedefault=True) + method = traits.Enum( + "kalman", + "ols", + desc=("method to fit the model, ols or kalman; kalman " + "is more time consuming but it supports an " + "autoregressive model"), + usedefault=True) + mask = traits.File( + exists=True, + desc=("restrict the fitting only to the region defined " + "by this mask")) + normalize_design_matrix = traits.Bool( + False, + desc=("normalize (zscore) the " + "regressors before fitting"), + usedefault=True) + save_residuals = traits.Bool(False, usedefault=True) + plot_design_matrix = traits.Bool(False, usedefault=True) + + +class FitGLMOutputSpec(TraitedSpec): + beta = File(exists=True) + nvbeta = traits.Any() + s2 = File(exists=True) + dof = traits.Any() + constants = traits.Any() + axis = traits.Any() + reg_names = traits.List() + residuals = traits.File() + a = File(exists=True) + + +class FitGLM(NipyBaseInterface): + ''' + Fit a GLM based on the specified design. Supports only single or concatenated runs.
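+ + Example + ------- + A minimal usage sketch (not executed); ``spec_res`` is a hypothetical result of a prior ``modelgen.SpecifyModel`` run that supplies ``session_info``: + + >>> fit = FitGLM() + >>> fit.inputs.TR = 2.0 + >>> fit.inputs.session_info = spec_res.outputs.session_info # doctest: +SKIP + >>> res = fit.run() # doctest: +SKIP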
+ ''' + input_spec = FitGLMInputSpec + output_spec = FitGLMOutputSpec + + def _run_interface(self, runtime): + import nipy.modalities.fmri.glm as GLM + import nipy.modalities.fmri.design_matrix as dm + try: + BlockParadigm = dm.BlockParadigm + except AttributeError: + from nipy.modalities.fmri.experimental_paradigm import BlockParadigm + + session_info = self.inputs.session_info + + functional_runs = self.inputs.session_info[0]['scans'] + if isinstance(functional_runs, (str, bytes)): + functional_runs = [functional_runs] + nii = nb.load(functional_runs[0]) + data = nii.get_data() + + if isdefined(self.inputs.mask): + mask = nb.load(self.inputs.mask).get_data() > 0 + else: + mask = np.ones(nii.shape[:3]) == 1 + + timeseries = data.copy()[mask, :] + del data + + for functional_run in functional_runs[1:]: + nii = nb.load(functional_run, mmap=NUMPY_MMAP) + data = nii.get_data() + npdata = data.copy() + del data + timeseries = np.concatenate((timeseries, npdata[mask, :]), axis=1) + del npdata + + nscans = timeseries.shape[1] + + if 'hpf' in list(session_info[0].keys()): + hpf = session_info[0]['hpf'] + drift_model = self.inputs.drift_model + else: + hpf = 0 + drift_model = "Blank" + + reg_names = [] + for reg in session_info[0]['regress']: + reg_names.append(reg['name']) + + reg_vals = np.zeros((nscans, len(reg_names))) + for i in range(len(reg_names)): + reg_vals[:, i] = np.array( + session_info[0]['regress'][i]['val']).reshape(1, -1) + + frametimes = np.linspace(0, (nscans - 1) * self.inputs.TR, nscans) + + conditions = [] + onsets = [] + duration = [] + + for i, cond in enumerate(session_info[0]['cond']): + onsets += cond['onset'] + conditions += [cond['name']] * len(cond['onset']) + if len(cond['duration']) == 1: + duration += cond['duration'] * len(cond['onset']) + else: + duration += cond['duration'] + + if conditions: + paradigm = BlockParadigm( + con_id=conditions, onset=onsets, duration=duration) + else: + paradigm = None + design_matrix, self._reg_names = dm.dmtx_light( + frametimes, + paradigm, + drift_model=drift_model, + hfcut=hpf, + hrf_model=self.inputs.hrf_model, + add_regs=reg_vals, + add_reg_names=reg_names) + if self.inputs.normalize_design_matrix: + for i in range(len(self._reg_names) - 1): + design_matrix[:, i] = (( + design_matrix[:, i] - design_matrix[:, i].mean()) / + design_matrix[:, i].std()) + + if self.inputs.plot_design_matrix: + import pylab + pylab.pcolor(design_matrix) + pylab.savefig("design_matrix.pdf") + pylab.close() + pylab.clf() + + glm = GLM.GeneralLinearModel() + glm.fit( + timeseries.T, + design_matrix, + method=self.inputs.method, + model=self.inputs.model) + + self._beta_file = os.path.abspath("beta.nii") + beta = np.zeros(mask.shape + (glm.beta.shape[0], )) + beta[mask, :] = glm.beta.T + nb.save(nb.Nifti1Image(beta, nii.affine), self._beta_file) + + self._s2_file = os.path.abspath("s2.nii") + s2 = np.zeros(mask.shape) + s2[mask] = glm.s2 + nb.save(nb.Nifti1Image(s2, nii.affine), self._s2_file) + + if self.inputs.save_residuals: + explained = np.dot(design_matrix, glm.beta) + residuals = np.zeros(mask.shape + (nscans, )) + residuals[mask, :] = timeseries - explained.T + self._residuals_file = os.path.abspath("residuals.nii") + nb.save( + nb.Nifti1Image(residuals, nii.affine), self._residuals_file) + + self._nvbeta = glm.nvbeta + self._dof = glm.dof + self._constants = glm._constants + self._axis = glm._axis + if self.inputs.model == "ar1": + self._a_file = os.path.abspath("a.nii") + a = np.zeros(mask.shape) + a[mask] = glm.a.squeeze() + 
nb.save(nb.Nifti1Image(a, nii.affine), self._a_file) + self._model = glm.model + self._method = glm.method + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["beta"] = self._beta_file + outputs["nvbeta"] = self._nvbeta + outputs["s2"] = self._s2_file + outputs["dof"] = self._dof + outputs["constants"] = self._constants + outputs["axis"] = self._axis + outputs["reg_names"] = self._reg_names + if self.inputs.model == "ar1": + outputs["a"] = self._a_file + if self.inputs.save_residuals: + outputs["residuals"] = self._residuals_file + return outputs + + +class EstimateContrastInputSpec(BaseInterfaceInputSpec): + contrasts = traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float), traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('F'), + traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float)))))), + desc="""List of contrasts with each contrast being a list of the form: + [('name', 'stat', [condition list], [weight list], [session list])]. if + session list is None or not provided, all sessions are used. For F + contrasts, the condition list should contain previously defined + T-contrasts.""", + mandatory=True) + beta = File( + exists=True, + desc="beta coefficients of the fitted model", + mandatory=True) + nvbeta = traits.Any(mandatory=True) + s2 = File( + exists=True, desc="squared variance of the residuals", mandatory=True) + dof = traits.Any(desc="degrees of freedom", mandatory=True) + constants = traits.Any(mandatory=True) + axis = traits.Any(mandatory=True) + reg_names = traits.List(mandatory=True) + mask = traits.File(exists=True) + + +class EstimateContrastOutputSpec(TraitedSpec): + stat_maps = OutputMultiPath(File(exists=True)) + z_maps = OutputMultiPath(File(exists=True)) + p_maps = OutputMultiPath(File(exists=True)) + + +class EstimateContrast(NipyBaseInterface): + ''' + Estimate contrast of a fitted model. 
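+ + Example + ------- + A minimal sketch; ``fit_res`` is a hypothetical result of a prior ``FitGLM`` run, whose outputs map one-to-one onto the inputs here: + + >>> est = EstimateContrast() + >>> est.inputs.contrasts = [('task>rest', 'T', ['task'], [1.0])] + >>> est.inputs.beta = fit_res.outputs.beta # doctest: +SKIP + >>> est.inputs.nvbeta = fit_res.outputs.nvbeta # doctest: +SKIP + >>> est.inputs.s2 = fit_res.outputs.s2 # doctest: +SKIP + >>> est.inputs.dof = fit_res.outputs.dof # doctest: +SKIP + >>> est.inputs.constants = fit_res.outputs.constants # doctest: +SKIP + >>> est.inputs.axis = fit_res.outputs.axis # doctest: +SKIP + >>> est.inputs.reg_names = fit_res.outputs.reg_names # doctest: +SKIP + >>> res = est.run() # doctest: +SKIP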
+ ''' + input_spec = EstimateContrastInputSpec + output_spec = EstimateContrastOutputSpec + + def _run_interface(self, runtime): + import nipy.modalities.fmri.glm as GLM + + beta_nii = nb.load(self.inputs.beta) + if isdefined(self.inputs.mask): + mask = nb.load(self.inputs.mask).get_data() > 0 + else: + mask = np.ones(beta_nii.shape[:3]) == 1 + + glm = GLM.GeneralLinearModel() + nii = nb.load(self.inputs.beta) + glm.beta = beta_nii.get_data().copy()[mask, :].T + glm.nvbeta = self.inputs.nvbeta + glm.s2 = nb.load(self.inputs.s2).get_data().copy()[mask] + glm.dof = self.inputs.dof + glm._axis = self.inputs.axis + glm._constants = self.inputs.constants + + reg_names = self.inputs.reg_names + + self._stat_maps = [] + self._p_maps = [] + self._z_maps = [] + for contrast_def in self.inputs.contrasts: + name = contrast_def[0] + contrast = np.zeros(len(reg_names)) + + for i, reg_name in enumerate(reg_names): + if reg_name in contrast_def[2]: + idx = contrast_def[2].index(reg_name) + contrast[i] = contrast_def[3][idx] + + est_contrast = glm.contrast(contrast) + + stat_map = np.zeros(mask.shape) + stat_map[mask] = est_contrast.stat().T + stat_map_file = os.path.abspath(name + "_stat_map.nii") + nb.save(nb.Nifti1Image(stat_map, nii.affine), stat_map_file) + self._stat_maps.append(stat_map_file) + + p_map = np.zeros(mask.shape) + p_map[mask] = est_contrast.pvalue().T + p_map_file = os.path.abspath(name + "_p_map.nii") + nb.save(nb.Nifti1Image(p_map, nii.affine), p_map_file) + self._p_maps.append(p_map_file) + + z_map = np.zeros(mask.shape) + z_map[mask] = est_contrast.zscore().T + z_map_file = os.path.abspath(name + "_z_map.nii") + nb.save(nb.Nifti1Image(z_map, nii.affine), z_map_file) + self._z_maps.append(z_map_file) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["stat_maps"] = self._stat_maps + outputs["p_maps"] = self._p_maps + outputs["z_maps"] = self._z_maps + return outputs diff --git a/nipype/interfaces/nipy/preprocess.py b/nipype/interfaces/nipy/preprocess.py new file mode 100644 index 0000000000..252463e3f7 --- /dev/null +++ b/nipype/interfaces/nipy/preprocess.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os + +import nibabel as nb +import numpy as np + +from ...utils import NUMPY_MMAP +from ...utils.filemanip import split_filename, fname_presuffix + +from .base import NipyBaseInterface, have_nipy +from ..base import (TraitedSpec, traits, BaseInterfaceInputSpec, + isdefined, File, InputMultiPath, OutputMultiPath) + + +class ComputeMaskInputSpec(BaseInterfaceInputSpec): + mean_volume = File( + exists=True, + mandatory=True, + desc="mean EPI image, used to compute the threshold for the mask") + reference_volume = File( + exists=True, + desc=("reference volume used to compute the mask. 
" + "If none is give, the mean volume is used.")) + m = traits.Float(desc="lower fraction of the histogram to be discarded") + M = traits.Float(desc="upper fraction of the histogram to be discarded") + cc = traits.Bool(desc="Keep only the largest connected component") + + +class ComputeMaskOutputSpec(TraitedSpec): + brain_mask = File(exists=True) + + +class ComputeMask(NipyBaseInterface): + input_spec = ComputeMaskInputSpec + output_spec = ComputeMaskOutputSpec + + def _run_interface(self, runtime): + from nipy.labs.mask import compute_mask + args = {} + for key in [ + k for k, _ in list(self.inputs.items()) + if k not in BaseInterfaceInputSpec().trait_names() + ]: + value = getattr(self.inputs, key) + if isdefined(value): + if key in ['mean_volume', 'reference_volume']: + nii = nb.load(value, mmap=NUMPY_MMAP) + value = nii.get_data() + args[key] = value + + brain_mask = compute_mask(**args) + _, name, ext = split_filename(self.inputs.mean_volume) + self._brain_mask_path = os.path.abspath("%s_mask.%s" % (name, ext)) + nb.save( + nb.Nifti1Image(brain_mask.astype(np.uint8), nii.affine), + self._brain_mask_path) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["brain_mask"] = self._brain_mask_path + return outputs + + +class SpaceTimeRealignerInputSpec(BaseInterfaceInputSpec): + in_file = InputMultiPath( + File(exists=True), + mandatory=True, + min_ver='0.4.0.dev', + desc="File to realign") + tr = traits.Float(desc="TR in seconds", requires=['slice_times']) + slice_times = traits.Either( + traits.List(traits.Float()), + traits.Enum('asc_alt_2', 'asc_alt_2_1', 'asc_alt_half', + 'asc_alt_siemens', 'ascending', 'desc_alt_2', + 'desc_alt_half', 'descending'), + desc=('Actual slice acquisition times.')) + slice_info = traits.Either( + traits.Int, + traits.List(min_len=2, max_len=2), + desc=('Single integer or length 2 sequence ' + 'If int, the axis in `images` that is the ' + 'slice axis. In a 4D image, this will ' + 'often be axis = 2. If a 2 sequence, then' + ' elements are ``(slice_axis, ' + 'slice_direction)``, where ``slice_axis`` ' + 'is the slice axis in the image as above, ' + 'and ``slice_direction`` is 1 if the ' + 'slices were acquired slice 0 first, slice' + ' -1 last, or -1 if acquired slice -1 ' + 'first, slice 0 last. If `slice_info` is ' + 'an int, assume ' + '``slice_direction`` == 1.'), + requires=['slice_times'], + ) + + +class SpaceTimeRealignerOutputSpec(TraitedSpec): + out_file = OutputMultiPath(File(exists=True), desc="Realigned files") + par_file = OutputMultiPath( + File(exists=True), + desc=("Motion parameter files. Angles are not " + "euler angles")) + + +class SpaceTimeRealigner(NipyBaseInterface): + """Simultaneous motion and slice timing correction algorithm + + If slice_times is not specified, this algorithm performs spatial motion + correction + + This interface wraps nipy's SpaceTimeRealign algorithm [Roche2011]_ or simply the + SpatialRealign algorithm when timing info is not provided. 
+ + Examples + -------- + >>> from nipype.interfaces.nipy import SpaceTimeRealigner + >>> #Run spatial realignment only + >>> realigner = SpaceTimeRealigner() + >>> realigner.inputs.in_file = ['functional.nii'] + >>> res = realigner.run() # doctest: +SKIP + + >>> realigner = SpaceTimeRealigner() + >>> realigner.inputs.in_file = ['functional.nii'] + >>> realigner.inputs.tr = 2 + >>> realigner.inputs.slice_times = list(range(0, 3, 67)) + >>> realigner.inputs.slice_info = 2 + >>> res = realigner.run() # doctest: +SKIP + + + References + ---------- + .. [Roche2011] Roche A. A four-dimensional registration algorithm with \ + application to joint correction of motion and slice timing \ + in fMRI. IEEE Trans Med Imaging. 2011 Aug;30(8):1546-54. DOI_. + + .. _DOI: http://dx.doi.org/10.1109/TMI.2011.2131152 + + """ + + input_spec = SpaceTimeRealignerInputSpec + output_spec = SpaceTimeRealignerOutputSpec + keywords = ['slice timing', 'motion correction'] + + def _run_interface(self, runtime): + from nipy import save_image, load_image + all_ims = [load_image(fname) for fname in self.inputs.in_file] + + if not isdefined(self.inputs.slice_times): + from nipy.algorithms.registration.groupwise_registration import \ + SpaceRealign + R = SpaceRealign(all_ims) + else: + from nipy.algorithms.registration import SpaceTimeRealign + R = SpaceTimeRealign( + all_ims, + tr=self.inputs.tr, + slice_times=self.inputs.slice_times, + slice_info=self.inputs.slice_info, + ) + + R.estimate(refscan=None) + + corr_run = R.resample() + self._out_file_path = [] + self._par_file_path = [] + + for j, corr in enumerate(corr_run): + self._out_file_path.append( + os.path.abspath('corr_%s.nii.gz' % + (split_filename(self.inputs.in_file[j])[1]))) + save_image(corr, self._out_file_path[j]) + + self._par_file_path.append( + os.path.abspath('%s.par' % + (os.path.split(self.inputs.in_file[j])[1]))) + mfile = open(self._par_file_path[j], 'w') + motion = R._transforms[j] + # nipy does not encode euler angles. 
return in original form of + # translation followed by rotation vector see: + # http://en.wikipedia.org/wiki/Rodrigues'_rotation_formula + for i, mo in enumerate(motion): + params = [ + '%.10f' % item + for item in np.hstack((mo.translation, mo.rotation)) + ] + string = ' '.join(params) + '\n' + mfile.write(string) + mfile.close() + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_file'] = self._out_file_path + outputs['par_file'] = self._par_file_path + return outputs + + +class TrimInputSpec(BaseInterfaceInputSpec): + in_file = File(exists=True, mandatory=True, desc="EPI image to trim") + begin_index = traits.Int(0, usedefault=True, desc='first volume') + end_index = traits.Int( + 0, + usedefault=True, + desc='last volume indexed as in python (and 0 for last)') + out_file = File(desc='output filename') + suffix = traits.Str( + '_trim', + usedefault=True, + desc='suffix for out_file to use if no out_file provided') + + +class TrimOutputSpec(TraitedSpec): + out_file = File(exists=True) + + +class Trim(NipyBaseInterface): + """ Simple interface to trim a few volumes from a 4d fmri nifti file + + Examples + -------- + >>> from nipype.interfaces.nipy.preprocess import Trim + >>> trim = Trim() + >>> trim.inputs.in_file = 'functional.nii' + >>> trim.inputs.begin_index = 3 # remove 3 first volumes + >>> res = trim.run() # doctest: +SKIP + + """ + + input_spec = TrimInputSpec + output_spec = TrimOutputSpec + + def _run_interface(self, runtime): + out_file = self._list_outputs()['out_file'] + nii = nb.load(self.inputs.in_file) + if self.inputs.end_index == 0: + s = slice(self.inputs.begin_index, nii.shape[3]) + else: + s = slice(self.inputs.begin_index, self.inputs.end_index) + nii2 = nb.Nifti1Image(nii.get_data()[..., s], nii.affine, nii.header) + nb.save(nii2, out_file) + return runtime + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']): + outputs['out_file'] = fname_presuffix( + self.inputs.in_file, + newpath=os.getcwd(), + suffix=self.inputs.suffix) + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs diff --git a/nipype/interfaces/nipy/tests/__init__.py b/nipype/interfaces/nipy/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/nipy/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py new file mode 100644 index 0000000000..f0f2453274 --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_ComputeMask.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ComputeMask + + +def test_ComputeMask_inputs(): + input_map = dict( + M=dict(), + cc=dict(), + m=dict(), + mean_volume=dict(mandatory=True, ), + reference_volume=dict(), + ) + inputs = ComputeMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ComputeMask_outputs(): + output_map = dict(brain_mask=dict(), ) + outputs = ComputeMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py 
b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py new file mode 100644 index 0000000000..383d1cbea3 --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_EstimateContrast.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import EstimateContrast + + +def test_EstimateContrast_inputs(): + input_map = dict( + axis=dict(mandatory=True, ), + beta=dict(mandatory=True, ), + constants=dict(mandatory=True, ), + contrasts=dict(mandatory=True, ), + dof=dict(mandatory=True, ), + mask=dict(), + nvbeta=dict(mandatory=True, ), + reg_names=dict(mandatory=True, ), + s2=dict(mandatory=True, ), + ) + inputs = EstimateContrast.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateContrast_outputs(): + output_map = dict( + p_maps=dict(), + stat_maps=dict(), + z_maps=dict(), + ) + outputs = EstimateContrast.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_FitGLM.py b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py new file mode 100644 index 0000000000..4ac3aa720e --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_FitGLM.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FitGLM + + +def test_FitGLM_inputs(): + input_map = dict( + TR=dict(mandatory=True, ), + drift_model=dict(usedefault=True, ), + hrf_model=dict(usedefault=True, ), + mask=dict(), + method=dict(usedefault=True, ), + model=dict(usedefault=True, ), + normalize_design_matrix=dict(usedefault=True, ), + plot_design_matrix=dict(usedefault=True, ), + save_residuals=dict(usedefault=True, ), + session_info=dict(mandatory=True, ), + ) + inputs = FitGLM.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FitGLM_outputs(): + output_map = dict( + a=dict(), + axis=dict(), + beta=dict(), + constants=dict(), + dof=dict(), + nvbeta=dict(), + reg_names=dict(), + residuals=dict(), + s2=dict(), + ) + outputs = FitGLM.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py b/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py new file mode 100644 index 0000000000..ca91500995 --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_NipyBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import NipyBaseInterface + + +def test_NipyBaseInterface_inputs(): + input_map = dict() + inputs = NipyBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_Similarity.py b/nipype/interfaces/nipy/tests/test_auto_Similarity.py new file mode 100644 index 0000000000..fbdb6fffa6 --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_Similarity.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT 
EDIT +from __future__ import unicode_literals +from ..utils import Similarity + + +def test_Similarity_inputs(): + input_map = dict( + mask1=dict(), + mask2=dict(), + metric=dict(usedefault=True, ), + volume1=dict(mandatory=True, ), + volume2=dict(mandatory=True, ), + ) + inputs = Similarity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Similarity_outputs(): + output_map = dict(similarity=dict(), ) + outputs = Similarity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py new file mode 100644 index 0000000000..fca6664a49 --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_SpaceTimeRealigner.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SpaceTimeRealigner + + +def test_SpaceTimeRealigner_inputs(): + input_map = dict( + in_file=dict( + mandatory=True, + min_ver='0.4.0.dev', + ), + slice_info=dict(requires=['slice_times'], ), + slice_times=dict(), + tr=dict(requires=['slice_times'], ), + ) + inputs = SpaceTimeRealigner.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SpaceTimeRealigner_outputs(): + output_map = dict( + out_file=dict(), + par_file=dict(), + ) + outputs = SpaceTimeRealigner.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/tests/test_auto_Trim.py b/nipype/interfaces/nipy/tests/test_auto_Trim.py new file mode 100644 index 0000000000..6073c9082b --- /dev/null +++ b/nipype/interfaces/nipy/tests/test_auto_Trim.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Trim + + +def test_Trim_inputs(): + input_map = dict( + begin_index=dict(usedefault=True, ), + end_index=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + out_file=dict(), + suffix=dict(usedefault=True, ), + ) + inputs = Trim.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Trim_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Trim.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nipy/utils.py b/nipype/interfaces/nipy/utils.py new file mode 100644 index 0000000000..406960fc6d --- /dev/null +++ b/nipype/interfaces/nipy/utils.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import warnings +import nibabel as nb + +from .base import NipyBaseInterface, have_nipy +from ..base import (TraitedSpec, traits, BaseInterfaceInputSpec, + File, isdefined) + + +class SimilarityInputSpec(BaseInterfaceInputSpec): + volume1 = File(exists=True, desc="3D volume", mandatory=True) + volume2 = File(exists=True, 
desc="3D volume", mandatory=True) + mask1 = File(exists=True, desc="3D volume") + mask2 = File(exists=True, desc="3D volume") + metric = traits.Either( + traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'), + traits.Callable(), + desc="""str or callable +Cost-function for assessing image similarity. If a string, +one of 'cc': correlation coefficient, 'cr': correlation +ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual +information, 'nmi': normalized mutual information, 'slr': +supervised log-likelihood ratio. If a callable, it should +take a two-dimensional array representing the image joint +histogram as an input and return a float.""", + usedefault=True) + + +class SimilarityOutputSpec(TraitedSpec): + similarity = traits.Float(desc="Similarity between volume 1 and 2") + + +class Similarity(NipyBaseInterface): + """Calculates similarity between two 3D volumes. Both volumes have to be in + the same coordinate system, same space within that coordinate system and + with the same voxel dimensions. + + .. deprecated:: 0.10.0 + Use :py:class:`nipype.algorithms.metrics.Similarity` instead. + + Example + ------- + >>> from nipype.interfaces.nipy.utils import Similarity + >>> similarity = Similarity() + >>> similarity.inputs.volume1 = 'rc1s1.nii' + >>> similarity.inputs.volume2 = 'rc1s2.nii' + >>> similarity.inputs.mask1 = 'mask.nii' + >>> similarity.inputs.mask2 = 'mask.nii' + >>> similarity.inputs.metric = 'cr' + >>> res = similarity.run() # doctest: +SKIP + """ + + input_spec = SimilarityInputSpec + output_spec = SimilarityOutputSpec + + def __init__(self, **inputs): + warnings.warn(("This interface is deprecated since 0.10.0." + " Please use nipype.algorithms.metrics.Similarity"), + DeprecationWarning) + super(Similarity, self).__init__(**inputs) + + def _run_interface(self, runtime): + from nipy.algorithms.registration.histogram_registration import HistogramRegistration + from nipy.algorithms.registration.affine import Affine + + vol1_nii = nb.load(self.inputs.volume1) + vol2_nii = nb.load(self.inputs.volume2) + + if isdefined(self.inputs.mask1): + mask1 = nb.load(self.inputs.mask1).get_data() == 1 + else: + mask1 = None + + if isdefined(self.inputs.mask2): + mask2 = nb.load(self.inputs.mask2).get_data() == 1 + else: + mask2 = None + + histreg = HistogramRegistration( + from_img=vol1_nii, + to_img=vol2_nii, + similarity=self.inputs.metric, + from_mask=mask1, + to_mask=mask2) + self._similarity = histreg.eval(Affine()) + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['similarity'] = self._similarity + return outputs diff --git a/nipype/interfaces/nitime/__init__.py b/nipype/interfaces/nitime/__init__.py new file mode 100644 index 0000000000..656d601b3e --- /dev/null +++ b/nipype/interfaces/nitime/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from .analysis import (CoherenceAnalyzerInputSpec, CoherenceAnalyzerOutputSpec, + CoherenceAnalyzer) diff --git a/nipype/interfaces/nitime/analysis.py b/nipype/interfaces/nitime/analysis.py new file mode 100644 index 0000000000..6e8ff17d0f --- /dev/null +++ b/nipype/interfaces/nitime/analysis.py @@ -0,0 +1,283 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" + +Interfaces to functionality from nitime for time-series analysis of fmri data + +- nitime.analysis.CoherenceAnalyzer: 
Coherence/y +- nitime.fmri.io: +- nitime.viz.drawmatrix_channels + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import zip, object, open + +import numpy as np +import tempfile + +from ...utils.misc import package_check +from ...utils.filemanip import fname_presuffix +from .base import NitimeBaseInterface +from ..base import (TraitedSpec, File, Undefined, traits, + isdefined, BaseInterfaceInputSpec) + +have_nitime = True +try: + package_check('nitime') +except ImportError: + have_nitime = False + + +class CoherenceAnalyzerInputSpec(BaseInterfaceInputSpec): + + # Input either csv file, or time-series object and use _xor_inputs to + # discriminate + _xor_inputs = ('in_file', 'in_TS') + in_file = File( + desc=('csv file with ROIs on the columns and ' + 'time-points on the rows. ROI names at the top row'), + exists=True, + requires=('TR', )) + + # If you gave just a file name, you need to specify the sampling_rate: + TR = traits.Float( + desc=('The TR used to collect the data' + 'in your csv file ')) + + in_TS = traits.Any(desc='a nitime TimeSeries object') + + NFFT = traits.Range( + low=32, + value=64, + usedefault=True, + desc=('This is the size of the window used for ' + 'the spectral estimation. Use values between ' + '32 and the number of samples in your time-series.' + '(Defaults to 64.)')) + n_overlap = traits.Range( + low=0, + value=0, + usedefault=True, + desc=('The number of samples which overlap' + 'between subsequent windows.(Defaults to 0)')) + + frequency_range = traits.List( + value=[0.02, 0.15], + usedefault=True, + minlen=2, + maxlen=2, + desc=('The range of frequencies over' + 'which the analysis will average.' + '[low,high] (Default [0.02,0.15]')) + + output_csv_file = File( + desc= + 'File to write outputs (coherence,time-delay) with file-names: file_name_ {coherence,timedelay}' + ) + + output_figure_file = File( + desc= + 'File to write output figures (coherence,time-delay) with file-names: file_name_{coherence,timedelay}. Possible formats: .png,.svg,.pdf,.jpg,...' + ) + + figure_type = traits.Enum( + 'matrix', + 'network', + usedefault=True, + desc=("The type of plot to generate, where " + "'matrix' denotes a matrix image and" + "'network' denotes a graph representation." 
+ " Default: 'matrix'")) + + +class CoherenceAnalyzerOutputSpec(TraitedSpec): + coherence_array = traits.Array( + desc=('The pairwise coherence values' + 'between the ROIs')) + + timedelay_array = traits.Array( + desc=('The pairwise time delays between the' + 'ROIs (in seconds)')) + + coherence_csv = File( + desc=('A csv file containing the pairwise ' + 'coherence values')) + + timedelay_csv = File( + desc=('A csv file containing the pairwise ' + 'time delay values')) + + coherence_fig = File(desc=('Figure representing coherence values')) + timedelay_fig = File(desc=('Figure representing coherence values')) + + +class CoherenceAnalyzer(NitimeBaseInterface): + + input_spec = CoherenceAnalyzerInputSpec + output_spec = CoherenceAnalyzerOutputSpec + + def _read_csv(self): + """ + Read from csv in_file and return an array and ROI names + + The input file should have a first row containing the names of the + ROIs (strings) + + the rest of the data will be read in and transposed so that the rows + (TRs) will becomes the second (and last) dimension of the array + + """ + # Check that input conforms to expectations: + first_row = open(self.inputs.in_file).readline() + if not first_row[1].isalpha(): + raise ValueError( + "First row of in_file should contain ROI names as strings of characters" + ) + + roi_names = open(self.inputs.in_file).readline().replace( + '\"', '').strip('\n').split(',') + # Transpose, so that the time is the last dimension: + data = np.loadtxt(self.inputs.in_file, skiprows=1, delimiter=',').T + + return data, roi_names + + def _csv2ts(self): + """ Read data from the in_file and generate a nitime TimeSeries object""" + from nitime.timeseries import TimeSeries + data, roi_names = self._read_csv() + + TS = TimeSeries( + data=data, sampling_interval=self.inputs.TR, time_unit='s') + + TS.metadata = dict(ROIs=roi_names) + + return TS + + # Rewrite _run_interface, but not run + def _run_interface(self, runtime): + import nitime.analysis as nta + lb, ub = self.inputs.frequency_range + + if self.inputs.in_TS is Undefined: + # get TS form csv and inputs.TR + TS = self._csv2ts() + + else: + # get TS from inputs.in_TS + TS = self.inputs.in_TS + + # deal with creating or storing ROI names: + if 'ROIs' not in TS.metadata: + self.ROIs = ['roi_%d' % x for x, _ in enumerate(TS.data)] + else: + self.ROIs = TS.metadata['ROIs'] + + A = nta.CoherenceAnalyzer( + TS, + method=dict( + this_method='welch', + NFFT=self.inputs.NFFT, + n_overlap=self.inputs.n_overlap)) + + freq_idx = np.where( + (A.frequencies > self.inputs.frequency_range[0]) * + (A.frequencies < self.inputs.frequency_range[1]))[0] + + # Get the coherence matrix from the analyzer, averaging on the last + # (frequency) dimension: (roi X roi array) + self.coherence = np.mean(A.coherence[:, :, freq_idx], -1) + # Get the time delay from analyzer, (roi X roi array) + self.delay = np.mean(A.delay[:, :, freq_idx], -1) + return runtime + + # Rewrite _list_outputs (look at BET) + def _list_outputs(self): + outputs = self.output_spec().get() + + # if isdefined(self.inputs.output_csv_file): + + # write to a csv file and assign a value to self.coherence_file (a + # file name + path) + + # Always defined (the arrays): + outputs['coherence_array'] = self.coherence + outputs['timedelay_array'] = self.delay + + # Conditional + if isdefined(self.inputs.output_csv_file) and hasattr( + self, 'coherence'): + # we need to make a function that we call here that writes the + # coherence values to this file "coherence_csv" and makes the + # time_delay csv 
file?? + self._make_output_files() + outputs['coherence_csv'] = fname_presuffix( + self.inputs.output_csv_file, suffix='_coherence') + + outputs['timedelay_csv'] = fname_presuffix( + self.inputs.output_csv_file, suffix='_delay') + + if isdefined(self.inputs.output_figure_file) and hasattr( + self, 'coherence'): + self._make_output_figures() + outputs['coherence_fig'] = fname_presuffix( + self.inputs.output_figure_file, suffix='_coherence') + outputs['timedelay_fig'] = fname_presuffix( + self.inputs.output_figure_file, suffix='_delay') + + return outputs + + def _make_output_files(self): + """ + Generate the output csv files. + """ + for this in zip([self.coherence, self.delay], ['coherence', 'delay']): + tmp_f = tempfile.mkstemp()[1] + np.savetxt(tmp_f, this[0], delimiter=',') + + fid = open( + fname_presuffix( + self.inputs.output_csv_file, suffix='_%s' % this[1]), 'w+') + # this writes ROIs as header line + fid.write(',' + ','.join(self.ROIs) + '\n') + # this writes ROI and data to a line + for r, line in zip(self.ROIs, open(tmp_f)): + fid.write('%s,%s' % (r, line)) + fid.close() + + def _make_output_figures(self): + """ + Generate the desired figure and save the files according to + self.inputs.output_figure_file + + """ + import nitime.viz as viz + if self.inputs.figure_type == 'matrix': + fig_coh = viz.drawmatrix_channels( + self.coherence, channel_names=self.ROIs, color_anchor=0) + + fig_coh.savefig( + fname_presuffix( + self.inputs.output_figure_file, suffix='_coherence')) + + fig_dt = viz.drawmatrix_channels( + self.delay, channel_names=self.ROIs, color_anchor=0) + + fig_dt.savefig( + fname_presuffix( + self.inputs.output_figure_file, suffix='_delay')) + else: + fig_coh = viz.drawgraph_channels( + self.coherence, channel_names=self.ROIs) + + fig_coh.savefig( + fname_presuffix( + self.inputs.output_figure_file, suffix='_coherence')) + + fig_dt = viz.drawgraph_channels( + self.delay, channel_names=self.ROIs) + + fig_dt.savefig( + fname_presuffix( + self.inputs.output_figure_file, suffix='_delay')) diff --git a/nipype/interfaces/nitime/base.py b/nipype/interfaces/nitime/base.py new file mode 100644 index 0000000000..d9139f2c71 --- /dev/null +++ b/nipype/interfaces/nitime/base.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Base interface for nitime """ + +from ..base import LibraryBaseInterface + + +class NitimeBaseInterface(LibraryBaseInterface): + _pkg = 'nitime' diff --git a/nipype/interfaces/nitime/tests/__init__.py b/nipype/interfaces/nitime/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/nitime/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py new file mode 100644 index 0000000000..4a641ebb50 --- /dev/null +++ b/nipype/interfaces/nitime/tests/test_auto_CoherenceAnalyzer.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..analysis import CoherenceAnalyzer + + +def test_CoherenceAnalyzer_inputs(): + input_map = dict( + NFFT=dict(usedefault=True, ), + TR=dict(), + figure_type=dict(usedefault=True, ), + frequency_range=dict(usedefault=True, ), + in_TS=dict(), + in_file=dict(requires=('TR', ), ), + n_overlap=dict(usedefault=True, ), + output_csv_file=dict(), + output_figure_file=dict(), + ) + 
inputs = CoherenceAnalyzer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CoherenceAnalyzer_outputs(): + output_map = dict( + coherence_array=dict(), + coherence_csv=dict(), + coherence_fig=dict(), + timedelay_array=dict(), + timedelay_csv=dict(), + timedelay_fig=dict(), + ) + outputs = CoherenceAnalyzer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py b/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py new file mode 100644 index 0000000000..bf827b81b2 --- /dev/null +++ b/nipype/interfaces/nitime/tests/test_auto_NitimeBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import NitimeBaseInterface + + +def test_NitimeBaseInterface_inputs(): + input_map = dict() + inputs = NitimeBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/nitime/tests/test_nitime.py b/nipype/interfaces/nitime/tests/test_nitime.py new file mode 100644 index 0000000000..daea1a177b --- /dev/null +++ b/nipype/interfaces/nitime/tests/test_nitime.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os +import tempfile + +import numpy as np + +import pytest +from nipype.testing import example_data +import nipype.interfaces.nitime as nitime + +no_nitime = not nitime.analysis.have_nitime +display_available = 'DISPLAY' in os.environ and os.environ['DISPLAY'] + + +@pytest.mark.skipif(no_nitime, reason="nitime is not installed") +def test_read_csv(): + """Test that reading the data from csv file gives you back a reasonable + time-series object """ + CA = nitime.CoherenceAnalyzer() + CA.inputs.TR = 1.89 # bogus value just to pass traits test + CA.inputs.in_file = example_data('fmri_timeseries_nolabels.csv') + with pytest.raises(ValueError): + CA._read_csv() + + CA.inputs.in_file = example_data('fmri_timeseries.csv') + data, roi_names = CA._read_csv() + assert data[0][0] == 10125.9 + assert roi_names[0] == 'WM' + + +@pytest.mark.skipif(no_nitime, reason="nitime is not installed") +def test_coherence_analysis(tmpdir): + """Test that the coherence analyzer works """ + import nitime.analysis as nta + import nitime.timeseries as ts + + tmpdir.chdir() + # This is the nipype interface analysis: + CA = nitime.CoherenceAnalyzer() + CA.inputs.TR = 1.89 + CA.inputs.in_file = example_data('fmri_timeseries.csv') + if display_available: + tmp_png = tempfile.mkstemp(suffix='.png')[1] + CA.inputs.output_figure_file = tmp_png + tmp_csv = tempfile.mkstemp(suffix='.csv')[1] + CA.inputs.output_csv_file = tmp_csv + + o = CA.run() + assert o.outputs.coherence_array.shape == (31, 31) + + # This is the nitime analysis: + TR = 1.89 + data_rec = np.recfromcsv(example_data('fmri_timeseries.csv')) + roi_names = np.array(data_rec.dtype.names) + n_samples = data_rec.shape[0] + data = np.zeros((len(roi_names), n_samples)) + + for n_idx, roi in enumerate(roi_names): + data[n_idx] = data_rec[roi] + + T = ts.TimeSeries(data, sampling_interval=TR) + + assert 
(CA._csv2ts().data == T.data).all() + + T.metadata['roi'] = roi_names + C = nta.CoherenceAnalyzer( + T, + method=dict( + this_method='welch', + NFFT=CA.inputs.NFFT, + n_overlap=CA.inputs.n_overlap)) + + freq_idx = np.where((C.frequencies > CA.inputs.frequency_range[0]) * + (C.frequencies < CA.inputs.frequency_range[1]))[0] + + # Extract the coherence and average across these frequency bands: + # Averaging is done on the last dimension + coh = np.mean(C.coherence[:, :, freq_idx], -1) + + assert (o.outputs.coherence_array == coh).all() diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py new file mode 100644 index 0000000000..598dd898c5 --- /dev/null +++ b/nipype/interfaces/petpvc.py @@ -0,0 +1,236 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +from .base import TraitedSpec, CommandLineInputSpec, CommandLine, File, isdefined, traits +from ..utils.filemanip import fname_presuffix +from ..external.due import BibTeX + +pvc_methods = [ + 'GTM', 'IY', 'IY+RL', 'IY+VC', 'LABBE', 'LABBE+MTC', 'LABBE+MTC+RL', + 'LABBE+MTC+VC', 'LABBE+RBV', 'LABBE+RBV+RL', 'LABBE+RBV+VC', 'MG', 'MG+RL', + 'MG+VC', 'MTC', 'MTC+RL', 'MTC+VC', 'RBV', 'RBV+RL', 'RBV+VC', 'RL', 'VC' +] + + +class PETPVCInputSpec(CommandLineInputSpec): + in_file = File( + desc="PET image file", exists=True, mandatory=True, argstr="-i %s") + out_file = File( + desc="Output file", genfile=True, hash_files=False, argstr="-o %s") + mask_file = File( + desc="Mask image file", exists=True, mandatory=True, argstr="-m %s") + pvc = traits.Enum( + pvc_methods, desc="Desired PVC method", mandatory=True, argstr="-p %s") + fwhm_x = traits.Float( + desc="The full-width at half maximum in mm along x-axis", + mandatory=True, + argstr="-x %.4f") + fwhm_y = traits.Float( + desc="The full-width at half maximum in mm along y-axis", + mandatory=True, + argstr="-y %.4f") + fwhm_z = traits.Float( + desc="The full-width at half maximum in mm along z-axis", + mandatory=True, + argstr="-z %.4f") + debug = traits.Bool( + desc="Prints debug information", + usedefault=True, + default_value=False, + argstr="-d") + n_iter = traits.Int( + desc="Number of iterations", default_value=10, usedefault=True, + argstr="-n %d") + n_deconv = traits.Int( + desc="Number of deconvolution iterations", + default_value=10, + usedefault=True, + argstr="-k %d") + alpha = traits.Float( + desc="Alpha value", default_value=1.5, usedefault=True, + argstr="-a %.4f") + stop_crit = traits.Float( + desc="Stopping criterion", default_value=0.01, usedefault=True, + argstr="-s %.4f") + + +class PETPVCOutputSpec(TraitedSpec): + out_file = File(desc="Output file") + + +class PETPVC(CommandLine): + """ Use PETPVC for partial volume correction of PET images. + + PETPVC is software from the Nuclear Medicine Department + of the UCL University Hospital, London, UK. + + Its source code is here: https://github.com/UCL/PETPVC + + The methods it implements are explained here: + K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton, + "A review of partial volume correction techniques for emission tomography + and their applications in neurology, cardiology and oncology," Phys. Med. + Biol., vol. 57, no. 21, p. R119, 2012.
+ + Its command line help shows this: + + -i --input < filename > + = PET image file + -o --output < filename > + = Output file + [ -m --mask < filename > ] + = Mask image file + -p --pvc < keyword > + = Desired PVC method + -x < X > + = The full-width at half maximum in mm along x-axis + -y < Y > + = The full-width at half maximum in mm along y-axis + -z < Z > + = The full-width at half maximum in mm along z-axis + [ -d --debug ] + = Prints debug information + [ -n --iter [ Val ] ] + = Number of iterations + With: Val (Default = 10) + [ -k [ Val ] ] + = Number of deconvolution iterations + With: Val (Default = 10) + [ -a --alpha [ aval ] ] + = Alpha value + With: aval (Default = 1.5) + [ -s --stop [ stopval ] ] + = Stopping criterion + With: stopval (Default = 0.01) + + Technique - keyword + ------------------- + - Geometric transfer matrix - "GTM" + - Labbe approach - "LABBE" + - Richardson-Lucy - "RL" + - Van-Cittert - "VC" + - Region-based voxel-wise correction - "RBV" + - RBV with Labbe - "LABBE+RBV" + - RBV with Van-Cittert - "RBV+VC" + - RBV with Richardson-Lucy - "RBV+RL" + - RBV with Labbe and Van-Cittert - "LABBE+RBV+VC" + - RBV with Labbe and Richardson-Lucy- "LABBE+RBV+RL" + - Multi-target correction - "MTC" + - MTC with Labbe - "LABBE+MTC" + - MTC with Van-Cittert - "MTC+VC" + - MTC with Richardson-Lucy - "MTC+RL" + - MTC with Labbe and Van-Cittert - "LABBE+MTC+VC" + - MTC with Labbe and Richardson-Lucy- "LABBE+MTC+RL" + - Iterative Yang - "IY" + - Iterative Yang with Van-Cittert - "IY+VC" + - Iterative Yang with Richardson-Lucy - "IY+RL" + - Muller Gartner - "MG" + - Muller Gartner with Van-Cittert - "MG+VC" + - Muller Gartner with Richardson-Lucy - "MG+RL" + + Examples + -------- + >>> from ..testing import example_data + >>> #TODO get data for PETPVC + >>> pvc = PETPVC() + >>> pvc.inputs.in_file = 'pet.nii.gz' + >>> pvc.inputs.mask_file = 'tissues.nii.gz' + >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz' + >>> pvc.inputs.pvc = 'RBV' + >>> pvc.inputs.fwhm_x = 2.0 + >>> pvc.inputs.fwhm_y = 2.0 + >>> pvc.inputs.fwhm_z = 2.0 + >>> outs = pvc.run() #doctest: +SKIP + """ + input_spec = PETPVCInputSpec + output_spec = PETPVCOutputSpec + _cmd = 'petpvc' + + references_ = [{ + 'entry': + BibTeX( + "@article{0031-9155-61-22-7975," + "author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and " + "Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson}," + "title={PETPVC: a toolbox for performing partial volume correction " + "techniques in positron emission tomography}," + "journal={Physics in Medicine and Biology}," + "volume={61}," + "number={22}," + "pages={7975}," + "url={http://stacks.iop.org/0031-9155/61/i=22/a=7975}," + "doi={http://dx.doi.org/10.1088/0031-9155/61/22/7975}," + "year={2016}," + "}"), + 'description': + 'PETPVC software implementation publication', + 'tags': ['implementation'], + }] + + def _list_outputs(self): + outputs = self.output_spec().get() + outputs['out_file'] = self.inputs.out_file + if not isdefined(outputs['out_file']): + method_name = self.inputs.pvc.lower() + outputs['out_file'] = self._gen_fname( + self.inputs.in_file, suffix='_{}_pvc'.format(method_name)) + + outputs['out_file'] = os.path.abspath(outputs['out_file']) + return outputs + + def _gen_fname(self, + basename, + cwd=None, + suffix=None, + change_ext=True, + ext='.nii.gz'): + """Generate a filename based on the given parameters. + + The filename will take the form: cwd/basename. 
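+ For example (illustrative values): a `basename` of 'pet.nii.gz' with + `suffix` '_rbv_pvc' and the default `ext` resolves to + '<cwd>/pet_rbv_pvc.nii.gz'.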
+ If change_ext is True, it will use the extension specified in + the `ext` argument. + + Parameters + ---------- + basename : str + Filename to base the new filename on. + cwd : str + Path to prefix to the new filename. (default is os.getcwd()) + suffix : str + Suffix to add to the `basename`. (default is '') + change_ext : bool + Flag to change the filename extension to the given `ext`. + (Default is True) + + Returns + ------- + fname : str + New filename based on given parameters. + + """ + if basename == '': + msg = 'Unable to generate filename for command %s. ' % self.cmd + msg += 'basename is not set!' + raise ValueError(msg) + if cwd is None: + cwd = os.getcwd() + if change_ext: + if suffix: + suffix = ''.join((suffix, ext)) + else: + suffix = ext + if suffix is None: + suffix = '' + fname = fname_presuffix( + basename, suffix=suffix, use_ext=False, newpath=cwd) + return fname + + def _gen_filename(self, name): + if name == 'out_file': + return self._list_outputs()['out_file'] + return None diff --git a/nipype/interfaces/quickshear.py b/nipype/interfaces/quickshear.py new file mode 100644 index 0000000000..b1317c3599 --- /dev/null +++ b/nipype/interfaces/quickshear.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +""" Quickshear is a simple geometric defacing algorithm +""" +from __future__ import unicode_literals + +from .base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File +from ..external.due import BibTeX + + +class QuickshearInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + position=1, + argstr='%s', + mandatory=True, + desc="neuroimage to deface") + mask_file = File( + exists=True, + position=2, + argstr='%s', + desc="brain mask", + mandatory=True) + out_file = File( + name_template="%s_defaced", + name_source='in_file', + position=3, + argstr='%s', + desc="defaced output image", + keep_extension=True) + buff = traits.Int( + position=4, + argstr='%d', + desc='buffer size (in voxels) between shearing ' + 'plane and the brain') + + +class QuickshearOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="defaced output image") + + +class Quickshear(CommandLine): + """ + Quickshear is a simple geometric defacing algorithm + + Given an anatomical image and a reasonable brainmask, Quickshear estimates + a shearing plane with the brain mask on one side and the face on the other, + zeroing out the face side. + + >>> from nipype.interfaces.quickshear import Quickshear + >>> qs = Quickshear(in_file='T1.nii', mask_file='brain_mask.nii') + >>> qs.cmdline + 'quickshear T1.nii brain_mask.nii T1_defaced.nii' + + In the absence of a precomputed mask, a simple pipeline can be generated + with any tool that generates brain masks: + + >>> from nipype.pipeline import engine as pe + >>> from nipype.interfaces import utility as niu + >>> from nipype.interfaces.fsl import BET + >>> deface_wf = pe.Workflow('deface_wf') + >>> inputnode = pe.Node(niu.IdentityInterface(['in_file']), + ... name='inputnode') + >>> outputnode = pe.Node(niu.IdentityInterface(['out_file']), + ... name='outputnode') + >>> bet = pe.Node(BET(mask=True), name='bet') + >>> quickshear = pe.Node(Quickshear(), name='quickshear') + >>> deface_wf.connect([ + ... (inputnode, bet, [('in_file', 'in_file')]), + ... (inputnode, quickshear, [('in_file', 'in_file')]), + ... (bet, quickshear, [('mask_file', 'mask_file')]), + ... (quickshear, outputnode, [('out_file', 'out_file')]), + ...
]) + >>> inputnode.inputs.in_file = 'T1.nii' + >>> res = deface_wf.run() # doctest: +SKIP + """ + _cmd = 'quickshear' + input_spec = QuickshearInputSpec + output_spec = QuickshearOutputSpec + + references_ = [{ + 'entry': + BibTeX('@inproceedings{Schimke2011,' + 'address = {San Francisco},' + 'author = {Schimke, Nakeisha and Hale, John},' + 'booktitle = {Proceedings of the 2nd USENIX Conference on ' + 'Health Security and Privacy},' + 'title = {{Quickshear Defacing for Neuroimages}},' + 'year = {2011},' + 'month = sep}'), + 'tags': ['implementation'], + }] diff --git a/nipype/interfaces/semtools/__init__.py b/nipype/interfaces/semtools/__init__.py new file mode 100644 index 0000000000..14473b8381 --- /dev/null +++ b/nipype/interfaces/semtools/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import * +from .featurecreator import GenerateCsfClippedFromClassifiedImage +from .segmentation import * +from .filtering import * +from .brains import * +from .testing import * +from .utilities import * +from .legacy import * +from .registration import * +from .converters import DWISimpleCompare, DWICompare diff --git a/nipype/interfaces/semtools/brains/__init__.py b/nipype/interfaces/semtools/brains/__init__.py new file mode 100644 index 0000000000..ebfab84bf3 --- /dev/null +++ b/nipype/interfaces/semtools/brains/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .segmentation import SimilarityIndex, BRAINSTalairach, BRAINSTalairachMask +from .utilities import (HistogramMatchingFilter, GenerateEdgeMapImage, + GeneratePurePlugMask) +from .classify import BRAINSPosteriorToContinuousClass diff --git a/nipype/interfaces/semtools/brains/classify.py b/nipype/interfaces/semtools/brains/classify.py new file mode 100644 index 0000000000..89bb74f039 --- /dev/null +++ b/nipype/interfaces/semtools/brains/classify.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSPosteriorToContinuousClassInputSpec(CommandLineInputSpec): + inputWhiteVolume = File( + desc="White Matter Posterior Volume", + exists=True, + argstr="--inputWhiteVolume %s") + inputBasalGmVolume = File( + desc="Basal Grey Matter Posterior Volume", + exists=True, + argstr="--inputBasalGmVolume %s") + inputSurfaceGmVolume = File( + desc="Surface Grey Matter Posterior Volume", + exists=True, + argstr="--inputSurfaceGmVolume %s") + inputCsfVolume = File( + desc="CSF Posterior Volume", exists=True, argstr="--inputCsfVolume %s") + inputVbVolume = File( + desc="Venous Blood Posterior Volume", + exists=True, + argstr="--inputVbVolume %s") + inputCrblGmVolume = File( + desc="Cerebellum Grey Matter Posterior Volume", + exists=True, + argstr="--inputCrblGmVolume %s") + inputCrblWmVolume = File( + desc="Cerebellum White Matter Posterior Volume", + exists=True, + argstr="--inputCrblWmVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Continuous Tissue Classified Image", + argstr="--outputVolume %s") + + +class BRAINSPosteriorToContinuousClassOutputSpec(TraitedSpec): + outputVolume = File( + desc="Output Continuous Tissue Classified Image", exists=True) + + +class 
BRAINSPosteriorToContinuousClass(SEMLikeCommandLine): + """title: Tissue Classification + +category: BRAINS.Classify + +description: This program will generate an 8-bit continuous tissue classified image based on BRAINSABC posterior images. + +version: 3.0 + +documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSClassify + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Vincent A. Magnotta + +acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + +""" + + input_spec = BRAINSPosteriorToContinuousClassInputSpec + output_spec = BRAINSPosteriorToContinuousClassOutputSpec + _cmd = " BRAINSPosteriorToContinuousClass " + _outputs_filenames = {'outputVolume': 'outputVolume'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/brains/segmentation.py b/nipype/interfaces/semtools/brains/segmentation.py new file mode 100644 index 0000000000..fae5e4f1a2 --- /dev/null +++ b/nipype/interfaces/semtools/brains/segmentation.py @@ -0,0 +1,185 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class SimilarityIndexInputSpec(CommandLineInputSpec): + outputCSVFilename = File( + desc="output CSV Filename", + exists=True, + argstr="--outputCSVFilename %s") + ANNContinuousVolume = File( + desc="ANN Continuous volume to be compared to the manual volume", + exists=True, + argstr="--ANNContinuousVolume %s") + inputManualVolume = File( + desc="input manual(reference) volume", + exists=True, + argstr="--inputManualVolume %s") + thresholdInterval = traits.Float( + desc= + "Threshold interval to compute similarity index between zero and one", + argstr="--thresholdInterval %f") + + +class SimilarityIndexOutputSpec(TraitedSpec): + pass + + +class SimilarityIndex(SEMLikeCommandLine): + """title: BRAINSCut:SimilarityIndexComputation + +category: BRAINS.Segmentation + +description: Automatic analysis of BRAINSCut Output + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Eunyoung Regin Kim + +""" + + input_spec = SimilarityIndexInputSpec + output_spec = SimilarityIndexOutputSpec + _cmd = " SimilarityIndex " + _outputs_filenames = {} + _redirect_x = False + + +class BRAINSTalairachInputSpec(CommandLineInputSpec): + AC = InputMultiPath( + traits.Float, desc="Location of AC Point ", sep=",", argstr="--AC %s") + ACisIndex = traits.Bool(desc="AC Point is Index", argstr="--ACisIndex ") + PC = InputMultiPath( + traits.Float, desc="Location of PC Point ", sep=",", argstr="--PC %s") + PCisIndex = traits.Bool(desc="PC Point is Index", argstr="--PCisIndex ") + SLA = InputMultiPath( + traits.Float, + desc="Location of SLA Point ", + sep=",", + argstr="--SLA %s") + SLAisIndex = traits.Bool(desc="SLA Point is Index", argstr="--SLAisIndex ") + IRP = InputMultiPath( + traits.Float, + desc="Location of IRP Point ", + sep=",", + argstr="--IRP %s") + IRPisIndex = traits.Bool(desc="IRP Point is Index", argstr="--IRPisIndex ") + inputVolume = File( + desc="Input image used to define physical space of images", + exists=True, + argstr="--inputVolume %s") + outputBox = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Name of the resulting Talairach Bounding Box file", + 
argstr="--outputBox %s") + outputGrid = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Name of the resulting Talairach Grid file", + argstr="--outputGrid %s") + + +class BRAINSTalairachOutputSpec(TraitedSpec): + outputBox = File( + desc="Name of the resulting Talairach Bounding Box file", exists=True) + outputGrid = File( + desc="Name of the resulting Talairach Grid file", exists=True) + + +class BRAINSTalairach(SEMLikeCommandLine): + """title: BRAINS Talairach + +category: BRAINS.Segmentation + +description: This program creates a VTK structured grid defining the Talairach coordinate system based on four points: AC, PC, IRP, and SLA. The resulting structred grid can be written as either a classic VTK file or the new VTK XML file format. Two representations of the resulting grid can be written. The first is a bounding box representation that also contains the location of the AC and PC points. The second representation is the full Talairach grid representation that includes the additional rows of boxes added to the inferior allowing full coverage of the cerebellum. + +version: 0.1 + +documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairach + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Steven Dunn and Vincent Magnotta + +acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + +""" + + input_spec = BRAINSTalairachInputSpec + output_spec = BRAINSTalairachOutputSpec + _cmd = " BRAINSTalairach " + _outputs_filenames = {'outputGrid': 'outputGrid', 'outputBox': 'outputBox'} + _redirect_x = False + + +class BRAINSTalairachMaskInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Input image used to define physical space of resulting mask", + exists=True, + argstr="--inputVolume %s") + talairachParameters = File( + desc="Name of the Talairach parameter file.", + exists=True, + argstr="--talairachParameters %s") + talairachBox = File( + desc="Name of the Talairach box file.", + exists=True, + argstr="--talairachBox %s") + hemisphereMode = traits.Enum( + "left", + "right", + "both", + desc="Mode for box creation: left, right, both", + argstr="--hemisphereMode %s") + expand = traits.Bool( + desc="Expand exterior box to include surface CSF", argstr="--expand ") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output filename for the resulting binary image", + argstr="--outputVolume %s") + + +class BRAINSTalairachMaskOutputSpec(TraitedSpec): + outputVolume = File( + desc="Output filename for the resulting binary image", exists=True) + + +class BRAINSTalairachMask(SEMLikeCommandLine): + """title: Talairach Mask + +category: BRAINS.Segmentation + +description: This program creates a binary image representing the specified Talairach region. The input is an example image to define the physical space for the resulting image, the Talairach grid representation in VTK format, and the file containing the Talairach box definitions to be generated. These can be combined in BRAINS to create a label map using the procedure Brains::WorkupUtils::CreateLabelMapFromBinaryImages. 
+ +version: 0.1 + +documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/brains:BRAINSTalairachMask + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Steven Dunn and Vincent Magnotta + +acknowledgements: Funding for this work was provided by NIH/NINDS award NS050568 + +""" + + input_spec = BRAINSTalairachMaskInputSpec + output_spec = BRAINSTalairachMaskOutputSpec + _cmd = " BRAINSTalairachMask " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/brains/tests/__init__.py b/nipype/interfaces/semtools/brains/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py new file mode 100644 index 0000000000..81a22bfe38 --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSPosteriorToContinuousClass.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..classify import BRAINSPosteriorToContinuousClass + + +def test_BRAINSPosteriorToContinuousClass_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBasalGmVolume=dict(argstr='--inputBasalGmVolume %s', ), + inputCrblGmVolume=dict(argstr='--inputCrblGmVolume %s', ), + inputCrblWmVolume=dict(argstr='--inputCrblWmVolume %s', ), + inputCsfVolume=dict(argstr='--inputCsfVolume %s', ), + inputSurfaceGmVolume=dict(argstr='--inputSurfaceGmVolume %s', ), + inputVbVolume=dict(argstr='--inputVbVolume %s', ), + inputWhiteVolume=dict(argstr='--inputWhiteVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = BRAINSPosteriorToContinuousClass.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSPosteriorToContinuousClass_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSPosteriorToContinuousClass.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py new file mode 100644 index 0000000000..ac589ad6dc --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairach.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import BRAINSTalairach + + +def test_BRAINSTalairach_inputs(): + input_map = dict( + AC=dict( + argstr='--AC %s', + sep=',', + ), + ACisIndex=dict(argstr='--ACisIndex ', ), + IRP=dict( + argstr='--IRP %s', + sep=',', + ), + IRPisIndex=dict(argstr='--IRPisIndex ', ), + PC=dict( + argstr='--PC %s', + sep=',', + ), + PCisIndex=dict(argstr='--PCisIndex ', ), + SLA=dict( + argstr='--SLA %s', + sep=',', + ), + SLAisIndex=dict(argstr='--SLAisIndex ', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputBox=dict( + 
argstr='--outputBox %s', + hash_files=False, + ), + outputGrid=dict( + argstr='--outputGrid %s', + hash_files=False, + ), + ) + inputs = BRAINSTalairach.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSTalairach_outputs(): + output_map = dict( + outputBox=dict(), + outputGrid=dict(), + ) + outputs = BRAINSTalairach.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py new file mode 100644 index 0000000000..5ada1576e7 --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_BRAINSTalairachMask.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import BRAINSTalairachMask + + +def test_BRAINSTalairachMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expand=dict(argstr='--expand ', ), + hemisphereMode=dict(argstr='--hemisphereMode %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + talairachBox=dict(argstr='--talairachBox %s', ), + talairachParameters=dict(argstr='--talairachParameters %s', ), + ) + inputs = BRAINSTalairachMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSTalairachMask_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSTalairachMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py new file mode 100644 index 0000000000..eb4bdcffae --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GenerateEdgeMapImage.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utilities import GenerateEdgeMapImage + + +def test_GenerateEdgeMapImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRVolumes=dict(argstr='--inputMRVolumes %s...', ), + inputMask=dict(argstr='--inputMask %s', ), + lowerPercentileMatching=dict(argstr='--lowerPercentileMatching %f', ), + maximumOutputRange=dict(argstr='--maximumOutputRange %d', ), + minimumOutputRange=dict(argstr='--minimumOutputRange %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputEdgeMap=dict( + argstr='--outputEdgeMap %s', + hash_files=False, + ), + outputMaximumGradientImage=dict( + argstr='--outputMaximumGradientImage %s', + hash_files=False, + ), + upperPercentileMatching=dict(argstr='--upperPercentileMatching %f', ), + ) + inputs = GenerateEdgeMapImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateEdgeMapImage_outputs(): + output_map = dict( + 
outputEdgeMap=dict(), + outputMaximumGradientImage=dict(), + ) + outputs = GenerateEdgeMapImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py new file mode 100644 index 0000000000..ae16bc8fab --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_GeneratePurePlugMask.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utilities import GeneratePurePlugMask + + +def test_GeneratePurePlugMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImageModalities=dict(argstr='--inputImageModalities %s...', ), + numberOfSubSamples=dict( + argstr='--numberOfSubSamples %s', + sep=',', + ), + outputMaskFile=dict( + argstr='--outputMaskFile %s', + hash_files=False, + ), + threshold=dict(argstr='--threshold %f', ), + ) + inputs = GeneratePurePlugMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GeneratePurePlugMask_outputs(): + output_map = dict(outputMaskFile=dict(), ) + outputs = GeneratePurePlugMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py new file mode 100644 index 0000000000..1377072149 --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_HistogramMatchingFilter.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utilities import HistogramMatchingFilter + + +def test_HistogramMatchingFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramAlgorithm=dict(argstr='--histogramAlgorithm %s', ), + inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + referenceBinaryVolume=dict(argstr='--referenceBinaryVolume %s', ), + referenceVolume=dict(argstr='--referenceVolume %s', ), + verbose=dict(argstr='--verbose ', ), + writeHistogram=dict(argstr='--writeHistogram %s', ), + ) + inputs = HistogramMatchingFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_HistogramMatchingFilter_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = HistogramMatchingFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py new file mode 
100644 index 0000000000..534488ad10 --- /dev/null +++ b/nipype/interfaces/semtools/brains/tests/test_auto_SimilarityIndex.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import SimilarityIndex + + +def test_SimilarityIndex_inputs(): + input_map = dict( + ANNContinuousVolume=dict(argstr='--ANNContinuousVolume %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputManualVolume=dict(argstr='--inputManualVolume %s', ), + outputCSVFilename=dict(argstr='--outputCSVFilename %s', ), + thresholdInterval=dict(argstr='--thresholdInterval %f', ), + ) + inputs = SimilarityIndex.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SimilarityIndex_outputs(): + output_map = dict() + outputs = SimilarityIndex.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/brains/utilities.py b/nipype/interfaces/semtools/brains/utilities.py new file mode 100644 index 0000000000..d794c9c587 --- /dev/null +++ b/nipype/interfaces/semtools/brains/utilities.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class HistogramMatchingFilterInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--inputVolume %s") + referenceVolume = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--referenceVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Image File Name", + argstr="--outputVolume %s") + referenceBinaryVolume = File( + desc="referenceBinaryVolume", + exists=True, + argstr="--referenceBinaryVolume %s") + inputBinaryVolume = File( + desc="inputBinaryVolume", exists=True, argstr="--inputBinaryVolume %s") + numberOfMatchPoints = traits.Int( + desc=" number of histogram matching points", + argstr="--numberOfMatchPoints %d") + numberOfHistogramBins = traits.Int( + desc=" number of histogram bins", argstr="--numberOfHistogramBins %d") + writeHistogram = traits.Str( + desc= + " decide if histogram data would be written with a prefix of the file name", + argstr="--writeHistogram %s") + histogramAlgorithm = traits.Enum( + "OtsuHistogramMatching", + desc=" histogram algorithm selection", + argstr="--histogramAlgorithm %s") + verbose = traits.Bool( + desc=" verbose mode running for debugging", argstr="--verbose ") + + +class HistogramMatchingFilterOutputSpec(TraitedSpec): + outputVolume = File(desc="Output Image File Name", exists=True) + + +class HistogramMatchingFilter(SEMLikeCommandLine): + """title: Write Out Image Intensities + +category: BRAINS.Utilities + +description: For Analysis + +version: 0.1 + +contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu + +""" + + input_spec = HistogramMatchingFilterInputSpec + output_spec = HistogramMatchingFilterOutputSpec + _cmd = " HistogramMatchingFilter " +
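# Illustrative usage sketch (hypothetical file names): + #   hmf = HistogramMatchingFilter() + #   hmf.inputs.inputVolume = 'subject_t1.nii' + #   hmf.inputs.referenceVolume = 'template_t1.nii' + #   hmf.inputs.outputVolume = 'subject_t1_matched.nii' + #   hmf.run() +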
_outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class GenerateEdgeMapImageInputSpec(CommandLineInputSpec): + inputMRVolumes = InputMultiPath( + File(exists=True), + desc= + "List of input structural MR volumes to create the maximum edgemap", + argstr="--inputMRVolumes %s...") + inputMask = File( + desc= + "Input mask file name. If set, image histogram percentiles will be calculated within the mask", + exists=True, + argstr="--inputMask %s") + minimumOutputRange = traits.Int( + desc= + "Map lower quantile and below to minimum output range. It should be a small number greater than zero. Default is 1", + argstr="--minimumOutputRange %d") + maximumOutputRange = traits.Int( + desc= + "Map upper quantile and above to maximum output range. Default is 255 that is the maximum range of unsigned char", + argstr="--maximumOutputRange %d") + lowerPercentileMatching = traits.Float( + desc= + "Map lower quantile and below to minOutputRange. It should be a value between zero and one", + argstr="--lowerPercentileMatching %f") + upperPercentileMatching = traits.Float( + desc= + "Map upper quantile and above to maxOutputRange. It should be a value between zero and one", + argstr="--upperPercentileMatching %f") + outputEdgeMap = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="output edgemap file name", + argstr="--outputEdgeMap %s") + outputMaximumGradientImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="output gradient image file name", + argstr="--outputMaximumGradientImage %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class GenerateEdgeMapImageOutputSpec(TraitedSpec): + outputEdgeMap = File(desc="(required) output file name", exists=True) + outputMaximumGradientImage = File( + desc="output gradient image file name", exists=True) + + +class GenerateEdgeMapImage(SEMLikeCommandLine): + """title: GenerateEdgeMapImage + +category: BRAINS.Utilities + +description: Automatic edgemap generation for edge-guided super-resolution reconstruction + +version: 1.0 + +contributor: Ali Ghayoor + +""" + + input_spec = GenerateEdgeMapImageInputSpec + output_spec = GenerateEdgeMapImageOutputSpec + _cmd = " GenerateEdgeMapImage " + _outputs_filenames = { + 'outputEdgeMap': 'outputEdgeMap', + 'outputMaximumGradientImage': 'outputMaximumGradientImage' + } + _redirect_x = False + + +class GeneratePurePlugMaskInputSpec(CommandLineInputSpec): + inputImageModalities = InputMultiPath( + File(exists=True), + desc="List of input image file names to create pure plugs mask", + argstr="--inputImageModalities %s...") + threshold = traits.Float( + desc="threshold value to define class membership", + argstr="--threshold %f") + numberOfSubSamples = InputMultiPath( + traits.Int, + desc= + "Number of continuous index samples taken at each direction of lattice space for each plug volume", + sep=",", + argstr="--numberOfSubSamples %s") + outputMaskFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output binary mask file name", + argstr="--outputMaskFile %s") + + +class GeneratePurePlugMaskOutputSpec(TraitedSpec): + outputMaskFile = File( + desc="(required) Output binary mask file name", exists=True) + + +class GeneratePurePlugMask(SEMLikeCommandLine): + """title: GeneratePurePlugMask + +category: BRAINS.Utilities + +description: This program gets several modality image files and returns a binary mask that defines the pure plugs + +version: 1.0 +
+contributor: Ali Ghayoor + +""" + + input_spec = GeneratePurePlugMaskInputSpec + output_spec = GeneratePurePlugMaskOutputSpec + _cmd = " GeneratePurePlugMask " + _outputs_filenames = {'outputMaskFile': 'outputMaskFile'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/converters.py b/nipype/interfaces/semtools/converters.py new file mode 100644 index 0000000000..de638935e5 --- /dev/null +++ b/nipype/interfaces/semtools/converters.py @@ -0,0 +1,95 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class DWISimpleCompareInputSpec(CommandLineInputSpec): + inputVolume1 = File( + desc="First input volume (.nhdr or .nrrd)", + exists=True, + argstr="--inputVolume1 %s") + inputVolume2 = File( + desc="Second input volume (.nhdr or .nrrd)", + exists=True, + argstr="--inputVolume2 %s") + checkDWIData = traits.Bool( + desc="check for existence of DWI data, and if present, compare it", + argstr="--checkDWIData ") + + +class DWISimpleCompareOutputSpec(TraitedSpec): + pass + + +class DWISimpleCompare(SEMLikeCommandLine): + """title: Nrrd DWI comparison + +category: Converters + +description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identical. Used for testing DWIConvert. + +version: 0.1.0.$Revision: 916 $(alpha) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Mark Scully (UIowa) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + +""" + + input_spec = DWISimpleCompareInputSpec + output_spec = DWISimpleCompareOutputSpec + _cmd = " DWISimpleCompare " + _outputs_filenames = {} + _redirect_x = False + + +class DWICompareInputSpec(CommandLineInputSpec): + inputVolume1 = File( + desc="First input volume (.nhdr or .nrrd)", + exists=True, + argstr="--inputVolume1 %s") + inputVolume2 = File( + desc="Second input volume (.nhdr or .nrrd)", + exists=True, + argstr="--inputVolume2 %s") + + +class DWICompareOutputSpec(TraitedSpec): + pass + + +class DWICompare(SEMLikeCommandLine): + """title: Nrrd DWI comparison + +category: Converters + +description: Compares two nrrd format DWI images and verifies that gradient magnitudes, gradient directions, measurement frame, and max B0 value are identical. Used for testing DWIConvert. + +version: 0.1.0.$Revision: 916 $(alpha) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConvert + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Mark Scully (UIowa) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + +""" + + input_spec = DWICompareInputSpec + output_spec = DWICompareOutputSpec + _cmd = " DWICompare " + _outputs_filenames = {} + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/__init__.py b/nipype/interfaces/semtools/diffusion/__init__.py new file mode 100644 index 0000000000..215cfa41d7 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import dtiaverage, dtiestim, dtiprocess, DWIConvert +from .tractography import * +from .gtract import ( + gtractTransformToDisplacementField, gtractInvertBSplineTransform, + gtractConcatDwi, gtractAverageBvalues, gtractCoregBvalues, + gtractResampleAnisotropy, gtractResampleCodeImage, + gtractCopyImageOrientation, gtractCreateGuideFiber, gtractAnisotropyMap, + gtractClipAnisotropy, gtractResampleB0, gtractInvertRigidTransform, + gtractImageConformity, compareTractInclusion, gtractFastMarchingTracking, + gtractInvertDisplacementField, gtractCoRegAnatomy, + gtractResampleDWIInPlace, gtractCostFastMarching, gtractFiberTracking, + extractNrrdVectorIndex, gtractResampleFibers, gtractTensor) +from .maxcurvature import maxcurvature diff --git a/nipype/interfaces/semtools/diffusion/diffusion.py b/nipype/interfaces/semtools/diffusion/diffusion.py new file mode 100644 index 0000000000..af943a04fb --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/diffusion.py @@ -0,0 +1,604 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class dtiaverageInputSpec(CommandLineInputSpec): + inputs = InputMultiPath( + File(exists=True), + desc="List of all the tensor fields to be averaged", + argstr="--inputs %s...") + tensor_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Averaged tensor volume", + argstr="--tensor_output %s") + DTI_double = traits.Bool( + desc= + "Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + + +class dtiaverageOutputSpec(TraitedSpec): + tensor_output = File(desc="Averaged tensor volume", exists=True) + + +class dtiaverage(SEMLikeCommandLine): + """title: DTIAverage (DTIProcess) + +category: Diffusion.Diffusion Tensor Images.CommandLineOnly + +description: dtiaverage is a program that allows computing the average of an arbitrary number of tensor fields (listed after the --inputs option). This program is used in our pipeline as the last step of the atlas building processing. When all the tensor fields have been deformed in the same space, to create the average tensor field (--tensor_output) we use dtiaverage. + Several averaging methods can be used (specified by the --method option): euclidean, log-euclidean and pga. The default is euclidean. + +version: 1.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details.
+ This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. + +contributor: Casey Goodlett + +""" + + input_spec = dtiaverageInputSpec + output_spec = dtiaverageOutputSpec + _cmd = " dtiaverage " + _outputs_filenames = {'tensor_output': 'tensor_output.nii'} + _redirect_x = False + + +class dtiestimInputSpec(CommandLineInputSpec): + dwi_image = File( + desc="DWI image volume (required)", + exists=True, + argstr="--dwi_image %s") + tensor_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Tensor OutputImage", + argstr="--tensor_output %s") + B0 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Baseline image, average of all baseline images", + argstr="--B0 %s") + idwi = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", + argstr="--idwi %s") + B0_mask_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", + argstr="--B0_mask_output %s") + brain_mask = File( + desc= + "Brain mask. Image where for every voxel == 0 the tensors are not estimated. Be aware that in addition a threshold based masking will be performed by default. If such an additional threshold masking is NOT desired, then use option -t 0.", + exists=True, + argstr="--brain_mask %s") + bad_region_mask = File( + desc= + "Bad region mask. Image where for every voxel > 0 the tensors are not estimated", + exists=True, + argstr="--bad_region_mask %s") + method = traits.Enum( + "lls", + "wls", + "nls", + "ml", + desc= + "Estimation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", + argstr="--method %s") + correction = traits.Enum( + "none", + "zero", + "abs", + "nearest", + desc= + "Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s") + threshold = traits.Int( + desc= + "Baseline threshold for estimation. If not specified calculated using an OTSU threshold on the baseline image.", + argstr="--threshold %d") + weight_iterations = traits.Int( + desc= + "Number of iterations to recalculate weightings from tensor estimate", + argstr="--weight_iterations %d") + step = traits.Float( + desc="Gradient descent step size (for nls and ml methods)", + argstr="--step %f") + sigma = traits.Float(argstr="--sigma %f") + DTI_double = traits.Bool( + desc= + "Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + defaultTensor = InputMultiPath( + traits.Float, + desc= + "Default tensor used if estimated tensor is below a given threshold", + sep=",", + argstr="--defaultTensor %s") + shiftNeg = traits.Bool( + desc= + "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error).
This is the same option as the one available in DWIToDTIEstimation in Slicer (but instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have strictly positive eigenvalues", + argstr="--shiftNeg ") + shiftNegCoeff = traits.Float( + desc= + "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error). Instead of just adding the minimum eigenvalue to all the eigenvalues if it is smaller than 0, we use a coefficient to have strictly positive eigenvalues. Coefficient must be between 1.0 and 1.001 (included).", + argstr="--shiftNegCoeff %f") + + +class dtiestimOutputSpec(TraitedSpec): + tensor_output = File(desc="Tensor OutputImage", exists=True) + B0 = File( + desc="Baseline image, average of all baseline images", exists=True) + idwi = File( + desc= + "idwi output image. Image with isotropic diffusion-weighted information = geometric mean of diffusion images", + exists=True) + B0_mask_output = File( + desc= + "B0 mask used for the estimation. B0 thresholded either with the -t option value or the automatic OTSU value", + exists=True) + + +class dtiestim(SEMLikeCommandLine): + """title: DTIEstim (DTIProcess) + +category: Diffusion.Diffusion Weighted Images + +description: dtiestim is a tool that takes in a set of DWIs (with --dwi_image option) in nrrd format and estimates a tensor field out of it. The output tensor file name is specified with the --tensor_output option +There are several methods to estimate the tensors which you can specify with the option --method lls|wls|nls|ml . Here is a short description of the different methods: + +lls + Linear least squares. Standard estimation technique that recovers the tensor parameters by multiplying the log of the normalized signal intensities by the pseudo-inverse of the gradient matrix. Default option. + +wls + Weighted least squares. This method is similar to the linear least squares method except that the gradient matrix is weighted by the original lls estimate. (See Salvador, R., Pena, A., Menon, D. K., Carpenter, T. A., Pickard, J. D., and Bullmore, E. T. Formal characterization and extension of the linearized diffusion tensor model. Human Brain Mapping 24, 2 (Feb. 2005), 144-155. for more information on this method). This method is recommended for most applications. The weight for each iteration can be specified with the --weight_iterations. It is not currently the default due to occasional matrix singularities. +nls + Non-linear least squares. This method does not take the log of the signal and requires an optimization based on Levenberg-Marquardt to optimize the parameters of the signal. The lls estimate is used as an initialization. For this method the step size can be specified with the --step option. +ml + Maximum likelihood estimation. This method is experimental and is not currently recommended. For this ml method the sigma can be specified with the option --sigma and the step size can be specified with the --step option. + +You can set a threshold (--threshold) to have the tensor estimated to only a subset of voxels. All the baseline voxels with a value higher than the threshold define the voxels where the tensors are computed. If not specified the threshold is calculated using an OTSU threshold on the baseline image. The mask generated by the -t option or by the OTSU value can be saved with the --B0_mask_output option.
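+ +A minimal invocation could look like this (hypothetical file names): + + dtiestim --dwi_image dwi.nhdr --tensor_output tensors.nhdr --method wls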
+ +dtiestim also can extract a few scalar images out of the DWI set of images: + + - the average baseline image (--B0) which is the average of all the B0s. + - the IDWI (--idwi) which is the geometric mean of the diffusion images. + +You can also load a mask if you want to compute the tensors only where the voxels are non-zero (--brain_mask) or a negative mask and the tensors will be estimated where the negative mask has zero values (--bad_region_mask). + +version: 1.2.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. + +contributor: Casey Goodlett, Francois Budin + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler. + +""" + + input_spec = dtiestimInputSpec + output_spec = dtiestimOutputSpec + _cmd = " dtiestim " + _outputs_filenames = { + 'B0': 'B0.nii', + 'idwi': 'idwi.nii', + 'tensor_output': 'tensor_output.nii', + 'B0_mask_output': 'B0_mask_output.nii' + } + _redirect_x = False + + +class dtiprocessInputSpec(CommandLineInputSpec): + dti_image = File( + desc="DTI tensor volume", exists=True, argstr="--dti_image %s") + fa_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Fractional Anisotropy output file", + argstr="--fa_output %s") + md_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Mean Diffusivity output file", + argstr="--md_output %s") + sigma = traits.Float(desc="Scale of gradients", argstr="--sigma %f") + fa_gradient_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Fractional Anisotropy Gradient output file", + argstr="--fa_gradient_output %s") + fa_gradmag_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Fractional Anisotropy Gradient Magnitude output file", + argstr="--fa_gradmag_output %s") + color_fa_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Color Fractional Anisotropy output file", + argstr="--color_fa_output %s") + principal_eigenvector_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Principal Eigenvectors Output", + argstr="--principal_eigenvector_output %s") + negative_eigenvector_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", + argstr="--negative_eigenvector_output %s") + frobenius_norm_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Frobenius Norm Output", + argstr="--frobenius_norm_output %s") + lambda1_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Axial Diffusivity - Lambda 1 (largest eigenvalue) output", + argstr="--lambda1_output %s") + lambda2_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Lambda 2
(middle eigenvalue) output", + argstr="--lambda2_output %s") + lambda3_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Lambda 3 (smallest eigenvalue) output", + argstr="--lambda3_output %s") + RD_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="RD (Radial Diffusivity 1/2*(lambda2+lambda3)) output", + argstr="--RD_output %s") + rot_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Rotated tensor output file. Must also specify the dof file.", + argstr="--rot_output %s") + affineitk_file = File( + desc="Transformation file for affine transformation. ITK format.", + exists=True, + argstr="--affineitk_file %s") + dof_file = File( + desc= + "Transformation file for affine transformation. This can be ITK format (or the outdated RView).", + exists=True, + argstr="--dof_file %s") + newdof_file = File( + desc= + "Transformation file for affine transformation. RView NEW format. (txt file output of dof2mat)", + exists=True, + argstr="--newdof_file %s") + mask = File( + desc= + "Mask tensors. Specify --outmask if you want to save the masked tensor field, otherwise the mask is applied just for the current processing ", + exists=True, + argstr="--mask %s") + outmask = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Name of the masked tensor field.", + argstr="--outmask %s") + hField = traits.Bool( + desc= + "forward and inverse transformations are h-fields instead of displacement fields", + argstr="--hField ") + forward = File( + desc= + "Forward transformation. Assumed to be a deformation field in world coordinates, unless the --h-field option is specified.", + exists=True, + argstr="--forward %s") + deformation_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Warped tensor field based on a deformation field. This option requires the --forward,-F transformation to be specified.", + argstr="--deformation_output %s") + interpolation = traits.Enum( + "nearestneighbor", + "linear", + "cubic", + desc="Interpolation type (nearestneighbor, linear, cubic)", + argstr="--interpolation %s") + reorientation = traits.Enum( + "fs", + "ppd", + desc="Reorientation type (fs, ppd)", + argstr="--reorientation %s") + correction = traits.Enum( + "none", + "zero", + "abs", + "nearest", + desc= + "Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s") + scalar_float = traits.Bool( + desc= + "Write scalar [FA,MD] as unscaled float (with their actual values, otherwise scaled by 10 000). 
Also causes FA to be unscaled [0..1].", + argstr="--scalar_float ") + DTI_double = traits.Bool( + desc= + "Tensor components are saved as doubles (cannot be visualized in Slicer)", + argstr="--DTI_double ") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + + +class dtiprocessOutputSpec(TraitedSpec): + fa_output = File(desc="Fractional Anisotropy output file", exists=True) + md_output = File(desc="Mean Diffusivity output file", exists=True) + fa_gradient_output = File( + desc="Fractional Anisotropy Gradient output file", exists=True) + fa_gradmag_output = File( + desc="Fractional Anisotropy Gradient Magnitude output file", + exists=True) + color_fa_output = File( + desc="Color Fractional Anisotropy output file", exists=True) + principal_eigenvector_output = File( + desc="Principal Eigenvectors Output", exists=True) + negative_eigenvector_output = File( + desc= + "Negative Eigenvectors Output: create a binary image where if any of the eigen value is below zero, the voxel is set to 1, otherwise 0.", + exists=True) + frobenius_norm_output = File(desc="Frobenius Norm Output", exists=True) + lambda1_output = File( + desc="Axial Diffusivity - Lambda 1 (largest eigenvalue) output", + exists=True) + lambda2_output = File( + desc="Lambda 2 (middle eigenvalue) output", exists=True) + lambda3_output = File( + desc="Lambda 3 (smallest eigenvalue) output", exists=True) + RD_output = File( + desc="RD (Radial Diffusivity 1/2*(lambda2+lambda3)) output", + exists=True) + rot_output = File( + desc="Rotated tensor output file. Must also specify the dof file.", + exists=True) + outmask = File(desc="Name of the masked tensor field.", exists=True) + deformation_output = File( + desc= + "Warped tensor field based on a deformation field. This option requires the --forward,-F transformation to be specified.", + exists=True) + + +class dtiprocess(SEMLikeCommandLine): + """title: DTIProcess (DTIProcess) + +category: Diffusion.Diffusion Tensor Images + +description: dtiprocess is a tool that handles tensor fields. It takes as an input a tensor field in nrrd format. +It can generate diffusion scalar properties out of the tensor field such as : FA (--fa_output), Gradient FA image (--fa_gradient_output), color FA (--color_fa_output), MD (--md_output), Frobenius norm (--frobenius_norm_output), lbd1, lbd2, lbd3 (--lambda{1,2,3}_output), binary map of voxel where if any of the eigenvalue is negative, the voxel is set to 1 (--negative_eigenvector_output) + +It also creates 4D images out of the tensor field such as: Highest eigenvector map (highest eigenvector at each voxel) (--principal_eigenvector_output) + +Masking capabilities: For any of the processing done with dtiprocess, it's possible to apply it on a masked region of the tensor field. You need to use the --mask option for any of the option to be applied on that tensor field sub-region only. If you want to save the masked tensor field use the option --outmask and specify the new masked tensor field file name. +dtiprocess also allows a range of transformations on the tensor fields. The transformed tensor field file name is specified with the option --deformation_output. 
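For example (hypothetical file names): dtiprocess --dti_image tensors.nhdr --forward displacement.nrrd --deformation_output warped_tensors.nhdr.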
There are 3 resampling interpolation methods, specified with the tag --interpolation followed by the type to use (nearestneighbor, linear, cubic). Then there are several transformations you can apply: + + - Affine transformations using as an input + - itk affine transformation file (based on the itkAffineTransform class) + - Affine transformations using rview (details and download at http://www.doc.ic.ac.uk/~dr/software/). There are 2 versions of rview, both creating transformation files called dof files. The old version of rview outputs text files containing the transformation parameters. It can be read in with the --dof_file option. The new version outputs binary dof files. These dof files can be transformed into a human-readable file with the dof2mat tool, which is part of the rview package. So you need to save the output of dof2mat into a text file, which can then be used with the --newdof_file option. Usage example: dof2mat mynewdoffile.dof >> mynewdoffile.txt dtiprocess --dti_image mytensorfield.nhdr --newdof_file mynewdoffile.txt --rot_output myaffinetensorfield.nhdr + +Non-linear transformations as an input: The default transformation file type is d-field (displacement field) in nrrd format. The option to use is --forward with the name of the file. If the transformation file is an h-field, you have to add the option --hField. + +version: 1.0.1 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. + +contributor: Casey Goodlett + +""" + + input_spec = dtiprocessInputSpec + output_spec = dtiprocessOutputSpec + _cmd = " dtiprocess " + _outputs_filenames = { + 'fa_gradmag_output': 'fa_gradmag_output.nii', + 'fa_gradient_output': 'fa_gradient_output.nii', + 'lambda1_output': 'lambda1_output.nii', + 'lambda2_output': 'lambda2_output.nii', + 'color_fa_output': 'color_fa_output.nii', + 'fa_output': 'fa_output.nii', + 'frobenius_norm_output': 'frobenius_norm_output.nii', + 'principal_eigenvector_output': 'principal_eigenvector_output.nii', + 'outmask': 'outmask.nii', + 'lambda3_output': 'lambda3_output.nii', + 'negative_eigenvector_output': 'negative_eigenvector_output.nii', + 'md_output': 'md_output.nii', + 'RD_output': 'RD_output.nii', + 'deformation_output': 'deformation_output.nii', + 'rot_output': 'rot_output.nii' + } + _redirect_x = False + + +class DWIConvertInputSpec(CommandLineInputSpec): + conversionMode = traits.Enum( + "DicomToNrrd", + "DicomToFSL", + "NrrdToFSL", + "FSLToNrrd", + desc= + "Determine which conversion to perform.
DicomToNrrd (default): Convert DICOM series to NRRD DicomToFSL: Convert DICOM series to NIfTI File + gradient/bvalue text files NrrdToFSL: Convert DWI NRRD file to NIfTI File + gradient/bvalue text files FSLToNrrd: Convert NIfTI File + gradient/bvalue text files to NRRD file.", + argstr="--conversionMode %s") + inputVolume = File( + desc="Input DWI volume -- not used for DicomToNrrd mode.", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output filename (.nhdr or .nrrd)", + argstr="--outputVolume %s") + inputDicomDirectory = Directory( + desc="Directory holding Dicom series", + exists=True, + argstr="--inputDicomDirectory %s") + fslNIFTIFile = File( + desc="4D NIfTI file containing gradient volumes", + exists=True, + argstr="--fslNIFTIFile %s") + inputBValues = File( + desc="The B Values are stored in FSL .bval text file format", + exists=True, + argstr="--inputBValues %s") + inputBVectors = File( + desc="The Gradient Vectors are stored in FSL .bvec text file format", + exists=True, + argstr="--inputBVectors %s") + outputBValues = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The B Values are stored in FSL .bval text file format (defaults to .bval)", + argstr="--outputBValues %s") + outputBVectors = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", + argstr="--outputBVectors %s") + fMRI = traits.Bool( + desc="Output a NRRD file, but without gradients", argstr="--fMRI ") + writeProtocolGradientsFile = traits.Bool( + desc= + "Write the protocol gradients to a file suffixed by \'.txt\' as they were specified in the protocol by multiplying each diffusion gradient direction by the measurement frame. This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", + argstr="--writeProtocolGradientsFile ") + useIdentityMeaseurementFrame = traits.Bool( + desc= + "Adjust all the gradients so that the measurement frame is an identity matrix.", + argstr="--useIdentityMeaseurementFrame ") + useBMatrixGradientDirections = traits.Bool( + desc= + "Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data. In some cases the standard public gradients are not properly computed. The gradients can be empirically computed from the private BMatrix fields. In some cases the private BMatrix is consistent with the public gradients, but not in all cases; when it exists, the BMatrix is usually the most robust.", + argstr="--useBMatrixGradientDirections ") + outputDirectory = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc="Directory holding the output NRRD file", + argstr="--outputDirectory %s") + gradientVectorFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Text file giving gradient vectors", + argstr="--gradientVectorFile %s") + smallGradientThreshold = traits.Float( + desc= + "If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DWIConvert will display an error message and quit, unless the useBMatrixGradientDirections option is set.", + argstr="--smallGradientThreshold %f") + allowLossyConversion = traits.Bool( + desc= + "The only supported output type is \'short\'. Conversion from images of a different type may cause data loss due to rounding or truncation.
Use with caution!", + argstr="--allowLossyConversion ") + transposeInputBVectors = traits.Bool( + desc= + "FSL input BVectors are expected to be encoded in the input file as one vector per line. If it is not the case, use this option to transpose the file as it is read.", + argstr="--transposeInputBVectors ") + + +class DWIConvertOutputSpec(TraitedSpec): + outputVolume = File(desc="Output filename (.nhdr or .nrrd)", exists=True) + outputBValues = File( + desc= + "The B Values are stored in FSL .bval text file format (defaults to .bval)", + exists=True) + outputBVectors = File( + desc= + "The Gradient Vectors are stored in FSL .bvec text file format (defaults to .bvec)", + exists=True) + outputDirectory = Directory( + desc="Directory holding the output NRRD file", exists=True) + gradientVectorFile = File( + desc="Text file giving gradient vectors", exists=True) + + +class DWIConvert(SEMLikeCommandLine): + """title: DWIConverter + +category: Diffusion.Diffusion Data Conversion + +description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Phillips scanners. Work in progress to support dicom multi-frame data. The program parses dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc, and write out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. + +version: Version 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DWIConverter + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Vince Magnotta (UIowa), Hans Johnson (UIowa), Joy Matsui (UIowa), Kent Williams (UIowa), Mark Scully (Uiowa), Xiaodong Tao (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. 
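+
+example: a minimal usage sketch, not part of the upstream tool documentation (file names are hypothetical; the import path assumes this class is re-exported by nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import DWIConvert
+  convert = DWIConvert()
+  convert.inputs.conversionMode = 'DicomToNrrd'
+  convert.inputs.inputDicomDirectory = 'dwi_dicom'  # hypothetical DICOM series directory
+  convert.inputs.outputVolume = 'dwi.nhdr'
+  convert.run()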
+ +""" + + input_spec = DWIConvertInputSpec + output_spec = DWIConvertOutputSpec + _cmd = " DWIConvert " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputDirectory': 'outputDirectory', + 'outputBValues': 'outputBValues.bval', + 'gradientVectorFile': 'gradientVectorFile', + 'outputBVectors': 'outputBVectors.bvec' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/gtract.py b/nipype/interfaces/semtools/diffusion/gtract.py new file mode 100644 index 0000000000..999c898599 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/gtract.py @@ -0,0 +1,1708 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class gtractTransformToDisplacementFieldInputSpec(CommandLineInputSpec): + inputTransform = File( + desc="Input Transform File Name", + exists=True, + argstr="--inputTransform %s") + inputReferenceVolume = File( + desc= + "Required: input image file name to exemplify the anatomical space over which to vcl_express the transform as a displacement field.", + exists=True, + argstr="--inputReferenceVolume %s") + outputDeformationFieldVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output deformation field", + argstr="--outputDeformationFieldVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractTransformToDisplacementFieldOutputSpec(TraitedSpec): + outputDeformationFieldVolume = File( + desc="Output deformation field", exists=True) + + +class gtractTransformToDisplacementField(SEMLikeCommandLine): + """title: Create Displacement Field + +category: Diffusion.GTRACT + +description: This program will compute forward deformation from the given Transform. 
The size of the displacement field is equal to the MNI space. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta, Madhura Ingalhalikar, and Greg Harris + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractTransformToDisplacementFieldInputSpec + output_spec = gtractTransformToDisplacementFieldOutputSpec + _cmd = " gtractTransformToDisplacementField " + _outputs_filenames = { + 'outputDeformationFieldVolume': 'outputDeformationFieldVolume.nii' + } + _redirect_x = False + + +class gtractInvertBSplineTransformInputSpec(CommandLineInputSpec): + inputReferenceVolume = File( + desc= + "Required: input image file name to exemplify the anatomical space to interpolate over.", + exists=True, + argstr="--inputReferenceVolume %s") + inputTransform = File( + desc="Required: input B-Spline transform file name", + exists=True, + argstr="--inputTransform %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output transform file name", + argstr="--outputTransform %s") + landmarkDensity = InputMultiPath( + traits.Int, + desc="Number of landmark subdivisions in all 3 directions", + sep=",", + argstr="--landmarkDensity %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractInvertBSplineTransformOutputSpec(TraitedSpec): + outputTransform = File( + desc="Required: output transform file name", exists=True) + + +class gtractInvertBSplineTransform(SEMLikeCommandLine): + """title: B-Spline Transform Inversion + +category: Diffusion.GTRACT + +description: This program will invert a B-Spline transform using a thin-plate spline approximation. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris.
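+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import gtractInvertBSplineTransform
+  invert = gtractInvertBSplineTransform()
+  invert.inputs.inputReferenceVolume = 'b0.nrrd'  # hypothetical anatomical reference
+  invert.inputs.inputTransform = 'bspline.h5'     # hypothetical B-Spline transform
+  invert.inputs.outputTransform = 'bspline_inverse.h5'
+  invert.run()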
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractInvertBSplineTransformInputSpec + output_spec = gtractInvertBSplineTransformOutputSpec + _cmd = " gtractInvertBSplineTransform " + _outputs_filenames = {'outputTransform': 'outputTransform.h5'} + _redirect_x = False + + +class gtractConcatDwiInputSpec(CommandLineInputSpec): + inputVolume = InputMultiPath( + File(exists=True), + desc= + "Required: input file containing the first diffusion weighted image", + argstr="--inputVolume %s...") + ignoreOrigins = traits.Bool( + desc= + "If image origins are different force all images to origin of first image", + argstr="--ignoreOrigins ") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the combined diffusion weighted images.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractConcatDwiOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the combined diffusion weighted images.", + exists=True) + + +class gtractConcatDwi(SEMLikeCommandLine): + """title: Concat DWI Images + +category: Diffusion.GTRACT + +description: This program will concatenate two DTI runs together. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractConcatDwiInputSpec + output_spec = gtractConcatDwiOutputSpec + _cmd = " gtractConcatDwi " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractAverageBvaluesInputSpec(CommandLineInputSpec): + inputVolume = File( + desc= + "Required: input image file name containing multiple baseline gradients to average", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing directly averaged baseline images", + argstr="--outputVolume %s") + directionsTolerance = traits.Float( + desc="Tolerance for matching identical gradient direction pairs", + argstr="--directionsTolerance %f") + averageB0only = traits.Bool( + desc= + "Average only baseline gradients. All other gradient directions are not averaged, but retained in the outputVolume", + argstr="--averageB0only ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractAverageBvaluesOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing directly averaged baseline images", + exists=True) + + +class gtractAverageBvalues(SEMLikeCommandLine): + """title: Average B-Values + +category: Diffusion.GTRACT + +description: This program will directly average together the baseline gradients (b value equals 0) within a DWI scan. This is usually used after gtractCoregBvalues. 
+ +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractAverageBvaluesInputSpec + output_spec = gtractAverageBvaluesOutputSpec + _cmd = " gtractAverageBvalues " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractCoregBvaluesInputSpec(CommandLineInputSpec): + movingVolume = File( + desc= + "Required: input moving image file name. In order to register gradients within a scan to its first gradient, set the movingVolume and fixedVolume as the same image.", + exists=True, + argstr="--movingVolume %s") + fixedVolume = File( + desc= + "Required: input fixed image file name. It is recommended that this image should either contain or be a b0 image.", + exists=True, + argstr="--fixedVolume %s") + fixedVolumeIndex = traits.Int( + desc= + "Index in the fixed image for registration. It is recommended that this image should be a b0 image.", + argstr="--fixedVolumeIndex %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", + argstr="--outputVolume %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but it can be used for debugging purposes.", + argstr="--outputTransform %s") + eddyCurrentCorrection = traits.Bool( + desc= + "Flag to perform eddy current correction in addition to motion correction (recommended)", + argstr="--eddyCurrentCorrection ") + numberOfIterations = traits.Int( + desc="Number of iterations in each 3D fit", + argstr="--numberOfIterations %d") + numberOfSpatialSamples = traits.Int( + desc= + "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", + argstr="--numberOfSpatialSamples %d") + samplingPercentage = traits.Float( + desc= + "This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)).
Typical values range from 1% for low detail images to 20% for high detail images.", + argstr="--samplingPercentage %f") + relaxationFactor = traits.Float( + desc= + "Fraction of gradient from Jacobian to attempt to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.25)", + argstr="--relaxationFactor %f") + maximumStepSize = traits.Float( + desc= + "Maximum permitted step size to move in each 3D fit step (adjust when eddyCurrentCorrection is enabled; suggested value = 0.1)", + argstr="--maximumStepSize %f") + minimumStepSize = traits.Float( + desc= + "Minimum required step size to move in each 3D fit step without converging -- decrease this to make the fit more exacting", + argstr="--minimumStepSize %f") + spatialScale = traits.Float( + desc= + "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the fit", + argstr="--spatialScale %f") + registerB0Only = traits.Bool( + desc="Register the B0 images only", argstr="--registerB0Only ") + debugLevel = traits.Int( + desc= + "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractCoregBvaluesOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing moving images individually resampled and fit to the specified fixed image index.", + exists=True) + outputTransform = File( + desc= + "Registration 3D transforms concatenated in a single output file. There are no tools that can use this, but can be used for debugging purposes.", + exists=True) + + +class gtractCoregBvalues(SEMLikeCommandLine): + """title: Coregister B-Values + +category: Diffusion.GTRACT + +description: This step should be performed after converting DWI scans from DICOM to NRRD format. This program will register all gradients in a NRRD diffusion weighted 4D vector image (moving image) to a specified index in a fixed image. It also supports co-registration with a T2 weighted image or field map in the same plane as the DWI data. The fixed image for the registration should be a b0 image. A mutual information metric cost function is used for the registration because of the differences in signal intensity as a result of the diffusion gradients. The full affine allows the registration procedure to correct for eddy current distortions that may exist in the data. If the eddyCurrentCorrection is enabled, relaxationFactor (0.25) and maximumStepSize (0.1) should be adjusted. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
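+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools). Registering all gradients of a scan to its own first image, assumed to be a b0:
+
+  from nipype.interfaces.semtools import gtractCoregBvalues
+  coreg = gtractCoregBvalues()
+  coreg.inputs.movingVolume = 'dwi.nrrd'  # hypothetical scan, registered to itself...
+  coreg.inputs.fixedVolume = 'dwi.nrrd'   # ...by passing the same image twice
+  coreg.inputs.fixedVolumeIndex = 0
+  coreg.inputs.eddyCurrentCorrection = True
+  coreg.inputs.relaxationFactor = 0.25    # suggested when eddy correction is enabled
+  coreg.inputs.maximumStepSize = 0.1
+  coreg.inputs.outputVolume = 'dwi_coreg.nrrd'
+  coreg.run()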
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractCoregBvaluesInputSpec + output_spec = gtractCoregBvaluesOutputSpec + _cmd = " gtractCoregBvalues " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nrrd', + 'outputTransform': 'outputTransform.h5' + } + _redirect_x = False + + +class gtractResampleAnisotropyInputSpec(CommandLineInputSpec): + inputAnisotropyVolume = File( + desc="Required: input file containing the anisotropy image", + exists=True, + argstr="--inputAnisotropyVolume %s") + inputAnatomicalVolume = File( + desc= + "Required: input file containing the anatomical image whose characteristics will be cloned.", + exists=True, + argstr="--inputAnatomicalVolume %s") + inputTransform = File( + desc="Required: input Rigid OR Bspline transform file name", + exists=True, + argstr="--inputTransform %s") + transformType = traits.Enum( + "Rigid", + "B-Spline", + desc="Transform type: Rigid, B-Spline", + argstr="--transformType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the resampled transformed anisotropy image.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractResampleAnisotropyOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the resampled transformed anisotropy image.", + exists=True) + + +class gtractResampleAnisotropy(SEMLikeCommandLine): + """title: Resample Anisotropy + +category: Diffusion.GTRACT + +description: This program will resample a floating point image using either the Rigid or B-Spline transform. You may want to save the aligned B0 image after each of the anisotropy map co-registration steps with the anatomical image to check the registration quality with another tool. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
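+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import gtractResampleAnisotropy
+  resample = gtractResampleAnisotropy()
+  resample.inputs.inputAnisotropyVolume = 'fa.nrrd'   # hypothetical FA map
+  resample.inputs.inputAnatomicalVolume = 't1.nrrd'
+  resample.inputs.inputTransform = 'rigid.h5'
+  resample.inputs.transformType = 'Rigid'
+  resample.inputs.outputVolume = 'fa_in_t1.nrrd'
+  resample.run()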
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractResampleAnisotropyInputSpec + output_spec = gtractResampleAnisotropyOutputSpec + _cmd = " gtractResampleAnisotropy " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractResampleCodeImageInputSpec(CommandLineInputSpec): + inputCodeVolume = File( + desc="Required: input file containing the code image", + exists=True, + argstr="--inputCodeVolume %s") + inputReferenceVolume = File( + desc= + "Required: input file containing the standard image to clone the characteristics of.", + exists=True, + argstr="--inputReferenceVolume %s") + inputTransform = File( + desc="Required: input Rigid or Inverse-B-Spline transform file name", + exists=True, + argstr="--inputTransform %s") + transformType = traits.Enum( + "Rigid", + "Affine", + "B-Spline", + "Inverse-B-Spline", + "None", + desc="Transform type: Rigid or Inverse-B-Spline", + argstr="--transformType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the resampled code image in acquisition space.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractResampleCodeImageOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the resampled code image in acquisition space.", + exists=True) + + +class gtractResampleCodeImage(SEMLikeCommandLine): + """title: Resample Code Image + +category: Diffusion.GTRACT + +description: This program will resample a short integer code image using either the Rigid or Inverse-B-Spline transform. The reference image is the DTI tensor anisotropy image space, and the input code image is in anatomical space. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
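+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools). Mapping a code (label) image from anatomical space back into the DTI acquisition space:
+
+  from nipype.interfaces.semtools import gtractResampleCodeImage
+  resample = gtractResampleCodeImage()
+  resample.inputs.inputCodeVolume = 'labels.nrrd'     # hypothetical label map
+  resample.inputs.inputReferenceVolume = 'fa.nrrd'
+  resample.inputs.inputTransform = 'bspline_inverse.h5'
+  resample.inputs.transformType = 'Inverse-B-Spline'
+  resample.inputs.outputVolume = 'labels_in_dti.nrrd'
+  resample.run()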
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractResampleCodeImageInputSpec + output_spec = gtractResampleCodeImageOutputSpec + _cmd = " gtractResampleCodeImage " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractCopyImageOrientationInputSpec(CommandLineInputSpec): + inputVolume = File( + desc= + "Required: input file containing the signed short image to reorient without resampling.", + exists=True, + argstr="--inputVolume %s") + inputReferenceVolume = File( + desc="Required: input file containing the orientation that will be cloned.", + exists=True, + argstr="--inputReferenceVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractCopyImageOrientationOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD or Nifti file containing the reoriented image in reference image space.", + exists=True) + + +class gtractCopyImageOrientation(SEMLikeCommandLine): + """title: Copy Image Orientation + +category: Diffusion.GTRACT + +description: This program will copy the orientation from the reference image into the moving image. Currently, the registration process requires that the diffusion weighted images and the anatomical images have the same image orientation (i.e. Axial, Coronal, Sagittal). It is suggested that you copy the image orientation from the diffusion weighted images and apply this to the anatomical image. This image can be subsequently removed after the registration step is complete. We anticipate that this limitation will be removed in future versions of the registration programs. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris.
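+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools). Cloning the DWI orientation onto the anatomical image prior to registration:
+
+  from nipype.interfaces.semtools import gtractCopyImageOrientation
+  copy_orient = gtractCopyImageOrientation()
+  copy_orient.inputs.inputVolume = 't1.nrrd'           # hypothetical image to reorient
+  copy_orient.inputs.inputReferenceVolume = 'b0.nrrd'  # orientation source
+  copy_orient.inputs.outputVolume = 't1_reoriented.nrrd'
+  copy_orient.run()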
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractCopyImageOrientationInputSpec + output_spec = gtractCopyImageOrientationOutputSpec + _cmd = " gtractCopyImageOrientation " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractCreateGuideFiberInputSpec(CommandLineInputSpec): + inputFiber = File( + desc="Required: input fiber tract file name", + exists=True, + argstr="--inputFiber %s") + numberOfPoints = traits.Int( + desc="Number of points in output guide fiber", + argstr="--numberOfPoints %d") + outputFiber = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output guide fiber file name", + argstr="--outputFiber %s") + writeXMLPolyDataFile = traits.Bool( + desc= + "Flag to make use of XML files when reading and writing vtkPolyData.", + argstr="--writeXMLPolyDataFile ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractCreateGuideFiberOutputSpec(TraitedSpec): + outputFiber = File( + desc="Required: output guide fiber file name", exists=True) + + +class gtractCreateGuideFiber(SEMLikeCommandLine): + """title: Create Guide Fiber + +category: Diffusion.GTRACT + +description: This program will create a guide fiber by averaging fibers from a previously generated tract. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractCreateGuideFiberInputSpec + output_spec = gtractCreateGuideFiberOutputSpec + _cmd = " gtractCreateGuideFiber " + _outputs_filenames = {'outputFiber': 'outputFiber.vtk'} + _redirect_x = False + + +class gtractAnisotropyMapInputSpec(CommandLineInputSpec): + inputTensorVolume = File( + desc="Required: input file containing the diffusion tensor image", + exists=True, + argstr="--inputTensorVolume %s") + anisotropyType = traits.Enum( + "ADC", + "FA", + "RA", + "VR", + "AD", + "RD", + "LI", + desc="Anisotropy Mapping Type: ADC, FA, RA, VR, AD, RD, LI", + argstr="--anisotropyType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the selected kind of anisotropy scalar.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractAnisotropyMapOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the selected kind of anisotropy scalar.", + exists=True) + + +class gtractAnisotropyMap(SEMLikeCommandLine): + """title: Anisotropy Map + +category: Diffusion.GTRACT + +description: This program will generate a scalar map of anisotropy, given a tensor representation. Anisotropy images are used for fiber tracking, but the anisotropy scalars are not defined along the path. Instead, the tensor representation is included as point data allowing all of these metrics to be computed using only the fiber tract point data. 
The images can be saved in any ITK supported format, but it is suggested that you use an image format that supports the definition of the image origin. This includes NRRD, NifTI, and Meta formats. These images can also be used for scalar analysis including regional anisotropy measures or VBM style analysis. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractAnisotropyMapInputSpec + output_spec = gtractAnisotropyMapOutputSpec + _cmd = " gtractAnisotropyMap " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractClipAnisotropyInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image file name", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the clipped anisotropy image", + argstr="--outputVolume %s") + clipFirstSlice = traits.Bool( + desc="Clip the first slice of the anisotropy image", + argstr="--clipFirstSlice ") + clipLastSlice = traits.Bool( + desc="Clip the last slice of the anisotropy image", + argstr="--clipLastSlice ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractClipAnisotropyOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the clipped anisotropy image", + exists=True) + + +class gtractClipAnisotropy(SEMLikeCommandLine): + """title: Clip Anisotropy + +category: Diffusion.GTRACT + +description: This program will zero the first and/or last slice of an anisotropy image, creating a clipped anisotropy image. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
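+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import gtractClipAnisotropy
+  clip = gtractClipAnisotropy()
+  clip.inputs.inputVolume = 'fa.nrrd'   # hypothetical anisotropy image
+  clip.inputs.clipFirstSlice = True
+  clip.inputs.clipLastSlice = True
+  clip.inputs.outputVolume = 'fa_clipped.nrrd'
+  clip.run()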
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractClipAnisotropyInputSpec + output_spec = gtractClipAnisotropyOutputSpec + _cmd = " gtractClipAnisotropy " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractResampleB0InputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input file containing the 4D image", + exists=True, + argstr="--inputVolume %s") + inputAnatomicalVolume = File( + desc= + "Required: input file containing the anatomical image defining the origin, spacing and size of the resampled image (template)", + exists=True, + argstr="--inputAnatomicalVolume %s") + inputTransform = File( + desc="Required: input Rigid OR Bspline transform file name", + exists=True, + argstr="--inputTransform %s") + vectorIndex = traits.Int( + desc="Index in the diffusion weighted image set for the B0 image", + argstr="--vectorIndex %d") + transformType = traits.Enum( + "Rigid", + "B-Spline", + desc="Transform type: Rigid, B-Spline", + argstr="--transformType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the resampled input image.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractResampleB0OutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the resampled input image.", + exists=True) + + +class gtractResampleB0(SEMLikeCommandLine): + """title: Resample B0 + +category: Diffusion.GTRACT + +description: This program will resample a signed short image using either a Rigid or B-Spline transform. The user must specify a template image that will be used to define the origin, orientation, spacing, and size of the resampled image. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractResampleB0InputSpec + output_spec = gtractResampleB0OutputSpec + _cmd = " gtractResampleB0 " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractInvertRigidTransformInputSpec(CommandLineInputSpec): + inputTransform = File( + desc="Required: input rigid transform file name", + exists=True, + argstr="--inputTransform %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output transform file name", + argstr="--outputTransform %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractInvertRigidTransformOutputSpec(TraitedSpec): + outputTransform = File( + desc="Required: output transform file name", exists=True) + + +class gtractInvertRigidTransform(SEMLikeCommandLine): + """title: Rigid Transform Inversion + +category: Diffusion.GTRACT + +description: This program will invert a Rigid transform. 
+ +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractInvertRigidTransformInputSpec + output_spec = gtractInvertRigidTransformOutputSpec + _cmd = " gtractInvertRigidTransform " + _outputs_filenames = {'outputTransform': 'outputTransform.h5'} + _redirect_x = False + + +class gtractImageConformityInputSpec(CommandLineInputSpec): + inputVolume = File( + desc= + "Required: input file containing the signed short image to reorient without resampling.", + exists=True, + argstr="--inputVolume %s") + inputReferenceVolume = File( + desc= + "Required: input file containing the standard image to clone the characteristics of.", + exists=True, + argstr="--inputReferenceVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractImageConformityOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output Nrrd or Nifti file containing the reoriented image in reference image space.", + exists=True) + + +class gtractImageConformity(SEMLikeCommandLine): + """title: Image Conformity + +category: Diffusion.GTRACT + +description: This program will straighten out the Direction and Origin to match the Reference Image. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
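+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import gtractImageConformity
+  conform = gtractImageConformity()
+  conform.inputs.inputVolume = 'b0.nrrd'           # hypothetical image to adjust
+  conform.inputs.inputReferenceVolume = 't1.nrrd'  # space whose direction/origin to match
+  conform.inputs.outputVolume = 'b0_conformed.nrrd'
+  conform.run()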
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractImageConformityInputSpec + output_spec = gtractImageConformityOutputSpec + _cmd = " gtractImageConformity " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class compareTractInclusionInputSpec(CommandLineInputSpec): + testFiber = File( + desc="Required: test fiber tract file name", + exists=True, + argstr="--testFiber %s") + standardFiber = File( + desc="Required: standard fiber tract file name", + exists=True, + argstr="--standardFiber %s") + closeness = traits.Float( + desc= + "Closeness of every test fiber to some fiber in the standard tract, computed as a sum of squares of spatial differences of standard points", + argstr="--closeness %f") + numberOfPoints = traits.Int( + desc="Number of points in comparison fiber pairs", + argstr="--numberOfPoints %d") + testForBijection = traits.Bool( + desc="Flag to apply the closeness criterion both ways", + argstr="--testForBijection ") + testForFiberCardinality = traits.Bool( + desc="Flag to require the same number of fibers in both tracts", + argstr="--testForFiberCardinality ") + writeXMLPolyDataFile = traits.Bool( + desc= + "Flag to make use of XML files when reading and writing vtkPolyData.", + argstr="--writeXMLPolyDataFile ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class compareTractInclusionOutputSpec(TraitedSpec): + pass + + +class compareTractInclusion(SEMLikeCommandLine): + """title: Compare Tracts + +category: Diffusion.GTRACT + +description: This program will halt with a status code indicating whether a test tract is nearly enough included in a standard tract in the sense that every fiber in the test tract has a low enough sum of squares distance to some fiber in the standard tract modulo spline resampling of every fiber to a fixed number of points. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
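+
+example: a minimal usage sketch, not from the upstream tool documentation (file names and the closeness tolerance are hypothetical; the import path assumes re-export from nipype.interfaces.semtools). The comparison result is reported through the process exit status:
+
+  from nipype.interfaces.semtools import compareTractInclusion
+  compare = compareTractInclusion()
+  compare.inputs.testFiber = 'test_tract.vtk'
+  compare.inputs.standardFiber = 'standard_tract.vtk'
+  compare.inputs.closeness = 5.0        # hypothetical sum-of-squares tolerance
+  compare.inputs.numberOfPoints = 100
+  compare.inputs.testForBijection = True
+  compare.run()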
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = compareTractInclusionInputSpec + output_spec = compareTractInclusionOutputSpec + _cmd = " compareTractInclusion " + _outputs_filenames = {} + _redirect_x = False + + +class gtractFastMarchingTrackingInputSpec(CommandLineInputSpec): + inputTensorVolume = File( + desc="Required: input tensor image file name", + exists=True, + argstr="--inputTensorVolume %s") + inputAnisotropyVolume = File( + desc="Required: input anisotropy image file name", + exists=True, + argstr="--inputAnisotropyVolume %s") + inputCostVolume = File( + desc="Required: input cost image file name", + exists=True, + argstr="--inputCostVolume %s") + inputStartingSeedsLabelMapVolume = File( + desc="Required: input starting seeds LabelMap image file name", + exists=True, + argstr="--inputStartingSeedsLabelMapVolume %s") + startingSeedsLabel = traits.Int( + desc="Label value for Starting Seeds", + argstr="--startingSeedsLabel %d") + outputTract = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + argstr="--outputTract %s") + writeXMLPolyDataFile = traits.Bool( + desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", + argstr="--writeXMLPolyDataFile ") + numberOfIterations = traits.Int( + desc="Number of iterations used for the optimization", + argstr="--numberOfIterations %d") + seedThreshold = traits.Float( + desc="Anisotropy threshold used for seed selection", + argstr="--seedThreshold %f") + trackingThreshold = traits.Float( + desc="Anisotropy threshold used for fiber tracking", + argstr="--trackingThreshold %f") + costStepSize = traits.Float( + desc="Cost image sub-voxel sampling", argstr="--costStepSize %f") + maximumStepSize = traits.Float( + desc="Maximum step size to move when tracking", + argstr="--maximumStepSize %f") + minimumStepSize = traits.Float( + desc="Minimum step size to move when tracking", + argstr="--minimumStepSize %f") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractFastMarchingTrackingOutputSpec(TraitedSpec): + outputTract = File( + desc= + "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.", + exists=True) + + +class gtractFastMarchingTracking(SEMLikeCommandLine): + """title: Fast Marching Tracking + +category: Diffusion.GTRACT + +description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the second portion of the algorithm. The user must first run gtractCostFastMarching to generate the cost image. The second step of the algorithm implemented here is a gradient descent solution from the defined ending region back to the seed points specified in gtractCostFastMarching. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions On Medical Imaging, 21(5): 505-512, 2002. As an additional feature, anisotropy can be included in the cost function calculation. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris.
The original code here was developed by Daisy Espino. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractFastMarchingTrackingInputSpec + output_spec = gtractFastMarchingTrackingOutputSpec + _cmd = " gtractFastMarchingTracking " + _outputs_filenames = {'outputTract': 'outputTract.vtk'} + _redirect_x = False + + +class gtractInvertDisplacementFieldInputSpec(CommandLineInputSpec): + baseImage = File( + desc= + "Required: base image used to define the size of the inverse field", + exists=True, + argstr="--baseImage %s") + deformationImage = File( + desc="Required: Displacement field image", + exists=True, + argstr="--deformationImage %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: Output deformation field", + argstr="--outputVolume %s") + subsamplingFactor = traits.Int( + desc="Subsampling factor for the deformation field", + argstr="--subsamplingFactor %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractInvertDisplacementFieldOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: Output deformation field", exists=True) + + +class gtractInvertDisplacementField(SEMLikeCommandLine): + """title: Invert Displacement Field + +category: Diffusion.GTRACT + +description: This program will invert a deformation field. The size of the deformation field is defined by an example image provided by the user. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta. + +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractInvertDisplacementFieldInputSpec + output_spec = gtractInvertDisplacementFieldOutputSpec + _cmd = " gtractInvertDisplacementField " + _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'} + _redirect_x = False + + +class gtractCoRegAnatomyInputSpec(CommandLineInputSpec): + inputVolume = File( + desc= + "Required: input vector image file name. It is recommended that the input volume is the skull stripped baseline image of the DWI scan.", + exists=True, + argstr="--inputVolume %s") + inputAnatomicalVolume = File( + desc= + "Required: input anatomical image file name. It is recommended that the input anatomical image has been skull stripped and has the same orientation as the DWI scan.", + exists=True, + argstr="--inputAnatomicalVolume %s") + vectorIndex = traits.Int( + desc= + "Vector image index in the moving image (within the DWI) to be used for registration.", + argstr="--vectorIndex %d") + inputRigidTransform = File( + desc= + "Required (for B-Spline type co-registration): input rigid transform file name.
Used as a starting point for the anatomical B-Spline registration.", + exists=True, + argstr="--inputRigidTransform %s") + outputTransformName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: filename for the fit transform.", + argstr="--outputTransformName %s") + transformType = traits.Enum( + "Rigid", + "Bspline", + desc="Transform Type: Rigid|Bspline", + argstr="--transformType %s") + numberOfIterations = traits.Int( + desc="Number of iterations in the selected 3D fit", + argstr="--numberOfIterations %d") + gridSize = InputMultiPath( + traits.Int, + desc="Number of grid subdivisions in all 3 directions", + sep=",", + argstr="--gridSize %s") + borderSize = traits.Int(desc="Size of border", argstr="--borderSize %d") + numberOfHistogramBins = traits.Int( + desc="Number of histogram bins", argstr="--numberOfHistogramBins %d") + spatialScale = traits.Int( + desc= + "Scales the number of voxels in the image by this value to specify the number of voxels used in the registration", + argstr="--spatialScale %d") + convergence = traits.Float( + desc="Convergence Factor", argstr="--convergence %f") + gradientTolerance = traits.Float( + desc="Gradient Tolerance", argstr="--gradientTolerance %f") + maxBSplineDisplacement = traits.Float( + desc= + " Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", + argstr="--maxBSplineDisplacement %f") + maximumStepSize = traits.Float( + desc="Maximum permitted step size to move in the selected 3D fit", + argstr="--maximumStepSize %f") + minimumStepSize = traits.Float( + desc= + "Minimum required step size to move in the selected 3D fit without converging -- decrease this to make the fit more exacting", + argstr="--minimumStepSize %f") + translationScale = traits.Float( + desc= + "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more translation in the fit", + argstr="--translationScale %f") + relaxationFactor = traits.Float( + desc= + "Fraction of gradient from Jacobian to attempt to move in the selected 3D fit", + argstr="--relaxationFactor %f") + numberOfSamples = traits.Int( + desc= + "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set, it overwrites the samplingPercentage option. ", + argstr="--numberOfSamples %d") + samplingPercentage = traits.Float( + desc= + "This is a number in (0.0,1.0] interval that shows the percentage of the input fixed image voxels that are sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is to use approximately 5% of voxels (for backwards compatibility 5% ~= 500000/(256*256*256)). Typical values range from 1% for low detail images to 20% for high detail images.", + argstr="--samplingPercentage %f") + useMomentsAlign = traits.Bool( + desc= + "MomentsAlign assumes that the center of mass of the images represent similar structures. Perform a MomentsAlign registration as part of the sequential registration steps. 
This option MUST come first, and CAN NOT be used with either CenterOfHeadAlign, GeometryAlign, or initialTransform file. This family of options supersedes the use of transformType if any of them are set.", + argstr="--useMomentsAlign ") + useGeometryAlign = traits.Bool( + desc= + "GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Perform a GeometryCenterAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, CenterOfHeadAlign, or initialTransform file. This family of options supersedes the use of transformType if any of them are set.", + argstr="--useGeometryAlign ") + useCenterOfHeadAlign = traits.Bool( + desc= + "CenterOfHeadAlign attempts to find a hemisphere full of foreground voxels from the superior direction as an estimate of where the center of a head shape would be to drive a center of mass estimate. Perform a CenterOfHeadAlign registration as part of the sequential registration steps. This option MUST come first, and CAN NOT be used with either MomentsAlign, GeometryAlign, or initialTransform file. This family of options supersedes the use of transformType if any of them are set.", + argstr="--useCenterOfHeadAlign ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractCoRegAnatomyOutputSpec(TraitedSpec): + outputTransformName = File( + desc="Required: filename for the fit transform.", exists=True) + + +class gtractCoRegAnatomy(SEMLikeCommandLine): + """title: Coregister B0 to Anatomy B-Spline + +category: Diffusion.GTRACT + +description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-Spline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general, the amount of motion in the slice selection and read-out directions should be kept low. The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images should be used for image co-registration with the B-Spline transform. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris.
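+
+example: a minimal usage sketch, not from the upstream tool documentation (file names and grid size are hypothetical; the import path assumes re-export from nipype.interfaces.semtools). A B-Spline fit seeded with a previously computed rigid transform:
+
+  from nipype.interfaces.semtools import gtractCoRegAnatomy
+  coreg = gtractCoRegAnatomy()
+  coreg.inputs.inputVolume = 'dwi.nrrd'           # hypothetical skull-stripped DWI
+  coreg.inputs.inputAnatomicalVolume = 't1.nrrd'  # hypothetical skull-stripped anatomical
+  coreg.inputs.vectorIndex = 0                    # use the b0 for registration
+  coreg.inputs.transformType = 'Bspline'
+  coreg.inputs.inputRigidTransform = 'rigid.h5'
+  coreg.inputs.gridSize = [18, 18, 18]            # hypothetical control grid
+  coreg.inputs.outputTransformName = 'bspline.h5'
+  coreg.run()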
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractCoRegAnatomyInputSpec + output_spec = gtractCoRegAnatomyOutputSpec + _cmd = " gtractCoRegAnatomy " + _outputs_filenames = {'outputTransformName': 'outputTransformName.h5'} + _redirect_x = False + + +class gtractResampleDWIInPlaceInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image is a 4D NRRD image.", + exists=True, + argstr="--inputVolume %s") + referenceVolume = File( + desc= + "If provided, resample to the final space of the referenceVolume 3D data set.", + exists=True, + argstr="--referenceVolume %s") + outputResampledB0 = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Convenience function for extracting the first index location (assumed to be the B0)", + argstr="--outputResampledB0 %s") + inputTransform = File( + desc= + "Required: transform file derived from rigid registration of b0 image to reference structural image.", + exists=True, + argstr="--inputTransform %s") + warpDWITransform = File( + desc="Optional: transform file to warp gradient volumes.", + exists=True, + argstr="--warpDWITransform %s") + debugLevel = traits.Int( + desc= + "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d") + imageOutputSize = InputMultiPath( + traits.Int, + desc= + "The voxel lattice for the output image, padding is added if necessary. NOTE: if 0,0,0, then the inputVolume size is used.", + sep=",", + argstr="--imageOutputSize %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class gtractResampleDWIInPlaceOutputSpec(TraitedSpec): + outputResampledB0 = File( + desc= + "Convenience function for extracting the first index location (assumed to be the B0)", + exists=True) + outputVolume = File( + desc= + "Required: output image (NRRD file) that has been rigidly transformed into the space of the structural image and padded if image padding was changed from 0,0,0 default.", + exists=True) + + +class gtractResampleDWIInPlace(SEMLikeCommandLine): + """title: Resample DWI In Place + +category: Diffusion.GTRACT + +description: Resamples DWI image to structural image. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta, Greg Harris, Hans Johnson, and Joy Matsui. 
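+
+example: a minimal usage sketch, not from the upstream tool documentation (file names are hypothetical; the import path assumes re-export from nipype.interfaces.semtools):
+
+  from nipype.interfaces.semtools import gtractResampleDWIInPlace
+  resample = gtractResampleDWIInPlace()
+  resample.inputs.inputVolume = 'dwi.nrrd'     # hypothetical 4D NRRD DWI
+  resample.inputs.inputTransform = 'rigid.h5'  # b0-to-structural rigid transform
+  resample.inputs.referenceVolume = 't1.nrrd'
+  resample.inputs.outputVolume = 'dwi_in_t1.nrrd'
+  resample.run()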
+
+acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
+
+"""
+
+    input_spec = gtractResampleDWIInPlaceInputSpec
+    output_spec = gtractResampleDWIInPlaceOutputSpec
+    _cmd = " gtractResampleDWIInPlace "
+    _outputs_filenames = {
+        'outputResampledB0': 'outputResampledB0.nii',
+        'outputVolume': 'outputVolume.nii'
+    }
+    _redirect_x = False
+
+
+class gtractCostFastMarchingInputSpec(CommandLineInputSpec):
+    inputTensorVolume = File(
+        desc="Required: input tensor image file name",
+        exists=True,
+        argstr="--inputTensorVolume %s")
+    inputAnisotropyVolume = File(
+        desc="Required: input anisotropy image file name",
+        exists=True,
+        argstr="--inputAnisotropyVolume %s")
+    inputStartingSeedsLabelMapVolume = File(
+        desc="Required: input starting seeds LabelMap image file name",
+        exists=True,
+        argstr="--inputStartingSeedsLabelMapVolume %s")
+    startingSeedsLabel = traits.Int(
+        desc="Label value for Starting Seeds",
+        argstr="--startingSeedsLabel %d")
+    outputCostVolume = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Output vcl_cost image",
+        argstr="--outputCostVolume %s")
+    outputSpeedVolume = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Output speed image",
+        argstr="--outputSpeedVolume %s")
+    anisotropyWeight = traits.Float(
+        desc="Anisotropy weight used for vcl_cost function calculations",
+        argstr="--anisotropyWeight %f")
+    stoppingValue = traits.Float(
+        desc="Terminating value for vcl_cost function estimation",
+        argstr="--stoppingValue %f")
+    seedThreshold = traits.Float(
+        desc="Anisotropy threshold used for seed selection",
+        argstr="--seedThreshold %f")
+    numberOfThreads = traits.Int(
+        desc="Explicitly specify the maximum number of threads to use.",
+        argstr="--numberOfThreads %d")
+
+
+class gtractCostFastMarchingOutputSpec(TraitedSpec):
+    outputCostVolume = File(desc="Output vcl_cost image", exists=True)
+    outputSpeedVolume = File(desc="Output speed image", exists=True)
+
+
+class gtractCostFastMarching(SEMLikeCommandLine):
+    """title: Cost Fast Marching
+
+category: Diffusion.GTRACT
+
+description: This program will use a fast marching fiber tracking algorithm to identify fiber tracts from a tensor image. This program is the first portion of the algorithm. The user must then run gtractFastMarchingTracking to generate the actual fiber tracts. This algorithm is roughly based on the work by G. Parker et al. from IEEE Transactions on Medical Imaging, 21(5): 505-512, 2002. Additionally, anisotropy can be included in the vcl_cost function calculation.
+
+version: 4.0.0
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
+
+license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
+
+contributor: This tool was developed by Vincent Magnotta and Greg Harris. The original code here was developed by Daisy Espino.
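+
+example: An illustrative sketch only (placeholder file names; real paths are required at run time, and the parameter values are merely plausible starting points), showing the cost/speed computation that precedes gtractFastMarchingTracking:
+
+    from nipype.interfaces.semtools.diffusion.gtract import gtractCostFastMarching
+
+    fm = gtractCostFastMarching()
+    fm.inputs.inputTensorVolume = 'tensor.nrrd'                # placeholder tensor image
+    fm.inputs.inputAnisotropyVolume = 'fa.nrrd'                # placeholder anisotropy image
+    fm.inputs.inputStartingSeedsLabelMapVolume = 'seeds.nrrd'  # placeholder seed label map
+    fm.inputs.startingSeedsLabel = 1
+    fm.inputs.anisotropyWeight = 1.0       # plausible weight, not a recommended default
+    fm.inputs.stoppingValue = 100.0        # plausible termination value
+    fm.inputs.outputCostVolume = 'cost.nrrd'
+    fm.inputs.outputSpeedVolume = 'speed.nrrd'
+    print(fm.cmdline)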
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractCostFastMarchingInputSpec + output_spec = gtractCostFastMarchingOutputSpec + _cmd = " gtractCostFastMarching " + _outputs_filenames = { + 'outputCostVolume': 'outputCostVolume.nrrd', + 'outputSpeedVolume': 'outputSpeedVolume.nrrd' + } + _redirect_x = False + + +class gtractFiberTrackingInputSpec(CommandLineInputSpec): + inputTensorVolume = File( + desc= + "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input tensor image file name", + exists=True, + argstr="--inputTensorVolume %s") + inputAnisotropyVolume = File( + desc= + "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input anisotropy image file name", + exists=True, + argstr="--inputAnisotropyVolume %s") + inputStartingSeedsLabelMapVolume = File( + desc= + "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): input starting seeds LabelMap image file name", + exists=True, + argstr="--inputStartingSeedsLabelMapVolume %s") + startingSeedsLabel = traits.Int( + desc= + "Label value for Starting Seeds (required if Label number used to create seed point in Slicer was not 1)", + argstr="--startingSeedsLabel %d") + inputEndingSeedsLabelMapVolume = File( + desc= + "Required (for Streamline, GraphSearch, and Guided fiber tracking methods): input ending seeds LabelMap image file name", + exists=True, + argstr="--inputEndingSeedsLabelMapVolume %s") + endingSeedsLabel = traits.Int( + desc= + "Label value for Ending Seeds (required if Label number used to create seed point in Slicer was not 1)", + argstr="--endingSeedsLabel %d") + inputTract = File( + desc= + "Required (for Guided fiber tracking method): guide fiber in vtkPolydata file containing one tract line.", + exists=True, + argstr="--inputTract %s") + outputTract = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.", + argstr="--outputTract %s") + writeXMLPolyDataFile = traits.Bool( + desc="Flag to make use of the XML format for vtkPolyData fiber tracts.", + argstr="--writeXMLPolyDataFile ") + trackingMethod = traits.Enum( + "Guided", + "Free", + "Streamline", + "GraphSearch", + desc="Fiber tracking Filter Type: Guided|Free|Streamline|GraphSearch", + argstr="--trackingMethod %s") + guidedCurvatureThreshold = traits.Float( + desc="Guided Curvature Threshold (Degrees)", + argstr="--guidedCurvatureThreshold %f") + maximumGuideDistance = traits.Float( + desc="Maximum distance for using the guide fiber direction", + argstr="--maximumGuideDistance %f") + seedThreshold = traits.Float( + desc= + "Anisotropy threshold for seed selection (recommended for Free fiber tracking)", + argstr="--seedThreshold %f") + trackingThreshold = traits.Float( + desc= + "Anisotropy threshold for fiber tracking (anisotropy values of the next point along the path)", + argstr="--trackingThreshold %f") + curvatureThreshold = traits.Float( + desc= + "Curvature threshold in degrees (recommended for Free fiber tracking)", + argstr="--curvatureThreshold %f") + branchingThreshold = traits.Float( + desc= + "Anisotropy Branching threshold (recommended for GraphSearch fiber tracking method)", + argstr="--branchingThreshold %f") + maximumBranchPoints = traits.Int( + desc= + "Maximum branch points 
(recommended for GraphSearch fiber tracking method)",
+        argstr="--maximumBranchPoints %d")
+    useRandomWalk = traits.Bool(
+        desc="Flag to use random walk.", argstr="--useRandomWalk ")
+    randomSeed = traits.Int(
+        desc="Random number generator seed", argstr="--randomSeed %d")
+    branchingAngle = traits.Float(
+        desc=
+        "Branching angle in degrees (recommended for GraphSearch fiber tracking method)",
+        argstr="--branchingAngle %f")
+    minimumLength = traits.Float(
+        desc="Minimum fiber length. Helpful for filtering invalid tracts.",
+        argstr="--minimumLength %f")
+    maximumLength = traits.Float(
+        desc="Maximum fiber length (voxels)", argstr="--maximumLength %f")
+    stepSize = traits.Float(
+        desc="Fiber tracking step size", argstr="--stepSize %f")
+    useLoopDetection = traits.Bool(
+        desc="Flag to make use of loop detection.",
+        argstr="--useLoopDetection ")
+    useTend = traits.Bool(
+        desc="Flag to make use of Tend F and Tend G parameters.",
+        argstr="--useTend ")
+    tendF = traits.Float(desc="Tend F parameter", argstr="--tendF %f")
+    tendG = traits.Float(desc="Tend G parameter", argstr="--tendG %f")
+    numberOfThreads = traits.Int(
+        desc="Explicitly specify the maximum number of threads to use.",
+        argstr="--numberOfThreads %d")
+
+
+class gtractFiberTrackingOutputSpec(TraitedSpec):
+    outputTract = File(
+        desc=
+        "Required (for Free, Streamline, GraphSearch, and Guided fiber tracking methods): name of output vtkPolydata file containing tract lines and the point data collected along them.",
+        exists=True)
+
+
+class gtractFiberTracking(SEMLikeCommandLine):
+    """title: Fiber Tracking
+
+category: Diffusion.GTRACT
+
+description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines the Tensor at each point along the fiber tract. This can then be rendered as glyphs in Slicer3 and can be used to define several scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low anisotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been implemented. This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. (3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambiguous regions and utilizes branching and a graph search algorithm in ambiguous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the tracking threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet these criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the secondary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector. The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert to a streamline algorithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by an angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program.
+
+version: 4.0.0
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
+
+license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
+
+contributor: This tool was developed by Vincent Magnotta, Greg Harris and Yongqiang Zhao.
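+
+example: The snippet below is an illustrative sketch of Free tracking only: file names are placeholders (real paths are required at run time) and the thresholds are plausible values, not validated defaults:
+
+    from nipype.interfaces.semtools.diffusion.gtract import gtractFiberTracking
+
+    track = gtractFiberTracking()
+    track.inputs.inputTensorVolume = 'tensor.nrrd'                # placeholder tensor image
+    track.inputs.inputAnisotropyVolume = 'fa.nrrd'                # placeholder anisotropy image
+    track.inputs.inputStartingSeedsLabelMapVolume = 'seeds.nrrd'  # placeholder seed label map
+    track.inputs.trackingMethod = 'Free'
+    track.inputs.seedThreshold = 0.3        # seed only voxels with anisotropy above 0.3
+    track.inputs.trackingThreshold = 0.2    # stop when anisotropy drops below 0.2
+    track.inputs.curvatureThreshold = 45.0  # stop on turns sharper than 45 degrees
+    track.inputs.outputTract = 'fibers.vtk'
+    print(track.cmdline)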
+ +acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1 + +""" + + input_spec = gtractFiberTrackingInputSpec + output_spec = gtractFiberTrackingOutputSpec + _cmd = " gtractFiberTracking " + _outputs_filenames = {'outputTract': 'outputTract.vtk'} + _redirect_x = False + + +class extractNrrdVectorIndexInputSpec(CommandLineInputSpec): + inputVolume = File( + desc= + "Required: input file containing the vector that will be extracted", + exists=True, + argstr="--inputVolume %s") + vectorIndex = traits.Int( + desc="Index in the vector image to extract", argstr="--vectorIndex %d") + setImageOrientation = traits.Enum( + "AsAcquired", + "Axial", + "Coronal", + "Sagittal", + desc= + "Sets the image orientation of the extracted vector (Axial, Coronal, Sagittal)", + argstr="--setImageOrientation %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: name of output NRRD file containing the vector image at the given index", + argstr="--outputVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class extractNrrdVectorIndexOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: name of output NRRD file containing the vector image at the given index", + exists=True) + + +class extractNrrdVectorIndex(SEMLikeCommandLine): + """title: Extract Nrrd Index + +category: Diffusion.GTRACT + +description: This program will extract a 3D image (single vector) from a vector 3D image at a given vector index. + +version: 4.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT + +license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt + +contributor: This tool was developed by Vincent Magnotta and Greg Harris. 
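+
+example: An illustrative sketch with placeholder file names (a real path is required at run time), extracting the first component of a 4D NRRD:
+
+    from nipype.interfaces.semtools.diffusion.gtract import extractNrrdVectorIndex
+
+    extract = extractNrrdVectorIndex()
+    extract.inputs.inputVolume = 'dwi.nrrd'  # placeholder vector image
+    extract.inputs.vectorIndex = 0           # first gradient volume, here assumed to be the b0
+    extract.inputs.outputVolume = 'b0.nrrd'
+    print(extract.cmdline)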
+
+acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
+
+"""
+
+    input_spec = extractNrrdVectorIndexInputSpec
+    output_spec = extractNrrdVectorIndexOutputSpec
+    _cmd = " extractNrrdVectorIndex "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
+    _redirect_x = False
+
+
+class gtractResampleFibersInputSpec(CommandLineInputSpec):
+    inputForwardDeformationFieldVolume = File(
+        desc="Required: input forward deformation field image file name",
+        exists=True,
+        argstr="--inputForwardDeformationFieldVolume %s")
+    inputReverseDeformationFieldVolume = File(
+        desc="Required: input reverse deformation field image file name",
+        exists=True,
+        argstr="--inputReverseDeformationFieldVolume %s")
+    inputTract = File(
+        desc="Required: name of input vtkPolydata file containing tract lines.",
+        exists=True,
+        argstr="--inputTract %s")
+    outputTract = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc=
+        "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.",
+        argstr="--outputTract %s")
+    writeXMLPolyDataFile = traits.Bool(
+        desc="Flag to make use of the XML format for vtkPolyData fiber tracts.",
+        argstr="--writeXMLPolyDataFile ")
+    numberOfThreads = traits.Int(
+        desc="Explicitly specify the maximum number of threads to use.",
+        argstr="--numberOfThreads %d")
+
+
+class gtractResampleFibersOutputSpec(TraitedSpec):
+    outputTract = File(
+        desc=
+        "Required: name of output vtkPolydata file containing tract lines and the point data collected along them.",
+        exists=True)
+
+
+class gtractResampleFibers(SEMLikeCommandLine):
+    """title: Resample Fibers
+
+category: Diffusion.GTRACT
+
+description: This program will resample a fiber tract with respect to a pair of forward and reverse deformation fields.
+
+version: 4.0.0
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
+
+license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
+
+contributor: This tool was developed by Vincent Magnotta and Greg Harris.
+
+acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
+
+"""
+
+    input_spec = gtractResampleFibersInputSpec
+    output_spec = gtractResampleFibersOutputSpec
+    _cmd = " gtractResampleFibers "
+    _outputs_filenames = {'outputTract': 'outputTract.vtk'}
+    _redirect_x = False
+
+
+class gtractTensorInputSpec(CommandLineInputSpec):
+    inputVolume = File(
+        desc=
+        "Required: input 4D NRRD image. Must contain data based on at least 6 distinct diffusion directions. The inputVolume is allowed to have multiple b0 and gradient direction images. Averaging of the b0 image is done internally in this step. Prior averaging of the DWIs is not required.",
+        exists=True,
+        argstr="--inputVolume %s")
+    outputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc=
+        "Required: name of output NRRD file containing the Tensor vector image",
+        argstr="--outputVolume %s")
+    medianFilterSize = InputMultiPath(
+        traits.Int,
+        desc="Median filter radius in all 3 directions",
+        sep=",",
+        argstr="--medianFilterSize %s")
+    maskProcessingMode = traits.Enum(
+        "NOMASK",
+        "ROIAUTO",
+        "ROI",
+        desc=
+        "ROIAUTO: mask is implicitly defined using an Otsu foreground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used",
+        argstr="--maskProcessingMode %s")
+    maskVolume = File(
+        desc="Mask Image, if maskProcessingMode is ROI",
+        exists=True,
+        argstr="--maskVolume %s")
+    backgroundSuppressingThreshold = traits.Int(
+        desc=
+        "Image threshold to suppress background. This sets a threshold used on the b0 image to remove background voxels from processing. Typically, values of 100 and 500 work well for Siemens and GE DTI data, respectively. Check your data particularly in the globus pallidus to make sure the brain tissue is not being eliminated with this threshold.",
+        argstr="--backgroundSuppressingThreshold %d")
+    resampleIsotropic = traits.Bool(
+        desc=
+        "Flag to resample to isotropic voxels. Enabling this feature is recommended if fiber tracking will be performed.",
+        argstr="--resampleIsotropic ")
+    size = traits.Float(
+        desc="Isotropic voxel size to resample to", argstr="--size %f")
+    b0Index = traits.Int(
+        desc="Index in the input vector image to extract", argstr="--b0Index %d")
+    applyMeasurementFrame = traits.Bool(
+        desc="Flag to apply the measurement frame to the gradient directions",
+        argstr="--applyMeasurementFrame ")
+    ignoreIndex = InputMultiPath(
+        traits.Int,
+        desc=
+        "Ignore diffusion gradient index. Used to remove specific gradient directions with artifacts.",
+        sep=",",
+        argstr="--ignoreIndex %s")
+    numberOfThreads = traits.Int(
+        desc="Explicitly specify the maximum number of threads to use.",
+        argstr="--numberOfThreads %d")
+
+
+class gtractTensorOutputSpec(TraitedSpec):
+    outputVolume = File(
+        desc=
+        "Required: name of output NRRD file containing the Tensor vector image",
+        exists=True)
+
+
+class gtractTensor(SEMLikeCommandLine):
+    """title: Tensor Estimation
+
+category: Diffusion.GTRACT
+
+description: This step will convert a b-value averaged diffusion weighted image to a 3x3 tensor voxel image. This step takes the diffusion weighted image data and generates a tensor representation of the data based on the signal intensity decay, b values applied, and the diffusion directions. The apparent diffusion coefficient for a given orientation is computed on a pixel-by-pixel basis by fitting the image data (voxel intensities) to the Stejskal-Tanner equation. If at least 6 diffusion directions are used, then the diffusion tensor can be computed. This program uses itk::DiffusionTensor3DReconstructionImageFilter. The user can adjust background threshold, median filter, and isotropic resampling.
+
+version: 4.0.0
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:GTRACT
+
+license: http://mri.radiology.uiowa.edu/copyright/GTRACT-Copyright.txt
+
+contributor: This tool was developed by Vincent Magnotta and Greg Harris.
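+
+example: An illustrative sketch only: file names are placeholders (a real path is required at run time), and the threshold is merely a plausible starting point for Siemens data, per the parameter description above:
+
+    from nipype.interfaces.semtools.diffusion.gtract import gtractTensor
+
+    tensor = gtractTensor()
+    tensor.inputs.inputVolume = 'dwi_averaged.nrrd'     # placeholder b-value averaged DWI
+    tensor.inputs.maskProcessingMode = 'ROIAUTO'        # automatic Otsu-based foreground mask
+    tensor.inputs.backgroundSuppressingThreshold = 100  # plausible for Siemens data
+    tensor.inputs.resampleIsotropic = True              # recommended before fiber tracking
+    tensor.inputs.outputVolume = 'tensor.nrrd'
+    print(tensor.cmdline)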
+
+acknowledgements: Funding for this version of the GTRACT program was provided by NIH/NINDS R01NS050568-01A2S1
+
+"""
+
+    input_spec = gtractTensorInputSpec
+    output_spec = gtractTensorOutputSpec
+    _cmd = " gtractTensor "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nrrd'}
+    _redirect_x = False
diff --git a/nipype/interfaces/semtools/diffusion/maxcurvature.py b/nipype/interfaces/semtools/diffusion/maxcurvature.py
new file mode 100644
index 0000000000..570109eb1b
--- /dev/null
+++ b/nipype/interfaces/semtools/diffusion/maxcurvature.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+# -*- coding: utf8 -*-
+"""Autogenerated file - DO NOT EDIT
+If you spot a bug, please report it on the mailing list and/or change the generator."""
+
+import os
+
+from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine,
+                     TraitedSpec, File, Directory, traits, isdefined,
+                     InputMultiPath, OutputMultiPath)
+
+
+class maxcurvatureInputSpec(CommandLineInputSpec):
+    image = File(desc="FA Image", exists=True, argstr="--image %s")
+    output = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Output File",
+        argstr="--output %s")
+    sigma = traits.Float(desc="Scale of Gradients", argstr="--sigma %f")
+    verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ")
+
+
+class maxcurvatureOutputSpec(TraitedSpec):
+    output = File(desc="Output File", exists=True)
+
+
+class maxcurvature(SEMLikeCommandLine):
+    """title: MaxCurvature-Hessian (DTIProcess)
+
+category: Diffusion
+
+description: This program computes the Hessian of the FA image (--image). We use this scalar image as a registration input when doing DTI atlas building. For most adult FA we use a sigma of 2, whereas for neonate or primate images a sigma of 1 or 1.5 is more appropriate. For really noisy images, 2.5 - 4 can be considered. The final image (--output) shows the main feature of the input image.
+
+version: 1.1.0
+
+documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess
+
+license: Copyright (c) Casey Goodlett. All rights reserved.
+    See http://www.ia.unc.edu/dev/Copyright.htm for details.
+    This software is distributed WITHOUT ANY WARRANTY; without even
+    the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+    PURPOSE. See the above copyright notices for more information.
+
+contributor: Casey Goodlett
+
+acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a Fortran compiler.
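+
+example: An illustrative sketch with a placeholder FA image (a real path is required at run time, since --image is checked for existence); the sigma follows the guidance for adult FA in the description above:
+
+    from nipype.interfaces.semtools.diffusion.maxcurvature import maxcurvature
+
+    mc = maxcurvature()
+    mc.inputs.image = 'fa.nii.gz'     # placeholder FA image
+    mc.inputs.sigma = 2.0             # suggested scale for adult FA
+    mc.inputs.output = 'hessian.nii'
+    print(mc.cmdline)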
+ +""" + + input_spec = maxcurvatureInputSpec + output_spec = maxcurvatureOutputSpec + _cmd = " maxcurvature " + _outputs_filenames = {'output': 'output.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tests/__init__.py b/nipype/interfaces/semtools/diffusion/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py new file mode 100644 index 0000000000..05593527fb --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_DWIConvert.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DWIConvert + + +def test_DWIConvert_inputs(): + input_map = dict( + allowLossyConversion=dict(argstr='--allowLossyConversion ', ), + args=dict(argstr='%s', ), + conversionMode=dict(argstr='--conversionMode %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fMRI=dict(argstr='--fMRI ', ), + fslNIFTIFile=dict(argstr='--fslNIFTIFile %s', ), + gradientVectorFile=dict( + argstr='--gradientVectorFile %s', + hash_files=False, + ), + inputBValues=dict(argstr='--inputBValues %s', ), + inputBVectors=dict(argstr='--inputBVectors %s', ), + inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputBValues=dict( + argstr='--outputBValues %s', + hash_files=False, + ), + outputBVectors=dict( + argstr='--outputBVectors %s', + hash_files=False, + ), + outputDirectory=dict( + argstr='--outputDirectory %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), + transposeInputBVectors=dict(argstr='--transposeInputBVectors ', ), + useBMatrixGradientDirections=dict( + argstr='--useBMatrixGradientDirections ', ), + useIdentityMeaseurementFrame=dict( + argstr='--useIdentityMeaseurementFrame ', ), + writeProtocolGradientsFile=dict( + argstr='--writeProtocolGradientsFile ', ), + ) + inputs = DWIConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIConvert_outputs(): + output_map = dict( + gradientVectorFile=dict(), + outputBValues=dict(), + outputBVectors=dict(), + outputDirectory=dict(), + outputVolume=dict(), + ) + outputs = DWIConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py new file mode 100644 index 0000000000..eb9d8c6184 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_compareTractInclusion.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import compareTractInclusion + + +def test_compareTractInclusion_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + closeness=dict(argstr='--closeness %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfPoints=dict(argstr='--numberOfPoints %d', ), 
+ numberOfThreads=dict(argstr='--numberOfThreads %d', ), + standardFiber=dict(argstr='--standardFiber %s', ), + testFiber=dict(argstr='--testFiber %s', ), + testForBijection=dict(argstr='--testForBijection ', ), + testForFiberCardinality=dict(argstr='--testForFiberCardinality ', ), + writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + ) + inputs = compareTractInclusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_compareTractInclusion_outputs(): + output_map = dict() + outputs = compareTractInclusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py new file mode 100644 index 0000000000..92c00853f4 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiaverage.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import dtiaverage + + +def test_dtiaverage_inputs(): + input_map = dict( + DTI_double=dict(argstr='--DTI_double ', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputs=dict(argstr='--inputs %s...', ), + tensor_output=dict( + argstr='--tensor_output %s', + hash_files=False, + ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = dtiaverage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_dtiaverage_outputs(): + output_map = dict(tensor_output=dict(), ) + outputs = dtiaverage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py new file mode 100644 index 0000000000..440fd4df07 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiestim.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import dtiestim + + +def test_dtiestim_inputs(): + input_map = dict( + B0=dict( + argstr='--B0 %s', + hash_files=False, + ), + B0_mask_output=dict( + argstr='--B0_mask_output %s', + hash_files=False, + ), + DTI_double=dict(argstr='--DTI_double ', ), + args=dict(argstr='%s', ), + bad_region_mask=dict(argstr='--bad_region_mask %s', ), + brain_mask=dict(argstr='--brain_mask %s', ), + correction=dict(argstr='--correction %s', ), + defaultTensor=dict( + argstr='--defaultTensor %s', + sep=',', + ), + dwi_image=dict(argstr='--dwi_image %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + idwi=dict( + argstr='--idwi %s', + hash_files=False, + ), + method=dict(argstr='--method %s', ), + shiftNeg=dict(argstr='--shiftNeg ', ), + shiftNegCoeff=dict(argstr='--shiftNegCoeff %f', ), + sigma=dict(argstr='--sigma %f', ), + step=dict(argstr='--step %f', ), + tensor_output=dict( + argstr='--tensor_output %s', + hash_files=False, + ), + threshold=dict(argstr='--threshold %d', ), + verbose=dict(argstr='--verbose ', ), + weight_iterations=dict(argstr='--weight_iterations 
%d', ), + ) + inputs = dtiestim.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_dtiestim_outputs(): + output_map = dict( + B0=dict(), + B0_mask_output=dict(), + idwi=dict(), + tensor_output=dict(), + ) + outputs = dtiestim.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py new file mode 100644 index 0000000000..3148d1edb5 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_dtiprocess.py @@ -0,0 +1,115 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import dtiprocess + + +def test_dtiprocess_inputs(): + input_map = dict( + DTI_double=dict(argstr='--DTI_double ', ), + RD_output=dict( + argstr='--RD_output %s', + hash_files=False, + ), + affineitk_file=dict(argstr='--affineitk_file %s', ), + args=dict(argstr='%s', ), + color_fa_output=dict( + argstr='--color_fa_output %s', + hash_files=False, + ), + correction=dict(argstr='--correction %s', ), + deformation_output=dict( + argstr='--deformation_output %s', + hash_files=False, + ), + dof_file=dict(argstr='--dof_file %s', ), + dti_image=dict(argstr='--dti_image %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fa_gradient_output=dict( + argstr='--fa_gradient_output %s', + hash_files=False, + ), + fa_gradmag_output=dict( + argstr='--fa_gradmag_output %s', + hash_files=False, + ), + fa_output=dict( + argstr='--fa_output %s', + hash_files=False, + ), + forward=dict(argstr='--forward %s', ), + frobenius_norm_output=dict( + argstr='--frobenius_norm_output %s', + hash_files=False, + ), + hField=dict(argstr='--hField ', ), + interpolation=dict(argstr='--interpolation %s', ), + lambda1_output=dict( + argstr='--lambda1_output %s', + hash_files=False, + ), + lambda2_output=dict( + argstr='--lambda2_output %s', + hash_files=False, + ), + lambda3_output=dict( + argstr='--lambda3_output %s', + hash_files=False, + ), + mask=dict(argstr='--mask %s', ), + md_output=dict( + argstr='--md_output %s', + hash_files=False, + ), + negative_eigenvector_output=dict( + argstr='--negative_eigenvector_output %s', + hash_files=False, + ), + newdof_file=dict(argstr='--newdof_file %s', ), + outmask=dict( + argstr='--outmask %s', + hash_files=False, + ), + principal_eigenvector_output=dict( + argstr='--principal_eigenvector_output %s', + hash_files=False, + ), + reorientation=dict(argstr='--reorientation %s', ), + rot_output=dict( + argstr='--rot_output %s', + hash_files=False, + ), + scalar_float=dict(argstr='--scalar_float ', ), + sigma=dict(argstr='--sigma %f', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = dtiprocess.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_dtiprocess_outputs(): + output_map = dict( + RD_output=dict(), + color_fa_output=dict(), + deformation_output=dict(), + fa_gradient_output=dict(), + fa_gradmag_output=dict(), + fa_output=dict(), + frobenius_norm_output=dict(), + lambda1_output=dict(), + lambda2_output=dict(), + lambda3_output=dict(), + md_output=dict(), + negative_eigenvector_output=dict(), + outmask=dict(), + 
principal_eigenvector_output=dict(), + rot_output=dict(), + ) + outputs = dtiprocess.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py new file mode 100644 index 0000000000..ac5784f1c7 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import extractNrrdVectorIndex + + +def test_extractNrrdVectorIndex_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + setImageOrientation=dict(argstr='--setImageOrientation %s', ), + vectorIndex=dict(argstr='--vectorIndex %d', ), + ) + inputs = extractNrrdVectorIndex.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_extractNrrdVectorIndex_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = extractNrrdVectorIndex.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py new file mode 100644 index 0000000000..195d7dc3e1 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAnisotropyMap.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractAnisotropyMap + + +def test_gtractAnisotropyMap_inputs(): + input_map = dict( + anisotropyType=dict(argstr='--anisotropyType %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractAnisotropyMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractAnisotropyMap_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractAnisotropyMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py new file mode 100644 index 0000000000..fbb5bd6a55 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractAverageBvalues.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractAverageBvalues + + +def test_gtractAverageBvalues_inputs(): + 
input_map = dict( + args=dict(argstr='%s', ), + averageB0only=dict(argstr='--averageB0only ', ), + directionsTolerance=dict(argstr='--directionsTolerance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractAverageBvalues.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractAverageBvalues_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractAverageBvalues.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py new file mode 100644 index 0000000000..fed980c463 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractClipAnisotropy.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractClipAnisotropy + + +def test_gtractClipAnisotropy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + clipFirstSlice=dict(argstr='--clipFirstSlice ', ), + clipLastSlice=dict(argstr='--clipLastSlice ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractClipAnisotropy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractClipAnisotropy_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractClipAnisotropy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py new file mode 100644 index 0000000000..456e1e79fa --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoRegAnatomy.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractCoRegAnatomy + + +def test_gtractCoRegAnatomy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + borderSize=dict(argstr='--borderSize %d', ), + convergence=dict(argstr='--convergence %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gradientTolerance=dict(argstr='--gradientTolerance %f', ), + gridSize=dict( + argstr='--gridSize %s', + sep=',', + ), + inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), + inputRigidTransform=dict(argstr='--inputRigidTransform %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), + maximumStepSize=dict(argstr='--maximumStepSize %f', ), + minimumStepSize=dict(argstr='--minimumStepSize %f', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), 
+ numberOfIterations=dict(argstr='--numberOfIterations %d', ), + numberOfSamples=dict(argstr='--numberOfSamples %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTransformName=dict( + argstr='--outputTransformName %s', + hash_files=False, + ), + relaxationFactor=dict(argstr='--relaxationFactor %f', ), + samplingPercentage=dict(argstr='--samplingPercentage %f', ), + spatialScale=dict(argstr='--spatialScale %d', ), + transformType=dict(argstr='--transformType %s', ), + translationScale=dict(argstr='--translationScale %f', ), + useCenterOfHeadAlign=dict(argstr='--useCenterOfHeadAlign ', ), + useGeometryAlign=dict(argstr='--useGeometryAlign ', ), + useMomentsAlign=dict(argstr='--useMomentsAlign ', ), + vectorIndex=dict(argstr='--vectorIndex %d', ), + ) + inputs = gtractCoRegAnatomy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractCoRegAnatomy_outputs(): + output_map = dict(outputTransformName=dict(), ) + outputs = gtractCoRegAnatomy.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py new file mode 100644 index 0000000000..7b75858eff --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractConcatDwi.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractConcatDwi + + +def test_gtractConcatDwi_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignoreOrigins=dict(argstr='--ignoreOrigins ', ), + inputVolume=dict(argstr='--inputVolume %s...', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractConcatDwi.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractConcatDwi_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractConcatDwi.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py new file mode 100644 index 0000000000..d9e0b725c8 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCopyImageOrientation.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractCopyImageOrientation + + +def test_gtractCopyImageOrientation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractCopyImageOrientation.input_spec() + + for key, 
metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractCopyImageOrientation_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractCopyImageOrientation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py new file mode 100644 index 0000000000..3143b16dfb --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCoregBvalues.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractCoregBvalues + + +def test_gtractCoregBvalues_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debugLevel=dict(argstr='--debugLevel %d', ), + eddyCurrentCorrection=dict(argstr='--eddyCurrentCorrection ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedVolumeIndex=dict(argstr='--fixedVolumeIndex %d', ), + maximumStepSize=dict(argstr='--maximumStepSize %f', ), + minimumStepSize=dict(argstr='--minimumStepSize %f', ), + movingVolume=dict(argstr='--movingVolume %s', ), + numberOfIterations=dict(argstr='--numberOfIterations %d', ), + numberOfSpatialSamples=dict(argstr='--numberOfSpatialSamples %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + registerB0Only=dict(argstr='--registerB0Only ', ), + relaxationFactor=dict(argstr='--relaxationFactor %f', ), + samplingPercentage=dict(argstr='--samplingPercentage %f', ), + spatialScale=dict(argstr='--spatialScale %f', ), + ) + inputs = gtractCoregBvalues.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractCoregBvalues_outputs(): + output_map = dict( + outputTransform=dict(), + outputVolume=dict(), + ) + outputs = gtractCoregBvalues.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py new file mode 100644 index 0000000000..32d1e68898 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCostFastMarching.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractCostFastMarching + + +def test_gtractCostFastMarching_inputs(): + input_map = dict( + anisotropyWeight=dict(argstr='--anisotropyWeight %f', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputStartingSeedsLabelMapVolume=dict( + argstr='--inputStartingSeedsLabelMapVolume %s', ), + inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputCostVolume=dict( + 
argstr='--outputCostVolume %s', + hash_files=False, + ), + outputSpeedVolume=dict( + argstr='--outputSpeedVolume %s', + hash_files=False, + ), + seedThreshold=dict(argstr='--seedThreshold %f', ), + startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), + stoppingValue=dict(argstr='--stoppingValue %f', ), + ) + inputs = gtractCostFastMarching.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractCostFastMarching_outputs(): + output_map = dict( + outputCostVolume=dict(), + outputSpeedVolume=dict(), + ) + outputs = gtractCostFastMarching.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py new file mode 100644 index 0000000000..bbe375bbb9 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractCreateGuideFiber.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractCreateGuideFiber + + +def test_gtractCreateGuideFiber_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFiber=dict(argstr='--inputFiber %s', ), + numberOfPoints=dict(argstr='--numberOfPoints %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputFiber=dict( + argstr='--outputFiber %s', + hash_files=False, + ), + writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + ) + inputs = gtractCreateGuideFiber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractCreateGuideFiber_outputs(): + output_map = dict(outputFiber=dict(), ) + outputs = gtractCreateGuideFiber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py new file mode 100644 index 0000000000..ec3a99b91e --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFastMarchingTracking.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractFastMarchingTracking + + +def test_gtractFastMarchingTracking_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + costStepSize=dict(argstr='--costStepSize %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputCostVolume=dict(argstr='--inputCostVolume %s', ), + inputStartingSeedsLabelMapVolume=dict( + argstr='--inputStartingSeedsLabelMapVolume %s', ), + inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + maximumStepSize=dict(argstr='--maximumStepSize %f', ), + minimumStepSize=dict(argstr='--minimumStepSize %f', ), + numberOfIterations=dict(argstr='--numberOfIterations %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTract=dict( + argstr='--outputTract %s', + 
hash_files=False, + ), + seedThreshold=dict(argstr='--seedThreshold %f', ), + startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), + trackingThreshold=dict(argstr='--trackingThreshold %f', ), + writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + ) + inputs = gtractFastMarchingTracking.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractFastMarchingTracking_outputs(): + output_map = dict(outputTract=dict(), ) + outputs = gtractFastMarchingTracking.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py new file mode 100644 index 0000000000..96c93b8b64 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractFiberTracking.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractFiberTracking + + +def test_gtractFiberTracking_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + branchingAngle=dict(argstr='--branchingAngle %f', ), + branchingThreshold=dict(argstr='--branchingThreshold %f', ), + curvatureThreshold=dict(argstr='--curvatureThreshold %f', ), + endingSeedsLabel=dict(argstr='--endingSeedsLabel %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + guidedCurvatureThreshold=dict( + argstr='--guidedCurvatureThreshold %f', ), + inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputEndingSeedsLabelMapVolume=dict( + argstr='--inputEndingSeedsLabelMapVolume %s', ), + inputStartingSeedsLabelMapVolume=dict( + argstr='--inputStartingSeedsLabelMapVolume %s', ), + inputTensorVolume=dict(argstr='--inputTensorVolume %s', ), + inputTract=dict(argstr='--inputTract %s', ), + maximumBranchPoints=dict(argstr='--maximumBranchPoints %d', ), + maximumGuideDistance=dict(argstr='--maximumGuideDistance %f', ), + maximumLength=dict(argstr='--maximumLength %f', ), + minimumLength=dict(argstr='--minimumLength %f', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTract=dict( + argstr='--outputTract %s', + hash_files=False, + ), + randomSeed=dict(argstr='--randomSeed %d', ), + seedThreshold=dict(argstr='--seedThreshold %f', ), + startingSeedsLabel=dict(argstr='--startingSeedsLabel %d', ), + stepSize=dict(argstr='--stepSize %f', ), + tendF=dict(argstr='--tendF %f', ), + tendG=dict(argstr='--tendG %f', ), + trackingMethod=dict(argstr='--trackingMethod %s', ), + trackingThreshold=dict(argstr='--trackingThreshold %f', ), + useLoopDetection=dict(argstr='--useLoopDetection ', ), + useRandomWalk=dict(argstr='--useRandomWalk ', ), + useTend=dict(argstr='--useTend ', ), + writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + ) + inputs = gtractFiberTracking.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractFiberTracking_outputs(): + output_map = dict(outputTract=dict(), ) + outputs = gtractFiberTracking.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py new file mode 100644 index 0000000000..9ddde832b2 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractImageConformity.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractImageConformity + + +def test_gtractImageConformity_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = gtractImageConformity.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractImageConformity_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractImageConformity.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py new file mode 100644 index 0000000000..ca642ceb66 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertBSplineTransform.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractInvertBSplineTransform + + +def test_gtractInvertBSplineTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputTransform=dict(argstr='--inputTransform %s', ), + landmarkDensity=dict( + argstr='--landmarkDensity %s', + sep=',', + ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + ) + inputs = gtractInvertBSplineTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractInvertBSplineTransform_outputs(): + output_map = dict(outputTransform=dict(), ) + outputs = gtractInvertBSplineTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py new file mode 100644 index 0000000000..e4fd213d39 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertDisplacementField.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractInvertDisplacementField + + +def test_gtractInvertDisplacementField_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + baseImage=dict(argstr='--baseImage %s', ), + 
deformationImage=dict(argstr='--deformationImage %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + subsamplingFactor=dict(argstr='--subsamplingFactor %d', ), + ) + inputs = gtractInvertDisplacementField.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractInvertDisplacementField_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractInvertDisplacementField.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py new file mode 100644 index 0000000000..c035862663 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractInvertRigidTransform.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractInvertRigidTransform + + +def test_gtractInvertRigidTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict(argstr='--inputTransform %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + ) + inputs = gtractInvertRigidTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractInvertRigidTransform_outputs(): + output_map = dict(outputTransform=dict(), ) + outputs = gtractInvertRigidTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py new file mode 100644 index 0000000000..43f428b233 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleAnisotropy.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractResampleAnisotropy + + +def test_gtractResampleAnisotropy_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), + inputAnisotropyVolume=dict(argstr='--inputAnisotropyVolume %s', ), + inputTransform=dict(argstr='--inputTransform %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + transformType=dict(argstr='--transformType %s', ), + ) + inputs = gtractResampleAnisotropy.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractResampleAnisotropy_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractResampleAnisotropy.output_spec() + + 
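# These generated tests iterate the expected metadata maps and compare them
# against the spec constructed at runtime. A hand-written spot check for a
# single trait (illustrative sketch only) would be:
#     spec = gtractResampleAnisotropy.input_spec()
#     assert spec.traits()['inputTransform'].argstr == '--inputTransform %s'
#     assert spec.traits()['outputVolume'].hash_files is False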
for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py new file mode 100644 index 0000000000..812afca5c0 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleB0.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractResampleB0 + + +def test_gtractResampleB0_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputAnatomicalVolume=dict(argstr='--inputAnatomicalVolume %s', ), + inputTransform=dict(argstr='--inputTransform %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + transformType=dict(argstr='--transformType %s', ), + vectorIndex=dict(argstr='--vectorIndex %d', ), + ) + inputs = gtractResampleB0.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractResampleB0_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractResampleB0.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py new file mode 100644 index 0000000000..decc017c60 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleCodeImage.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractResampleCodeImage + + +def test_gtractResampleCodeImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCodeVolume=dict(argstr='--inputCodeVolume %s', ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputTransform=dict(argstr='--inputTransform %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + transformType=dict(argstr='--transformType %s', ), + ) + inputs = gtractResampleCodeImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractResampleCodeImage_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractResampleCodeImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py new file mode 100644 index 0000000000..b815d3d3d6 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleDWIInPlace.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from 
__future__ import unicode_literals +from ..gtract import gtractResampleDWIInPlace + + +def test_gtractResampleDWIInPlace_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debugLevel=dict(argstr='--debugLevel %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + imageOutputSize=dict( + argstr='--imageOutputSize %s', + sep=',', + ), + inputTransform=dict(argstr='--inputTransform %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputResampledB0=dict( + argstr='--outputResampledB0 %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + referenceVolume=dict(argstr='--referenceVolume %s', ), + warpDWITransform=dict(argstr='--warpDWITransform %s', ), + ) + inputs = gtractResampleDWIInPlace.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractResampleDWIInPlace_outputs(): + output_map = dict( + outputResampledB0=dict(), + outputVolume=dict(), + ) + outputs = gtractResampleDWIInPlace.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py new file mode 100644 index 0000000000..d0d2cd5664 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractResampleFibers.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractResampleFibers + + +def test_gtractResampleFibers_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputForwardDeformationFieldVolume=dict( + argstr='--inputForwardDeformationFieldVolume %s', ), + inputReverseDeformationFieldVolume=dict( + argstr='--inputReverseDeformationFieldVolume %s', ), + inputTract=dict(argstr='--inputTract %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputTract=dict( + argstr='--outputTract %s', + hash_files=False, + ), + writeXMLPolyDataFile=dict(argstr='--writeXMLPolyDataFile ', ), + ) + inputs = gtractResampleFibers.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractResampleFibers_outputs(): + output_map = dict(outputTract=dict(), ) + outputs = gtractResampleFibers.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py new file mode 100644 index 0000000000..ead96d1c71 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTensor.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractTensor + + +def test_gtractTensor_inputs(): + input_map = dict( + applyMeasurementFrame=dict(argstr='--applyMeasurementFrame ', ), + args=dict(argstr='%s', ), + b0Index=dict(argstr='--b0Index %d', ), + backgroundSuppressingThreshold=dict( 
+ argstr='--backgroundSuppressingThreshold %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ignoreIndex=dict( + argstr='--ignoreIndex %s', + sep=',', + ), + inputVolume=dict(argstr='--inputVolume %s', ), + maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), + maskVolume=dict(argstr='--maskVolume %s', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + resampleIsotropic=dict(argstr='--resampleIsotropic ', ), + size=dict(argstr='--size %f', ), + ) + inputs = gtractTensor.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractTensor_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = gtractTensor.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py new file mode 100644 index 0000000000..2dfde189e2 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_gtractTransformToDisplacementField.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..gtract import gtractTransformToDisplacementField + + +def test_gtractTransformToDisplacementField_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + inputTransform=dict(argstr='--inputTransform %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputDeformationFieldVolume=dict( + argstr='--outputDeformationFieldVolume %s', + hash_files=False, + ), + ) + inputs = gtractTransformToDisplacementField.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_gtractTransformToDisplacementField_outputs(): + output_map = dict(outputDeformationFieldVolume=dict(), ) + outputs = gtractTransformToDisplacementField.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py new file mode 100644 index 0000000000..cc06c5ede5 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tests/test_auto_maxcurvature.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..maxcurvature import maxcurvature + + +def test_maxcurvature_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + image=dict(argstr='--image %s', ), + output=dict( + argstr='--output %s', + hash_files=False, + ), + sigma=dict(argstr='--sigma %f', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = maxcurvature.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(inputs.traits()[key], metakey) == value +def test_maxcurvature_outputs(): + output_map = dict(output=dict(), ) + outputs = maxcurvature.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tractography/__init__.py b/nipype/interfaces/semtools/diffusion/tractography/__init__.py new file mode 100644 index 0000000000..f846b7fde5 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .commandlineonly import fiberstats +from .fiberprocess import fiberprocess +from .fibertrack import fibertrack +from .ukftractography import UKFTractography diff --git a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py new file mode 100644 index 0000000000..19adc2a817 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class fiberstatsInputSpec(CommandLineInputSpec): + fiber_file = File( + desc="DTI Fiber File", exists=True, argstr="--fiber_file %s") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + + +class fiberstatsOutputSpec(TraitedSpec): + pass + + +class fiberstats(SEMLikeCommandLine): + """title: FiberStats (DTIProcess) + +category: Diffusion.Tractography.CommandLineOnly + +description: Obsolete tool - Not used anymore + +version: 1.1.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. + +contributor: Casey Goodlett + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler.
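For orientation, a minimal nipype invocation sketch (hypothetical file name; assumes the DTIProcess fiberstats binary is on the PATH, and note the tool is flagged obsolete above):

    from nipype.interfaces.semtools.diffusion.tractography import fiberstats
    fs = fiberstats()
    fs.inputs.fiber_file = 'tracts.vtk'  # hypothetical input fiber bundle
    fs.inputs.verbose = True
    print(fs.cmdline)  # inspect the generated command line
    # fs.run() would execute the command (the input file must exist)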
+ +""" + + input_spec = fiberstatsInputSpec + output_spec = fiberstatsOutputSpec + _cmd = " fiberstats " + _outputs_filenames = {} + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py new file mode 100644 index 0000000000..c0e9dcbbaf --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/fiberprocess.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class fiberprocessInputSpec(CommandLineInputSpec): + fiber_file = File( + desc="DTI fiber file", exists=True, argstr="--fiber_file %s") + fiber_output = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output fiber file. May be warped or updated with new data depending on other options used.", + argstr="--fiber_output %s") + tensor_volume = File( + desc="Interpolate tensor values from the given field", + exists=True, + argstr="--tensor_volume %s") + h_field = File( + desc= + "HField for warp and statistics lookup. If this option is used tensor-volume must also be specified.", + exists=True, + argstr="--h_field %s") + displacement_field = File( + desc= + "Displacement Field for warp and statistics lookup. If this option is used tensor-volume must also be specified.", + exists=True, + argstr="--displacement_field %s") + saveProperties = traits.Bool( + desc= + "save the tensor property as scalar data into the vtk (only works for vtk fiber files). ", + argstr="--saveProperties ") + no_warp = traits.Bool( + desc= + "Do not warp the geometry of the tensors only obtain the new statistics.", + argstr="--no_warp ") + fiber_radius = traits.Float( + desc="set radius of all fibers to this value", + argstr="--fiber_radius %f") + index_space = traits.Bool( + desc= + "Use index-space for fiber output coordinates, otherwise us world space for fiber output coordinates (from tensor file).", + argstr="--index_space ") + voxelize = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. The deformation is applied before the voxelization ", + argstr="--voxelize %s") + voxelize_count_fibers = traits.Bool( + desc="Count number of fibers per-voxel instead of just setting to 1", + argstr="--voxelize_count_fibers ") + voxel_label = traits.Int( + desc="Label for voxelized fiber", argstr="--voxel_label %d") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + noDataChange = traits.Bool( + desc="Do not change data ??? ", argstr="--noDataChange ") + + +class fiberprocessOutputSpec(TraitedSpec): + fiber_output = File( + desc= + "Output fiber file. May be warped or updated with new data depending on other options used.", + exists=True) + voxelize = File( + desc= + "Voxelize fiber into a label map (the labelmap filename is the argument of -V). The tensor file must be specified using -T for information about the size, origin, spacing of the image. 
The deformation is applied before the voxelization ", + exists=True) + + +class fiberprocess(SEMLikeCommandLine): + """title: FiberProcess (DTIProcess) + +category: Diffusion.Tractography + +description: fiberprocess is a tool that manages fiber files extracted from the fibertrack tool or any fiber tracking algorithm. It takes as input .fib and .vtk files (--fiber_file) and saves the changed fibers (--fiber_output) into the same two formats. The main purpose of this tool is to deform the fiber file with a transformation field as an input (--displacement_field or --h_field depending on whether you deal with dfield or hfield). To use that option you need to specify the tensor field from which the fiber file was extracted with the option --tensor_volume. The transformation applied on the fiber file is the inverse of the one input. If the transformation is from one case to an atlas, fiberprocess assumes that the fiber file is in the atlas space and you want it in the original case space, so it's the inverse of the transformation which has been computed. +You have 2 options for fiber modification. You can either deform the fibers (their geometry) into the space OR you can keep the same geometry but map the diffusion properties (fa, md, lbd's...) of the original tensor field along the fibers at the corresponding locations. This is triggered by the --no_warp option. To use the previous example: when you have a tensor field in the original space and the deformed tensor field in the atlas space, you want to track the fibers in the atlas space, keeping this geometry but with the original case diffusion properties. Then you can specify the transformation field (from original case -> atlas) and the original tensor field with the --tensor_volume option. +With fiberprocess you can also binarize a fiber file. Using the --voxelize option will create an image where each voxel through which a fiber is passing is set to 1. The output is going to be a binary image with the values 0 or 1 by default but the 1 value voxel can be set to any number with the --voxel_label option. Finally you can create an image where the value at the voxel is the number of fibers passing through. (--voxelize_count_fibers) + +version: 1.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information.
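As a concrete illustration of the voxelize path described above, a minimal sketch (hypothetical file names; assumes the DTIProcess tools are installed):

    from nipype.interfaces.semtools.diffusion.tractography import fiberprocess
    fp = fiberprocess()
    fp.inputs.fiber_file = 'tracts.fib'       # hypothetical input fibers
    fp.inputs.tensor_volume = 'tensors.nrrd'  # supplies size/origin/spacing for -V
    fp.inputs.voxelize = 'fiber_mask.nrrd'    # label map to write
    fp.inputs.voxel_label = 2                 # value stored in traversed voxels
    fp.run()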
+ +contributor: Casey Goodlett + +""" + + input_spec = fiberprocessInputSpec + output_spec = fiberprocessOutputSpec + _cmd = " fiberprocess " + _outputs_filenames = { + 'fiber_output': 'fiber_output.vtk', + 'voxelize': 'voxelize.nii' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py new file mode 100644 index 0000000000..498cb2579d --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class fibertrackInputSpec(CommandLineInputSpec): + input_tensor_file = File( + desc="Tensor Image", exists=True, argstr="--input_tensor_file %s") + input_roi_file = File( + desc= + "The filename of the image which contains the labels used for seeding and constraining the algorithm.", + exists=True, + argstr="--input_roi_file %s") + output_fiber_file = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The filename for the fiber file produced by the algorithm. This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", + argstr="--output_fiber_file %s") + source_label = traits.Int( + desc= + "The label of voxels in the labelfile to use for seeding tractography. One tract is seeded from the center of each voxel with this label", + argstr="--source_label %d") + target_label = traits.Int( + desc= + "The label of voxels in the labelfile used to constrain tractography. Tracts that do not pass through a voxel with this label are rejected. Set this to keep all tracts.", + argstr="--target_label %d") + forbidden_label = traits.Int( + desc="Forbidden label", argstr="--forbidden_label %d") + whole_brain = traits.Bool( + desc= + "If this option is enabled all voxels in the image are used to seed tractography. When this option is enabled both source and target labels function as target labels", + argstr="--whole_brain ") + max_angle = traits.Float( + desc="Maximum angle of change in radians", argstr="--max_angle %f") + step_size = traits.Float( + desc="Step size in mm for the tracking algorithm", + argstr="--step_size %f") + min_fa = traits.Float( + desc="The minimum FA threshold to continue tractography", + argstr="--min_fa %f") + force = traits.Bool(desc="Ignore sanity checks.", argstr="--force ") + verbose = traits.Bool(desc="produce verbose output", argstr="--verbose ") + really_verbose = traits.Bool( + desc="Follow detail of fiber tracking algorithm", + argstr="--really_verbose ") + + +class fibertrackOutputSpec(TraitedSpec): + output_fiber_file = File( + desc= + "The filename for the fiber file produced by the algorithm. This file must end in a .fib or .vtk extension for ITK spatial object and vtkPolyData formats respectively.", + exists=True) + + +class fibertrack(SEMLikeCommandLine): + """title: FiberTrack (DTIProcess) + +category: Diffusion.Tractography + +description: This program implements a simple streamline tractography method based on the principal eigenvector of the tensor field. A fourth order Runge-Kutta integration rule is used to advance the streamlines.
+As a first parameter you have to input the tensor field (with the --input_tensor_file option). Then the region of interest image file is set with the --input_roi_file. Next you want to set the output fiber file name after the --output_fiber_file option. +You can specify the label value in the input_roi_file with the --target_label, --source_label and --forbidden_label options. By default target label is 1, source label is 2 and forbidden label is 0. The source label is where the streamlines are seeded, the target label defines the voxels through which the fibers must pass to be kept in the final fiber file and the forbidden label defines the voxels where the streamlines are stopped if they pass through it. There is also a --whole_brain option which, if enabled, considers both target and source labels of the roi image as target labels and all the voxels of the image are considered as sources. +During the tractography, the --min_fa parameter is used as the minimum value needed at each voxel for the tracking to keep going along a streamline. The --step_size parameter is used for each iteration of the tracking algorithm and defines the length of each step. The --max_angle option defines the maximum angle allowed between two successive segments along the tracked fiber. + +version: 1.1.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. + +contributor: Casey Goodlett + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler.
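A minimal seeded-tracking sketch following the description above (hypothetical file names and label values):

    from nipype.interfaces.semtools.diffusion.tractography import fibertrack
    ft = fibertrack()
    ft.inputs.input_tensor_file = 'dti_tensors.nrrd'  # hypothetical tensor image
    ft.inputs.input_roi_file = 'labels.nrrd'          # seeding/constraint labels
    ft.inputs.source_label = 2          # seed from these voxels
    ft.inputs.target_label = 1          # keep only tracts crossing these voxels
    ft.inputs.min_fa = 0.2              # stop when FA drops below this
    ft.inputs.step_size = 0.5           # mm per integration step
    ft.inputs.output_fiber_file = 'tracts.vtk'
    ft.run()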
+ +""" + + input_spec = fibertrackInputSpec + output_spec = fibertrackOutputSpec + _cmd = " fibertrack " + _outputs_filenames = {'output_fiber_file': 'output_fiber_file.vtk'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/__init__.py b/nipype/interfaces/semtools/diffusion/tractography/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py new file mode 100644 index 0000000000..7c61974ef3 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_UKFTractography.py @@ -0,0 +1,70 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..ukftractography import UKFTractography + + +def test_UKFTractography_inputs(): + input_map = dict( + Ql=dict(argstr='--Ql %f', ), + Qm=dict(argstr='--Qm %f', ), + Qw=dict(argstr='--Qw %f', ), + Rs=dict(argstr='--Rs %f', ), + args=dict(argstr='%s', ), + dwiFile=dict(argstr='--dwiFile %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + freeWater=dict(argstr='--freeWater ', ), + fullTensorModel=dict(argstr='--fullTensorModel ', ), + labels=dict( + argstr='--labels %s', + sep=',', + ), + maskFile=dict(argstr='--maskFile %s', ), + maxBranchingAngle=dict(argstr='--maxBranchingAngle %f', ), + maxHalfFiberLength=dict(argstr='--maxHalfFiberLength %f', ), + minBranchingAngle=dict(argstr='--minBranchingAngle %f', ), + minFA=dict(argstr='--minFA %f', ), + minGA=dict(argstr='--minGA %f', ), + numTensor=dict(argstr='--numTensor %s', ), + numThreads=dict(argstr='--numThreads %d', ), + recordCovariance=dict(argstr='--recordCovariance ', ), + recordFA=dict(argstr='--recordFA ', ), + recordFreeWater=dict(argstr='--recordFreeWater ', ), + recordLength=dict(argstr='--recordLength %f', ), + recordNMSE=dict(argstr='--recordNMSE ', ), + recordState=dict(argstr='--recordState ', ), + recordTensors=dict(argstr='--recordTensors ', ), + recordTrace=dict(argstr='--recordTrace ', ), + seedFALimit=dict(argstr='--seedFALimit %f', ), + seedsFile=dict(argstr='--seedsFile %s', ), + seedsPerVoxel=dict(argstr='--seedsPerVoxel %d', ), + stepLength=dict(argstr='--stepLength %f', ), + storeGlyphs=dict(argstr='--storeGlyphs ', ), + tracts=dict( + argstr='--tracts %s', + hash_files=False, + ), + tractsWithSecondTensor=dict( + argstr='--tractsWithSecondTensor %s', + hash_files=False, + ), + writeAsciiTracts=dict(argstr='--writeAsciiTracts ', ), + writeUncompressedTracts=dict(argstr='--writeUncompressedTracts ', ), + ) + inputs = UKFTractography.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_UKFTractography_outputs(): + output_map = dict( + tracts=dict(), + tractsWithSecondTensor=dict(), + ) + outputs = UKFTractography.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py new file mode 100644 index 0000000000..b4756a9406 --- 
/dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberprocess.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fiberprocess import fiberprocess + + +def test_fiberprocess_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + displacement_field=dict(argstr='--displacement_field %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict(argstr='--fiber_file %s', ), + fiber_output=dict( + argstr='--fiber_output %s', + hash_files=False, + ), + fiber_radius=dict(argstr='--fiber_radius %f', ), + h_field=dict(argstr='--h_field %s', ), + index_space=dict(argstr='--index_space ', ), + noDataChange=dict(argstr='--noDataChange ', ), + no_warp=dict(argstr='--no_warp ', ), + saveProperties=dict(argstr='--saveProperties ', ), + tensor_volume=dict(argstr='--tensor_volume %s', ), + verbose=dict(argstr='--verbose ', ), + voxel_label=dict(argstr='--voxel_label %d', ), + voxelize=dict( + argstr='--voxelize %s', + hash_files=False, + ), + voxelize_count_fibers=dict(argstr='--voxelize_count_fibers ', ), + ) + inputs = fiberprocess.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_fiberprocess_outputs(): + output_map = dict( + fiber_output=dict(), + voxelize=dict(), + ) + outputs = fiberprocess.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py new file mode 100644 index 0000000000..d6f3a5cd50 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fiberstats.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..commandlineonly import fiberstats + + +def test_fiberstats_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fiber_file=dict(argstr='--fiber_file %s', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = fiberstats.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_fiberstats_outputs(): + output_map = dict() + outputs = fiberstats.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py new file mode 100644 index 0000000000..33e57c0ca3 --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/tests/test_auto_fibertrack.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..fibertrack import fibertrack + + +def test_fibertrack_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + forbidden_label=dict(argstr='--forbidden_label %d', ), + force=dict(argstr='--force ', ), + input_roi_file=dict(argstr='--input_roi_file %s', ), + 
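# Note: hash_files=False on output-path inputs here (e.g. fiber_output,
# voxelize, output_fiber_file) tells nipype not to hash file contents when
# computing an interface's input hash, since those files do not exist
# before the interface runs.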
input_tensor_file=dict(argstr='--input_tensor_file %s', ), + max_angle=dict(argstr='--max_angle %f', ), + min_fa=dict(argstr='--min_fa %f', ), + output_fiber_file=dict( + argstr='--output_fiber_file %s', + hash_files=False, + ), + really_verbose=dict(argstr='--really_verbose ', ), + source_label=dict(argstr='--source_label %d', ), + step_size=dict(argstr='--step_size %f', ), + target_label=dict(argstr='--target_label %d', ), + verbose=dict(argstr='--verbose ', ), + whole_brain=dict(argstr='--whole_brain ', ), + ) + inputs = fibertrack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_fibertrack_outputs(): + output_map = dict(output_fiber_file=dict(), ) + outputs = fibertrack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py new file mode 100644 index 0000000000..11971dbb6d --- /dev/null +++ b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ....base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class UKFTractographyInputSpec(CommandLineInputSpec): + dwiFile = File(desc="Input DWI volume", exists=True, argstr="--dwiFile %s") + seedsFile = File( + desc= + "Seeds for diffusion. If not specified, full brain tractography will be performed, and the algorithm will start from every voxel in the brain mask where the Generalized Anisotropy is bigger than 0.18", + exists=True, + argstr="--seedsFile %s") + labels = InputMultiPath( + traits.Int, + desc="A vector of the ROI labels to be used", + sep=",", + argstr="--labels %s") + maskFile = File( + desc="Mask for diffusion tractography", + exists=True, + argstr="--maskFile %s") + tracts = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Tracts generated, with first tensor output", + argstr="--tracts %s") + writeAsciiTracts = traits.Bool( + desc="Write tract file as a VTK binary data file", + argstr="--writeAsciiTracts ") + writeUncompressedTracts = traits.Bool( + desc="Write tract file as a VTK uncompressed data file", + argstr="--writeUncompressedTracts ") + seedsPerVoxel = traits.Int( + desc= + " Each seed generates a fiber, thus using more seeds generates more fibers. In general use 1 or 2 seeds, and for a more thorough result use 5 or 10 (depending on your machine this may take up to 2 days to run). ", + argstr="--seedsPerVoxel %d") + numTensor = traits.Enum( + "1", "2", desc="Number of tensors used", argstr="--numTensor %s") + freeWater = traits.Bool( + desc= + "Adds a term for free water diffusion to the model. (Note for experts: if checked, the 1T simple model is forced) ", + argstr="--freeWater ") + recordFA = traits.Bool( + desc= + "Whether to store FA. Attaches field 'FA', and 'FA2' for 2-tensor case to fiber. ", + argstr="--recordFA ") + recordFreeWater = traits.Bool( + desc= + "Whether to store the fraction of free water.
Attaches field 'FreeWater' to fiber.", + argstr="--recordFreeWater ") + recordTrace = traits.Bool( + desc= + "Whether to store Trace. Attaches field 'Trace', and 'Trace2' for 2-tensor case to fiber.", + argstr="--recordTrace ") + recordTensors = traits.Bool( + desc= + "Recording the tensors enables Slicer to color the fiber bundles by FA, orientation, and so on. The fields will be called 'TensorN', where N is the tensor number. ", + argstr="--recordTensors ") + recordNMSE = traits.Bool( + desc="Whether to store NMSE. Attaches field 'NMSE' to fiber. ", + argstr="--recordNMSE ") + recordState = traits.Bool( + desc= + "Whether to attach the states to the fiber. Will generate field 'state'.", + argstr="--recordState ") + recordCovariance = traits.Bool( + desc= + "Whether to store the covariance. Will generate field 'covariance' in fiber.", + argstr="--recordCovariance ") + recordLength = traits.Float( + desc="Record length of tractography, in millimeters", + argstr="--recordLength %f") + minFA = traits.Float( + desc= + "Abort the tractography when the Fractional Anisotropy is less than this value", + argstr="--minFA %f") + minGA = traits.Float( + desc= + "Abort the tractography when the Generalized Anisotropy is less than this value", + argstr="--minGA %f") + fullTensorModel = traits.Bool( + desc= + "Whether to use the full tensor model. If unchecked, use the default simple tensor model", + argstr="--fullTensorModel ") + numThreads = traits.Int( + desc= + "Number of threads used during computation. Set to the number of cores on your workstation for optimal speed. If left undefined the number of cores detected will be used. ", + argstr="--numThreads %d") + stepLength = traits.Float( + desc="Step length of tractography, in millimeters", + argstr="--stepLength %f") + maxHalfFiberLength = traits.Float( + desc= + "The max length limit of the half fibers generated during tractography. Here the fiber is \'half\' because the tractography goes in only one direction from one seed point at a time", + argstr="--maxHalfFiberLength %f") + seedFALimit = traits.Float( + desc="Seed points whose FA are below this value are excluded", + argstr="--seedFALimit %f") + Qm = traits.Float( + desc="Process noise for angles/direction", argstr="--Qm %f") + Ql = traits.Float(desc="Process noise for eigenvalues", argstr="--Ql %f") + Qw = traits.Float( + desc= + "Process noise for free water weights, ignored if no free water estimation", + argstr="--Qw %f") + Rs = traits.Float(desc="Measurement noise", argstr="--Rs %f") + maxBranchingAngle = traits.Float( + desc= + "Maximum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle). Branching is suppressed when this maxBranchingAngle is set to 0.0", + argstr="--maxBranchingAngle %f") + minBranchingAngle = traits.Float( + desc= + "Minimum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle)", + argstr="--minBranchingAngle %f") + tractsWithSecondTensor = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Tracts generated, with second tensor output (if there is one)", + argstr="--tractsWithSecondTensor %s") + storeGlyphs = traits.Bool( + desc= + "Store tensors' main directions as two-point lines in a separate file named glyphs_{tracts}.
When using multiple tensors, only the major tensors' main directions are stored", + argstr="--storeGlyphs ") + + +class UKFTractographyOutputSpec(TraitedSpec): + tracts = File( + desc="Tracts generated, with first tensor output", exists=True) + tractsWithSecondTensor = File( + desc="Tracts generated, with second tensor output (if there is one)", + exists=True) + + +class UKFTractography(SEMLikeCommandLine): + """title: UKF Tractography + +category: Diffusion.Tractography + +description: This module traces fibers in a DWI Volume using the multiple tensor unscented Kalman Filter methodology. For more information, check the documentation. + +version: 1.0 + +documentation-url: http://www.nitrc.org/plugins/mwiki/index.php/ukftractography:MainPage + +contributor: Yogesh Rathi, Stefan Lienhard, Yinpeng Li, Martin Styner, Ipek Oguz, Yundi Shi, Christian Baumgartner, Kent Williams, Hans Johnson, Peter Savadjiev, Carl-Fredrik Westin. + +acknowledgements: The development of this module was supported by NIH grants R01 MH097979 (PI Rathi), R01 MH092862 (PIs Westin and Verma), U01 NS083223 (PI Westin), R01 MH074794 (PI Westin) and P41 EB015902 (PI Kikinis). + +""" + + input_spec = UKFTractographyInputSpec + output_spec = UKFTractographyOutputSpec + _cmd = " UKFTractography " + _outputs_filenames = { + 'tracts': 'tracts.vtp', + 'tractsWithSecondTensor': 'tractsWithSecondTensor.vtp' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/featurecreator.py b/nipype/interfaces/semtools/featurecreator.py new file mode 100644 index 0000000000..69ff2d675c --- /dev/null +++ b/nipype/interfaces/semtools/featurecreator.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class GenerateCsfClippedFromClassifiedImageInputSpec(CommandLineInputSpec): + inputCassifiedVolume = File( + desc="Required: input tissue label image", + exists=True, + argstr="--inputCassifiedVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class GenerateCsfClippedFromClassifiedImageOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class GenerateCsfClippedFromClassifiedImage(SEMLikeCommandLine): + """title: GenerateCsfClippedFromClassifiedImage + +category: FeatureCreator + +description: Get the distance from a voxel to the nearest voxel of a given tissue type. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was written by Hans J. Johnson.
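A minimal usage sketch (hypothetical file names; assumes the BRAINS GenerateCsfClippedFromClassifiedImage executable is available):

    from nipype.interfaces.semtools.featurecreator import GenerateCsfClippedFromClassifiedImage
    gen = GenerateCsfClippedFromClassifiedImage()
    gen.inputs.inputCassifiedVolume = 'tissue_labels.nii'  # hypothetical label image
    gen.inputs.outputVolume = 'csf_clipped.nii'
    gen.run()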
+ +""" + + input_spec = GenerateCsfClippedFromClassifiedImageInputSpec + output_spec = GenerateCsfClippedFromClassifiedImageOutputSpec + _cmd = " GenerateCsfClippedFromClassifiedImage " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/__init__.py b/nipype/interfaces/semtools/filtering/__init__.py new file mode 100644 index 0000000000..1e69233303 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .denoising import UnbiasedNonLocalMeans +from .featuredetection import ( + GenerateSummedGradientImage, CannySegmentationLevelSetImageFilter, + DilateImage, TextureFromNoiseImageFilter, FlippedDifference, ErodeImage, + GenerateBrainClippedImage, NeighborhoodMedian, GenerateTestImage, + NeighborhoodMean, HammerAttributeCreator, TextureMeasureFilter, DilateMask, + DumpBinaryTrainingVectors, DistanceMaps, STAPLEAnalysis, + GradientAnisotropicDiffusionImageFilter, CannyEdge) diff --git a/nipype/interfaces/semtools/filtering/denoising.py b/nipype/interfaces/semtools/filtering/denoising.py new file mode 100644 index 0000000000..97d687c512 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/denoising.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class UnbiasedNonLocalMeansInputSpec(CommandLineInputSpec): + sigma = traits.Float( + desc= + "The root power of noise (sigma) in the complex Gaussian process the Rician comes from. If it is underestimated, the algorithm fails to remove the noise. If it is overestimated, over-blurring is likely to occur.", + argstr="--sigma %f") + rs = InputMultiPath( + traits.Int, + desc= + "The algorithm search for similar voxels in a neighborhood of this radius (radii larger than 5,5,5 are very slow, and the results can be only marginally better. Small radii may fail to effectively remove the noise).", + sep=",", + argstr="--rs %s") + rc = InputMultiPath( + traits.Int, + desc= + "Similarity between blocks is computed as the difference between mean values and gradients. These parameters are computed fitting a hyperplane with LS inside a neighborhood of this size", + sep=",", + argstr="--rc %s") + hp = traits.Float( + desc= + "This parameter is related to noise; the larger the parameter, the more aggressive the filtering. 
Should be near 1, and only values between 0.8 and 1.2 are allowed", + argstr="--hp %f") + ps = traits.Float( + desc= + "To accelerate computations, preselection is used: if the normalized difference is above this threshold, the voxel will be discarded (not used for average)", + argstr="--ps %f") + inputVolume = File( + position=-2, desc="Input MRI volume.", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output (filtered) MRI volume.", + argstr="%s") + + +class UnbiasedNonLocalMeansOutputSpec(TraitedSpec): + outputVolume = File( + position=-1, desc="Output (filtered) MRI volume.", exists=True) + + +class UnbiasedNonLocalMeans(SEMLikeCommandLine): + """title: Unbiased NLM for MRI + +category: Filtering.Denoising + +description: This module implements a fast version of the popular Non-Local Means filter for image denoising. This algorithm filters each pixel as a weighted average of its neighbors in a large vicinity. The weights are computed based on the similarity of each neighbor with the voxel to be denoised. + In the original formulation a patch with a certain radius is centered in each of the voxels, and the Mean Squared Error between each pair of corresponding voxels is computed. In this implementation, only the mean value and gradient components are compared. This, together with an efficient memory management, can attain a speed-up of nearly 20x. Besides, the filtering is more accurate than the original with poor SNR. + This code is intended for its use with MRI (or any other Rician-distributed modality): the second order moment is estimated, then we subtract twice the squared power of noise, and finally we take the square root of the result to remove the Rician bias. + The original implementation of the NLM filter may be found in: + A. Buades, B. Coll, J. Morel, "A review of image denoising algorithms, with a new one", Multiscale Modelling and Simulation 4(2): 490-530. 2005. + The correction of the Rician bias is described in the following reference (among others): + S. Aja-Fernandez, K. Krissian, "An unbiased Non-Local Means scheme for DWI filtering", in: Proceedings of the MICCAI Workshop on Computational Diffusion MRI, 2008, pp. 277-284. + The whole description of this version may be found in the following paper (please, cite it if you are willing to use this software): + A. Tristan-Vega, V. Garcia Perez, S. Aja-Fernandez, and C.-F. Westin, "Efficient and Robust Nonlocal Means Denoising of MR Data Based on Salient Features Matching", Computer Methods and Programs in Biomedicine. (Accepted for publication) 2011.
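A minimal denoising sketch consistent with the parameters above (hypothetical file name and noise estimate; rs/rc are rendered as comma-separated radii):

    from nipype.interfaces.semtools.filtering.denoising import UnbiasedNonLocalMeans
    nlm = UnbiasedNonLocalMeans()
    nlm.inputs.inputVolume = 't1.nii'   # hypothetical noisy MRI volume
    nlm.inputs.sigma = 25.0             # assumed noise power estimate
    nlm.inputs.rs = [3, 3, 3]           # search radius, emitted as --rs 3,3,3
    nlm.inputs.rc = [1, 1, 1]           # comparison radius, emitted as --rc 1,1,1
    nlm.inputs.outputVolume = 't1_denoised.nii'
    nlm.run()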
+ +version: 0.0.1.$Revision: 1 $(beta) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:UnbiasedNonLocalMeans-Documentation-3.6 + +contributor: Antonio Tristan Vega, Veronica Garcia-Perez, Santiago Aja-Fernandez, Carl-Fredrik Westin + +acknowledgements: Supported by grant number FMECD-2010/71131616E from the Spanish Ministry of Education/Fulbright Committee + +""" + + input_spec = UnbiasedNonLocalMeansInputSpec + output_spec = UnbiasedNonLocalMeansOutputSpec + _cmd = " UnbiasedNonLocalMeans " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/featuredetection.py b/nipype/interfaces/semtools/filtering/featuredetection.py new file mode 100644 index 0000000000..ca4973ab43 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/featuredetection.py @@ -0,0 +1,830 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class GenerateSummedGradientImageInputSpec(CommandLineInputSpec): + inputVolume1 = File( + desc="input volume 1, usually t1 image", + exists=True, + argstr="--inputVolume1 %s") + inputVolume2 = File( + desc="input volume 2, usually t2 image", + exists=True, + argstr="--inputVolume2 %s") + outputFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="(required) output file name", + argstr="--outputFileName %s") + MaximumGradient = traits.Bool( + desc= + "If this flag is set, it will compute the maximum gradient between the two input volumes instead of their sum.", + argstr="--MaximumGradient ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class GenerateSummedGradientImageOutputSpec(TraitedSpec): + outputFileName = File(desc="(required) output file name", exists=True) + + +class GenerateSummedGradientImage(SEMLikeCommandLine): + """title: GenerateSummedGradient + +category: Filtering.FeatureDetection + +description: Automatic FeatureImages using neural networks + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Greg Harris, Eun Young Kim + +""" + + input_spec = GenerateSummedGradientImageInputSpec + output_spec = GenerateSummedGradientImageOutputSpec + _cmd = " GenerateSummedGradientImage " + _outputs_filenames = {'outputFileName': 'outputFileName'} + _redirect_x = False + + +class CannySegmentationLevelSetImageFilterInputSpec(CommandLineInputSpec): + inputVolume = File(exists=True, argstr="--inputVolume %s") + initialModel = File(exists=True, argstr="--initialModel %s") + outputVolume = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--outputVolume %s") + outputSpeedVolume = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--outputSpeedVolume %s") + cannyThreshold = traits.Float( + desc="Canny Threshold Value", argstr="--cannyThreshold %f") + cannyVariance = traits.Float( + desc="Canny variance", argstr="--cannyVariance %f") + advectionWeight = traits.Float( + desc= + "Controls the smoothness of the resulting mask, small numbers are smoother, large numbers allow sharper corners.
", + argstr="--advectionWeight %f") + initialModelIsovalue = traits.Float( + desc= + "The identification of the input model iso-surface. (for a binary image with 0s and 1s use 0.5) (for a binary image with 0s and 255's use 127.5).", + argstr="--initialModelIsovalue %f") + maxIterations = traits.Int(desc="The", argstr="--maxIterations %d") + + +class CannySegmentationLevelSetImageFilterOutputSpec(TraitedSpec): + outputVolume = File(exists=True) + outputSpeedVolume = File(exists=True) + + +class CannySegmentationLevelSetImageFilter(SEMLikeCommandLine): + """title: Canny Level Set Image Filter + +category: Filtering.FeatureDetection + +description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. + +version: 0.3.0 + +license: CC + +contributor: Regina Kim + +acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + +""" + + input_spec = CannySegmentationLevelSetImageFilterInputSpec + output_spec = CannySegmentationLevelSetImageFilterOutputSpec + _cmd = " CannySegmentationLevelSetImageFilter " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputSpeedVolume': 'outputSpeedVolume.nii' + } + _redirect_x = False + + +class DilateImageInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + inputRadius = traits.Int( + desc="Required: input neighborhood radius", argstr="--inputRadius %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class DilateImageOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class DilateImage(SEMLikeCommandLine): + """title: Dilate Image + +category: Filtering.FeatureDetection + +description: Uses mathematical morphology to dilate the input images. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + +""" + + input_spec = DilateImageInputSpec + output_spec = DilateImageOutputSpec + _cmd = " DilateImage " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class TextureFromNoiseImageFilterInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputRadius = traits.Int( + desc="Required: input neighborhood radius", argstr="--inputRadius %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class TextureFromNoiseImageFilterOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class TextureFromNoiseImageFilter(SEMLikeCommandLine): + """title: TextureFromNoiseImageFilter + +category: Filtering.FeatureDetection + +description: Calculate the local noise in an image. 
+ +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Eunyoung Regina Kim + +""" + + input_spec = TextureFromNoiseImageFilterInputSpec + output_spec = TextureFromNoiseImageFilterOutputSpec + _cmd = " TextureFromNoiseImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class FlippedDifferenceInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class FlippedDifferenceOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class FlippedDifference(SEMLikeCommandLine): + """title: Flip Image + +category: Filtering.FeatureDetection + +description: Difference between an image and the axially flipped version of that image. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + +""" + + input_spec = FlippedDifferenceInputSpec + output_spec = FlippedDifferenceOutputSpec + _cmd = " FlippedDifference " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class ErodeImageInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + inputRadius = traits.Int( + desc="Required: input neighborhood radius", argstr="--inputRadius %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class ErodeImageOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class ErodeImage(SEMLikeCommandLine): + """title: Erode Image + +category: Filtering.FeatureDetection + +description: Uses mathematical morphology to erode the input images. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
+ +""" + + input_spec = ErodeImageInputSpec + output_spec = ErodeImageOutputSpec + _cmd = " ErodeImage " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class GenerateBrainClippedImageInputSpec(CommandLineInputSpec): + inputImg = File( + desc="input volume 1, usally t1 image", + exists=True, + argstr="--inputImg %s") + inputMsk = File( + desc="input volume 2, usally t2 image", + exists=True, + argstr="--inputMsk %s") + outputFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="(required) output file name", + argstr="--outputFileName %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class GenerateBrainClippedImageOutputSpec(TraitedSpec): + outputFileName = File(desc="(required) output file name", exists=True) + + +class GenerateBrainClippedImage(SEMLikeCommandLine): + """title: GenerateBrainClippedImage + +category: Filtering.FeatureDetection + +description: Automatic FeatureImages using neural networks + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Eun Young Kim + +""" + + input_spec = GenerateBrainClippedImageInputSpec + output_spec = GenerateBrainClippedImageOutputSpec + _cmd = " GenerateBrainClippedImage " + _outputs_filenames = {'outputFileName': 'outputFileName'} + _redirect_x = False + + +class NeighborhoodMedianInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + inputRadius = traits.Int( + desc="Required: input neighborhood radius", argstr="--inputRadius %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class NeighborhoodMedianOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class NeighborhoodMedian(SEMLikeCommandLine): + """title: Neighborhood Median + +category: Filtering.FeatureDetection + +description: Calculates the median, for the given neighborhood size, at each voxel of the input image. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
+ +""" + + input_spec = NeighborhoodMedianInputSpec + output_spec = NeighborhoodMedianOutputSpec + _cmd = " NeighborhoodMedian " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class GenerateTestImageInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="input volume 1, usally t1 image", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="(required) output file name", + argstr="--outputVolume %s") + lowerBoundOfOutputVolume = traits.Float( + argstr="--lowerBoundOfOutputVolume %f") + upperBoundOfOutputVolume = traits.Float( + argstr="--upperBoundOfOutputVolume %f") + outputVolumeSize = traits.Float( + desc="output Volume Size", argstr="--outputVolumeSize %f") + + +class GenerateTestImageOutputSpec(TraitedSpec): + outputVolume = File(desc="(required) output file name", exists=True) + + +class GenerateTestImage(SEMLikeCommandLine): + """title: DownSampleImage + +category: Filtering.FeatureDetection + +description: Down sample image for testing + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Eun Young Kim + +""" + + input_spec = GenerateTestImageInputSpec + output_spec = GenerateTestImageOutputSpec + _cmd = " GenerateTestImage " + _outputs_filenames = {'outputVolume': 'outputVolume'} + _redirect_x = False + + +class NeighborhoodMeanInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + inputRadius = traits.Int( + desc="Required: input neighborhood radius", argstr="--inputRadius %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class NeighborhoodMeanOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class NeighborhoodMean(SEMLikeCommandLine): + """title: Neighborhood Mean + +category: Filtering.FeatureDetection + +description: Calculates the mean, for the given neighborhood size, at each voxel of the T1, T2, and FLAIR. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
+ +""" + + input_spec = NeighborhoodMeanInputSpec + output_spec = NeighborhoodMeanOutputSpec + _cmd = " NeighborhoodMean " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class HammerAttributeCreatorInputSpec(CommandLineInputSpec): + Scale = traits.Int(desc="Determine Scale of Ball", argstr="--Scale %d") + Strength = traits.Float( + desc="Determine Strength of Edges", argstr="--Strength %f") + inputGMVolume = File( + desc="Required: input grey matter posterior image", + exists=True, + argstr="--inputGMVolume %s") + inputWMVolume = File( + desc="Required: input white matter posterior image", + exists=True, + argstr="--inputWMVolume %s") + inputCSFVolume = File( + desc="Required: input CSF posterior image", + exists=True, + argstr="--inputCSFVolume %s") + outputVolumeBase = traits.Str( + desc= + "Required: output image base name to be appended for each feature vector.", + argstr="--outputVolumeBase %s") + + +class HammerAttributeCreatorOutputSpec(TraitedSpec): + pass + + +class HammerAttributeCreator(SEMLikeCommandLine): + """title: HAMMER Feature Vectors + +category: Filtering.FeatureDetection + +description: Create the feature vectors used by HAMMER. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This was extracted from the Hammer Registration source code, and wrapped up by Hans J. Johnson. + +""" + + input_spec = HammerAttributeCreatorInputSpec + output_spec = HammerAttributeCreatorOutputSpec + _cmd = " HammerAttributeCreator " + _outputs_filenames = {} + _redirect_x = False + + +class TextureMeasureFilterInputSpec(CommandLineInputSpec): + inputVolume = File(exists=True, argstr="--inputVolume %s") + inputMaskVolume = File(exists=True, argstr="--inputMaskVolume %s") + distance = traits.Int(argstr="--distance %d") + insideROIValue = traits.Float(argstr="--insideROIValue %f") + outputFilename = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--outputFilename %s") + + +class TextureMeasureFilterOutputSpec(TraitedSpec): + outputFilename = File(exists=True) + + +class TextureMeasureFilter(SEMLikeCommandLine): + """title: Canny Level Set Image Filter + +category: Filtering.FeatureDetection + +description: The CannySegmentationLevelSet is commonly used to refine a manually generated manual mask. + +version: 0.3.0 + +license: CC + +contributor: Regina Kim + +acknowledgements: This command module was derived from Insight/Examples/Segmentation/CannySegmentationLevelSetImageFilter.cxx (copyright) Insight Software Consortium. See http://wiki.na-mic.org/Wiki/index.php/Slicer3:Execution_Model_Documentation for more detailed descriptions. + +""" + + input_spec = TextureMeasureFilterInputSpec + output_spec = TextureMeasureFilterOutputSpec + _cmd = " TextureMeasureFilter " + _outputs_filenames = {'outputFilename': 'outputFilename'} + _redirect_x = False + + +class DilateMaskInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + inputBinaryVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputBinaryVolume %s") + sizeStructuralElement = traits.Int( + desc= + "size of structural element. 
sizeStructuralElement=1 means a 3x3x3 structuring element for 3D", + argstr="--sizeStructuralElement %d") + lowerThreshold = traits.Float( + desc="Required: lowerThreshold value", argstr="--lowerThreshold %f") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class DilateMaskOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class DilateMask(SEMLikeCommandLine): + """title: Dilate Image + +category: Filtering.FeatureDetection + +description: Uses mathematical morphology to dilate the input images. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + +""" + + input_spec = DilateMaskInputSpec + output_spec = DilateMaskOutputSpec + _cmd = " DilateMask " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class DumpBinaryTrainingVectorsInputSpec(CommandLineInputSpec): + inputHeaderFilename = File( + desc="Required: input header file name", + exists=True, + argstr="--inputHeaderFilename %s") + inputVectorFilename = File( + desc="Required: input vector filename", + exists=True, + argstr="--inputVectorFilename %s") + + +class DumpBinaryTrainingVectorsOutputSpec(TraitedSpec): + pass + + +class DumpBinaryTrainingVectors(SEMLikeCommandLine): + """title: Dump Binary Training Vectors + +category: Filtering.FeatureDetection + +description: Prints the contents of a binary training vector file for inspection. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + +""" + + input_spec = DumpBinaryTrainingVectorsInputSpec + output_spec = DumpBinaryTrainingVectorsOutputSpec + _cmd = " DumpBinaryTrainingVectors " + _outputs_filenames = {} + _redirect_x = False + + +class DistanceMapsInputSpec(CommandLineInputSpec): + inputLabelVolume = File( + desc="Required: input tissue label image", + exists=True, + argstr="--inputLabelVolume %s") + inputMaskVolume = File( + desc="Required: input brain mask image", + exists=True, + argstr="--inputMaskVolume %s") + inputTissueLabel = traits.Int( + desc= + "Required: input integer value of tissue type used to calculate distance", + argstr="--inputTissueLabel %d") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class DistanceMapsOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class DistanceMaps(SEMLikeCommandLine): + """title: Maurer Distance + +category: Filtering.FeatureDetection + +description: Get the distance from a voxel to the nearest voxel of a given tissue type. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. 
+ +""" + + input_spec = DistanceMapsInputSpec + output_spec = DistanceMapsOutputSpec + _cmd = " DistanceMaps " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class STAPLEAnalysisInputSpec(CommandLineInputSpec): + inputDimension = traits.Int( + desc="Required: input image Dimension 2 or 3", + argstr="--inputDimension %d") + inputLabelVolume = InputMultiPath( + File(exists=True), + desc="Required: input label volume", + argstr="--inputLabelVolume %s...") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class STAPLEAnalysisOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class STAPLEAnalysis(SEMLikeCommandLine): + """title: Dilate Image + +category: Filtering.FeatureDetection + +description: Uses mathematical morphology to dilate the input images. + +version: 0.1.0.$Revision: 1 $(alpha) + +documentation-url: http:://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Mark Scully and Jeremy Bockholt. + +""" + + input_spec = STAPLEAnalysisInputSpec + output_spec = STAPLEAnalysisOutputSpec + _cmd = " STAPLEAnalysis " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class GradientAnisotropicDiffusionImageFilterInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input image", exists=True, argstr="--inputVolume %s") + numberOfIterations = traits.Int( + desc="Optional value for number of Iterations", + argstr="--numberOfIterations %d") + timeStep = traits.Float( + desc="Time step for diffusion process", argstr="--timeStep %f") + conductance = traits.Float( + desc="Conductance for diffusion process", argstr="--conductance %f") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class GradientAnisotropicDiffusionImageFilterOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class GradientAnisotropicDiffusionImageFilter(SEMLikeCommandLine): + """title: GradientAnisopropicDiffusionFilter + +category: Filtering.FeatureDetection + +description: Image Smoothing using Gradient Anisotropic Diffuesion Filer + +contributor: This tool was developed by Eun Young Kim by modifying ITK Example + +""" + + input_spec = GradientAnisotropicDiffusionImageFilterInputSpec + output_spec = GradientAnisotropicDiffusionImageFilterOutputSpec + _cmd = " GradientAnisotropicDiffusionImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class CannyEdgeInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Required: input tissue label image", + exists=True, + argstr="--inputVolume %s") + variance = traits.Float( + desc= + "Variance and Maximum error are used in the Gaussian smoothing of the input image. See itkDiscreteGaussianImageFilter for information on these parameters.", + argstr="--variance %f") + upperThreshold = traits.Float( + desc= + "Threshold is the lowest allowed value in the output image. Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", + argstr="--upperThreshold %f") + lowerThreshold = traits.Float( + desc= + "Threshold is the lowest allowed value in the output image. 
Its data type is the same as the data type of the output image. Any values below the Threshold level will be replaced with the OutsideValue parameter value, whose default is zero. ", + argstr="--lowerThreshold %f") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Required: output image", + argstr="--outputVolume %s") + + +class CannyEdgeOutputSpec(TraitedSpec): + outputVolume = File(desc="Required: output image", exists=True) + + +class CannyEdge(SEMLikeCommandLine): + """title: Canny Edge Detection + +category: Filtering.FeatureDetection + +description: Runs Canny edge detection on the input image. + +version: 0.1.0.(alpha) + +documentation-url: http://www.na-mic.org/ + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was written by Hans J. Johnson. + +""" + + input_spec = CannyEdgeInputSpec + output_spec = CannyEdgeOutputSpec + _cmd = " CannyEdge " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/filtering/tests/__init__.py b/nipype/interfaces/semtools/filtering/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py new file mode 100644 index 0000000000..cfbee7449c --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannyEdge.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import CannyEdge + + +def test_CannyEdge_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + lowerThreshold=dict(argstr='--lowerThreshold %f', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + upperThreshold=dict(argstr='--upperThreshold %f', ), + variance=dict(argstr='--variance %f', ), + ) + inputs = CannyEdge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CannyEdge_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = CannyEdge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py new file mode 100644 index 0000000000..b4196db823 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_CannySegmentationLevelSetImageFilter.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import CannySegmentationLevelSetImageFilter + + +def test_CannySegmentationLevelSetImageFilter_inputs(): + input_map = dict( + advectionWeight=dict(argstr='--advectionWeight %f', ), + args=dict(argstr='%s', ), + cannyThreshold=dict(argstr='--cannyThreshold %f', ), + cannyVariance=dict(argstr='--cannyVariance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
initialModel=dict(argstr='--initialModel %s', ), + initialModelIsovalue=dict(argstr='--initialModelIsovalue %f', ), + inputVolume=dict(argstr='--inputVolume %s', ), + maxIterations=dict(argstr='--maxIterations %d', ), + outputSpeedVolume=dict( + argstr='--outputSpeedVolume %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = CannySegmentationLevelSetImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CannySegmentationLevelSetImageFilter_outputs(): + output_map = dict( + outputSpeedVolume=dict(), + outputVolume=dict(), + ) + outputs = CannySegmentationLevelSetImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py new file mode 100644 index 0000000000..7945ec8ade --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateImage.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import DilateImage + + +def test_DilateImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputRadius=dict(argstr='--inputRadius %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = DilateImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DilateImage_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = DilateImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py new file mode 100644 index 0000000000..6bd6235e80 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DilateMask.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import DilateMask + + +def test_DilateMask_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + lowerThreshold=dict(argstr='--lowerThreshold %f', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + sizeStructuralElement=dict(argstr='--sizeStructuralElement %d', ), + ) + inputs = DilateMask.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DilateMask_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = DilateMask.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert 
getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py new file mode 100644 index 0000000000..f7aaf722c4 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DistanceMaps.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import DistanceMaps + + +def test_DistanceMaps_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLabelVolume=dict(argstr='--inputLabelVolume %s', ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputTissueLabel=dict(argstr='--inputTissueLabel %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = DistanceMaps.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DistanceMaps_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = DistanceMaps.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py new file mode 100644 index 0000000000..320bb76b77 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_DumpBinaryTrainingVectors.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import DumpBinaryTrainingVectors + + +def test_DumpBinaryTrainingVectors_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputHeaderFilename=dict(argstr='--inputHeaderFilename %s', ), + inputVectorFilename=dict(argstr='--inputVectorFilename %s', ), + ) + inputs = DumpBinaryTrainingVectors.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DumpBinaryTrainingVectors_outputs(): + output_map = dict() + outputs = DumpBinaryTrainingVectors.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py new file mode 100644 index 0000000000..9e096b0062 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_ErodeImage.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import ErodeImage + + +def test_ErodeImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputRadius=dict(argstr='--inputRadius %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = ErodeImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ErodeImage_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = ErodeImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py new file mode 100644 index 0000000000..539660f73b --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_FlippedDifference.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import FlippedDifference + + +def test_FlippedDifference_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = FlippedDifference.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FlippedDifference_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = FlippedDifference.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py new file mode 100644 index 0000000000..0a211f23d2 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateBrainClippedImage.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import GenerateBrainClippedImage + + +def test_GenerateBrainClippedImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputImg=dict(argstr='--inputImg %s', ), + inputMsk=dict(argstr='--inputMsk %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputFileName=dict( + argstr='--outputFileName %s', + hash_files=False, + ), + ) + inputs = GenerateBrainClippedImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateBrainClippedImage_outputs(): + output_map = dict(outputFileName=dict(), ) + outputs = GenerateBrainClippedImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py new file mode 100644 index 0000000000..58d3f35c5a --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateSummedGradientImage.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import GenerateSummedGradientImage + + +def 
test_GenerateSummedGradientImage_inputs(): + input_map = dict( + MaximumGradient=dict(argstr='--MaximumGradient ', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict(argstr='--inputVolume1 %s', ), + inputVolume2=dict(argstr='--inputVolume2 %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputFileName=dict( + argstr='--outputFileName %s', + hash_files=False, + ), + ) + inputs = GenerateSummedGradientImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateSummedGradientImage_outputs(): + output_map = dict(outputFileName=dict(), ) + outputs = GenerateSummedGradientImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py new file mode 100644 index 0000000000..1348e61d4b --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GenerateTestImage.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import GenerateTestImage + + +def test_GenerateTestImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + lowerBoundOfOutputVolume=dict( + argstr='--lowerBoundOfOutputVolume %f', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + outputVolumeSize=dict(argstr='--outputVolumeSize %f', ), + upperBoundOfOutputVolume=dict( + argstr='--upperBoundOfOutputVolume %f', ), + ) + inputs = GenerateTestImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateTestImage_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = GenerateTestImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py new file mode 100644 index 0000000000..27ebb18d29 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_GradientAnisotropicDiffusionImageFilter.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import GradientAnisotropicDiffusionImageFilter + + +def test_GradientAnisotropicDiffusionImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + conductance=dict(argstr='--conductance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfIterations=dict(argstr='--numberOfIterations %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + timeStep=dict(argstr='--timeStep %f', ), + ) + inputs = GradientAnisotropicDiffusionImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GradientAnisotropicDiffusionImageFilter_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = GradientAnisotropicDiffusionImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py new file mode 100644 index 0000000000..9ad82598bb --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_HammerAttributeCreator.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import HammerAttributeCreator + + +def test_HammerAttributeCreator_inputs(): + input_map = dict( + Scale=dict(argstr='--Scale %d', ), + Strength=dict(argstr='--Strength %f', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCSFVolume=dict(argstr='--inputCSFVolume %s', ), + inputGMVolume=dict(argstr='--inputGMVolume %s', ), + inputWMVolume=dict(argstr='--inputWMVolume %s', ), + outputVolumeBase=dict(argstr='--outputVolumeBase %s', ), + ) + inputs = HammerAttributeCreator.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_HammerAttributeCreator_outputs(): + output_map = dict() + outputs = HammerAttributeCreator.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py new file mode 100644 index 0000000000..c6b1ef20a9 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMean.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import NeighborhoodMean + + +def test_NeighborhoodMean_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputRadius=dict(argstr='--inputRadius %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = NeighborhoodMean.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NeighborhoodMean_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = NeighborhoodMean.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py new file mode 100644 index 0000000000..2c2b2d585f --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_NeighborhoodMedian.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import 
unicode_literals +from ..featuredetection import NeighborhoodMedian + + +def test_NeighborhoodMedian_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputRadius=dict(argstr='--inputRadius %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = NeighborhoodMedian.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NeighborhoodMedian_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = NeighborhoodMedian.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py new file mode 100644 index 0000000000..75bb83315f --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_STAPLEAnalysis.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import STAPLEAnalysis + + +def test_STAPLEAnalysis_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDimension=dict(argstr='--inputDimension %d', ), + inputLabelVolume=dict(argstr='--inputLabelVolume %s...', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = STAPLEAnalysis.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_STAPLEAnalysis_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = STAPLEAnalysis.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py new file mode 100644 index 0000000000..cd4b4a1af2 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureFromNoiseImageFilter.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import TextureFromNoiseImageFilter + + +def test_TextureFromNoiseImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputRadius=dict(argstr='--inputRadius %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = TextureFromNoiseImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TextureFromNoiseImageFilter_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = TextureFromNoiseImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value 
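A minimal usage sketch of one of the interfaces added in this patch, to show how these SEM-like wrappers are driven from nipype. The filenames below are hypothetical, and actually running the interface assumes the BRAINSTools CannyEdge executable is on the PATH:

from nipype.interfaces.semtools.filtering.featuredetection import CannyEdge

canny = CannyEdge()
canny.inputs.inputVolume = 'subject_t1.nii.gz'   # hypothetical input file
canny.inputs.variance = 2.0                      # Gaussian smoothing variance
canny.inputs.upperThreshold = 10.0               # hysteresis thresholds
canny.inputs.lowerThreshold = 5.0
canny.inputs.outputVolume = 'canny_edges.nii.gz'
print(canny.cmdline)   # inspect the generated command line without executing
# result = canny.run() # runs the CannyEdge binary if it is installed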
diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py new file mode 100644 index 0000000000..0ad523c7d0 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_TextureMeasureFilter.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featuredetection import TextureMeasureFilter + + +def test_TextureMeasureFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + distance=dict(argstr='--distance %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + insideROIValue=dict(argstr='--insideROIValue %f', ), + outputFilename=dict( + argstr='--outputFilename %s', + hash_files=False, + ), + ) + inputs = TextureMeasureFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TextureMeasureFilter_outputs(): + output_map = dict(outputFilename=dict(), ) + outputs = TextureMeasureFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py new file mode 100644 index 0000000000..b84cda1fc3 --- /dev/null +++ b/nipype/interfaces/semtools/filtering/tests/test_auto_UnbiasedNonLocalMeans.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import UnbiasedNonLocalMeans + + +def test_UnbiasedNonLocalMeans_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict(argstr='--hp %f', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ps=dict(argstr='--ps %f', ), + rc=dict( + argstr='--rc %s', + sep=',', + ), + rs=dict( + argstr='--rs %s', + sep=',', + ), + sigma=dict(argstr='--sigma %f', ), + ) + inputs = UnbiasedNonLocalMeans.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_UnbiasedNonLocalMeans_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = UnbiasedNonLocalMeans.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/generated.sh b/nipype/interfaces/semtools/generated.sh new file mode 100644 index 0000000000..e167eae6f7 --- /dev/null +++ b/nipype/interfaces/semtools/generated.sh @@ -0,0 +1 @@ +local_generate_classes.py --python_paths=/scratch/johnsonhj/src/NEP-11/NIPYPE --program_paths=/scratch/johnsonhj/src/NEP-11/bin:/usr/local/bin:/opt/ogs/bin/darwin-x64:/bin:/sbin:/usr/local/bin:/usr/bin:/usr/sbin:/usr/texbin:/Shared/sinapse/sharedopt/20120722/Darwin_i386/vv/bin:/usr/texbin:/scratch/johnsonhj/bin --output_path=/scratch/johnsonhj/src/NEP-11/BRAINSTools/AutoWorkup diff --git a/nipype/interfaces/semtools/legacy/__init__.py 
b/nipype/interfaces/semtools/legacy/__init__.py new file mode 100644 index 0000000000..3309d49d62 --- /dev/null +++ b/nipype/interfaces/semtools/legacy/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .registration import scalartransform diff --git a/nipype/interfaces/semtools/legacy/registration.py b/nipype/interfaces/semtools/legacy/registration.py new file mode 100644 index 0000000000..04bb425e3d --- /dev/null +++ b/nipype/interfaces/semtools/legacy/registration.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class scalartransformInputSpec(CommandLineInputSpec): + input_image = File( + desc="Image to transform", exists=True, argstr="--input_image %s") + output_image = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The transformed image", + argstr="--output_image %s") + transformation = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output file for transformation parameters", + argstr="--transformation %s") + invert = traits.Bool( + desc="Invert transform before applying.", argstr="--invert ") + deformation = File( + desc="Deformation field.", exists=True, argstr="--deformation %s") + h_field = traits.Bool( + desc="The deformation is an h-field.", argstr="--h_field ") + interpolation = traits.Enum( + "nearestneighbor", + "linear", + "cubic", + desc="Interpolation type (nearestneighbor, linear, cubic)", + argstr="--interpolation %s") + + +class scalartransformOutputSpec(TraitedSpec): + output_image = File(desc="The transformed image", exists=True) + transformation = File( + desc="Output file for transformation parameters", exists=True) + + +class scalartransform(SEMLikeCommandLine): + """title: ScalarTransform (DTIProcess) + +category: Legacy.Registration + +version: 1.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/Nightly/Extensions/DTIProcess + +license: Copyright (c) Casey Goodlett. All rights reserved. + See http://www.ia.unc.edu/dev/Copyright.htm for details. + This software is distributed WITHOUT ANY WARRANTY; without even + the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR + PURPOSE. See the above copyright notices for more information. 
+ +contributor: Casey Goodlett + +""" + + input_spec = scalartransformInputSpec + output_spec = scalartransformOutputSpec + _cmd = " scalartransform " + _outputs_filenames = { + 'output_image': 'output_image.nii', + 'transformation': 'transformation' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/legacy/tests/__init__.py b/nipype/interfaces/semtools/legacy/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/legacy/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py new file mode 100644 index 0000000000..f9ff60cfd9 --- /dev/null +++ b/nipype/interfaces/semtools/legacy/tests/test_auto_scalartransform.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import scalartransform + + +def test_scalartransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + deformation=dict(argstr='--deformation %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + h_field=dict(argstr='--h_field ', ), + input_image=dict(argstr='--input_image %s', ), + interpolation=dict(argstr='--interpolation %s', ), + invert=dict(argstr='--invert ', ), + output_image=dict( + argstr='--output_image %s', + hash_files=False, + ), + transformation=dict( + argstr='--transformation %s', + hash_files=False, + ), + ) + inputs = scalartransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_scalartransform_outputs(): + output_map = dict( + output_image=dict(), + transformation=dict(), + ) + outputs = scalartransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/__init__.py b/nipype/interfaces/semtools/registration/__init__.py new file mode 100644 index 0000000000..33bd60ad59 --- /dev/null +++ b/nipype/interfaces/semtools/registration/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .specialized import (VBRAINSDemonWarp, BRAINSDemonWarp, + BRAINSTransformFromFiducials) +from .brainsresample import BRAINSResample +from .brainsfit import BRAINSFit +from .brainsresize import BRAINSResize diff --git a/nipype/interfaces/semtools/registration/brainsfit.py b/nipype/interfaces/semtools/registration/brainsfit.py new file mode 100644 index 0000000000..6142aac418 --- /dev/null +++ b/nipype/interfaces/semtools/registration/brainsfit.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSFitInputSpec(CommandLineInputSpec): + fixedVolume = File( + desc= + "Input fixed image (the moving image will be transformed into this image space).", + exists=True, + argstr="--fixedVolume %s") + movingVolume = File( + desc= + "Input moving image (this image will be transformed into the fixed image space).", + exists=True, + 
argstr="--movingVolume %s") + samplingPercentage = traits.Float( + desc= + "Fraction of voxels of the fixed image that will be used for registration. The number has to be larger than zero and less or equal to one. Higher values increase the computation time but may give more accurate results. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation. The default is 0.002 (use approximately 0.2% of voxels, resulting in 100000 samples in a 512x512x192 volume) to provide a very fast registration in most cases. Typical values range from 0.01 (1%) for low detail images to 0.2 (20%) for high detail images.", + argstr="--samplingPercentage %f") + splineGridSize = InputMultiPath( + traits.Int, + desc= + "Number of BSpline grid subdivisions along each axis of the fixed image, centered on the image space. Values must be 3 or higher for the BSpline to be correctly computed.", + sep=",", + argstr="--splineGridSize %s") + linearTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Output estimated transform - in case the computed transform is not BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--linearTransform %s") + bsplineTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--bsplineTransform %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", + argstr="--outputVolume %s") + initialTransform = File( + desc= + "Transform to be applied to the moving image to initialize the registration. This can only be used if Initialize Transform Mode is Off.", + exists=True, + argstr="--initialTransform %s") + initializeTransformMode = traits.Enum( + "Off", + "useMomentsAlign", + "useCenterOfHeadAlign", + "useGeometryAlign", + "useCenterOfROIAlign", + desc= + "Determine how to initialize the transform center. useMomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. useGeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. Off assumes that the physical space of the images are close. This flag is mutually exclusive with the Initialization transform.", + argstr="--initializeTransformMode %s") + useRigid = traits.Bool( + desc= + "Perform a rigid registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useRigid ") + useScaleVersor3D = traits.Bool( + desc= + "Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useScaleVersor3D ") + useScaleSkewVersor3D = traits.Bool( + desc= + "Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useScaleSkewVersor3D ") + useAffine = traits.Bool( + desc= + "Perform an Affine registration as part of the sequential registration steps. 
This family of options overrides the use of transformType if any of them are set.", + argstr="--useAffine ") + useBSpline = traits.Bool( + desc= + "Perform a BSpline registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useBSpline ") + useSyN = traits.Bool( + desc= + "Perform a SyN registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useSyN ") + useComposite = traits.Bool( + desc= + "Perform a Composite registration as part of the sequential registration steps. This family of options overrides the use of transformType if any of them are set.", + argstr="--useComposite ") + maskProcessingMode = traits.Enum( + "NOMASK", + "ROIAUTO", + "ROI", + desc= + "Specifies a mask to only consider a certain image region for the registration. If ROIAUTO is chosen, then the mask is computed using Otsu thresholding and hole filling. If ROI is chosen then the mask has to be specified as an input.", + argstr="--maskProcessingMode %s") + fixedBinaryVolume = File( + desc= + "Fixed Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc= + "Moving Image binary mask volume, required if Masking Option is ROI. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True, + argstr="--movingBinaryVolume %s") + outputFixedVolumeROI = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + argstr="--outputFixedVolumeROI %s") + outputMovingVolumeROI = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + argstr="--outputMovingVolumeROI %s") + useROIBSpline = traits.Bool( + desc= + "If enabled then the bounding box of the input ROIs defines the BSpline grid support region. Otherwise the BSpline grid support region is the whole fixed image.", + argstr="--useROIBSpline ") + histogramMatch = traits.Bool( + desc= + "Apply histogram matching operation for the input images to make them more similar. This is suitable for images of the same modality that may have different brightness or contrast, but the same overall intensity profile. Do NOT use if registering images from different modalities.", + argstr="--histogramMatch ") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "Apply median filtering to reduce noise in the input volumes. The 3 values specify the radius for the optional MedianImageFilter preprocessing in all 3 directions (in voxels).", + sep=",", + argstr="--medianFilterSize %s") + removeIntensityOutliers = traits.Float( + desc= + "Remove very high and very low intensity voxels from the input volumes. The parameter specifies the half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. 
If the value of 0.005 is given, 0.005% of both tails will be thrown away, so 0.01% of intensities in total would be ignored in the statistic calculation.", + argstr="--removeIntensityOutliers %f") + fixedVolume2 = File( + desc= + "Input fixed image that will be used for multimodal registration. (the moving image will be transformed into this image space).", + exists=True, + argstr="--fixedVolume2 %s") + movingVolume2 = File( + desc= + "Input moving image that will be used for multimodal registration (this image will be transformed into the fixed image space).", + exists=True, + argstr="--movingVolume2 %s") + outputVolumePixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + desc="Data type for representing a voxel of the Output Volume.", + argstr="--outputVolumePixelType %s") + backgroundFillValue = traits.Float( + desc= + "This value will be used for filling those areas of the output image that have no corresponding voxels in the input moving image.", + argstr="--backgroundFillValue %f") + scaleOutputValues = traits.Bool( + desc= + "If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", + argstr="--scaleOutputValues ") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, Hamming, Cosine, Welch, Lanczos, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s") + numberOfIterations = InputMultiPath( + traits.Int, + desc= + "The maximum number of iterations to try before stopping the optimization. When using a lower value (500-1000), the registration is forced to terminate earlier but there is a higher risk of stopping before an optimal solution is reached.", + sep=",", + argstr="--numberOfIterations %s") + maximumStepLength = traits.Float( + desc= + "Starting step length of the optimizer. In general, higher values allow for recovering larger initial misalignments but there is an increased chance that the registration will not converge.", + argstr="--maximumStepLength %f") + minimumStepLength = InputMultiPath( + traits.Float, + desc= + "Each step in the optimization takes steps at least this big. When none are possible, registration is complete. Smaller values allow the optimizer to make smaller adjustments, but the registration time may increase.", + sep=",", + argstr="--minimumStepLength %s") + relaxationFactor = traits.Float( + desc= + "Specifies how quickly the optimization step length is decreased during registration. The value must be larger than 0 and smaller than 1. 
Larger values result in slower step size decrease, which allows for recovering larger initial misalignments but increases the registration time and the chance that the registration will not converge.", + argstr="--relaxationFactor %f") + translationScale = traits.Float( + desc= + "How much to scale up changes in position (in mm) compared to unit rotational changes (in radians) -- decrease this to allow for more rotation in the search pattern.", + argstr="--translationScale %f") + reproportionScale = traits.Float( + desc= + "ScaleVersor3D 'Scale' compensation factor. Increase this to allow for more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--reproportionScale %f") + skewScale = traits.Float( + desc= + "ScaleSkewVersor3D Skew compensation factor. Increase this to allow for more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--skewScale %f") + maxBSplineDisplacement = traits.Float( + desc= + "Maximum allowed displacements in image physical coordinates (mm) for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer.", + argstr="--maxBSplineDisplacement %f") + fixedVolumeTimeIndex = traits.Int( + desc= + "The index in the time series for the 3D fixed image to fit. Only allowed if the fixed input volume is 4-dimensional.", + argstr="--fixedVolumeTimeIndex %d") + movingVolumeTimeIndex = traits.Int( + desc= + "The index in the time series for the 3D moving image to fit. Only allowed if the moving input volume is 4-dimensional.", + argstr="--movingVolumeTimeIndex %d") + numberOfHistogramBins = traits.Int( + desc= + "The number of histogram levels used for mutual information metric estimation.", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc= + "Number of histogram match points used for mutual information metric estimation.", + argstr="--numberOfMatchPoints %d") + costMetric = traits.Enum( + "MMI", + "MSE", + "NC", + "MIH", + desc= + "The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", + argstr="--costMetric %s") + maskInferiorCutOffFromCenter = traits.Float( + desc= + "If Initialize Transform Mode is set to useCenterOfHeadAlign or Masking Option is ROIAUTO then this value defines how much is cut off from the inferior part of the image. The cut-off distance is specified in millimeters, relative to the image center. If the value is 1000 or larger then no cut-off is performed.", + argstr="--maskInferiorCutOffFromCenter %f") + ROIAutoDilateSize = traits.Float( + desc= + "This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. A setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraint to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f") + ROIAutoClosingSize = traits.Float( + desc= + "This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. 
It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", + argstr="--ROIAutoClosingSize %f") + numberOfSamples = traits.Int( + desc= + "The number of voxels sampled for mutual information computation. Increase this for higher accuracy, at the cost of longer computation time. NOTE that it is suggested to use samplingPercentage instead of this option. However, if set to non-zero, numberOfSamples overwrites the samplingPercentage option.", + argstr="--numberOfSamples %d") + strippedOutputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", + argstr="--strippedOutputTransform %s") + transformType = InputMultiPath( + traits.Str, + desc= + "Specifies a list of registration types to be used. The valid types are Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, BSpline and SyN. Specifying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", + sep=",", + argstr="--transformType %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Filename to which to save the estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputTransform %s") + initializeRegistrationByCurrentGenericTransform = traits.Bool( + desc= + "If this flag is ON, the current generic composite transform, resulting from the linear registration stages, is set to initialize the following nonlinear registration process. However, by default, the moving image is first warped based on the existing transform before it is passed to the BSpline registration filter. This is done to speed up the BSpline registration by reducing the computations of the composite transform Jacobian.", + argstr="--initializeRegistrationByCurrentGenericTransform ") + failureExitCode = traits.Int( + desc= + "If the fit fails, exit with this status code. (It can be used to force a successful exit status of 0 if the registration fails due to reaching the maximum number of iterations.)", + argstr="--failureExitCode %d") + writeTransformOnFailure = traits.Bool( + desc= + "Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", + argstr="--writeTransformOnFailure ") + numberOfThreads = traits.Int( + desc= + "Explicitly specify the maximum number of threads to use. (default is auto-detected)", + argstr="--numberOfThreads %d") + debugLevel = traits.Int( + desc= + "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d") + costFunctionConvergenceFactor = traits.Float( + desc= + "From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmch) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 
1e+9 seems to work well.", + argstr="--costFunctionConvergenceFactor %f") + projectedGradientTolerance = traits.Float( + desc= + "From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the projected gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well.", + argstr="--projectedGradientTolerance %f") + maximumNumberOfEvaluations = traits.Int( + desc= + "Maximum number of evaluations for line search in lbfgsb optimizer.", + argstr="--maximumNumberOfEvaluations %d") + maximumNumberOfCorrections = traits.Int( + desc="Maximum number of corrections in lbfgsb optimizer.", + argstr="--maximumNumberOfCorrections %d") + gui = traits.Bool( + desc= + "Display intermediate image volumes for debugging. NOTE: This is not part of the standard build system, and probably does nothing on your installation.", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + metricSamplingStrategy = traits.Enum( + "Random", + desc= + "It defines the method that registration filter uses to sample the input fixed image. Only Random is supported for now.", + argstr="--metricSamplingStrategy %s") + logFileReport = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "A file to write out the final information report in CSV format: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", + argstr="--logFileReport %s") + writeOutputTransformInFloat = traits.Bool( + desc= + "By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", + argstr="--writeOutputTransformInFloat ") + + +class BRAINSFitOutputSpec(TraitedSpec): + linearTransform = File( + desc= + "(optional) Output estimated transform - in case the computed transform is not BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + exists=True) + bsplineTransform = File( + desc= + "(optional) Output estimated transform - in case the computed transform is BSpline. NOTE: You must set at least one output object (transform and/or output volume).", + exists=True) + outputVolume = File( + desc= + "(optional) Output image: the moving image warped to the fixed image space. NOTE: You must set at least one output object (transform and/or output volume).", + exists=True) + outputFixedVolumeROI = File( + desc= + "ROI that is automatically computed from the fixed image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True) + outputMovingVolumeROI = File( + desc= + "ROI that is automatically computed from the moving image. Only available if Masking Option is ROIAUTO. Image areas where the mask volume has zero value are ignored during the registration.", + exists=True) + strippedOutputTransform = File( + desc= + "Rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. 
NOTE: This value is overridden if either bsplineTransform or linearTransform is set.", + exists=True) + outputTransform = File( + desc= + "(optional) Filename to which to save the estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True) + logFileReport = File( + desc= + "A file to write out the final information report in CSV format: MetricName,MetricValue,FixedImageName,FixedMaskName,MovingImageName,MovingMaskName", + exists=True) + + +class BRAINSFit(SEMLikeCommandLine): + """title: General Registration (BRAINS) + +category: Registration + +description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit. Method described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 + +version: 3.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard + +""" + + input_spec = BRAINSFitInputSpec + output_spec = BRAINSFitOutputSpec + _cmd = " BRAINSFit " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'bsplineTransform': 'bsplineTransform.h5', + 'outputTransform': 'outputTransform.h5', + 'outputFixedVolumeROI': 'outputFixedVolumeROI.nii', + 'strippedOutputTransform': 'strippedOutputTransform.h5', + 'outputMovingVolumeROI': 'outputMovingVolumeROI.nii', + 'linearTransform': 'linearTransform.h5', + 'logFileReport': 'logFileReport' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/brainsresample.py b/nipype/interfaces/semtools/registration/brainsresample.py new file mode 100644 index 0000000000..f9ea80acbd --- /dev/null +++ b/nipype/interfaces/semtools/registration/brainsresample.py @@ -0,0 +1,106 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSResampleInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Image To Warp", exists=True, argstr="--inputVolume %s") + referenceVolume = File( + desc= + "Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", + exists=True, + argstr="--referenceVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Resulting deformed image", + argstr="--outputVolume %s") + pixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + "binary", + desc= + "Specifies the pixel type for the input/output images. 
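Reviewer note: a minimal usage sketch of the BRAINSFit interface added above. This is a sketch only: it assumes the module path introduced by this patch, the file names are hypothetical, and the BRAINSFit executable must be on PATH.

from nipype.interfaces.semtools.registration.brainsfit import BRAINSFit

fit = BRAINSFit()
fit.inputs.fixedVolume = 'fixed_T1.nii.gz'    # hypothetical input files
fit.inputs.movingVolume = 'moving_T1.nii.gz'
fit.inputs.useRigid = True                    # rigid stage, then affine stage
fit.inputs.useAffine = True
fit.inputs.outputVolume = True                # True -> auto-named via _outputs_filenames
fit.inputs.outputTransform = True             # -> outputTransform.h5
print(fit.cmdline)                            # inspect the generated command line
# result = fit.run()                          # executes BRAINSFit if installed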
The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s") + deformationVolume = File( + desc= + "Displacement Field to be used to warp the image (ITKv3 or earlier)", + exists=True, + argstr="--deformationVolume %s") + warpTransform = File( + desc= + "Filename for the BRAINSFit transform (ITKv3 or earlier) or composite transform file (ITKv4)", + exists=True, + argstr="--warpTransform %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + inverseTransform = traits.Bool( + desc= + "True/False to compute the inverse of the given transformation. Default is false", + argstr="--inverseTransform ") + defaultValue = traits.Float( + desc="Default voxel value", argstr="--defaultValue %f") + gridSpacing = InputMultiPath( + traits.Int, + desc= + "Add warped grid to output image to help show the deformation that occurred with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). This is useful for making a 2D image of grid lines from the 3D space", + sep=",", + argstr="--gridSpacing %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSResampleOutputSpec(TraitedSpec): + outputVolume = File(desc="Resulting deformed image", exists=True) + + +class BRAINSResample(SEMLikeCommandLine): + """title: Resample Image (BRAINS) + +category: Registration + +description: This program collects together three common image processing tasks that all involve resampling an image volume: Resampling to a new resolution and spacing, applying a transformation (using ITK transform IO mechanisms) and Warping (using a vector image deformation field). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample. + +version: 3.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSResample + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
+ +""" + + input_spec = BRAINSResampleInputSpec + output_spec = BRAINSResampleOutputSpec + _cmd = " BRAINSResample " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/brainsresize.py b/nipype/interfaces/semtools/registration/brainsresize.py new file mode 100644 index 0000000000..11238dd914 --- /dev/null +++ b/nipype/interfaces/semtools/registration/brainsresize.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSResizeInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Image To Scale", exists=True, argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Resulting scaled image", + argstr="--outputVolume %s") + pixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + "binary", + desc= + "Specifies the pixel type for the input/output images. The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s") + scaleFactor = traits.Float( + desc="The scale factor for the image spacing.", + argstr="--scaleFactor %f") + + +class BRAINSResizeOutputSpec(TraitedSpec): + outputVolume = File(desc="Resulting scaled image", exists=True) + + +class BRAINSResize(SEMLikeCommandLine): + """title: Resize Image (BRAINS) + +category: Registration + +description: This program is useful for downsampling an image by a constant scale factor. + +version: 3.0.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Hans Johnson. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
+ +""" + + input_spec = BRAINSResizeInputSpec + output_spec = BRAINSResizeOutputSpec + _cmd = " BRAINSResize " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/specialized.py b/nipype/interfaces/semtools/registration/specialized.py new file mode 100644 index 0000000000..2cc08e3ec7 --- /dev/null +++ b/nipype/interfaces/semtools/registration/specialized.py @@ -0,0 +1,568 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): + movingVolume = InputMultiPath( + File(exists=True), + desc="Required: input moving image", + argstr="--movingVolume %s...") + fixedVolume = InputMultiPath( + File(exists=True), + desc="Required: input fixed (target) image", + argstr="--fixedVolume %s...") + inputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s") + outputDisplacementFieldVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s") + outputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + registrationFilterType = traits.Enum( + "Demons", + "FastSymmetricForces", + "Diffeomorphic", + "LogDemons", + "SymmetricLogDemons", + desc= + "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", + argstr="--registrationFilterType %s") + smoothDisplacementFieldSigma = traits.Float( + desc= + "A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f") + numberOfPyramidLevels = traits.Int( + desc= + "Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d") + minimumFixedPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumFixedPyramid %s") + minimumMovingPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the moving image pyramid. (i.e. 
start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumMovingPyramid %s") + arrayOfPyramidLevelIterations = InputMultiPath( + traits.Int, + desc="The number of iterations for each pyramid level", + sep=",", + argstr="--arrayOfPyramidLevelIterations %s") + histogramMatch = traits.Bool( + desc= + "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ") + numberOfHistogramBins = traits.Int( + desc="The number of histogram levels", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc="The number of match points for histogramMatch", + argstr="--numberOfMatchPoints %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + sep=",", + argstr="--medianFilterSize %s") + initializeWithDisplacementField = File( + desc="Initial deformation field vector image file name", + exists=True, + argstr="--initializeWithDisplacementField %s") + initializeWithTransform = File( + desc="Initial Transform filename", + exists=True, + argstr="--initializeWithTransform %s") + makeBOBF = traits.Bool( + desc= + "Flag to make Brain-Only Background-Filled versions of the input and target volumes.", + argstr="--makeBOBF ") + fixedBinaryVolume = File( + desc="Mask filename for desired region of interest in the Fixed image.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc= + "Mask filename for desired region of interest in the Moving image.", + exists=True, + argstr="--movingBinaryVolume %s") + lowerThresholdForBOBF = traits.Int( + desc="Lower threshold for performing BOBF", + argstr="--lowerThresholdForBOBF %d") + upperThresholdForBOBF = traits.Int( + desc="Upper threshold for performing BOBF", + argstr="--upperThresholdForBOBF %d") + backgroundFillValue = traits.Int( + desc="Replacement value to overwrite background when performing BOBF", + argstr="--backgroundFillValue %d") + seedForBOBF = InputMultiPath( + traits.Int, + desc="coordinates in all 3 directions for Seed when performing BOBF", + sep=",", + argstr="--seedForBOBF %s") + neighborhoodForBOBF = InputMultiPath( + traits.Int, + desc= + "neighborhood in all 3 directions to be included when performing BOBF", + sep=",", + argstr="--neighborhoodForBOBF %s") + outputDisplacementFieldPrefix = traits.Str( + desc= + "Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s") + outputCheckerboardVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Generate a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s") + checkerboardPatternSubdivisions = InputMultiPath( + traits.Int, + desc="Number of Checkerboard subdivisions in all 3 directions", + sep=",", + argstr="--checkerboardPatternSubdivisions %s") + outputNormalized = traits.Bool( + desc= + "Flag to warp and write the normalized images to output. 
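Reviewer note: a minimal usage sketch of the BRAINSResize interface completed earlier in this hunk (module path as in this patch, hypothetical input file, BRAINSResize executable on PATH):

from nipype.interfaces.semtools.registration.brainsresize import BRAINSResize

rz = BRAINSResize()
rz.inputs.inputVolume = 'anat.nii.gz'  # hypothetical input image
rz.inputs.scaleFactor = 2.0            # scale the image spacing by a factor of 2
rz.inputs.pixelType = 'short'
rz.inputs.outputVolume = True          # auto-named outputVolume.nii
print(rz.cmdline)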
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ") + outputDebug = traits.Bool( + desc="Flag to write debugging images after each step.", + argstr="--outputDebug ") + weightFactors = InputMultiPath( + traits.Float, + desc="Weight factors for each input image", + sep=",", + argstr="--weightFactors %s") + gradient_type = traits.Enum( + "0", + "1", + "2", + desc= + "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s") + upFieldSmoothing = traits.Float( + desc="Smoothing sigma for the update field at each iteration", + argstr="--upFieldSmoothing %f") + max_step_length = traits.Float( + desc="Maximum length of an update vector (0: no restriction)", + argstr="--max_step_length %f") + use_vanilla_dem = traits.Bool( + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + gui = traits.Bool( + desc="Display intermediate image volumes for debugging", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + numberOfBCHApproximationTerms = traits.Int( + desc="Number of terms in the BCH expansion", + argstr="--numberOfBCHApproximationTerms %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class VBRAINSDemonWarpOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True) + outputDisplacementFieldVolume = File( + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True) + outputCheckerboardVolume = File( + desc= + "Generate a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True) + + +class VBRAINSDemonWarp(SEMLikeCommandLine): + """title: Vector Demon Registration (BRAINS) + +category: Registration.Specialized + +description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + +version: 3.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Hans J. Johnson and Greg Harris. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
+ +""" + + input_spec = VBRAINSDemonWarpInputSpec + output_spec = VBRAINSDemonWarpOutputSpec + _cmd = " VBRAINSDemonWarp " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', + 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + } + _redirect_x = False + + +class BRAINSDemonWarpInputSpec(CommandLineInputSpec): + movingVolume = File( + desc="Required: input moving image", + exists=True, + argstr="--movingVolume %s") + fixedVolume = File( + desc="Required: input fixed (target) image", + exists=True, + argstr="--fixedVolume %s") + inputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s") + outputDisplacementFieldVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s") + outputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + registrationFilterType = traits.Enum( + "Demons", + "FastSymmetricForces", + "Diffeomorphic", + desc= + "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", + argstr="--registrationFilterType %s") + smoothDisplacementFieldSigma = traits.Float( + desc= + "A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f") + numberOfPyramidLevels = traits.Int( + desc= + "Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d") + minimumFixedPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumFixedPyramid %s") + minimumMovingPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumMovingPyramid %s") + arrayOfPyramidLevelIterations = InputMultiPath( + traits.Int, + desc="The number of iterations for each pyramid level", + sep=",", + argstr="--arrayOfPyramidLevelIterations %s") + histogramMatch = traits.Bool( + desc= + "Histogram Match the input images. 
This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ") + numberOfHistogramBins = traits.Int( + desc="The number of histogram levels", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc="The number of match points for histogramMatch", + argstr="--numberOfMatchPoints %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + sep=",", + argstr="--medianFilterSize %s") + initializeWithDisplacementField = File( + desc="Initial deformation field vector image file name", + exists=True, + argstr="--initializeWithDisplacementField %s") + initializeWithTransform = File( + desc="Initial Transform filename", + exists=True, + argstr="--initializeWithTransform %s") + maskProcessingMode = traits.Enum( + "NOMASK", + "ROIAUTO", + "ROI", + "BOBF", + desc= + "What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is chosen, then the mask is implicitly defined using an Otsu foreground and hole filling algorithm. The Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", + argstr="--maskProcessingMode %s") + fixedBinaryVolume = File( + desc="Mask filename for desired region of interest in the Fixed image.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc= + "Mask filename for desired region of interest in the Moving image.", + exists=True, + argstr="--movingBinaryVolume %s") + lowerThresholdForBOBF = traits.Int( + desc="Lower threshold for performing BOBF", + argstr="--lowerThresholdForBOBF %d") + upperThresholdForBOBF = traits.Int( + desc="Upper threshold for performing BOBF", + argstr="--upperThresholdForBOBF %d") + backgroundFillValue = traits.Int( + desc="Replacement value to overwrite background when performing BOBF", + argstr="--backgroundFillValue %d") + seedForBOBF = InputMultiPath( + traits.Int, + desc="coordinates in all 3 directions for Seed when performing BOBF", + sep=",", + argstr="--seedForBOBF %s") + neighborhoodForBOBF = InputMultiPath( + traits.Int, + desc= + "neighborhood in all 3 directions to be included when performing BOBF", + sep=",", + argstr="--neighborhoodForBOBF %s") + outputDisplacementFieldPrefix = traits.Str( + desc= + "Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s") + outputCheckerboardVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Generate a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s") + checkerboardPatternSubdivisions = InputMultiPath( + traits.Int, + desc="Number of Checkerboard subdivisions in all 3 directions", + sep=",", + argstr="--checkerboardPatternSubdivisions %s") + outputNormalized = traits.Bool( + desc= + "Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ") + outputDebug = traits.Bool( + desc="Flag to write debugging images after each step.", + argstr="--outputDebug ") + gradient_type = traits.Enum( + "0", + "1", + "2", + desc= + "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s") + upFieldSmoothing = traits.Float( + desc="Smoothing sigma for the update field at each iteration", + argstr="--upFieldSmoothing %f") + max_step_length = traits.Float( + desc="Maximum length of an update vector (0: no restriction)", + argstr="--max_step_length %f") + use_vanilla_dem = traits.Bool( + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + gui = traits.Bool( + desc="Display intermediate image volumes for debugging", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + numberOfBCHApproximationTerms = traits.Int( + desc="Number of terms in the BCH expansion", + argstr="--numberOfBCHApproximationTerms %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSDemonWarpOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True) + outputDisplacementFieldVolume = File( + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True) + outputCheckerboardVolume = File( + desc= + "Generate a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True) + + +class BRAINSDemonWarp(SEMLikeCommandLine): + """title: Demon Registration (BRAINS) + +category: Registration.Specialized + +description: This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp. + +version: 3.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSDemonWarp + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Hans J. Johnson and Greg Harris. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
+ +""" + + input_spec = BRAINSDemonWarpInputSpec + output_spec = BRAINSDemonWarpOutputSpec + _cmd = " BRAINSDemonWarp " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', + 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + } + _redirect_x = False + + +class BRAINSTransformFromFiducialsInputSpec(CommandLineInputSpec): + fixedLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the fixed image", + argstr="--fixedLandmarks %s...") + movingLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the moving image", + argstr="--movingLandmarks %s...") + saveTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Save the transform that results from registration", + argstr="--saveTransform %s") + transformType = traits.Enum( + "Translation", + "Rigid", + "Similarity", + desc="Type of transform to produce", + argstr="--transformType %s") + fixedLandmarksFile = File( + desc="An fcsv formatted file with a list of landmark points.", + exists=True, + argstr="--fixedLandmarksFile %s") + movingLandmarksFile = File( + desc="An fcsv formatted file with a list of landmark points.", + exists=True, + argstr="--movingLandmarksFile %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSTransformFromFiducialsOutputSpec(TraitedSpec): + saveTransform = File( + desc="Save the transform that results from registration", exists=True) + + +class BRAINSTransformFromFiducials(SEMLikeCommandLine): + """title: Fiducial Registration (BRAINS) + +category: Registration.Specialized + +description: Computes a rigid, similarity or affine transform from a matched list of fiducials + +version: 0.1.0.$Revision$ + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:TransformFromFiducials-Documentation-3.6 + +contributor: Casey B Goodlett + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ +""" + + input_spec = BRAINSTransformFromFiducialsInputSpec + output_spec = BRAINSTransformFromFiducialsOutputSpec + _cmd = " BRAINSTransformFromFiducials " + _outputs_filenames = {'saveTransform': 'saveTransform.h5'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/registration/tests/__init__.py b/nipype/interfaces/semtools/registration/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py new file mode 100644 index 0000000000..c631f9b96e --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSDemonWarp.py @@ -0,0 +1,103 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSDemonWarp + + +def test_BRAINSDemonWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + arrayOfPyramidLevelIterations=dict( + argstr='--arrayOfPyramidLevelIterations %s', + sep=',', + ), + backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + checkerboardPatternSubdivisions=dict( + argstr='--checkerboardPatternSubdivisions %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s', ), + gradient_type=dict(argstr='--gradient_type %s', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initializeWithDisplacementField=dict( + argstr='--initializeWithDisplacementField %s', ), + initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + inputPixelType=dict(argstr='--inputPixelType %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), + maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), + max_step_length=dict(argstr='--max_step_length %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + minimumFixedPyramid=dict( + argstr='--minimumFixedPyramid %s', + sep=',', + ), + minimumMovingPyramid=dict( + argstr='--minimumMovingPyramid %s', + sep=',', + ), + movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s', ), + neighborhoodForBOBF=dict( + argstr='--neighborhoodForBOBF %s', + sep=',', + ), + numberOfBCHApproximationTerms=dict( + argstr='--numberOfBCHApproximationTerms %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputCheckerboardVolume=dict( + argstr='--outputCheckerboardVolume %s', + hash_files=False, + ), + outputDebug=dict(argstr='--outputDebug ', ), + outputDisplacementFieldPrefix=dict( + argstr='--outputDisplacementFieldPrefix %s', ), + outputDisplacementFieldVolume=dict( + argstr='--outputDisplacementFieldVolume %s', + hash_files=False, + ), + outputNormalized=dict(argstr='--outputNormalized ', ), + outputPixelType=dict(argstr='--outputPixelType %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + promptUser=dict(argstr='--promptUser ', ), + 
registrationFilterType=dict(argstr='--registrationFilterType %s', ), + seedForBOBF=dict( + argstr='--seedForBOBF %s', + sep=',', + ), + smoothDisplacementFieldSigma=dict( + argstr='--smoothDisplacementFieldSigma %f', ), + upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), + upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), + use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + ) + inputs = BRAINSDemonWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSDemonWarp_outputs(): + output_map = dict( + outputCheckerboardVolume=dict(), + outputDisplacementFieldVolume=dict(), + outputVolume=dict(), + ) + outputs = BRAINSDemonWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py new file mode 100644 index 0000000000..18059c4f57 --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSFit.py @@ -0,0 +1,145 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsfit import BRAINSFit + + +def test_BRAINSFit_inputs(): + input_map = dict( + ROIAutoClosingSize=dict(argstr='--ROIAutoClosingSize %f', ), + ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), + args=dict(argstr='%s', ), + backgroundFillValue=dict(argstr='--backgroundFillValue %f', ), + bsplineTransform=dict( + argstr='--bsplineTransform %s', + hash_files=False, + ), + costFunctionConvergenceFactor=dict( + argstr='--costFunctionConvergenceFactor %f', ), + costMetric=dict(argstr='--costMetric %s', ), + debugLevel=dict(argstr='--debugLevel %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + failureExitCode=dict(argstr='--failureExitCode %d', ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedVolume2=dict(argstr='--fixedVolume2 %s', ), + fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initialTransform=dict(argstr='--initialTransform %s', ), + initializeRegistrationByCurrentGenericTransform=dict( + argstr='--initializeRegistrationByCurrentGenericTransform ', ), + initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + linearTransform=dict( + argstr='--linearTransform %s', + hash_files=False, + ), + logFileReport=dict( + argstr='--logFileReport %s', + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr='--maskInferiorCutOffFromCenter %f', ), + maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), + maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), + maximumNumberOfCorrections=dict( + argstr='--maximumNumberOfCorrections %d', ), + maximumNumberOfEvaluations=dict( + argstr='--maximumNumberOfEvaluations %d', ), + maximumStepLength=dict(argstr='--maximumStepLength %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + metricSamplingStrategy=dict(argstr='--metricSamplingStrategy %s', ), + minimumStepLength=dict( + argstr='--minimumStepLength %s', + sep=',', + ), + 
movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s', ), + movingVolume2=dict(argstr='--movingVolume2 %s', ), + movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfIterations=dict( + argstr='--numberOfIterations %s', + sep=',', + ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfSamples=dict(argstr='--numberOfSamples %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputFixedVolumeROI=dict( + argstr='--outputFixedVolumeROI %s', + hash_files=False, + ), + outputMovingVolumeROI=dict( + argstr='--outputMovingVolumeROI %s', + hash_files=False, + ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), + projectedGradientTolerance=dict( + argstr='--projectedGradientTolerance %f', ), + promptUser=dict(argstr='--promptUser ', ), + relaxationFactor=dict(argstr='--relaxationFactor %f', ), + removeIntensityOutliers=dict(argstr='--removeIntensityOutliers %f', ), + reproportionScale=dict(argstr='--reproportionScale %f', ), + samplingPercentage=dict(argstr='--samplingPercentage %f', ), + scaleOutputValues=dict(argstr='--scaleOutputValues ', ), + skewScale=dict(argstr='--skewScale %f', ), + splineGridSize=dict( + argstr='--splineGridSize %s', + sep=',', + ), + strippedOutputTransform=dict( + argstr='--strippedOutputTransform %s', + hash_files=False, + ), + transformType=dict( + argstr='--transformType %s', + sep=',', + ), + translationScale=dict(argstr='--translationScale %f', ), + useAffine=dict(argstr='--useAffine ', ), + useBSpline=dict(argstr='--useBSpline ', ), + useComposite=dict(argstr='--useComposite ', ), + useROIBSpline=dict(argstr='--useROIBSpline ', ), + useRigid=dict(argstr='--useRigid ', ), + useScaleSkewVersor3D=dict(argstr='--useScaleSkewVersor3D ', ), + useScaleVersor3D=dict(argstr='--useScaleVersor3D ', ), + useSyN=dict(argstr='--useSyN ', ), + writeOutputTransformInFloat=dict( + argstr='--writeOutputTransformInFloat ', ), + writeTransformOnFailure=dict(argstr='--writeTransformOnFailure ', ), + ) + inputs = BRAINSFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSFit_outputs(): + output_map = dict( + bsplineTransform=dict(), + linearTransform=dict(), + logFileReport=dict(), + outputFixedVolumeROI=dict(), + outputMovingVolumeROI=dict(), + outputTransform=dict(), + outputVolume=dict(), + strippedOutputTransform=dict(), + ) + outputs = BRAINSFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py new file mode 100644 index 0000000000..98ec5f4ff3 --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResample.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsresample import BRAINSResample + + +def test_BRAINSResample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + 
defaultValue=dict(argstr='--defaultValue %f', ), + deformationVolume=dict(argstr='--deformationVolume %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr='--gridSpacing %s', + sep=',', + ), + inputVolume=dict(argstr='--inputVolume %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + inverseTransform=dict(argstr='--inverseTransform ', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + pixelType=dict(argstr='--pixelType %s', ), + referenceVolume=dict(argstr='--referenceVolume %s', ), + warpTransform=dict(argstr='--warpTransform %s', ), + ) + inputs = BRAINSResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSResample_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py new file mode 100644 index 0000000000..4ce0d7159c --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsresize import BRAINSResize + + +def test_BRAINSResize_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + pixelType=dict(argstr='--pixelType %s', ), + scaleFactor=dict(argstr='--scaleFactor %f', ), + ) + inputs = BRAINSResize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSResize_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSResize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py new file mode 100644 index 0000000000..2a700eaac7 --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSTransformFromFiducials.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSTransformFromFiducials + + +def test_BRAINSTransformFromFiducials_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), + fixedLandmarksFile=dict(argstr='--fixedLandmarksFile %s', ), + movingLandmarks=dict(argstr='--movingLandmarks %s...', ), + movingLandmarksFile=dict(argstr='--movingLandmarksFile %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + saveTransform=dict( + argstr='--saveTransform %s', + hash_files=False, + ), + transformType=dict(argstr='--transformType %s', ), + ) 
+ inputs = BRAINSTransformFromFiducials.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSTransformFromFiducials_outputs(): + output_map = dict(saveTransform=dict(), ) + outputs = BRAINSTransformFromFiducials.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py new file mode 100644 index 0000000000..b3255da1d3 --- /dev/null +++ b/nipype/interfaces/semtools/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -0,0 +1,107 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import VBRAINSDemonWarp + + +def test_VBRAINSDemonWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + arrayOfPyramidLevelIterations=dict( + argstr='--arrayOfPyramidLevelIterations %s', + sep=',', + ), + backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + checkerboardPatternSubdivisions=dict( + argstr='--checkerboardPatternSubdivisions %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s...', ), + gradient_type=dict(argstr='--gradient_type %s', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initializeWithDisplacementField=dict( + argstr='--initializeWithDisplacementField %s', ), + initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + inputPixelType=dict(argstr='--inputPixelType %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), + makeBOBF=dict(argstr='--makeBOBF ', ), + max_step_length=dict(argstr='--max_step_length %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + minimumFixedPyramid=dict( + argstr='--minimumFixedPyramid %s', + sep=',', + ), + minimumMovingPyramid=dict( + argstr='--minimumMovingPyramid %s', + sep=',', + ), + movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s...', ), + neighborhoodForBOBF=dict( + argstr='--neighborhoodForBOBF %s', + sep=',', + ), + numberOfBCHApproximationTerms=dict( + argstr='--numberOfBCHApproximationTerms %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputCheckerboardVolume=dict( + argstr='--outputCheckerboardVolume %s', + hash_files=False, + ), + outputDebug=dict(argstr='--outputDebug ', ), + outputDisplacementFieldPrefix=dict( + argstr='--outputDisplacementFieldPrefix %s', ), + outputDisplacementFieldVolume=dict( + argstr='--outputDisplacementFieldVolume %s', + hash_files=False, + ), + outputNormalized=dict(argstr='--outputNormalized ', ), + outputPixelType=dict(argstr='--outputPixelType %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + promptUser=dict(argstr='--promptUser ', ), + registrationFilterType=dict(argstr='--registrationFilterType %s', ), + seedForBOBF=dict( 
+ argstr='--seedForBOBF %s', + sep=',', + ), + smoothDisplacementFieldSigma=dict( + argstr='--smoothDisplacementFieldSigma %f', ), + upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), + upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), + use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + weightFactors=dict( + argstr='--weightFactors %s', + sep=',', + ), + ) + inputs = VBRAINSDemonWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VBRAINSDemonWarp_outputs(): + output_map = dict( + outputCheckerboardVolume=dict(), + outputDisplacementFieldVolume=dict(), + outputVolume=dict(), + ) + outputs = VBRAINSDemonWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/__init__.py b/nipype/interfaces/semtools/segmentation/__init__.py new file mode 100644 index 0000000000..d0cd69e0c9 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .specialized import ( + BRAINSCut, BRAINSROIAuto, BRAINSConstellationDetector, + BRAINSCreateLabelMapFromProbabilityMaps, BinaryMaskEditorBasedOnLandmarks, + BRAINSMultiSTAPLE, BRAINSABC, ESLR) diff --git a/nipype/interfaces/semtools/segmentation/specialized.py b/nipype/interfaces/semtools/segmentation/specialized.py new file mode 100644 index 0000000000..fa08b8e260 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/specialized.py @@ -0,0 +1,929 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSCutInputSpec(CommandLineInputSpec): + netConfiguration = File( + desc= + "XML File defining BRAINSCut parameters. OLD NAME. PLEASE USE modelConfigurationFilename instead.", + exists=True, + argstr="--netConfiguration %s") + modelConfigurationFilename = File( + desc="XML File defining BRAINSCut parameters", + exists=True, + argstr="--modelConfigurationFilename %s") + trainModelStartIndex = traits.Int( + desc="Starting iteration for training", + argstr="--trainModelStartIndex %d") + verbose = traits.Int( + desc="print out some debugging information", argstr="--verbose %d") + multiStructureThreshold = traits.Bool( + desc="multiStructureThreshold module to deal with overlapping area", + argstr="--multiStructureThreshold ") + histogramEqualization = traits.Bool( + desc= + "A Histogram Equalization process could be added to the creating/applying process from Subject To Atlas. Default is false, which generates input vectors without Histogram Equalization. 
", + argstr="--histogramEqualization ") + computeSSEOn = traits.Bool( + desc= + "compute Sum of Square Error (SSE) along the trained model until the number of iteration given in the modelConfigurationFilename file", + argstr="--computeSSEOn ") + generateProbability = traits.Bool( + desc="Generate probability map", argstr="--generateProbability ") + createVectors = traits.Bool( + desc="create vectors for training neural net", + argstr="--createVectors ") + trainModel = traits.Bool( + desc="train the neural net", argstr="--trainModel ") + NoTrainingVectorShuffling = traits.Bool( + desc="If this flag is on, there will be no shuffling.", + argstr="--NoTrainingVectorShuffling ") + applyModel = traits.Bool( + desc="apply the neural net", argstr="--applyModel ") + validate = traits.Bool( + desc= + "validate data set.Just need for the first time run ( This is for validation of xml file and not working yet )", + argstr="--validate ") + method = traits.Enum("RandomForest", "ANN", argstr="--method %s") + numberOfTrees = traits.Int( + desc= + " Random tree: number of trees. This is to be used when only one model with specified depth wish to be created. ", + argstr="--numberOfTrees %d") + randomTreeDepth = traits.Int( + desc= + " Random tree depth. This is to be used when only one model with specified depth wish to be created. ", + argstr="--randomTreeDepth %d") + modelFilename = traits.Str( + desc= + " model file name given from user (not by xml configuration file) ", + argstr="--modelFilename %s") + + +class BRAINSCutOutputSpec(TraitedSpec): + pass + + +class BRAINSCut(SEMLikeCommandLine): + """title: BRAINSCut (BRAINS) + +category: Segmentation.Specialized + +description: Automatic Segmentation using neural networks + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Vince Magnotta, Hans Johnson, Greg Harris, Kent Williams, Eunyoung Regina Kim + +""" + + input_spec = BRAINSCutInputSpec + output_spec = BRAINSCutOutputSpec + _cmd = " BRAINSCut " + _outputs_filenames = {} + _redirect_x = False + + +class BRAINSROIAutoInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="The input image for finding the largest region filled mask.", + exists=True, + argstr="--inputVolume %s") + outputROIMaskVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The ROI automatically found from the input image.", + argstr="--outputROIMaskVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", + argstr="--outputVolume %s") + maskOutput = traits.Bool( + desc="The inputVolume multiplied by the ROI mask.", + argstr="--maskOutput ") + cropOutput = traits.Bool( + desc="The inputVolume cropped to the region of the ROI mask.", + argstr="--cropOutput ") + otsuPercentileThreshold = traits.Float( + desc="Parameter to the Otsu threshold algorithm.", + argstr="--otsuPercentileThreshold %f") + thresholdCorrectionFactor = traits.Float( + desc= + "A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", + argstr="--thresholdCorrectionFactor %f") + closingSize = traits.Float( + desc= + "The Closing Size (in millimeters) for largest connected filled mask. 
This value is divided by image spacing and rounded to the next largest voxel number.", + argstr="--closingSize %f") + ROIAutoDilateSize = traits.Float( + desc= + "This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. A setting of 10mm has been shown to help regularize a BSpline registration type so that there are some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f") + outputVolumePixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + desc= + "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSROIAutoOutputSpec(TraitedSpec): + outputROIMaskVolume = File( + desc="The ROI automatically found from the input image.", exists=True) + outputVolume = File( + desc= + "The inputVolume with optional [maskOutput|cropOutput] to the region of the brain mask.", + exists=True) + + +class BRAINSROIAuto(SEMLikeCommandLine): + """title: Foreground masking (BRAINS) + +category: Segmentation.Specialized + +description: This program is used to create a mask over the most prominent foreground region in an image. This is accomplished via a combination of Otsu thresholding and a closing operation. More documentation is available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ForegroundMasking. + +version: 2.4.1 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + +""" + + input_spec = BRAINSROIAutoInputSpec + output_spec = BRAINSROIAutoOutputSpec + _cmd = " BRAINSROIAuto " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputROIMaskVolume': 'outputROIMaskVolume.nii' + } + _redirect_x = False + + +class BRAINSConstellationDetectorInputSpec(CommandLineInputSpec): + houghEyeDetectorMode = traits.Int( + desc= + ", This flag controls the mode of Hough eye detector. 
By default, value of 1 is for T1W images, while the value of 0 is for T2W and PD images., ", + argstr="--houghEyeDetectorMode %d") + inputTemplateModel = File( + desc="User-specified template model., ", + exists=True, + argstr="--inputTemplateModel %s") + LLSModel = File( + desc="Linear least squares model filename in HD5 format", + exists=True, + argstr="--LLSModel %s") + inputVolume = File( + desc="Input image in which to find ACPC points", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "ACPC-aligned output image with the same voxels, but updated origin, and direction cosine so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagittal plane is the plane where physical L/R coordinate is 0.0.", + argstr="--outputVolume %s") + outputResampledVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "ACPC-aligned output image in a resampled uniform space. Currently this is a 1mm, 256^3, Identity direction image.", + argstr="--outputResampledVolume %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The filename for the original space to ACPC alignment to be written (in .h5 format)., ", + argstr="--outputTransform %s") + outputLandmarksInInputSpace = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", + argstr="--outputLandmarksInInputSpace %s") + outputLandmarksInACPCAlignedSpace = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", + argstr="--outputLandmarksInACPCAlignedSpace %s") + outputMRML = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", + argstr="--outputMRML %s") + outputVerificationScript = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", + argstr="--outputVerificationScript %s") + mspQualityLevel = traits.Int( + desc= + ", Flag controls how aggressively the MSP is estimated. 
0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds), NOTE: -1= Prealigned so no estimate!., ", + argstr="--mspQualityLevel %d") + otsuPercentileThreshold = traits.Float( + desc= + ", This is a parameter to FindLargestForegroundFilledMask, which is employed when acLowerBound is set and an outputUntransformedClippedVolume is requested., ", + argstr="--otsuPercentileThreshold %f") + acLowerBound = traits.Float( + desc= + ", When generating a resampled output image, replace the image with the BackgroundFillValue everywhere below the plane this far in physical units (millimeters) below (inferior to) the AC point (as found by the model.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", + argstr="--acLowerBound %f") + cutOutHeadInOutputVolume = traits.Bool( + desc= + ", Flag to cut out just the head tissue when producing an (un)transformed clipped volume., ", + argstr="--cutOutHeadInOutputVolume ") + outputUntransformedClippedVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", + argstr="--outputUntransformedClippedVolume %s") + rescaleIntensities = traits.Bool( + desc= + ", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ") + trimRescaledIntensities = traits.Float( + desc= + ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f") + rescaleIntensitiesOutputRange = InputMultiPath( + traits.Int, + desc= + ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", + sep=",", + argstr="--rescaleIntensitiesOutputRange %s") + BackgroundFillValue = traits.Str( + desc= + "Fill the background of the image with the specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. 
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + forceACPoint = InputMultiPath( + traits.Float, + desc= + ", Use this flag to manually specify the AC point from the original image on the command line., ", + sep=",", + argstr="--forceACPoint %s") + forcePCPoint = InputMultiPath( + traits.Float, + desc= + ", Use this flag to manually specify the PC point from the original image on the command line., ", + sep=",", + argstr="--forcePCPoint %s") + forceVN4Point = InputMultiPath( + traits.Float, + desc= + ", Use this flag to manually specify the VN4 point from the original image on the command line., ", + sep=",", + argstr="--forceVN4Point %s") + forceRPPoint = InputMultiPath( + traits.Float, + desc= + ", Use this flag to manually specify the RP point from the original image on the command line., ", + sep=",", + argstr="--forceRPPoint %s") + inputLandmarksEMSP = File( + desc= + ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (in .fcsv) with the landmarks in the estimated MSP aligned space to be loaded. The detector will only process landmarks not enlisted on the file., ", + exists=True, + argstr="--inputLandmarksEMSP %s") + forceHoughEyeDetectorReportFailure = traits.Bool( + desc= + ", Flag indicates whether the Hough eye detector should report failure, ", + argstr="--forceHoughEyeDetectorReportFailure ") + rmpj = traits.Float( + desc= + ", Search radius for MPJ in units of mm, ", + argstr="--rmpj %f") + rac = traits.Float( + desc=", Search radius for AC in units of mm, ", + argstr="--rac %f") + rpc = traits.Float( + desc=", Search radius for PC in units of mm, ", + argstr="--rpc %f") + rVN4 = traits.Float( + desc= + ", Search radius for VN4 in units of mm, ", + argstr="--rVN4 %f") + debug = traits.Bool( + desc= + ", Show internal debugging information., ", + argstr="--debug ") + verbose = traits.Bool( + desc=", Show more verbose output, ", + argstr="--verbose ") + writeBranded2DImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", + argstr="--writeBranded2DImage %s") + resultsDir = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc= + ", The directory for the debugging images to be written., ", + argstr="--resultsDir %s") + writedebuggingImagesLevel = traits.Int( + desc= + ", This flag controls whether debugging images are produced. By default, a value of 0 produces no images. 
Anything greater than zero produces an increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + atlasVolume = File( + desc="Atlas volume image to be used for BRAINSFit registration", + exists=True, + argstr="--atlasVolume %s") + atlasLandmarks = File( + desc= + "Atlas landmarks to be used for BRAINSFit registration initialization, ", + exists=True, + argstr="--atlasLandmarks %s") + atlasLandmarkWeights = File( + desc= + "Weights associated with atlas landmarks to be used for BRAINSFit registration initialization, ", + exists=True, + argstr="--atlasLandmarkWeights %s") + + +class BRAINSConstellationDetectorOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "ACPC-aligned output image with the same voxels, but updated origin, and direction cosine so that the AC point would fall at the physical location (0.0,0.0,0.0), and the mid-sagittal plane is the plane where physical L/R coordinate is 0.0.", + exists=True) + outputResampledVolume = File( + desc= + "ACPC-aligned output image in a resampled uniform space. Currently this is a 1mm, 256^3, Identity direction image.", + exists=True) + outputTransform = File( + desc= + "The filename for the original space to ACPC alignment to be written (in .h5 format)., ", + exists=True) + outputLandmarksInInputSpace = File( + desc= + ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the original image space (the detected RP, AC, PC, and VN4) in it to be written., ", + exists=True) + outputLandmarksInACPCAlignedSpace = File( + desc= + ", The filename for the new subject-specific landmark definition file in the same format produced by Slicer3 (.fcsv) with the landmarks in the output image space (the detected RP, AC, PC, and VN4) in it to be written., ", + exists=True) + outputMRML = File( + desc= + ", The filename for the new subject-specific scene definition file in the same format produced by Slicer3 (in .mrml format). Only the components that were specified by the user on command line would be generated. Compatible components include inputVolume, outputVolume, outputLandmarksInInputSpace, outputLandmarksInACPCAlignedSpace, and outputTransform., ", + exists=True) + outputVerificationScript = File( + desc= + ", The filename for the Slicer3 script that verifies the aligned landmarks against the aligned image file. This will happen only in conjunction with saveOutputLandmarks and an outputVolume., ", + exists=True) + outputUntransformedClippedVolume = File( + desc= + "Output image in which to store neck-clipped input image, with the use of --acLowerBound and maybe --cutOutHeadInUntransformedVolume.", + exists=True) + writeBranded2DImage = File( + desc= + ", The filename for the 2D .png branded midline debugging image. This will happen only in conjunction with requesting an outputVolume., ", + exists=True) + resultsDir = Directory( + desc= + ", The directory for the debugging images to be written., ", + exists=True) + + +class BRAINSConstellationDetector(SEMLikeCommandLine): + """title: Brain Landmark Constellation Detector (BRAINS) + +category: Segmentation.Specialized + +description: This program will find the mid-sagittal plane, a constellation of landmarks in a volume, and create an AC/PC aligned data set with the AC point at the center of the voxel lattice (labeled at the origin of the image physical space.) 
Part of this work is an extension of the algorithms originally described by Dr. Babak A. Ardekani, Alvin H. Bachman, Model-based automatic detection of the anterior and posterior commissures on MRI scans, NeuroImage, Volume 46, Issue 3, 1 July 2009, Pages 677-682, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2009.02.030. (http://www.sciencedirect.com/science/article/B6WNP-4VRP25C-4/2/8207b962a38aa83c822c6379bc43fe4c) + +version: 1.0 + +documentation-url: http://www.nitrc.org/projects/brainscdetector/ + +""" + + input_spec = BRAINSConstellationDetectorInputSpec + output_spec = BRAINSConstellationDetectorOutputSpec + _cmd = " BRAINSConstellationDetector " + _outputs_filenames = { + 'outputVolume': + 'outputVolume.nii.gz', + 'outputMRML': + 'outputMRML.mrml', + 'resultsDir': + 'resultsDir', + 'outputResampledVolume': + 'outputResampledVolume.nii.gz', + 'outputTransform': + 'outputTransform.h5', + 'writeBranded2DImage': + 'writeBranded2DImage.png', + 'outputLandmarksInACPCAlignedSpace': + 'outputLandmarksInACPCAlignedSpace.fcsv', + 'outputLandmarksInInputSpace': + 'outputLandmarksInInputSpace.fcsv', + 'outputUntransformedClippedVolume': + 'outputUntransformedClippedVolume.nii.gz', + 'outputVerificationScript': + 'outputVerificationScript.sh' + } + _redirect_x = False + + +class BRAINSCreateLabelMapFromProbabilityMapsInputSpec(CommandLineInputSpec): + inputProbabilityVolume = InputMultiPath( + File(exists=True), + desc="The list of probability images.", + argstr="--inputProbabilityVolume %s...") + priorLabelCodes = InputMultiPath( + traits.Int, + desc= + "A list of PriorLabelCode values used for coding the output label images", + sep=",", + argstr="--priorLabelCodes %s") + foregroundPriors = InputMultiPath( + traits.Int, + desc="A list: For each Prior Label, 1 if foreground, 0 if background", + sep=",", + argstr="--foregroundPriors %s") + nonAirRegionMask = File( + desc= + "a mask representing the \'NonAirRegion\' -- Just force pixels in this region to zero", + exists=True, + argstr="--nonAirRegionMask %s") + inclusionThreshold = traits.Float( + desc="tolerance for inclusion", argstr="--inclusionThreshold %f") + dirtyLabelVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="the labels prior to cleaning", + argstr="--dirtyLabelVolume %s") + cleanLabelVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="the foreground labels volume", + argstr="--cleanLabelVolume %s") + + +class BRAINSCreateLabelMapFromProbabilityMapsOutputSpec(TraitedSpec): + dirtyLabelVolume = File(desc="the labels prior to cleaning", exists=True) + cleanLabelVolume = File(desc="the foreground labels volume", exists=True) + + +class BRAINSCreateLabelMapFromProbabilityMaps(SEMLikeCommandLine): + """title: Create Label Map From Probability Maps (BRAINS) + +category: Segmentation.Specialized + +description: Given a list of probability maps, generate a label map. 
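+
+Example (an illustrative sketch, not shipped test data; the file names and
+label codes below are hypothetical):
+
+>>> from nipype.interfaces.semtools.segmentation.specialized import BRAINSCreateLabelMapFromProbabilityMaps
+>>> labeler = BRAINSCreateLabelMapFromProbabilityMaps()
+>>> labeler.inputs.inputProbabilityVolume = ['csf.nii.gz', 'gm.nii.gz', 'wm.nii.gz']  # doctest: +SKIP
+>>> labeler.inputs.priorLabelCodes = [1, 2, 3]  # doctest: +SKIP
+>>> labeler.inputs.foregroundPriors = [0, 1, 1]  # doctest: +SKIP
+>>> labeler.inputs.cleanLabelVolume = True  # doctest: +SKIP
+>>> labeler.run()  # doctest: +SKIP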
+ +""" + + input_spec = BRAINSCreateLabelMapFromProbabilityMapsInputSpec + output_spec = BRAINSCreateLabelMapFromProbabilityMapsOutputSpec + _cmd = " BRAINSCreateLabelMapFromProbabilityMaps " + _outputs_filenames = { + 'dirtyLabelVolume': 'dirtyLabelVolume.nii', + 'cleanLabelVolume': 'cleanLabelVolume.nii' + } + _redirect_x = False + + +class BinaryMaskEditorBasedOnLandmarksInputSpec(CommandLineInputSpec): + inputBinaryVolume = File( + desc="Input binary image in which to be edited", + exists=True, + argstr="--inputBinaryVolume %s") + outputBinaryVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output binary image in which to be edited", + argstr="--outputBinaryVolume %s") + inputLandmarksFilename = File( + desc= + " The filename for the landmark definition file in the same format produced by Slicer3 (.fcsv). ", + exists=True, + argstr="--inputLandmarksFilename %s") + inputLandmarkNames = InputMultiPath( + traits.Str, + desc= + " A target input landmark name to be edited. This should be listed in the inputLandmakrFilename Given. ", + sep=",", + argstr="--inputLandmarkNames %s") + setCutDirectionForLandmark = InputMultiPath( + traits.Str, + desc= + "Setting the cutting out direction of the input binary image to the one of anterior, posterior, left, right, superior or posterior. (ENUMERATION: ANTERIOR, POSTERIOR, LEFT, RIGHT, SUPERIOR, POSTERIOR) ", + sep=",", + argstr="--setCutDirectionForLandmark %s") + setCutDirectionForObliquePlane = InputMultiPath( + traits.Str, + desc= + "If this is true, the mask will be thresholded out to the direction of inferior, posterior, and/or left. Default behavrior is that cutting out to the direction of superior, anterior and/or right. ", + sep=",", + argstr="--setCutDirectionForObliquePlane %s") + inputLandmarkNamesForObliquePlane = InputMultiPath( + traits.Str, + desc= + " Three subset landmark names of inputLandmarksFilename for a oblique plane computation. The plane computed for binary volume editing. 
", + sep=",", + argstr="--inputLandmarkNamesForObliquePlane %s") + + +class BinaryMaskEditorBasedOnLandmarksOutputSpec(TraitedSpec): + outputBinaryVolume = File( + desc="Output binary image in which to be edited", exists=True) + + +class BinaryMaskEditorBasedOnLandmarks(SEMLikeCommandLine): + """title: BRAINS Binary Mask Editor Based On Landmarks(BRAINS) + +category: Segmentation.Specialized + +version: 1.0 + +documentation-url: http://www.nitrc.org/projects/brainscdetector/ + +""" + + input_spec = BinaryMaskEditorBasedOnLandmarksInputSpec + output_spec = BinaryMaskEditorBasedOnLandmarksOutputSpec + _cmd = " BinaryMaskEditorBasedOnLandmarks " + _outputs_filenames = {'outputBinaryVolume': 'outputBinaryVolume.nii'} + _redirect_x = False + + +class BRAINSMultiSTAPLEInputSpec(CommandLineInputSpec): + inputCompositeT1Volume = File( + desc= + "Composite T1, all label maps transofrmed into the space for this image.", + exists=True, + argstr="--inputCompositeT1Volume %s") + inputLabelVolume = InputMultiPath( + File(exists=True), + desc="The list of proobabilityimages.", + argstr="--inputLabelVolume %s...") + inputTransform = InputMultiPath( + File(exists=True), + desc="transforms to apply to label volumes", + argstr="--inputTransform %s...") + labelForUndecidedPixels = traits.Int( + desc="Label for undecided pixels", + argstr="--labelForUndecidedPixels %d") + resampledVolumePrefix = traits.Str( + desc="if given, write out resampled volumes with this prefix", + argstr="--resampledVolumePrefix %s") + skipResampling = traits.Bool( + desc="Omit resampling images into reference space", + argstr="--skipResampling ") + outputMultiSTAPLE = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="the MultiSTAPLE average of input label volumes", + argstr="--outputMultiSTAPLE %s") + outputConfusionMatrix = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Confusion Matrix", + argstr="--outputConfusionMatrix %s") + + +class BRAINSMultiSTAPLEOutputSpec(TraitedSpec): + outputMultiSTAPLE = File( + desc="the MultiSTAPLE average of input label volumes", exists=True) + outputConfusionMatrix = File(desc="Confusion Matrix", exists=True) + + +class BRAINSMultiSTAPLE(SEMLikeCommandLine): + """title: Create best representative label map) + +category: Segmentation.Specialized + +description: given a list of label map images, create a representative/average label map. 
+ +""" + + input_spec = BRAINSMultiSTAPLEInputSpec + output_spec = BRAINSMultiSTAPLEOutputSpec + _cmd = " BRAINSMultiSTAPLE " + _outputs_filenames = { + 'outputMultiSTAPLE': 'outputMultiSTAPLE.nii', + 'outputConfusionMatrix': 'outputConfusionMatrixh5|mat|txt' + } + _redirect_x = False + + +class BRAINSABCInputSpec(CommandLineInputSpec): + inputVolumes = InputMultiPath( + File(exists=True), + desc="The list of input image files to be segmented.", + argstr="--inputVolumes %s...") + atlasDefinition = File( + desc="Contains all parameters for Atlas", + exists=True, + argstr="--atlasDefinition %s") + restoreState = File( + desc="The initial state for the registration process", + exists=True, + argstr="--restoreState %s") + saveState = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Filename to which save the final state of the registration", + argstr="--saveState %s") + inputVolumeTypes = InputMultiPath( + traits.Str, + desc="The list of input image types corresponding to the inputVolumes.", + sep=",", + argstr="--inputVolumeTypes %s") + outputDir = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc="Ouput directory", + argstr="--outputDir %s") + atlasToSubjectTransformType = traits.Enum( + "Identity", + "Rigid", + "Affine", + "BSpline", + "SyN", + desc= + " What type of linear transform type do you want to use to register the atlas to the reference subject image.", + argstr="--atlasToSubjectTransformType %s") + atlasToSubjectTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The transform from atlas to the subject", + argstr="--atlasToSubjectTransform %s") + atlasToSubjectInitialTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The initial transform from atlas to the subject", + argstr="--atlasToSubjectInitialTransform %s") + subjectIntermodeTransformType = traits.Enum( + "Identity", + "Rigid", + "Affine", + "BSpline", + desc= + " What type of linear transform type do you want to use to register the atlas to the reference subject image.", + argstr="--subjectIntermodeTransformType %s") + outputVolumes = traits.Either( + traits.Bool, + InputMultiPath(File(), ), + hash_files=False, + desc= + "Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location.", + argstr="--outputVolumes %s...") + outputLabels = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Label Image", + argstr="--outputLabels %s") + outputDirtyLabels = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Dirty Label Image", + argstr="--outputDirtyLabels %s") + posteriorTemplate = traits.Str( + desc="filename template for Posterior output files", + argstr="--posteriorTemplate %s") + outputFormat = traits.Enum( + "NIFTI", + "Meta", + "Nrrd", + desc="Output format", + argstr="--outputFormat %s") + interpolationMode = traits.Enum( + "BSpline", + "NearestNeighbor", + "WindowedSinc", + "Linear", + "ResampleInPlace", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. 
The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s") + maxIterations = traits.Int( + desc="Filter iterations", argstr="--maxIterations %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "The radius for the optional MedianImageFilter preprocessing in all 3 directions.", + sep=",", + argstr="--medianFilterSize %s") + filterIteration = traits.Int( + desc="Filter iterations", argstr="--filterIteration %d") + filterTimeStep = traits.Float( + desc= + "Filter time step should be less than (PixelSpacing/(1^(DIM+1))). If the value is set to negative, then automatic setting of this value is allowed. ", + argstr="--filterTimeStep %f") + filterMethod = traits.Enum( + "None", + "CurvatureFlow", + "GradientAnisotropicDiffusion", + "Median", + desc="Filter method for preprocessing of registration", + argstr="--filterMethod %s") + maxBiasDegree = traits.Int( + desc="Maximum bias degree", argstr="--maxBiasDegree %d") + useKNN = traits.Bool( + desc="Use the KNN stage of estimating posteriors.", argstr="--useKNN ") + purePlugsThreshold = traits.Float( + desc= + "If this threshold value is greater than zero, only pure samples are used to compute the distributions in EM classification, and only pure samples are used for KNN training. The default value is set to 0, that means not using pure plugs. However, a value of 0.2 is suggested if you want to activate using pure plugs option.", + argstr="--purePlugsThreshold %f") + numberOfSubSamplesInEachPlugArea = InputMultiPath( + traits.Int, + desc= + "Number of continuous index samples taken at each direction of lattice space for each plug volume.", + sep=",", + argstr="--numberOfSubSamplesInEachPlugArea %s") + atlasWarpingOff = traits.Bool( + desc="Deformable registration of atlas to subject", + argstr="--atlasWarpingOff ") + gridSize = InputMultiPath( + traits.Int, + desc="Grid size for atlas warping with BSplines", + sep=",", + argstr="--gridSize %s") + defaultSuffix = traits.Str(argstr="--defaultSuffix %s") + implicitOutputs = traits.Either( + traits.Bool, + InputMultiPath(File(), ), + hash_files=False, + desc= + "Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments.", + argstr="--implicitOutputs %s...") + debuglevel = traits.Int( + desc= + "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debuglevel %d") + writeLess = traits.Bool( + desc="Does not write posteriors and filtered, bias corrected images", + argstr="--writeLess ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSABCOutputSpec(TraitedSpec): + saveState = File( + desc= + "(optional) Filename to which to save the final state of the registration", + exists=True) + outputDir = Directory(desc="Output directory", exists=True) + atlasToSubjectTransform = File( + desc="The transform from atlas to the subject", exists=True) + atlasToSubjectInitialTransform = File( + desc="The initial transform from atlas to the subject", exists=True) + outputVolumes = OutputMultiPath( + File(exists=True), + desc= + "Corrected Output Images: should specify the same number of images as inputVolume, if only one element is given, then it is used as a file pattern where %s is replaced by the imageVolumeType, and %d by the index list location." 
+ ) + outputLabels = File(desc="Output Label Image", exists=True) + outputDirtyLabels = File(desc="Output Dirty Label Image", exists=True) + implicitOutputs = OutputMultiPath( + File(exists=True), + desc= + "Outputs to be made available to NiPype. Needed because not all BRAINSABC outputs have command line arguments." + ) + + +class BRAINSABC(SEMLikeCommandLine): + """title: Intra-subject registration, bias Correction, and tissue classification (BRAINS) + +category: Segmentation.Specialized + +description: Atlas-based tissue segmentation method. This is an algorithmic extension of work done by XXXX at UNC and Utah XXXX need more description here. + +""" + + input_spec = BRAINSABCInputSpec + output_spec = BRAINSABCOutputSpec + _cmd = " BRAINSABC " + _outputs_filenames = { + 'saveState': 'saveState.h5', + 'outputLabels': 'outputLabels.nii.gz', + 'atlasToSubjectTransform': 'atlasToSubjectTransform.h5', + 'atlasToSubjectInitialTransform': 'atlasToSubjectInitialTransform.h5', + 'outputDirtyLabels': 'outputDirtyLabels.nii.gz', + 'outputVolumes': 'outputVolumes.nii.gz', + 'outputDir': 'outputDir', + 'implicitOutputs': 'implicitOutputs.nii.gz' + } + _redirect_x = False + + +class ESLRInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Input Label Volume", exists=True, argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Label Volume", + argstr="--outputVolume %s") + low = traits.Int( + desc="The lower bound of the labels to be used.", argstr="--low %d") + high = traits.Int( + desc="The higher bound of the labels to be used.", argstr="--high %d") + closingSize = traits.Int( + desc="The closing size for hole filling.", argstr="--closingSize %d") + openingSize = traits.Int( + desc="The opening size for hole filling.", argstr="--openingSize %d") + safetySize = traits.Int( + desc="The safetySize for the clipping region.", + argstr="--safetySize %d") + preserveOutside = traits.Bool( + desc="For values outside the specified range, preserve those values.", + argstr="--preserveOutside ") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class ESLROutputSpec(TraitedSpec): + outputVolume = File(desc="Output Label Volume", exists=True) + + +class ESLR(SEMLikeCommandLine): + """title: Clean Contiguous Label Map (BRAINS) + +category: Segmentation.Specialized + +description: From a range of label map values, extract the largest contiguous region of those labels + +""" + + input_spec = ESLRInputSpec + output_spec = ESLROutputSpec + _cmd = " ESLR " + _outputs_filenames = {'outputVolume': 'outputVolume.nii.gz'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/segmentation/tests/__init__.py b/nipype/interfaces/semtools/segmentation/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py new file mode 100644 index 0000000000..2d66884522 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSABC.py @@ -0,0 +1,104 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSABC + + +def test_BRAINSABC_inputs(): + input_map = dict( + args=dict(argstr='%s', ),
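+        # Expected trait metadata for BRAINSABC, keyed by input name; the
+        # loops after this map assert the spec and the map stay in sync.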
atlasDefinition=dict(argstr='--atlasDefinition %s', ), + atlasToSubjectInitialTransform=dict( + argstr='--atlasToSubjectInitialTransform %s', + hash_files=False, + ), + atlasToSubjectTransform=dict( + argstr='--atlasToSubjectTransform %s', + hash_files=False, + ), + atlasToSubjectTransformType=dict( + argstr='--atlasToSubjectTransformType %s', ), + atlasWarpingOff=dict(argstr='--atlasWarpingOff ', ), + debuglevel=dict(argstr='--debuglevel %d', ), + defaultSuffix=dict(argstr='--defaultSuffix %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filterIteration=dict(argstr='--filterIteration %d', ), + filterMethod=dict(argstr='--filterMethod %s', ), + filterTimeStep=dict(argstr='--filterTimeStep %f', ), + gridSize=dict( + argstr='--gridSize %s', + sep=',', + ), + implicitOutputs=dict( + argstr='--implicitOutputs %s...', + hash_files=False, + ), + inputVolumeTypes=dict( + argstr='--inputVolumeTypes %s', + sep=',', + ), + inputVolumes=dict(argstr='--inputVolumes %s...', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + maxBiasDegree=dict(argstr='--maxBiasDegree %d', ), + maxIterations=dict(argstr='--maxIterations %d', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + numberOfSubSamplesInEachPlugArea=dict( + argstr='--numberOfSubSamplesInEachPlugArea %s', + sep=',', + ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputDir=dict( + argstr='--outputDir %s', + hash_files=False, + ), + outputDirtyLabels=dict( + argstr='--outputDirtyLabels %s', + hash_files=False, + ), + outputFormat=dict(argstr='--outputFormat %s', ), + outputLabels=dict( + argstr='--outputLabels %s', + hash_files=False, + ), + outputVolumes=dict( + argstr='--outputVolumes %s...', + hash_files=False, + ), + posteriorTemplate=dict(argstr='--posteriorTemplate %s', ), + purePlugsThreshold=dict(argstr='--purePlugsThreshold %f', ), + restoreState=dict(argstr='--restoreState %s', ), + saveState=dict( + argstr='--saveState %s', + hash_files=False, + ), + subjectIntermodeTransformType=dict( + argstr='--subjectIntermodeTransformType %s', ), + useKNN=dict(argstr='--useKNN ', ), + writeLess=dict(argstr='--writeLess ', ), + ) + inputs = BRAINSABC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSABC_outputs(): + output_map = dict( + atlasToSubjectInitialTransform=dict(), + atlasToSubjectTransform=dict(), + implicitOutputs=dict(), + outputDir=dict(), + outputDirtyLabels=dict(), + outputLabels=dict(), + outputVolumes=dict(), + saveState=dict(), + ) + outputs = BRAINSABC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py new file mode 100644 index 0000000000..9b2d10061c --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSConstellationDetector.py @@ -0,0 +1,123 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSConstellationDetector + + +def test_BRAINSConstellationDetector_inputs(): + input_map = dict( + BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), + LLSModel=dict(argstr='--LLSModel %s', ), + 
acLowerBound=dict(argstr='--acLowerBound %f', ), + args=dict(argstr='%s', ), + atlasLandmarkWeights=dict(argstr='--atlasLandmarkWeights %s', ), + atlasLandmarks=dict(argstr='--atlasLandmarks %s', ), + atlasVolume=dict(argstr='--atlasVolume %s', ), + cutOutHeadInOutputVolume=dict(argstr='--cutOutHeadInOutputVolume ', ), + debug=dict(argstr='--debug ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + forceACPoint=dict( + argstr='--forceACPoint %s', + sep=',', + ), + forceHoughEyeDetectorReportFailure=dict( + argstr='--forceHoughEyeDetectorReportFailure ', ), + forcePCPoint=dict( + argstr='--forcePCPoint %s', + sep=',', + ), + forceRPPoint=dict( + argstr='--forceRPPoint %s', + sep=',', + ), + forceVN4Point=dict( + argstr='--forceVN4Point %s', + sep=',', + ), + houghEyeDetectorMode=dict(argstr='--houghEyeDetectorMode %d', ), + inputLandmarksEMSP=dict(argstr='--inputLandmarksEMSP %s', ), + inputTemplateModel=dict(argstr='--inputTemplateModel %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + outputLandmarksInACPCAlignedSpace=dict( + argstr='--outputLandmarksInACPCAlignedSpace %s', + hash_files=False, + ), + outputLandmarksInInputSpace=dict( + argstr='--outputLandmarksInInputSpace %s', + hash_files=False, + ), + outputMRML=dict( + argstr='--outputMRML %s', + hash_files=False, + ), + outputResampledVolume=dict( + argstr='--outputResampledVolume %s', + hash_files=False, + ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + outputUntransformedClippedVolume=dict( + argstr='--outputUntransformedClippedVolume %s', + hash_files=False, + ), + outputVerificationScript=dict( + argstr='--outputVerificationScript %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + rVN4=dict(argstr='--rVN4 %f', ), + rac=dict(argstr='--rac %f', ), + rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + rescaleIntensitiesOutputRange=dict( + argstr='--rescaleIntensitiesOutputRange %s', + sep=',', + ), + resultsDir=dict( + argstr='--resultsDir %s', + hash_files=False, + ), + rmpj=dict(argstr='--rmpj %f', ), + rpc=dict(argstr='--rpc %f', ), + trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), + verbose=dict(argstr='--verbose ', ), + writeBranded2DImage=dict( + argstr='--writeBranded2DImage %s', + hash_files=False, + ), + writedebuggingImagesLevel=dict( + argstr='--writedebuggingImagesLevel %d', ), + ) + inputs = BRAINSConstellationDetector.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSConstellationDetector_outputs(): + output_map = dict( + outputLandmarksInACPCAlignedSpace=dict(), + outputLandmarksInInputSpace=dict(), + outputMRML=dict(), + outputResampledVolume=dict(), + outputTransform=dict(), + outputUntransformedClippedVolume=dict(), + outputVerificationScript=dict(), + outputVolume=dict(), + resultsDir=dict(), + writeBranded2DImage=dict(), + ) + outputs = BRAINSConstellationDetector.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py new file mode 100644 index 0000000000..9f2b00c311 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCreateLabelMapFromProbabilityMaps.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSCreateLabelMapFromProbabilityMaps + + +def test_BRAINSCreateLabelMapFromProbabilityMaps_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cleanLabelVolume=dict( + argstr='--cleanLabelVolume %s', + hash_files=False, + ), + dirtyLabelVolume=dict( + argstr='--dirtyLabelVolume %s', + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + foregroundPriors=dict( + argstr='--foregroundPriors %s', + sep=',', + ), + inclusionThreshold=dict(argstr='--inclusionThreshold %f', ), + inputProbabilityVolume=dict(argstr='--inputProbabilityVolume %s...', ), + nonAirRegionMask=dict(argstr='--nonAirRegionMask %s', ), + priorLabelCodes=dict( + argstr='--priorLabelCodes %s', + sep=',', + ), + ) + inputs = BRAINSCreateLabelMapFromProbabilityMaps.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSCreateLabelMapFromProbabilityMaps_outputs(): + output_map = dict( + cleanLabelVolume=dict(), + dirtyLabelVolume=dict(), + ) + outputs = BRAINSCreateLabelMapFromProbabilityMaps.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py new file mode 100644 index 0000000000..9d66d4a463 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSCut.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSCut + + +def test_BRAINSCut_inputs(): + input_map = dict( + NoTrainingVectorShuffling=dict( + argstr='--NoTrainingVectorShuffling ', ), + applyModel=dict(argstr='--applyModel ', ), + args=dict(argstr='%s', ), + computeSSEOn=dict(argstr='--computeSSEOn ', ), + createVectors=dict(argstr='--createVectors ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateProbability=dict(argstr='--generateProbability ', ), + histogramEqualization=dict(argstr='--histogramEqualization ', ), + method=dict(argstr='--method %s', ), + modelConfigurationFilename=dict( + argstr='--modelConfigurationFilename %s', ), + modelFilename=dict(argstr='--modelFilename %s', ), + multiStructureThreshold=dict(argstr='--multiStructureThreshold ', ), + netConfiguration=dict(argstr='--netConfiguration %s', ), + numberOfTrees=dict(argstr='--numberOfTrees %d', ), + randomTreeDepth=dict(argstr='--randomTreeDepth %d', ), + trainModel=dict(argstr='--trainModel ', ), + trainModelStartIndex=dict(argstr='--trainModelStartIndex %d', ), + validate=dict(argstr='--validate ', ), + verbose=dict(argstr='--verbose %d', ), + ) + inputs = BRAINSCut.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def 
test_BRAINSCut_outputs(): + output_map = dict() + outputs = BRAINSCut.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py new file mode 100644 index 0000000000..826ec19f0e --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSMultiSTAPLE.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSMultiSTAPLE + + +def test_BRAINSMultiSTAPLE_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCompositeT1Volume=dict(argstr='--inputCompositeT1Volume %s', ), + inputLabelVolume=dict(argstr='--inputLabelVolume %s...', ), + inputTransform=dict(argstr='--inputTransform %s...', ), + labelForUndecidedPixels=dict(argstr='--labelForUndecidedPixels %d', ), + outputConfusionMatrix=dict( + argstr='--outputConfusionMatrix %s', + hash_files=False, + ), + outputMultiSTAPLE=dict( + argstr='--outputMultiSTAPLE %s', + hash_files=False, + ), + resampledVolumePrefix=dict(argstr='--resampledVolumePrefix %s', ), + skipResampling=dict(argstr='--skipResampling ', ), + ) + inputs = BRAINSMultiSTAPLE.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSMultiSTAPLE_outputs(): + output_map = dict( + outputConfusionMatrix=dict(), + outputMultiSTAPLE=dict(), + ) + outputs = BRAINSMultiSTAPLE.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py new file mode 100644 index 0000000000..3e70b75883 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSROIAuto + + +def test_BRAINSROIAuto_inputs(): + input_map = dict( + ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), + args=dict(argstr='%s', ), + closingSize=dict(argstr='--closingSize %f', ), + cropOutput=dict(argstr='--cropOutput ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + maskOutput=dict(argstr='--maskOutput ', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + outputROIMaskVolume=dict( + argstr='--outputROIMaskVolume %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), + thresholdCorrectionFactor=dict( + argstr='--thresholdCorrectionFactor %f', ), + ) + inputs = BRAINSROIAuto.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSROIAuto_outputs(): + output_map = dict( + outputROIMaskVolume=dict(), + outputVolume=dict(), + ) 
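+    # The empty dicts above record only the expected output names; with no
+    # metadata stored, the loop below has nothing further to assert for them.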
+ outputs = BRAINSROIAuto.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py new file mode 100644 index 0000000000..d71e7003ff --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_BinaryMaskEditorBasedOnLandmarks.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BinaryMaskEditorBasedOnLandmarks + + +def test_BinaryMaskEditorBasedOnLandmarks_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolume=dict(argstr='--inputBinaryVolume %s', ), + inputLandmarkNames=dict( + argstr='--inputLandmarkNames %s', + sep=',', + ), + inputLandmarkNamesForObliquePlane=dict( + argstr='--inputLandmarkNamesForObliquePlane %s', + sep=',', + ), + inputLandmarksFilename=dict(argstr='--inputLandmarksFilename %s', ), + outputBinaryVolume=dict( + argstr='--outputBinaryVolume %s', + hash_files=False, + ), + setCutDirectionForLandmark=dict( + argstr='--setCutDirectionForLandmark %s', + sep=',', + ), + setCutDirectionForObliquePlane=dict( + argstr='--setCutDirectionForObliquePlane %s', + sep=',', + ), + ) + inputs = BinaryMaskEditorBasedOnLandmarks.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BinaryMaskEditorBasedOnLandmarks_outputs(): + output_map = dict(outputBinaryVolume=dict(), ) + outputs = BinaryMaskEditorBasedOnLandmarks.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py new file mode 100644 index 0000000000..2e099b4f56 --- /dev/null +++ b/nipype/interfaces/semtools/segmentation/tests/test_auto_ESLR.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import ESLR + + +def test_ESLR_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + closingSize=dict(argstr='--closingSize %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + high=dict(argstr='--high %d', ), + inputVolume=dict(argstr='--inputVolume %s', ), + low=dict(argstr='--low %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + openingSize=dict(argstr='--openingSize %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + preserveOutside=dict(argstr='--preserveOutside ', ), + safetySize=dict(argstr='--safetySize %d', ), + ) + inputs = ESLR.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ESLR_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = ESLR.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/testing/__init__.py 
b/nipype/interfaces/semtools/testing/__init__.py new file mode 100644 index 0000000000..66a4a2262e --- /dev/null +++ b/nipype/interfaces/semtools/testing/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .featuredetection import SphericalCoordinateGeneration +from .landmarkscompare import LandmarksCompare +from .generateaveragelmkfile import GenerateAverageLmkFile diff --git a/nipype/interfaces/semtools/testing/featuredetection.py b/nipype/interfaces/semtools/testing/featuredetection.py new file mode 100644 index 0000000000..e8f332c0a6 --- /dev/null +++ b/nipype/interfaces/semtools/testing/featuredetection.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class SphericalCoordinateGenerationInputSpec(CommandLineInputSpec): + inputAtlasImage = File( + desc="Input atlas image", exists=True, argstr="--inputAtlasImage %s") + outputPath = traits.Str( + desc="Output path for rho, phi and theta images", + argstr="--outputPath %s") + + +class SphericalCoordinateGenerationOutputSpec(TraitedSpec): + pass + + +class SphericalCoordinateGeneration(SEMLikeCommandLine): + """title: Spherical Coordinate Generation + +category: Testing.FeatureDetection + +description: gets the atlas image as input and generates the rho, phi and theta images. + +version: 0.1.0.$Revision: 1 $(alpha) + +contributor: Ali Ghayoor + +""" + + input_spec = SphericalCoordinateGenerationInputSpec + output_spec = SphericalCoordinateGenerationOutputSpec + _cmd = " SphericalCoordinateGeneration " + _outputs_filenames = {} + _redirect_x = False diff --git a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py new file mode 100644 index 0000000000..bbb414c366 --- /dev/null +++ b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class GenerateAverageLmkFileInputSpec(CommandLineInputSpec): + inputLandmarkFiles = InputMultiPath( + traits.Str, + desc="Input landmark file names (.fcsv or .wts)", + sep=",", + argstr="--inputLandmarkFiles %s") + outputLandmarkFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output landmark file name that includes average values for landmarks (.fcsv or .wts)", + argstr="--outputLandmarkFile %s") + + +class GenerateAverageLmkFileOutputSpec(TraitedSpec): + outputLandmarkFile = File( + desc= + "Output landmark file name that includes average values for landmarks (.fcsv or .wts)", + exists=True) + + +class GenerateAverageLmkFile(SEMLikeCommandLine): + """title: Average Fiducials + +category: Testing + +description: This program gets several fcsv files, each one containing several landmarks with the same name but slightly different coordinates. For EACH landmark we compute the average coordinate.
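A minimal usage sketch of the interface above, assuming the GenerateAverageLmkFile executable is installed and on PATH; the .fcsv file names are hypothetical:

from nipype.interfaces.semtools.testing import GenerateAverageLmkFile

avg = GenerateAverageLmkFile()
avg.inputs.inputLandmarkFiles = ['subj01.fcsv', 'subj02.fcsv']  # hypothetical landmark files
avg.inputs.outputLandmarkFile = 'average.fcsv'
print(avg.cmdline)  # roughly: GenerateAverageLmkFile --inputLandmarkFiles subj01.fcsv,subj02.fcsv --outputLandmarkFile average.fcsv
# avg.run() would execute the tool and expose the result as outputs.outputLandmarkFile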
+ +contributor: Ali Ghayoor + +""" + + input_spec = GenerateAverageLmkFileInputSpec + output_spec = GenerateAverageLmkFileOutputSpec + _cmd = " GenerateAverageLmkFile " + _outputs_filenames = {'outputLandmarkFile': 'outputLandmarkFile'} + _redirect_x = False diff --git a/nipype/interfaces/semtools/testing/landmarkscompare.py b/nipype/interfaces/semtools/testing/landmarkscompare.py new file mode 100644 index 0000000000..872d6d0df0 --- /dev/null +++ b/nipype/interfaces/semtools/testing/landmarkscompare.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class LandmarksCompareInputSpec(CommandLineInputSpec): + inputLandmarkFile1 = File( + desc="First input landmark file (.fcsv or .wts)", + exists=True, + argstr="--inputLandmarkFile1 %s") + inputLandmarkFile2 = File( + desc="Second input landmark file (.fcsv or .wts)", + exists=True, + argstr="--inputLandmarkFile2 %s") + tolerance = traits.Float( + desc= + "The maximum error (in mm) allowed in each direction of a landmark", + argstr="--tolerance %f") + + +class LandmarksCompareOutputSpec(TraitedSpec): + pass + + +class LandmarksCompare(SEMLikeCommandLine): + """title: Compare Fiducials + +category: Testing + +description: Compares two .fcsv or .wts text files and verifies that they are identical. Used for testing landmark files. + +contributor: Ali Ghayoor + +""" + + input_spec = LandmarksCompareInputSpec + output_spec = LandmarksCompareOutputSpec + _cmd = " LandmarksCompare " + _outputs_filenames = {} + _redirect_x = False diff --git a/nipype/interfaces/semtools/tests/__init__.py b/nipype/interfaces/semtools/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/tests/test_auto_DWICompare.py b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py new file mode 100644 index 0000000000..c00b0cc36f --- /dev/null +++ b/nipype/interfaces/semtools/tests/test_auto_DWICompare.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..converters import DWICompare + + +def test_DWICompare_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict(argstr='--inputVolume1 %s', ), + inputVolume2=dict(argstr='--inputVolume2 %s', ), + ) + inputs = DWICompare.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWICompare_outputs(): + output_map = dict() + outputs = DWICompare.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py new file mode 100644 index 0000000000..e515bc613c --- /dev/null +++ b/nipype/interfaces/semtools/tests/test_auto_DWISimpleCompare.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import
unicode_literals +from ..converters import DWISimpleCompare + + +def test_DWISimpleCompare_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + checkDWIData=dict(argstr='--checkDWIData ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict(argstr='--inputVolume1 %s', ), + inputVolume2=dict(argstr='--inputVolume2 %s', ), + ) + inputs = DWISimpleCompare.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWISimpleCompare_outputs(): + output_map = dict() + outputs = DWISimpleCompare.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py new file mode 100644 index 0000000000..63f45831af --- /dev/null +++ b/nipype/interfaces/semtools/tests/test_auto_GenerateCsfClippedFromClassifiedImage.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..featurecreator import GenerateCsfClippedFromClassifiedImage + + +def test_GenerateCsfClippedFromClassifiedImage_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputCassifiedVolume=dict(argstr='--inputCassifiedVolume %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = GenerateCsfClippedFromClassifiedImage.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateCsfClippedFromClassifiedImage_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = GenerateCsfClippedFromClassifiedImage.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/__init__.py b/nipype/interfaces/semtools/utilities/__init__.py new file mode 100644 index 0000000000..698b76a534 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .brains import ( + BRAINSConstellationModeler, landmarksConstellationWeights, + BRAINSTrimForegroundInDirection, BRAINSLmkTransform, BRAINSMush, + BRAINSTransformConvert, landmarksConstellationAligner, BRAINSEyeDetector, + BRAINSLinearModelerEPCA, BRAINSInitializedControlPoints, + CleanUpOverlapLabels, BRAINSClipInferior, + GenerateLabelMapFromProbabilityMap, BRAINSAlignMSP, + BRAINSLandmarkInitializer, insertMidACPCpoint, BRAINSSnapShotWriter, + JointHistogram, ShuffleVectorsModule, ImageRegionPlotter) diff --git a/nipype/interfaces/semtools/utilities/brains.py b/nipype/interfaces/semtools/utilities/brains.py new file mode 100644 index 0000000000..abc696b5d9 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/brains.py @@ -0,0 +1,1293 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +import os + +from ...base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, + TraitedSpec, File, 
Directory, traits, isdefined, + InputMultiPath, OutputMultiPath) + + +class BRAINSConstellationModelerInputSpec(CommandLineInputSpec): + verbose = traits.Bool( + desc=", Show more verbose output, ", + argstr="--verbose ") + inputTrainingList = File( + desc= + ", Setup file, giving all parameters for training up a template model for each landmark., ", + exists=True, + argstr="--inputTrainingList %s") + outputModel = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The full filename of the output model file., ", + argstr="--outputModel %s") + saveOptimizedLandmarks = traits.Bool( + desc= + ", Flag to make a new subject-specific landmark definition file in the same format produced by Slicer3 with the optimized landmark (the detected RP, AC, and PC) in it. Useful to tighten the variances in the ConstellationModeler., ", + argstr="--saveOptimizedLandmarks ") + optimizedLandmarksFilenameExtender = traits.Str( + desc= + ", If the trainingList is (indexFullPathName) and contains landmark data filenames [path]/[filename].fcsv , make the optimized landmarks filenames out of [path]/[filename](thisExtender) and the optimized version of the input trainingList out of (indexFullPathName)(thisExtender) , when you rewrite all the landmarks according to the saveOptimizedLandmarks flag., ", + argstr="--optimizedLandmarksFilenameExtender %s") + resultsDir = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc= + ", The directory for the results to be written., ", + argstr="--resultsDir %s") + mspQualityLevel = traits.Int( + desc= + ", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + argstr="--mspQualityLevel %d") + rescaleIntensities = traits.Bool( + desc= + ", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ") + trimRescaledIntensities = traits.Float( + desc= + ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f") + rescaleIntensitiesOutputRange = InputMultiPath( + traits.Int, + desc= + ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", + sep=",", + argstr="--rescaleIntensitiesOutputRange %s") + BackgroundFillValue = traits.Str( + desc= + "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s") + writedebuggingImagesLevel = traits.Int( + desc= + ", This flag controls if debugging images are produced. By default, a value of 0 produces no images.
Anything greater than zero will produce an increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSConstellationModelerOutputSpec(TraitedSpec): + outputModel = File( + desc= + ", The full filename of the output model file., ", + exists=True) + resultsDir = Directory( + desc= + ", The directory for the results to be written., ", + exists=True) + + +class BRAINSConstellationModeler(SEMLikeCommandLine): + """title: Generate Landmarks Model (BRAINS) + +category: Utilities.BRAINS + +description: Train up a model for BRAINSConstellationDetector + +""" + + input_spec = BRAINSConstellationModelerInputSpec + output_spec = BRAINSConstellationModelerOutputSpec + _cmd = " BRAINSConstellationModeler " + _outputs_filenames = { + 'outputModel': 'outputModel.mdl', + 'resultsDir': 'resultsDir' + } + _redirect_x = False + + +class landmarksConstellationWeightsInputSpec(CommandLineInputSpec): + inputTrainingList = File( + desc= + ", Setup file, giving all parameters for training up a Weight list for landmark., ", + exists=True, + argstr="--inputTrainingList %s") + inputTemplateModel = File( + desc="User-specified template model., ", + exists=True, + argstr="--inputTemplateModel %s") + LLSModel = File( + desc="Linear least squares model filename in HD5 format", + exists=True, + argstr="--LLSModel %s") + outputWeightsList = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", The filename of a csv file which is a list of landmarks and their corresponding weights., ", + argstr="--outputWeightsList %s") + + +class landmarksConstellationWeightsOutputSpec(TraitedSpec): + outputWeightsList = File( + desc= + ", The filename of a csv file which is a list of landmarks and their corresponding weights., ", + exists=True) + + +class landmarksConstellationWeights(SEMLikeCommandLine): + """title: Generate Landmarks Weights (BRAINS) + +category: Utilities.BRAINS + +description: Train up a list of Weights for the Landmarks in BRAINSConstellationDetector + +""" + + input_spec = landmarksConstellationWeightsInputSpec + output_spec = landmarksConstellationWeightsOutputSpec + _cmd = " landmarksConstellationWeights " + _outputs_filenames = {'outputWeightsList': 'outputWeightsList.wts'} + _redirect_x = False + + +class BRAINSTrimForegroundInDirectionInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Input image to trim off the neck (and also air-filling noise.)", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", + argstr="--outputVolume %s") + directionCode = traits.Int( + desc= + ", This flag chooses which dimension to compare. The sign lets you flip direction., ", + argstr="--directionCode %d") + otsuPercentileThreshold = traits.Float( + desc= + ", This is a parameter to FindLargestForegroundFilledMask, which is employed to trim off air-filling noise., ", + argstr="--otsuPercentileThreshold %f") + closingSize = traits.Int( + desc= + ", This is a parameter to FindLargestForegroundFilledMask, ", + argstr="--closingSize %d") + headSizeLimit = traits.Float( + desc= + ", Use this to vary from the command line our search for how much upper tissue is head for the center-of-mass calculation.
Units are CCs, not cubic millimeters., ", + argstr="--headSizeLimit %f") + BackgroundFillValue = traits.Str( + desc= + "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSTrimForegroundInDirectionOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", + exists=True) + + +class BRAINSTrimForegroundInDirection(SEMLikeCommandLine): + """title: Trim Foreground In Direction (BRAINS) + +category: Utilities.BRAINS + +description: This program will trim off the neck and also air-filling noise from the inputImage. + +version: 0.1 + +documentation-url: http://www.nitrc.org/projects/art/ + +""" + + input_spec = BRAINSTrimForegroundInDirectionInputSpec + output_spec = BRAINSTrimForegroundInDirectionOutputSpec + _cmd = " BRAINSTrimForegroundInDirection " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class BRAINSLmkTransformInputSpec(CommandLineInputSpec): + inputMovingLandmarks = File( + desc="Input Moving Landmark list file in fcsv, ", + exists=True, + argstr="--inputMovingLandmarks %s") + inputFixedLandmarks = File( + desc="Input Fixed Landmark list file in fcsv, ", + exists=True, + argstr="--inputFixedLandmarks %s") + outputAffineTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The filename for the estimated affine transform, ", + argstr="--outputAffineTransform %s") + inputMovingVolume = File( + desc="The filename of input moving volume", + exists=True, + argstr="--inputMovingVolume %s") + inputReferenceVolume = File( + desc="The filename of the reference volume", + exists=True, + argstr="--inputReferenceVolume %s") + outputResampledVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The filename of the output resampled volume", + argstr="--outputResampledVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSLmkTransformOutputSpec(TraitedSpec): + outputAffineTransform = File( + desc="The filename for the estimated affine transform, ", + exists=True) + outputResampledVolume = File( + desc="The filename of the output resampled volume", exists=True) + + +class BRAINSLmkTransform(SEMLikeCommandLine): + """title: Landmark Transform (BRAINS) + +category: Utilities.BRAINS + +description: This utility program estimates the affine transform to align the fixed landmarks to the moving landmarks, and then generates the resampled moving image to the same physical space as that of the reference image.
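A minimal usage sketch, assuming the BRAINSLmkTransform executable is available; paths are hypothetical (note the input traits use exists=True, so real files are required when values are assigned):

from nipype.interfaces.semtools.utilities.brains import BRAINSLmkTransform

xfm = BRAINSLmkTransform()
xfm.inputs.inputFixedLandmarks = 'fixed.fcsv'    # hypothetical fixed landmark list
xfm.inputs.inputMovingLandmarks = 'moving.fcsv'  # hypothetical moving landmark list
xfm.inputs.outputAffineTransform = 'moving2fixed.h5'
print(xfm.cmdline)
# xfm.run() would estimate the affine transform and write moving2fixed.h5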
+ +version: 1.0 + +documentation-url: http://www.nitrc.org/projects/brainscdetector/ + +""" + + input_spec = BRAINSLmkTransformInputSpec + output_spec = BRAINSLmkTransformOutputSpec + _cmd = " BRAINSLmkTransform " + _outputs_filenames = { + 'outputResampledVolume': 'outputResampledVolume.nii', + 'outputAffineTransform': 'outputAffineTransform.h5' + } + _redirect_x = False + + +class BRAINSMushInputSpec(CommandLineInputSpec): + inputFirstVolume = File( + desc="Input image (1) for mixture optimization", + exists=True, + argstr="--inputFirstVolume %s") + inputSecondVolume = File( + desc="Input image (2) for mixture optimization", + exists=True, + argstr="--inputSecondVolume %s") + inputMaskVolume = File( + desc="Input label image for mixture optimization", + exists=True, + argstr="--inputMaskVolume %s") + outputWeightsFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Weights File", + argstr="--outputWeightsFile %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The MUSH image produced from the T1 and T2 weighted images", + argstr="--outputVolume %s") + outputMask = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The brain volume mask generated from the MUSH image", + argstr="--outputMask %s") + seed = InputMultiPath( + traits.Int, + desc="Seed Point for Brain Region Filling", + sep=",", + argstr="--seed %s") + desiredMean = traits.Float( + desc="Desired mean within the mask for weighted sum of both images.", + argstr="--desiredMean %f") + desiredVariance = traits.Float( + desc= + "Desired variance within the mask for weighted sum of both images.", + argstr="--desiredVariance %f") + lowerThresholdFactorPre = traits.Float( + desc="Lower threshold factor for finding an initial brain mask", + argstr="--lowerThresholdFactorPre %f") + upperThresholdFactorPre = traits.Float( + desc="Upper threshold factor for finding an initial brain mask", + argstr="--upperThresholdFactorPre %f") + lowerThresholdFactor = traits.Float( + desc="Lower threshold factor for defining the brain mask", + argstr="--lowerThresholdFactor %f") + upperThresholdFactor = traits.Float( + desc="Upper threshold factor for defining the brain mask", + argstr="--upperThresholdFactor %f") + boundingBoxSize = InputMultiPath( + traits.Int, + desc= + "Size of the cubic bounding box mask used when no brain mask is present", + sep=",", + argstr="--boundingBoxSize %s") + boundingBoxStart = InputMultiPath( + traits.Int, + desc= + "XYZ point-coordinate for the start of the cubic bounding box mask used when no brain mask is present", + sep=",", + argstr="--boundingBoxStart %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSMushOutputSpec(TraitedSpec): + outputWeightsFile = File(desc="Output Weights File", exists=True) + outputVolume = File( + desc="The MUSH image produced from the T1 and T2 weighted images", + exists=True) + outputMask = File( + desc="The brain volume mask generated from the MUSH image", + exists=True) + + +class BRAINSMush(SEMLikeCommandLine): + """title: Brain Extraction from T1/T2 image (BRAINS) + +category: Utilities.BRAINS + +description: This program: 1) generates a weighted mixture image optimizing the mean and variance and 2) produces a mask of the brain volume + +version: 0.1.0.$Revision: 1.4 $(alpha) + +documentation-url: http://mri.radiology.uiowa.edu + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
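A minimal usage sketch, assuming the BRAINSMush executable is installed; the NIfTI file names are hypothetical:

from nipype.interfaces.semtools.utilities.brains import BRAINSMush

mush = BRAINSMush()
mush.inputs.inputFirstVolume = 't1.nii.gz'   # hypothetical T1-weighted image
mush.inputs.inputSecondVolume = 't2.nii.gz'  # hypothetical T2-weighted image
mush.inputs.inputMaskVolume = 'mask.nii.gz'  # hypothetical label image
print(mush.cmdline)
# res = mush.run() would populate res.outputs.outputVolume and res.outputs.outputMask
# with the default names listed in _outputs_filenames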
+ +contributor: This tool is a modification by Steven Dunn of a program developed by Greg Harris and Ron Pierson. + +acknowledgements: This work was developed by the University of Iowa Departments of Radiology and Psychiatry. This software was supported in part by NIH/NINDS award NS050568. + +""" + + input_spec = BRAINSMushInputSpec + output_spec = BRAINSMushOutputSpec + _cmd = " BRAINSMush " + _outputs_filenames = { + 'outputMask': 'outputMask.nii.gz', + 'outputWeightsFile': 'outputWeightsFile.txt', + 'outputVolume': 'outputVolume.nii.gz' + } + _redirect_x = False + + +class BRAINSTransformConvertInputSpec(CommandLineInputSpec): + inputTransform = File(exists=True, argstr="--inputTransform %s") + referenceVolume = File(exists=True, argstr="--referenceVolume %s") + outputTransformType = traits.Enum( + "Affine", + "VersorRigid", + "ScaleVersor", + "ScaleSkewVersor", + "DisplacementField", + "Same", + desc= + "The target transformation type. Must be conversion-compatible with the input transform type", + argstr="--outputTransformType %s") + outputPrecisionType = traits.Enum( + "double", + "float", + desc= + "Precision type of the output transform. It can be either single precision or double precision", + argstr="--outputPrecisionType %s") + displacementVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + argstr="--displacementVolume %s") + outputTransform = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--outputTransform %s") + + +class BRAINSTransformConvertOutputSpec(TraitedSpec): + displacementVolume = File(exists=True) + outputTransform = File(exists=True) + + +class BRAINSTransformConvert(SEMLikeCommandLine): + """title: BRAINS Transform Convert + +category: Utilities.BRAINS + +description: Convert ITK transforms to higher order transforms + +version: 1.0 + +documentation-url: A utility to convert between transform file formats. + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J.
Johnson, Kent Williams, Ali Ghayoor + +""" + + input_spec = BRAINSTransformConvertInputSpec + output_spec = BRAINSTransformConvertOutputSpec + _cmd = " BRAINSTransformConvert " + _outputs_filenames = { + 'displacementVolume': 'displacementVolume.nii', + 'outputTransform': 'outputTransform.mat' + } + _redirect_x = False + + +class landmarksConstellationAlignerInputSpec(CommandLineInputSpec): + inputLandmarksPaired = File( + desc="Input landmark file (.fcsv)", + exists=True, + argstr="--inputLandmarksPaired %s") + outputLandmarksPaired = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output landmark file (.fcsv)", + argstr="--outputLandmarksPaired %s") + + +class landmarksConstellationAlignerOutputSpec(TraitedSpec): + outputLandmarksPaired = File( + desc="Output landmark file (.fcsv)", exists=True) + + +class landmarksConstellationAligner(SEMLikeCommandLine): + """title: MidACPC Landmark Insertion + +category: Utilities.BRAINS + +description: This program converts the original landmark files to the acpc-aligned landmark files + +contributor: Ali Ghayoor + +""" + + input_spec = landmarksConstellationAlignerInputSpec + output_spec = landmarksConstellationAlignerOutputSpec + _cmd = " landmarksConstellationAligner " + _outputs_filenames = {'outputLandmarksPaired': 'outputLandmarksPaired'} + _redirect_x = False + + +class BRAINSEyeDetectorInputSpec(CommandLineInputSpec): + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + inputVolume = File( + desc="The input volume", exists=True, argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The output volume", + argstr="--outputVolume %s") + debugDir = traits.Str( + desc="A place for debug information", argstr="--debugDir %s") + + +class BRAINSEyeDetectorOutputSpec(TraitedSpec): + outputVolume = File(desc="The output volume", exists=True) + + +class BRAINSEyeDetector(SEMLikeCommandLine): + """title: Eye Detector (BRAINS) + +category: Utilities.BRAINS + +version: 1.0 + +documentation-url: http://www.nitrc.org/projects/brainscdetector/ + +""" + + input_spec = BRAINSEyeDetectorInputSpec + output_spec = BRAINSEyeDetectorOutputSpec + _cmd = " BRAINSEyeDetector " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class BRAINSLinearModelerEPCAInputSpec(CommandLineInputSpec): + inputTrainingList = File( + desc="Input Training Landmark List Filename, ", + exists=True, + argstr="--inputTrainingList %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSLinearModelerEPCAOutputSpec(TraitedSpec): + pass + + +class BRAINSLinearModelerEPCA(SEMLikeCommandLine): + """title: Landmark Linear Modeler (BRAINS) + +category: Utilities.BRAINS + +description: Training linear model using EPCA.
Implementation based on my MS thesis, "A METHOD FOR AUTOMATED LANDMARK CONSTELLATION DETECTION USING EVOLUTIONARY PRINCIPAL COMPONENTS AND STATISTICAL SHAPE MODELS" + +version: 1.0 + +documentation-url: http://www.nitrc.org/projects/brainscdetector/ + +""" + + input_spec = BRAINSLinearModelerEPCAInputSpec + output_spec = BRAINSLinearModelerEPCAOutputSpec + _cmd = " BRAINSLinearModelerEPCA " + _outputs_filenames = {} + _redirect_x = False + + +class BRAINSInitializedControlPointsInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Input Volume", exists=True, argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output Volume", + argstr="--outputVolume %s") + splineGridSize = InputMultiPath( + traits.Int, + desc= + "The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", + sep=",", + argstr="--splineGridSize %s") + permuteOrder = InputMultiPath( + traits.Int, + desc= + "The permutation order for the images. The default is 0,1,2 (i.e. no permutation)", + sep=",", + argstr="--permuteOrder %s") + outputLandmarksFile = traits.Str( + desc="Output filename", argstr="--outputLandmarksFile %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSInitializedControlPointsOutputSpec(TraitedSpec): + outputVolume = File(desc="Output Volume", exists=True) + + +class BRAINSInitializedControlPoints(SEMLikeCommandLine): + """title: Initialized Control Points (BRAINS) + +category: Utilities.BRAINS + +description: Outputs bspline control points as landmarks + +version: 0.1.0.$Revision: 916 $(alpha) + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Mark Scully + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for Mark Scully and Hans Johnson at the University of Iowa. + +""" + + input_spec = BRAINSInitializedControlPointsInputSpec + output_spec = BRAINSInitializedControlPointsOutputSpec + _cmd = " BRAINSInitializedControlPoints " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class CleanUpOverlapLabelsInputSpec(CommandLineInputSpec): + inputBinaryVolumes = InputMultiPath( + File(exists=True), + desc= + "The list of binary images to be checked and cleaned up. Order is important. Binary volume given first always wins out. ", + argstr="--inputBinaryVolumes %s...") + outputBinaryVolumes = traits.Either( + traits.Bool, + InputMultiPath(File(), ), + hash_files=False, + desc= + "The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", + argstr="--outputBinaryVolumes %s...") + + +class CleanUpOverlapLabelsOutputSpec(TraitedSpec): + outputBinaryVolumes = OutputMultiPath( + File(exists=True), + desc= + "The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume" + ) + + +class CleanUpOverlapLabels(SEMLikeCommandLine): + """title: Clean Up Overlap Labels + +category: Utilities.BRAINS + +description: Take a series of input binary images and clean up those overlapped areas.
Binary volumes given first always win out + +version: 0.1.0 + +contributor: Eun Young Kim + +""" + + input_spec = CleanUpOverlapLabelsInputSpec + output_spec = CleanUpOverlapLabelsOutputSpec + _cmd = " CleanUpOverlapLabels " + _outputs_filenames = {'outputBinaryVolumes': 'outputBinaryVolumes.nii'} + _redirect_x = False + + +class BRAINSClipInferiorInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Input image to make a clipped short int copy from.", + exists=True, + argstr="--inputVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", + argstr="--outputVolume %s") + acLowerBound = traits.Float( + desc= + ", When copying the input image to the output image, replace the image with the BackgroundFillValue everywhere below the plane This Far in physical units (millimeters) below (inferior to) the AC point (assumed to be the voxel field middle.) The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", + argstr="--acLowerBound %f") + BackgroundFillValue = traits.Str( + desc= + "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSClipInferiorOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", + exists=True) + + +class BRAINSClipInferior(SEMLikeCommandLine): + """title: Clip Inferior of Center of Brain (BRAINS) + +category: Utilities.BRAINS + +description: This program will read the inputVolume as a short int image, write the BackgroundFillValue everywhere inferior to the lower bound, and write the resulting clipped short int image in the outputVolume. + +version: 1.0 + +""" + + input_spec = BRAINSClipInferiorInputSpec + output_spec = BRAINSClipInferiorOutputSpec + _cmd = " BRAINSClipInferior " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + _redirect_x = False + + +class GenerateLabelMapFromProbabilityMapInputSpec(CommandLineInputSpec): + inputVolumes = InputMultiPath( + File(exists=True), + desc="The input probability images to be computed for label maps", + argstr="--inputVolumes %s...") + outputLabelVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The Input binary image for region of interest", + argstr="--outputLabelVolume %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class GenerateLabelMapFromProbabilityMapOutputSpec(TraitedSpec): + outputLabelVolume = File( + desc="The Input binary image for region of interest", exists=True) + + +class GenerateLabelMapFromProbabilityMap(SEMLikeCommandLine): + """title: Label Map from Probability Images + +category: Utilities.BRAINS + +description: Given a list of probability maps for labels, create a discrete label map where only the highest probability region is used for the labeling.
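A minimal usage sketch, assuming the GenerateLabelMapFromProbabilityMap executable is on PATH; the probability-map file names are hypothetical:

from nipype.interfaces.semtools.utilities.brains import GenerateLabelMapFromProbabilityMap

gen = GenerateLabelMapFromProbabilityMap()
gen.inputs.inputVolumes = ['csf_prob.nii.gz', 'gm_prob.nii.gz', 'wm_prob.nii.gz']  # hypothetical maps
gen.inputs.outputLabelVolume = 'labelmap.nii.gz'
print(gen.cmdline)  # the '%s...' argstr repeats --inputVolumes once per file
# gen.run() would write a label map with each voxel assigned its highest-probability label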
+ +version: 0.1 + +contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu + +""" + + input_spec = GenerateLabelMapFromProbabilityMapInputSpec + output_spec = GenerateLabelMapFromProbabilityMapOutputSpec + _cmd = " GenerateLabelMapFromProbabilityMap " + _outputs_filenames = {'outputLabelVolume': 'outputLabelVolume.nii.gz'} + _redirect_x = False + + +class BRAINSAlignMSPInputSpec(CommandLineInputSpec): + inputVolume = File( + desc=", The Image to be resampled, ", + exists=True, + argstr="--inputVolume %s") + OutputresampleMSP = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc=", The image to be output., ", + argstr="--OutputresampleMSP %s") + verbose = traits.Bool( + desc=", Show more verbose output, ", argstr="--verbose ") + resultsDir = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc=", The directory for the results to be written., ", + argstr="--resultsDir %s") + writedebuggingImagesLevel = traits.Int( + desc= + ", This flag controls if debugging images are produced. By default, a value of 0 produces no images. Anything greater than zero will produce an increasing level of debugging images., ", + argstr="--writedebuggingImagesLevel %d") + mspQualityLevel = traits.Int( + desc= + ", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + argstr="--mspQualityLevel %d") + rescaleIntensities = traits.Bool( + desc= + ", Flag to turn on rescaling image intensities on input., ", + argstr="--rescaleIntensities ") + trimRescaledIntensities = traits.Float( + desc= + ", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", + argstr="--trimRescaledIntensities %f") + rescaleIntensitiesOutputRange = InputMultiPath( + traits.Int, + desc= + ", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", + sep=",", + argstr="--rescaleIntensitiesOutputRange %s") + BackgroundFillValue = traits.Str( + desc= + "Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", + argstr="--BackgroundFillValue %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume.
Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSAlignMSPOutputSpec(TraitedSpec): + OutputresampleMSP = File( + desc=", The image to be output., ", exists=True) + resultsDir = Directory( + desc=", The directory for the results to be written., ", + exists=True) + + +class BRAINSAlignMSP(SEMLikeCommandLine): + """title: Align Mid Sagittal Brain (BRAINS) + +category: Utilities.BRAINS + +description: Resample an image into ACPC alignment (ACPCDetect) + +""" + + input_spec = BRAINSAlignMSPInputSpec + output_spec = BRAINSAlignMSPOutputSpec + _cmd = " BRAINSAlignMSP " + _outputs_filenames = { + 'OutputresampleMSP': 'OutputresampleMSP.nii', + 'resultsDir': 'resultsDir' + } + _redirect_x = False + + +class BRAINSLandmarkInitializerInputSpec(CommandLineInputSpec): + inputFixedLandmarkFilename = File( + desc="input fixed landmark. *.fcsv", + exists=True, + argstr="--inputFixedLandmarkFilename %s") + inputMovingLandmarkFilename = File( + desc="input moving landmark. *.fcsv", + exists=True, + argstr="--inputMovingLandmarkFilename %s") + inputWeightFilename = File( + desc= + "Input weight file name for landmarks. A higher weighted landmark will be considered more heavily. Weights are proportional, that is, the magnitude of weights will be normalized by their minimum and maximum values. ", + exists=True, + argstr="--inputWeightFilename %s") + outputTransformFilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="output transform file name (ex: ./outputTransform.mat) ", + argstr="--outputTransformFilename %s") + + +class BRAINSLandmarkInitializerOutputSpec(TraitedSpec): + outputTransformFilename = File( + desc="output transform file name (ex: ./outputTransform.mat) ", + exists=True) + + +class BRAINSLandmarkInitializer(SEMLikeCommandLine): + """title: BRAINSLandmarkInitializer + +category: Utilities.BRAINS + +description: Create a transformation file (*mat) from a pair of landmark (*fcsv) files.
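A minimal usage sketch, assuming the BRAINSLandmarkInitializer executable is available; the landmark file names are hypothetical:

from nipype.interfaces.semtools.utilities.brains import BRAINSLandmarkInitializer

init = BRAINSLandmarkInitializer()
init.inputs.inputFixedLandmarkFilename = 'atlas.fcsv'     # hypothetical fixed landmarks
init.inputs.inputMovingLandmarkFilename = 'subject.fcsv'  # hypothetical moving landmarks
init.inputs.outputTransformFilename = 'initial.mat'
print(init.cmdline)
# init.run() would produce initial.mat, e.g. for seeding a subsequent registration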
+ +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Eunyoung Regina Kim + +""" + + input_spec = BRAINSLandmarkInitializerInputSpec + output_spec = BRAINSLandmarkInitializerOutputSpec + _cmd = " BRAINSLandmarkInitializer " + _outputs_filenames = {'outputTransformFilename': 'outputTransformFilename'} + _redirect_x = False + + +class insertMidACPCpointInputSpec(CommandLineInputSpec): + inputLandmarkFile = File( + desc="Input landmark file (.fcsv)", + exists=True, + argstr="--inputLandmarkFile %s") + outputLandmarkFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Output landmark file (.fcsv)", + argstr="--outputLandmarkFile %s") + + +class insertMidACPCpointOutputSpec(TraitedSpec): + outputLandmarkFile = File(desc="Output landmark file (.fcsv)", exists=True) + + +class insertMidACPCpoint(SEMLikeCommandLine): + """title: MidACPC Landmark Insertion + +category: Utilities.BRAINS + +description: This program gets a landmark fcsv file and adds a new landmark as the midpoint between AC and PC points to the output landmark fcsv file + +contributor: Ali Ghayoor + +""" + + input_spec = insertMidACPCpointInputSpec + output_spec = insertMidACPCpointOutputSpec + _cmd = " insertMidACPCpoint " + _outputs_filenames = {'outputLandmarkFile': 'outputLandmarkFile'} + _redirect_x = False + + +class BRAINSSnapShotWriterInputSpec(CommandLineInputSpec): + inputVolumes = InputMultiPath( + File(exists=True), + desc= + "Input image volume list to be extracted as 2D image. Multiple input is possible. At least one input is required.", + argstr="--inputVolumes %s...") + inputBinaryVolumes = InputMultiPath( + File(exists=True), + desc= + "Input mask (binary) volume list to be extracted as 2D image. Multiple input is possible.", + argstr="--inputBinaryVolumes %s...") + inputSliceToExtractInPhysicalPoint = InputMultiPath( + traits.Float, + desc= + "2D slice number of input images. For autoWorkUp output, which is AC-PC aligned, 0,0,0 will be the center.", + sep=",", + argstr="--inputSliceToExtractInPhysicalPoint %s") + inputSliceToExtractInIndex = InputMultiPath( + traits.Int, + desc= + "2D slice number of input images. For size of 256*256*256 image, 128 is usually used.", + sep=",", + argstr="--inputSliceToExtractInIndex %s") + inputSliceToExtractInPercent = InputMultiPath( + traits.Int, + desc= + "2D slice number of input images. Percentage input from 0%-100%. (ex. --inputSliceToExtractInPercent 50,50,50)", + sep=",", + argstr="--inputSliceToExtractInPercent %s") + inputPlaneDirection = InputMultiPath( + traits.Int, + desc= + "Plane to display. In general, 0=sagittal, 1=coronal, and 2=axial plane.", + sep=",", + argstr="--inputPlaneDirection %s") + outputFilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="2D file name of input images. Required.", + argstr="--outputFilename %s") + + +class BRAINSSnapShotWriterOutputSpec(TraitedSpec): + outputFilename = File( + desc="2D file name of input images. Required.", exists=True) + + +class BRAINSSnapShotWriter(SEMLikeCommandLine): + """title: BRAINSSnapShotWriter + +category: Utilities.BRAINS + +description: Create 2D snapshot of input images.
Mask images are color-coded + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Eunyoung Regina Kim + +""" + + input_spec = BRAINSSnapShotWriterInputSpec + output_spec = BRAINSSnapShotWriterOutputSpec + _cmd = " BRAINSSnapShotWriter " + _outputs_filenames = {'outputFilename': 'outputFilename'} + _redirect_x = False + + +class JointHistogramInputSpec(CommandLineInputSpec): + inputVolumeInXAxis = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--inputVolumeInXAxis %s") + inputVolumeInYAxis = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--inputVolumeInYAxis %s") + inputMaskVolumeInXAxis = File( + desc= + "Input mask volume for inputVolumeInXAxis. Histogram will be computed just for the masked region", + exists=True, + argstr="--inputMaskVolumeInXAxis %s") + inputMaskVolumeInYAxis = File( + desc= + "Input mask volume for inputVolumeInYAxis. Histogram will be computed just for the masked region", + exists=True, + argstr="--inputMaskVolumeInYAxis %s") + outputJointHistogramImage = traits.Str( + desc= + " output joint histogram image file name. Histogram is usually a 2D image. ", + argstr="--outputJointHistogramImage %s") + verbose = traits.Bool( + desc=" print debugging information, ", argstr="--verbose ") + + +class JointHistogramOutputSpec(TraitedSpec): + pass + + +class JointHistogram(SEMLikeCommandLine): + """title: Write Out Image Intensities + +category: Utilities.BRAINS + +description: For Analysis + +version: 0.1 + +contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu + +""" + + input_spec = JointHistogramInputSpec + output_spec = JointHistogramOutputSpec + _cmd = " JointHistogram " + _outputs_filenames = {} + _redirect_x = False + + +class ShuffleVectorsModuleInputSpec(CommandLineInputSpec): + inputVectorFileBaseName = File( + desc= + "input vector file name prefix. Usually ends with .txt, and the header file has postfix .txt.hdr", + exists=True, + argstr="--inputVectorFileBaseName %s") + outputVectorFileBaseName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "output vector file name prefix. Usually ends with .txt, and the header file has postfix .txt.hdr", + argstr="--outputVectorFileBaseName %s") + resampleProportion = traits.Float( + desc= + "downsample size of 1 will be the same size as the input images, downsample size of 3 will throw 2/3 of the vectors away.", + argstr="--resampleProportion %f") + + +class ShuffleVectorsModuleOutputSpec(TraitedSpec): + outputVectorFileBaseName = File( + desc= + "output vector file name prefix.
Usually ends with .txt, and the header file has postfix .txt.hdr", + exists=True) + + +class ShuffleVectorsModule(SEMLikeCommandLine): + """title: ShuffleVectors + +category: Utilities.BRAINS + +description: Automatic Segmentation using neural networks + +version: 1.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans Johnson + +""" + + input_spec = ShuffleVectorsModuleInputSpec + output_spec = ShuffleVectorsModuleOutputSpec + _cmd = " ShuffleVectorsModule " + _outputs_filenames = { + 'outputVectorFileBaseName': 'outputVectorFileBaseName' + } + _redirect_x = False + + +class ImageRegionPlotterInputSpec(CommandLineInputSpec): + inputVolume1 = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--inputVolume1 %s") + inputVolume2 = File( + desc="The Input image to be computed for statistics", + exists=True, + argstr="--inputVolume2 %s") + inputBinaryROIVolume = File( + desc="The Input binary image for region of interest", + exists=True, + argstr="--inputBinaryROIVolume %s") + inputLabelVolume = File( + desc="The Label Image", exists=True, argstr="--inputLabelVolume %s") + numberOfHistogramBins = traits.Int( + desc=" the number of histogram levels", + argstr="--numberOfHistogramBins %d") + outputJointHistogramData = traits.Str( + desc=" output data file name", argstr="--outputJointHistogramData %s") + useROIAUTO = traits.Bool( + desc= + " Use ROIAUTO to compute region of interest. This cannot be used with inputLabelVolume", + argstr="--useROIAUTO ") + useIntensityForHistogram = traits.Bool( + desc= + " Create Intensity Joint Histogram instead of Quantile Joint Histogram", + argstr="--useIntensityForHistogram ") + verbose = traits.Bool( + desc=" print debugging information, ", argstr="--verbose ") + + +class ImageRegionPlotterOutputSpec(TraitedSpec): + pass + + +class ImageRegionPlotter(SEMLikeCommandLine): + """title: Write Out Image Intensities + +category: Utilities.BRAINS + +description: For Analysis + +version: 0.1 + +contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu + +""" + + input_spec = ImageRegionPlotterInputSpec + output_spec = ImageRegionPlotterOutputSpec + _cmd = " ImageRegionPlotter " + _outputs_filenames = {} + _redirect_x = False + + +class fcsv_to_hdf5InputSpec(CommandLineInputSpec): + versionID = traits.Str( + desc= + ", Current version ID.
It should match the version of BCD that will be using the output model file, ", + argstr="--versionID %s") + landmarksInformationFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc=", name of HDF5 file to write matrices into, ", + argstr="--landmarksInformationFile %s") + landmarkTypesList = File( + desc=", file containing list of landmark types, ", + exists=True, + argstr="--landmarkTypesList %s") + modelFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + ", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", + argstr="--modelFile %s") + landmarkGlobPattern = traits.Str( + desc="Glob pattern to select fcsv files", + argstr="--landmarkGlobPattern %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class fcsv_to_hdf5OutputSpec(TraitedSpec): + landmarksInformationFile = File( + desc=", name of HDF5 file to write matrices into, ", + exists=True) + modelFile = File( + desc= + ", name of HDF5 file containing BRAINSConstellationDetector Model file (LLSMatrices, LLSMeans and LLSSearchRadii), ", + exists=True) + + +class fcsv_to_hdf5(SEMLikeCommandLine): + """title: fcsv_to_hdf5 (BRAINS) + +category: Utilities.BRAINS + +description: Convert a collection of fcsv files to a HDF5 format file + +""" + + input_spec = fcsv_to_hdf5InputSpec + output_spec = fcsv_to_hdf5OutputSpec + _cmd = " fcsv_to_hdf5 " + _outputs_filenames = { + 'modelFile': 'modelFile', + 'landmarksInformationFile': 'landmarksInformationFile.h5' + } + _redirect_x = False + + +class FindCenterOfBrainInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="The image in which to find the center.", + exists=True, + argstr="--inputVolume %s") + imageMask = File(exists=True, argstr="--imageMask %s") + clippedImageMask = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--clippedImageMask %s") + maximize = traits.Bool(argstr="--maximize ") + axis = traits.Int(argstr="--axis %d") + otsuPercentileThreshold = traits.Float( + argstr="--otsuPercentileThreshold %f") + closingSize = traits.Int(argstr="--closingSize %d") + headSizeLimit = traits.Float(argstr="--headSizeLimit %f") + headSizeEstimate = traits.Float(argstr="--headSizeEstimate %f") + backgroundValue = traits.Int(argstr="--backgroundValue %d") + generateDebugImages = traits.Bool(argstr="--generateDebugImages ") + debugDistanceImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + argstr="--debugDistanceImage %s") + debugGridImage = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--debugGridImage %s") + debugAfterGridComputationsForegroundImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + argstr="--debugAfterGridComputationsForegroundImage %s") + debugClippedImageMask = traits.Either( + traits.Bool, + File(), + hash_files=False, + argstr="--debugClippedImageMask %s") + debugTrimmedImage = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--debugTrimmedImage %s") + + +class FindCenterOfBrainOutputSpec(TraitedSpec): + clippedImageMask = File(exists=True) + debugDistanceImage = File(exists=True) + debugGridImage = File(exists=True) + debugAfterGridComputationsForegroundImage = File(exists=True) + debugClippedImageMask = File(exists=True) + debugTrimmedImage = File(exists=True) + + +class FindCenterOfBrain(SEMLikeCommandLine): + """title: Center Of Brain (BRAINS) + +category: Utilities.BRAINS +
+ +description: Finds the center point of a brain + +version: 3.0.0 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) + +""" + + input_spec = FindCenterOfBrainInputSpec + output_spec = FindCenterOfBrainOutputSpec + _cmd = " FindCenterOfBrain " + _outputs_filenames = { + 'debugClippedImageMask': + 'debugClippedImageMask.nii', + 'debugTrimmedImage': + 'debugTrimmedImage.nii', + 'debugDistanceImage': + 'debugDistanceImage.nii', + 'debugGridImage': + 'debugGridImage.nii', + 'clippedImageMask': + 'clippedImageMask.nii', + 'debugAfterGridComputationsForegroundImage': + 'debugAfterGridComputationsForegroundImage.nii' + } + _redirect_x = False diff --git a/nipype/interfaces/semtools/utilities/tests/__init__.py b/nipype/interfaces/semtools/utilities/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py new file mode 100644 index 0000000000..27b65a4eba --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSAlignMSP + + +def test_BRAINSAlignMSP_inputs(): + input_map = dict( + BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), + OutputresampleMSP=dict( + argstr='--OutputresampleMSP %s', + hash_files=False, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + rescaleIntensitiesOutputRange=dict( + argstr='--rescaleIntensitiesOutputRange %s', + sep=',', + ), + resultsDir=dict( + argstr='--resultsDir %s', + hash_files=False, + ), + trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), + verbose=dict(argstr='--verbose ', ), + writedebuggingImagesLevel=dict( + argstr='--writedebuggingImagesLevel %d', ), + ) + inputs = BRAINSAlignMSP.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSAlignMSP_outputs(): + output_map = dict( + OutputresampleMSP=dict(), + resultsDir=dict(), + ) + outputs = BRAINSAlignMSP.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py new file mode 100644 index 0000000000..7a0528f201 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSClipInferior.py @@ -0,0 +1,33 @@ +# AUTO-GENERATED by tools/checkspecs.py
- DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSClipInferior + + +def test_BRAINSClipInferior_inputs(): + input_map = dict( + BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), + acLowerBound=dict(argstr='--acLowerBound %f', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = BRAINSClipInferior.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSClipInferior_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSClipInferior.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py new file mode 100644 index 0000000000..a124ad60d9 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSConstellationModeler.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSConstellationModeler + + +def test_BRAINSConstellationModeler_inputs(): + input_map = dict( + BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict(argstr='--inputTrainingList %s', ), + mspQualityLevel=dict(argstr='--mspQualityLevel %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + optimizedLandmarksFilenameExtender=dict( + argstr='--optimizedLandmarksFilenameExtender %s', ), + outputModel=dict( + argstr='--outputModel %s', + hash_files=False, + ), + rescaleIntensities=dict(argstr='--rescaleIntensities ', ), + rescaleIntensitiesOutputRange=dict( + argstr='--rescaleIntensitiesOutputRange %s', + sep=',', + ), + resultsDir=dict( + argstr='--resultsDir %s', + hash_files=False, + ), + saveOptimizedLandmarks=dict(argstr='--saveOptimizedLandmarks ', ), + trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f', ), + verbose=dict(argstr='--verbose ', ), + writedebuggingImagesLevel=dict( + argstr='--writedebuggingImagesLevel %d', ), + ) + inputs = BRAINSConstellationModeler.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSConstellationModeler_outputs(): + output_map = dict( + outputModel=dict(), + resultsDir=dict(), + ) + outputs = BRAINSConstellationModeler.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py new file mode 100644 index 0000000000..9df3c8b8d6 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSEyeDetector.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains 
import BRAINSEyeDetector + + +def test_BRAINSEyeDetector_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + debugDir=dict(argstr='--debugDir %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = BRAINSEyeDetector.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSEyeDetector_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSEyeDetector.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py new file mode 100644 index 0000000000..bc3807f9f8 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSInitializedControlPoints.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSInitializedControlPoints + + +def test_BRAINSInitializedControlPoints_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputLandmarksFile=dict(argstr='--outputLandmarksFile %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + permuteOrder=dict( + argstr='--permuteOrder %s', + sep=',', + ), + splineGridSize=dict( + argstr='--splineGridSize %s', + sep=',', + ), + ) + inputs = BRAINSInitializedControlPoints.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSInitializedControlPoints_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSInitializedControlPoints.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py new file mode 100644 index 0000000000..2456b8fb4d --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLandmarkInitializer.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSLandmarkInitializer + + +def test_BRAINSLandmarkInitializer_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFixedLandmarkFilename=dict( + argstr='--inputFixedLandmarkFilename %s', ), + inputMovingLandmarkFilename=dict( + argstr='--inputMovingLandmarkFilename %s', ), + inputWeightFilename=dict(argstr='--inputWeightFilename %s', ), + outputTransformFilename=dict( + argstr='--outputTransformFilename %s', + hash_files=False, + ), + ) + inputs = BRAINSLandmarkInitializer.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, 
value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSLandmarkInitializer_outputs(): + output_map = dict(outputTransformFilename=dict(), ) + outputs = BRAINSLandmarkInitializer.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py new file mode 100644 index 0000000000..b6b83baa12 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLinearModelerEPCA.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSLinearModelerEPCA + + +def test_BRAINSLinearModelerEPCA_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTrainingList=dict(argstr='--inputTrainingList %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + ) + inputs = BRAINSLinearModelerEPCA.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSLinearModelerEPCA_outputs(): + output_map = dict() + outputs = BRAINSLinearModelerEPCA.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py new file mode 100644 index 0000000000..262d948029 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSLmkTransform.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSLmkTransform + + +def test_BRAINSLmkTransform_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFixedLandmarks=dict(argstr='--inputFixedLandmarks %s', ), + inputMovingLandmarks=dict(argstr='--inputMovingLandmarks %s', ), + inputMovingVolume=dict(argstr='--inputMovingVolume %s', ), + inputReferenceVolume=dict(argstr='--inputReferenceVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputAffineTransform=dict( + argstr='--outputAffineTransform %s', + hash_files=False, + ), + outputResampledVolume=dict( + argstr='--outputResampledVolume %s', + hash_files=False, + ), + ) + inputs = BRAINSLmkTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSLmkTransform_outputs(): + output_map = dict( + outputAffineTransform=dict(), + outputResampledVolume=dict(), + ) + outputs = BRAINSLmkTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py new file mode 100644 index 0000000000..9627258bcf --- /dev/null +++ 
b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSMush.py @@ -0,0 +1,63 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSMush + + +def test_BRAINSMush_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + boundingBoxSize=dict( + argstr='--boundingBoxSize %s', + sep=',', + ), + boundingBoxStart=dict( + argstr='--boundingBoxStart %s', + sep=',', + ), + desiredMean=dict(argstr='--desiredMean %f', ), + desiredVariance=dict(argstr='--desiredVariance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputFirstVolume=dict(argstr='--inputFirstVolume %s', ), + inputMaskVolume=dict(argstr='--inputMaskVolume %s', ), + inputSecondVolume=dict(argstr='--inputSecondVolume %s', ), + lowerThresholdFactor=dict(argstr='--lowerThresholdFactor %f', ), + lowerThresholdFactorPre=dict(argstr='--lowerThresholdFactorPre %f', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputMask=dict( + argstr='--outputMask %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + outputWeightsFile=dict( + argstr='--outputWeightsFile %s', + hash_files=False, + ), + seed=dict( + argstr='--seed %s', + sep=',', + ), + upperThresholdFactor=dict(argstr='--upperThresholdFactor %f', ), + upperThresholdFactorPre=dict(argstr='--upperThresholdFactorPre %f', ), + ) + inputs = BRAINSMush.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSMush_outputs(): + output_map = dict( + outputMask=dict(), + outputVolume=dict(), + outputWeightsFile=dict(), + ) + outputs = BRAINSMush.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py new file mode 100644 index 0000000000..1b803956e7 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSSnapShotWriter.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSSnapShotWriter + + +def test_BRAINSSnapShotWriter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', ), + inputPlaneDirection=dict( + argstr='--inputPlaneDirection %s', + sep=',', + ), + inputSliceToExtractInIndex=dict( + argstr='--inputSliceToExtractInIndex %s', + sep=',', + ), + inputSliceToExtractInPercent=dict( + argstr='--inputSliceToExtractInPercent %s', + sep=',', + ), + inputSliceToExtractInPhysicalPoint=dict( + argstr='--inputSliceToExtractInPhysicalPoint %s', + sep=',', + ), + inputVolumes=dict(argstr='--inputVolumes %s...', ), + outputFilename=dict( + argstr='--outputFilename %s', + hash_files=False, + ), + ) + inputs = BRAINSSnapShotWriter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSSnapShotWriter_outputs(): + output_map = dict(outputFilename=dict(), ) + outputs = BRAINSSnapShotWriter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py new file mode 100644 index 0000000000..c6d10736d7 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTransformConvert.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSTransformConvert + + +def test_BRAINSTransformConvert_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + displacementVolume=dict( + argstr='--displacementVolume %s', + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTransform=dict(argstr='--inputTransform %s', ), + outputPrecisionType=dict(argstr='--outputPrecisionType %s', ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + outputTransformType=dict(argstr='--outputTransformType %s', ), + referenceVolume=dict(argstr='--referenceVolume %s', ), + ) + inputs = BRAINSTransformConvert.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSTransformConvert_outputs(): + output_map = dict( + displacementVolume=dict(), + outputTransform=dict(), + ) + outputs = BRAINSTransformConvert.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py new file mode 100644 index 0000000000..d2f3b74140 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSTrimForegroundInDirection.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import BRAINSTrimForegroundInDirection + + +def test_BRAINSTrimForegroundInDirection_inputs(): + input_map = dict( + BackgroundFillValue=dict(argstr='--BackgroundFillValue %s', ), + args=dict(argstr='%s', ), + closingSize=dict(argstr='--closingSize %d', ), + directionCode=dict(argstr='--directionCode %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + headSizeLimit=dict(argstr='--headSizeLimit %f', ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + ) + inputs = BRAINSTrimForegroundInDirection.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSTrimForegroundInDirection_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSTrimForegroundInDirection.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py new file mode 100644 index 0000000000..936437886a 
--- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_CleanUpOverlapLabels.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import CleanUpOverlapLabels + + +def test_CleanUpOverlapLabels_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryVolumes=dict(argstr='--inputBinaryVolumes %s...', ), + outputBinaryVolumes=dict( + argstr='--outputBinaryVolumes %s...', + hash_files=False, + ), + ) + inputs = CleanUpOverlapLabels.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CleanUpOverlapLabels_outputs(): + output_map = dict(outputBinaryVolumes=dict(), ) + outputs = CleanUpOverlapLabels.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py new file mode 100644 index 0000000000..46924bf180 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_FindCenterOfBrain.py @@ -0,0 +1,66 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import FindCenterOfBrain + + +def test_FindCenterOfBrain_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + axis=dict(argstr='--axis %d', ), + backgroundValue=dict(argstr='--backgroundValue %d', ), + clippedImageMask=dict( + argstr='--clippedImageMask %s', + hash_files=False, + ), + closingSize=dict(argstr='--closingSize %d', ), + debugAfterGridComputationsForegroundImage=dict( + argstr='--debugAfterGridComputationsForegroundImage %s', + hash_files=False, + ), + debugClippedImageMask=dict( + argstr='--debugClippedImageMask %s', + hash_files=False, + ), + debugDistanceImage=dict( + argstr='--debugDistanceImage %s', + hash_files=False, + ), + debugGridImage=dict( + argstr='--debugGridImage %s', + hash_files=False, + ), + debugTrimmedImage=dict( + argstr='--debugTrimmedImage %s', + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateDebugImages=dict(argstr='--generateDebugImages ', ), + headSizeEstimate=dict(argstr='--headSizeEstimate %f', ), + headSizeLimit=dict(argstr='--headSizeLimit %f', ), + imageMask=dict(argstr='--imageMask %s', ), + inputVolume=dict(argstr='--inputVolume %s', ), + maximize=dict(argstr='--maximize ', ), + otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + ) + inputs = FindCenterOfBrain.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FindCenterOfBrain_outputs(): + output_map = dict( + clippedImageMask=dict(), + debugAfterGridComputationsForegroundImage=dict(), + debugClippedImageMask=dict(), + debugDistanceImage=dict(), + debugGridImage=dict(), + debugTrimmedImage=dict(), + ) + outputs = FindCenterOfBrain.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py 
b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py new file mode 100644 index 0000000000..9087d6667b --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_GenerateLabelMapFromProbabilityMap.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import GenerateLabelMapFromProbabilityMap + + +def test_GenerateLabelMapFromProbabilityMap_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolumes=dict(argstr='--inputVolumes %s...', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputLabelVolume=dict( + argstr='--outputLabelVolume %s', + hash_files=False, + ), + ) + inputs = GenerateLabelMapFromProbabilityMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GenerateLabelMapFromProbabilityMap_outputs(): + output_map = dict(outputLabelVolume=dict(), ) + outputs = GenerateLabelMapFromProbabilityMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py new file mode 100644 index 0000000000..f5372a240e --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ImageRegionPlotter.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import ImageRegionPlotter + + +def test_ImageRegionPlotter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputBinaryROIVolume=dict(argstr='--inputBinaryROIVolume %s', ), + inputLabelVolume=dict(argstr='--inputLabelVolume %s', ), + inputVolume1=dict(argstr='--inputVolume1 %s', ), + inputVolume2=dict(argstr='--inputVolume2 %s', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + outputJointHistogramData=dict( + argstr='--outputJointHistogramData %s', ), + useIntensityForHistogram=dict(argstr='--useIntensityForHistogram ', ), + useROIAUTO=dict(argstr='--useROIAUTO ', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = ImageRegionPlotter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageRegionPlotter_outputs(): + output_map = dict() + outputs = ImageRegionPlotter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py new file mode 100644 index 0000000000..daf6d5634f --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_JointHistogram.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import JointHistogram + + +def test_JointHistogram_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
inputMaskVolumeInXAxis=dict(argstr='--inputMaskVolumeInXAxis %s', ), + inputMaskVolumeInYAxis=dict(argstr='--inputMaskVolumeInYAxis %s', ), + inputVolumeInXAxis=dict(argstr='--inputVolumeInXAxis %s', ), + inputVolumeInYAxis=dict(argstr='--inputVolumeInYAxis %s', ), + outputJointHistogramImage=dict( + argstr='--outputJointHistogramImage %s', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = JointHistogram.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JointHistogram_outputs(): + output_map = dict() + outputs = JointHistogram.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py new file mode 100644 index 0000000000..7bea38e2f1 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_ShuffleVectorsModule.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import ShuffleVectorsModule + + +def test_ShuffleVectorsModule_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVectorFileBaseName=dict(argstr='--inputVectorFileBaseName %s', ), + outputVectorFileBaseName=dict( + argstr='--outputVectorFileBaseName %s', + hash_files=False, + ), + resampleProportion=dict(argstr='--resampleProportion %f', ), + ) + inputs = ShuffleVectorsModule.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ShuffleVectorsModule_outputs(): + output_map = dict(outputVectorFileBaseName=dict(), ) + outputs = ShuffleVectorsModule.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py new file mode 100644 index 0000000000..32e6ce65cd --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_fcsv_to_hdf5.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import fcsv_to_hdf5 + + +def test_fcsv_to_hdf5_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + landmarkGlobPattern=dict(argstr='--landmarkGlobPattern %s', ), + landmarkTypesList=dict(argstr='--landmarkTypesList %s', ), + landmarksInformationFile=dict( + argstr='--landmarksInformationFile %s', + hash_files=False, + ), + modelFile=dict( + argstr='--modelFile %s', + hash_files=False, + ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + versionID=dict(argstr='--versionID %s', ), + ) + inputs = fcsv_to_hdf5.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_fcsv_to_hdf5_outputs(): + output_map = dict( + landmarksInformationFile=dict(), + modelFile=dict(), + ) + outputs = fcsv_to_hdf5.output_spec() + + for key, metadata in 
list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py new file mode 100644 index 0000000000..196b09b304 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import insertMidACPCpoint + + +def test_insertMidACPCpoint_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarkFile=dict(argstr='--inputLandmarkFile %s', ), + outputLandmarkFile=dict( + argstr='--outputLandmarkFile %s', + hash_files=False, + ), + ) + inputs = insertMidACPCpoint.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_insertMidACPCpoint_outputs(): + output_map = dict(outputLandmarkFile=dict(), ) + outputs = insertMidACPCpoint.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py new file mode 100644 index 0000000000..151a2c7b3a --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationAligner.py @@ -0,0 +1,30 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import landmarksConstellationAligner + + +def test_landmarksConstellationAligner_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputLandmarksPaired=dict(argstr='--inputLandmarksPaired %s', ), + outputLandmarksPaired=dict( + argstr='--outputLandmarksPaired %s', + hash_files=False, + ), + ) + inputs = landmarksConstellationAligner.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_landmarksConstellationAligner_outputs(): + output_map = dict(outputLandmarksPaired=dict(), ) + outputs = landmarksConstellationAligner.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py new file mode 100644 index 0000000000..0bcd747d36 --- /dev/null +++ b/nipype/interfaces/semtools/utilities/tests/test_auto_landmarksConstellationWeights.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brains import landmarksConstellationWeights + + +def test_landmarksConstellationWeights_inputs(): + input_map = dict( + LLSModel=dict(argstr='--LLSModel %s', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTemplateModel=dict(argstr='--inputTemplateModel %s', ), + 
inputTrainingList=dict(argstr='--inputTrainingList %s', ), + outputWeightsList=dict( + argstr='--outputWeightsList %s', + hash_files=False, + ), + ) + inputs = landmarksConstellationWeights.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_landmarksConstellationWeights_outputs(): + output_map = dict(outputWeightsList=dict(), ) + outputs = landmarksConstellationWeights.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/__init__.py b/nipype/interfaces/slicer/__init__.py new file mode 100644 index 0000000000..c7ffc42259 --- /dev/null +++ b/nipype/interfaces/slicer/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import * +from .segmentation import * +from .filtering import * +from .utilities import EMSegmentTransformToNewFormat +from .surface import (MergeModels, ModelToLabelMap, GrayscaleModelMaker, + ProbeVolumeWithModel, LabelMapSmoothing, ModelMaker) +from .quantification import * +from .legacy import * +from .registration import * +from .converters import DicomToNrrdConverter, OrientScalarVolume diff --git a/nipype/interfaces/slicer/base.py b/nipype/interfaces/slicer/base.py new file mode 100644 index 0000000000..aae54ec00b --- /dev/null +++ b/nipype/interfaces/slicer/base.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from ..base import SEMLikeCommandLine + + +class SlicerCommandLine(SEMLikeCommandLine): + pass diff --git a/nipype/interfaces/slicer/converters.py b/nipype/interfaces/slicer/converters.py new file mode 100644 index 0000000000..e93b994110 --- /dev/null +++ b/nipype/interfaces/slicer/converters.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class DicomToNrrdConverterInputSpec(CommandLineInputSpec): + inputDicomDirectory = Directory( + desc="Directory holding Dicom series", + exists=True, + argstr="--inputDicomDirectory %s") + outputDirectory = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc="Directory holding the output NRRD format", + argstr="--outputDirectory %s") + outputVolume = traits.Str( + desc="Output filename (.nhdr or .nrrd)", argstr="--outputVolume %s") + smallGradientThreshold = traits.Float( + desc= + "If a gradient magnitude is greater than 0 and less than smallGradientThreshold, then DicomToNrrdConverter will display an error message and quit, unless the useBMatrixGradientDirections option is set.", + argstr="--smallGradientThreshold %f") + writeProtocolGradientsFile = traits.Bool( + desc= + "Write the protocol gradients to a file suffixed by \'.txt\' as they were specified in the protocol by multiplying each diffusion gradient direction by the measurement frame.
This file is for debugging purposes only, the format is not fixed, and will likely change as debugging of new dicom formats is necessary.", + argstr="--writeProtocolGradientsFile ") + useIdentityMeaseurementFrame = traits.Bool( + desc= + "Adjust all the gradients so that the measurement frame is an identity matrix.", + argstr="--useIdentityMeaseurementFrame ") + useBMatrixGradientDirections = traits.Bool( + desc= + "Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data.", + argstr="--useBMatrixGradientDirections ") + + +class DicomToNrrdConverterOutputSpec(TraitedSpec): + outputDirectory = Directory( + desc="Directory holding the output NRRD format", exists=True) + + +class DicomToNrrdConverter(SEMLikeCommandLine): + """title: DICOM to NRRD Converter + +category: Converters + +description: Converts diffusion weighted MR images in dicom series into Nrrd format for analysis in Slicer. This program has been tested on only a limited subset of DTI dicom formats available from Siemens, GE, and Philips scanners. Work in progress to support dicom multi-frame data. The program parses the dicom header to extract necessary information about measurement frame, diffusion weighting directions, b-values, etc., and writes out a nrrd image. For non-diffusion weighted dicom images, it loads in an entire dicom series and writes out a single dicom volume in a .nhdr/.raw pair. + +version: 0.2.0.$Revision: 916 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DicomToNrrdConverter + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Xiaodong Tao (GE), Vince Magnotta (UIowa), Hans Johnson (UIowa) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for DTI data produced on Philips scanners was contributed by Vincent Magnotta and Hans Johnson at the University of Iowa. + +""" + + input_spec = DicomToNrrdConverterInputSpec + output_spec = DicomToNrrdConverterOutputSpec + _cmd = "DicomToNrrdConverter " + _outputs_filenames = {'outputDirectory': 'outputDirectory'} + + +class OrientScalarVolumeInputSpec(CommandLineInputSpec): + inputVolume1 = File( + position=-2, desc="Input volume 1", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="The oriented volume", + argstr="%s") + orientation = traits.Enum( + "Axial", + "Coronal", + "Sagittal", + "RIP", + "LIP", + "RSP", + "LSP", + "RIA", + "LIA", + "RSA", + "LSA", + "IRP", + "ILP", + "SRP", + "SLP", + "IRA", + "ILA", + "SRA", + "SLA", + "RPI", + "LPI", + "RAI", + "LAI", + "RPS", + "LPS", + "RAS", + "LAS", + "PRI", + "PLI", + "ARI", + "ALI", + "PRS", + "PLS", + "ARS", + "ALS", + "IPR", + "SPR", + "IAR", + "SAR", + "IPL", + "SPL", + "IAL", + "SAL", + "PIR", + "PSR", + "AIR", + "ASR", + "PIL", + "PSL", + "AIL", + "ASL", + desc="Orientation choices", + argstr="--orientation %s") + + +class OrientScalarVolumeOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="The oriented volume", exists=True) + + +class OrientScalarVolume(SEMLikeCommandLine): + """title: Orient Scalar Volume + +category: Converters + +description: Orients an output volume. Rearranges the slices in a volume according to the selected orientation. The slices are not interpolated.
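Looking back at the DicomToNrrdConverter interface just defined: a minimal usage sketch follows. This is not part of the patch; the paths are hypothetical, and running it requires the DicomToNrrdConverter Slicer CLI module on the PATH.

from nipype.interfaces.slicer import DicomToNrrdConverter

converter = DicomToNrrdConverter()
converter.inputs.inputDicomDirectory = '/data/sub01/dwi_dicom'  # hypothetical path
converter.inputs.outputDirectory = '/data/sub01/nrrd'           # hypothetical path
converter.inputs.outputVolume = 'dwi.nhdr'
converter.inputs.useBMatrixGradientDirections = True  # only changes behavior for Siemens data
print(converter.cmdline)  # inspect the generated command line before running
converter.run()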
They are just reordered and/or permuted. The resulting volume will cover the original volume. NOTE: since Slicer takes into account the orientation of a volume, the re-oriented volume will not show any difference from the original volume. To see the difference, save the volume and display it with a system that either ignores the orientation of the image (e.g. ParaView) or displays individual images. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OrientImage + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = OrientScalarVolumeInputSpec + output_spec = OrientScalarVolumeOutputSpec + _cmd = "OrientScalarVolume " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/diffusion/__init__.py b/nipype/interfaces/slicer/diffusion/__init__.py new file mode 100644 index 0000000000..fd7231cefe --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import ( + ResampleDTIVolume, DWIRicianLMMSEFilter, TractographyLabelMapSeeding, + DWIJointRicianLMMSEFilter, DiffusionWeightedVolumeMasking, DTIimport, + DWIToDTIEstimation, DiffusionTensorScalarMeasurements, DTIexport) diff --git a/nipype/interfaces/slicer/diffusion/diffusion.py b/nipype/interfaces/slicer/diffusion/diffusion.py new file mode 100644 index 0000000000..a088d25f8a --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/diffusion.py @@ -0,0 +1,654 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ResampleDTIVolumeInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-2, + desc="Input volume to be resampled", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Resampled Volume", + argstr="%s") + Reference = File( + desc="Reference Volume (spacing,size,orientation,origin)", + exists=True, + argstr="--Reference %s") + transformationFile = File(exists=True, argstr="--transformationFile %s") + defField = File( + desc= + "File containing the deformation field (3D vector image containing vectors with 3 components)", + exists=True, + argstr="--defField %s") + hfieldtype = traits.Enum( + "displacement", + "h-Field", + desc="Set if the deformation field is an h-Field", + argstr="--hfieldtype %s") + interpolation = traits.Enum( + "linear", + "nn", + "ws", + "bs", + desc= + "Sampling algorithm (linear , nn (nearest neighbor), ws (WindowedSinc), bs (BSpline) )", + argstr="--interpolation %s") + correction = traits.Enum( + "zero", + "none", + "abs", + "nearest", + desc= + "Correct the tensors if computed tensor is not semi-definite positive", + argstr="--correction %s") + transform_tensor_method = traits.Enum( + "PPD", + "FS", + desc= + "Chooses between 2 methods to transform the tensors: Finite Strain (FS), faster but less accurate, or Preservation of the Principal Direction (PPD)",
argstr="--transform_tensor_method %s") + transform_order = traits.Enum( + "input-to-output", + "output-to-input", + desc="Select in what order the transforms are read", + argstr="--transform_order %s") + notbulk = traits.Bool( + desc= + "The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", + argstr="--notbulk ") + spaceChange = traits.Bool( + desc= + "Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", + argstr="--spaceChange ") + rotation_point = traits.List( + desc="Center of rotation (only for rigid and affine transforms)", + argstr="--rotation_point %s") + centered_transform = traits.Bool( + desc= + "Set the center of the transformation to the center of the input image (only for rigid and affine transforms)", + argstr="--centered_transform ") + image_center = traits.Enum( + "input", + "output", + desc= + "Image to use to center the transform (used only if \'Centered Transform\' is selected)", + argstr="--image_center %s") + Inverse_ITK_Transformation = traits.Bool( + desc= + "Invert the transformation before applying it from output image to input image (only for rigid and affine transforms)", + argstr="--Inverse_ITK_Transformation ") + spacing = InputMultiPath( + traits.Float, + desc="Spacing along each dimension (0 means use input spacing)", + sep=",", + argstr="--spacing %s") + size = InputMultiPath( + traits.Float, + desc="Size along each dimension (0 means use input size)", + sep=",", + argstr="--size %s") + origin = traits.List( + desc="Origin of the output Image", argstr="--origin %s") + direction_matrix = InputMultiPath( + traits.Float, + desc= + "9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", + sep=",", + argstr="--direction_matrix %s") + number_of_thread = traits.Int( + desc="Number of threads used to compute the output image", + argstr="--number_of_thread %d") + default_pixel_value = traits.Float( + desc= + "Default pixel value for samples falling outside of the input region", + argstr="--default_pixel_value %f") + window_function = traits.Enum( + "h", + "c", + "w", + "l", + "b", + desc= + "Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", + argstr="--window_function %s") + spline_order = traits.Int( + desc="Spline Order (Spline order may be from 0 to 5)", + argstr="--spline_order %d") + transform_matrix = InputMultiPath( + traits.Float, + desc= + "12 parameters of the transform matrix by rows ( --last 3 being translation-- )", + sep=",", + argstr="--transform_matrix %s") + transform = traits.Enum( + "rt", + "a", + desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", + argstr="--transform %s") + + +class ResampleDTIVolumeOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Resampled Volume", exists=True) + + +class ResampleDTIVolume(SEMLikeCommandLine): + """title: Resample DTI Volume + +category: Diffusion.Diffusion Tensor Images + +description: Resampling an image is a very important task in image analysis. It is especially important in the frame of image registration. This module implements DT image resampling through the use of itk Transforms. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved.
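The spec above maps one-to-one onto trait assignments. As a minimal sketch (file names are hypothetical and the ResampleDTIVolume Slicer CLI must be installed), resampling a tensor volume onto a reference grid might look as follows; the module description continues below.

from nipype.interfaces.slicer import ResampleDTIVolume

resample = ResampleDTIVolume()
resample.inputs.inputVolume = 'dti.nrrd'      # hypothetical tensor volume
resample.inputs.Reference = 'reference.nrrd'  # supplies spacing, size, orientation, origin
resample.inputs.interpolation = 'ws'          # WindowedSinc sampling
resample.inputs.correction = 'nearest'        # repair tensors that are not semi-definite positive
resample.inputs.outputVolume = 'dti_resampled.nrrd'
resample.run()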
The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. + +version: 0.1 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleDTI + +contributor: Francois Budin (UNC) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics + +""" + + input_spec = ResampleDTIVolumeInputSpec + output_spec = ResampleDTIVolumeOutputSpec + _cmd = "ResampleDTIVolume " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class DWIRicianLMMSEFilterInputSpec(CommandLineInputSpec): + iter = traits.Int( + desc="Number of iterations for the noise removal filter.", + argstr="--iter %d") + re = InputMultiPath( + traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") + rf = InputMultiPath( + traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") + mnvf = traits.Int( + desc="Minimum number of voxels in kernel used for filtering.", + argstr="--mnvf %d") + mnve = traits.Int( + desc="Minimum number of voxels in kernel used for estimation.", + argstr="--mnve %d") + minnstd = traits.Int( + desc="Minimum allowed noise standard deviation.", + argstr="--minnstd %d") + maxnstd = traits.Int( + desc="Maximum allowed noise standard deviation.", + argstr="--maxnstd %d") + hrf = traits.Float( + desc="How many histogram bins per unit interval.", argstr="--hrf %f") + uav = traits.Bool( + desc="Use absolute value in case of negative square.", argstr="--uav ") + inputVolume = File( + position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output DWI volume.", + argstr="%s") + compressOutput = traits.Bool( + desc="Compress the data of the compressed file using gzip", + argstr="--compressOutput ") + + +class DWIRicianLMMSEFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) + + +class DWIRicianLMMSEFilter(SEMLikeCommandLine): + """title: DWI Rician LMMSE Filter + +category: Diffusion.Diffusion Weighted Images + +description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. Images corresponding to each gradient direction, including baseline, are processed individually. The noise parameter is automatically estimated (noise estimation improved but slower). +Note that this is a general purpose filter for MRI images. The module jointLMMSE has been specifically designed for DWI volumes and shows a better performance, so its use is recommended instead. +A complete description of the algorithm in this module can be found in: +S. Aja-Fernandez, M. Niethammer, M. Kubicki, M. Shenton, and C.-F. Westin. Restoration of DWI data using a Rician LMMSE estimator. IEEE Transactions on Medical Imaging, 27(10): pp. 1389-1403, Oct. 2008.
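As a usage illustration of the filter described above, here is a minimal sketch; the file names and radii are hypothetical choices, and the DWIRicianLMMSEFilter Slicer CLI module must be available. Per the description, the joint filter defined further below is generally preferred for DWI volumes.

from nipype.interfaces.slicer import DWIRicianLMMSEFilter

lmmse = DWIRicianLMMSEFilter()
lmmse.inputs.inputVolume = 'dwi.nrrd'  # hypothetical input
lmmse.inputs.iter = 5                  # noise-removal iterations
lmmse.inputs.re = [3, 3, 3]            # estimation radius
lmmse.inputs.rf = [3, 3, 3]            # filtering radius
lmmse.inputs.outputVolume = 'dwi_denoised.nrrd'
lmmse.run()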
+ +version: 0.1.1.$Revision: 1 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RicianLMMSEImageFilter + +contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa), Marc Niethammer (UNC) + +acknowledgements: Partially funded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain). + +""" + + input_spec = DWIRicianLMMSEFilterInputSpec + output_spec = DWIRicianLMMSEFilterOutputSpec + _cmd = "DWIRicianLMMSEFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class TractographyLabelMapSeedingInputSpec(CommandLineInputSpec): + InputVolume = File( + position=-2, desc="Input DTI volume", exists=True, argstr="%s") + inputroi = File( + desc="Label map with seeding ROIs", + exists=True, + argstr="--inputroi %s") + OutputFibers = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Tractography result", + argstr="%s") + useindexspace = traits.Bool( + desc="Seed at IJK voxel grid", argstr="--useindexspace ") + seedspacing = traits.Float( + desc= + "Spacing (in mm) between seed points, only matters if Use Index Space is off", + argstr="--seedspacing %f") + randomgrid = traits.Bool( + desc="Enable random placing of seeds", argstr="--randomgrid ") + clthreshold = traits.Float( + desc="Minimum Linear Measure for the seeding to start.", + argstr="--clthreshold %f") + minimumlength = traits.Float( + desc="Minimum length of the fibers (in mm)", + argstr="--minimumlength %f") + maximumlength = traits.Float( + desc="Maximum length of fibers (in mm)", argstr="--maximumlength %f") + stoppingmode = traits.Enum( + "LinearMeasure", + "FractionalAnisotropy", + desc="Tensor measurement used to stop the tractography", + argstr="--stoppingmode %s") + stoppingvalue = traits.Float( + desc= + "Tractography will stop when the stopping measurement drops below this value", + argstr="--stoppingvalue %f") + stoppingcurvature = traits.Float( + desc= + "Tractography will stop if radius of curvature becomes smaller than this number; units are degrees per mm", + argstr="--stoppingcurvature %f") + integrationsteplength = traits.Float( + desc="Distance between points on the same fiber in mm", + argstr="--integrationsteplength %f") + label = traits.Int( + desc="Label value that defines seeding region.", argstr="--label %d") + writetofile = traits.Bool( + desc="Write fibers to disk or create in the scene?", + argstr="--writetofile ") + outputdirectory = traits.Either( + traits.Bool, + Directory(), + hash_files=False, + desc="Directory in which to save fiber(s)", + argstr="--outputdirectory %s") + name = traits.Str(desc="Name to use for fiber files", argstr="--name %s") + + +class TractographyLabelMapSeedingOutputSpec(TraitedSpec): + OutputFibers = File(position=-1, desc="Tractography result", exists=True) + outputdirectory = Directory( + desc="Directory in which to save fiber(s)", exists=True) + + +class TractographyLabelMapSeeding(SEMLikeCommandLine): + """title: Tractography Label Map Seeding + +category: Diffusion.Diffusion Tensor Images + +description: Seed tracts on a Diffusion Tensor Image (DT) from a label map + +version: 0.1.0.$Revision: 1892 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Seeding + +license: slicer3 + +contributor: Raul San Jose (SPL, BWH), Demian Wassermann (SPL, BWH) + +acknowledgements: Laboratory of Mathematics in Imaging.
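A minimal seeding sketch for the interface above; the file names and stopping parameters are hypothetical choices, not defaults shipped with this patch, and the Slicer CLI module must be installed.

from nipype.interfaces.slicer import TractographyLabelMapSeeding

seeding = TractographyLabelMapSeeding()
seeding.inputs.InputVolume = 'dti.nrrd'         # hypothetical tensor volume
seeding.inputs.inputroi = 'seed_labelmap.nrrd'  # label map with seeding ROIs
seeding.inputs.label = 1                        # label value that defines the seeding region
seeding.inputs.stoppingmode = 'FractionalAnisotropy'
seeding.inputs.stoppingvalue = 0.2
seeding.inputs.OutputFibers = 'fibers.vtk'
seeding.run()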
This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = TractographyLabelMapSeedingInputSpec + output_spec = TractographyLabelMapSeedingOutputSpec + _cmd = "TractographyLabelMapSeeding " + _outputs_filenames = { + 'OutputFibers': 'OutputFibers.vtk', + 'outputdirectory': 'outputdirectory' + } + + +class DWIJointRicianLMMSEFilterInputSpec(CommandLineInputSpec): + re = InputMultiPath( + traits.Int, desc="Estimation radius.", sep=",", argstr="--re %s") + rf = InputMultiPath( + traits.Int, desc="Filtering radius.", sep=",", argstr="--rf %s") + ng = traits.Int( + desc= + "The number of the closest gradients that are used to jointly filter a given gradient direction (0 to use all).", + argstr="--ng %d") + inputVolume = File( + position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output DWI volume.", + argstr="%s") + compressOutput = traits.Bool( + desc="Compress the data of the compressed file using gzip", + argstr="--compressOutput ") + + +class DWIJointRicianLMMSEFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) + + +class DWIJointRicianLMMSEFilter(SEMLikeCommandLine): + """title: DWI Joint Rician LMMSE Filter + +category: Diffusion.Diffusion Weighted Images + +description: This module reduces Rician noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the image in the mean squared error sense using a Rician noise model. The N closest gradient directions to the direction being processed are filtered together to improve the results: the noise-free signal is seen as an n-dimensional vector which has to be estimated with the LMMSE method from a set of corrupted measurements. To that end, the covariance matrix of the noise-free vector and the cross covariance between this signal and the noise have to be estimated, which is done taking into account the image formation process. +The noise parameter is automatically estimated from a rough segmentation of the background of the image. In this area the signal is simply 0, so that Rician statistics reduce to Rayleigh and the noise power can be easily estimated from the mode of the histogram. +A complete description of the algorithm may be found in: +Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. + +version: 0.1.1.$Revision: 1 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/JointRicianLMMSEImageFilter + +contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) + +acknowledgements: Partially funded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain).
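A corresponding sketch for the joint filter (file names hypothetical; per the spec above, ng=0 filters jointly using all gradient directions):

from nipype.interfaces.slicer import DWIJointRicianLMMSEFilter

joint = DWIJointRicianLMMSEFilter()
joint.inputs.inputVolume = 'dwi.nrrd'  # hypothetical input
joint.inputs.ng = 0                    # 0 = use all gradient directions jointly
joint.inputs.re = [3, 3, 3]            # estimation radius
joint.inputs.rf = [3, 3, 3]            # filtering radius
joint.inputs.outputVolume = 'dwi_joint_denoised.nrrd'
joint.run()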
+ +""" + + input_spec = DWIJointRicianLMMSEFilterInputSpec + output_spec = DWIJointRicianLMMSEFilterOutputSpec + _cmd = "DWIJointRicianLMMSEFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class DiffusionWeightedVolumeMaskingInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-4, desc="Input DWI volume", exists=True, argstr="%s") + outputBaseline = traits.Either( + traits.Bool, + File(), + position=-2, + hash_files=False, + desc="Estimated baseline volume", + argstr="%s") + thresholdMask = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Otsu Threshold Mask", + argstr="%s") + otsuomegathreshold = traits.Float( + desc= + "Control the sharpness of the threshold in the Otsu computation. 0: lower threshold, 1: higher threshold", + argstr="--otsuomegathreshold %f") + removeislands = traits.Bool( + desc="Remove Islands in Threshold Mask?", argstr="--removeislands ") + + +class DiffusionWeightedVolumeMaskingOutputSpec(TraitedSpec): + outputBaseline = File( + position=-2, desc="Estimated baseline volume", exists=True) + thresholdMask = File(position=-1, desc="Otsu Threshold Mask", exists=True) + + +class DiffusionWeightedVolumeMasking(SEMLikeCommandLine): + """title: Diffusion Weighted Volume Masking + +category: Diffusion.Diffusion Weighted Images + +description:

Performs a mask calculation from a diffusion weighted (DW) image.

Starting from a DW image, this module computes the baseline image by averaging all the images without diffusion weighting, and then applies the Otsu segmentation algorithm to produce a mask. This mask can then be used when estimating the diffusion tensor (DT) image, so that tensors are not estimated over the entire volume.
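A minimal sketch of how this masking module might be invoked through the interface above (file names are hypothetical; the Slicer CLI module must be installed):

from nipype.interfaces.slicer import DiffusionWeightedVolumeMasking

masking = DiffusionWeightedVolumeMasking()
masking.inputs.inputVolume = 'dwi.nrrd'  # hypothetical input
masking.inputs.otsuomegathreshold = 0.5  # sharpness of the Otsu threshold
masking.inputs.removeislands = True
masking.inputs.outputBaseline = 'baseline.nrrd'
masking.inputs.thresholdMask = 'mask.nrrd'
masking.run()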

+ +version: 0.1.0.$Revision: 1892 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionWeightedMasking + +license: slicer3 + +contributor: Demian Wassermann (SPL, BWH) + +""" + + input_spec = DiffusionWeightedVolumeMaskingInputSpec + output_spec = DiffusionWeightedVolumeMaskingOutputSpec + _cmd = "DiffusionWeightedVolumeMasking " + _outputs_filenames = { + 'outputBaseline': 'outputBaseline.nii', + 'thresholdMask': 'thresholdMask.nii' + } + + +class DTIimportInputSpec(CommandLineInputSpec): + inputFile = File( + position=-2, desc="Input DTI file", exists=True, argstr="%s") + outputTensor = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output DTI volume", + argstr="%s") + testingmode = traits.Bool( + desc= + "Enable testing mode. Sample helix file (helix-DTI.nhdr) will be loaded into Slicer and converted to NIfTI.", + argstr="--testingmode ") + + +class DTIimportOutputSpec(TraitedSpec): + outputTensor = File(position=-1, desc="Output DTI volume", exists=True) + + +class DTIimport(SEMLikeCommandLine): + """title: DTIimport + +category: Diffusion.Diffusion Data Conversion + +description: Import tensor datasets from various formats, including the NIfTI file format + +version: 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIImport + +contributor: Sonia Pujol (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = DTIimportInputSpec + output_spec = DTIimportOutputSpec + _cmd = "DTIimport " + _outputs_filenames = {'outputTensor': 'outputTensor.nii'} + + +class DWIToDTIEstimationInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-3, desc="Input DWI volume", exists=True, argstr="%s") + mask = File( + desc="Mask where the tensors will be computed", + exists=True, + argstr="--mask %s") + outputTensor = traits.Either( + traits.Bool, + File(), + position=-2, + hash_files=False, + desc="Estimated DTI volume", + argstr="%s") + outputBaseline = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Estimated baseline volume", + argstr="%s") + enumeration = traits.Enum( + "LS", + "WLS", + desc="LS: Least Squares, WLS: Weighted Least Squares", + argstr="--enumeration %s") + shiftNeg = traits.Bool( + desc= + "Shift eigenvalues so all are positive (accounts for bad tensors related to noise or acquisition error)", + argstr="--shiftNeg ") + + +class DWIToDTIEstimationOutputSpec(TraitedSpec): + outputTensor = File(position=-2, desc="Estimated DTI volume", exists=True) + outputBaseline = File( + position=-1, desc="Estimated baseline volume", exists=True) + + +class DWIToDTIEstimation(SEMLikeCommandLine): + """title: DWI to DTI Estimation + +category: Diffusion.Diffusion Weighted Images + +description: Performs a tensor model estimation from diffusion weighted images. + +There are three estimation methods available: least squares, weighted least squares and non-linear estimation. The first method is the traditional method for tensor estimation and the fastest one. Weighted least squares takes into account the noise characteristics of the MRI images to weight the DWI samples used in the estimation based on their intensity magnitude. The last method is the most complex.
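A minimal estimation sketch chaining the mask from DiffusionWeightedVolumeMasking into the interface above (file names hypothetical; Slicer CLI required):

from nipype.interfaces.slicer import DWIToDTIEstimation

estimation = DWIToDTIEstimation()
estimation.inputs.inputVolume = 'dwi.nrrd'  # hypothetical input
estimation.inputs.mask = 'mask.nrrd'        # e.g. produced by DiffusionWeightedVolumeMasking
estimation.inputs.enumeration = 'WLS'       # weighted least squares
estimation.inputs.shiftNeg = True           # shift eigenvalues so all are positive
estimation.inputs.outputTensor = 'dti.nrrd'
estimation.inputs.outputBaseline = 'baseline.nrrd'
estimation.run()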
+
+version: 0.1.0.$Revision: 1892 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorEstimation
+
+license: slicer3
+
+contributor: Raul San Jose (SPL, BWH)
+
+acknowledgements: This command module is based on the estimation functionality provided by the Teem library. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
+
+"""
+
+    input_spec = DWIToDTIEstimationInputSpec
+    output_spec = DWIToDTIEstimationOutputSpec
+    _cmd = "DWIToDTIEstimation "
+    _outputs_filenames = {
+        'outputTensor': 'outputTensor.nii',
+        'outputBaseline': 'outputBaseline.nii'
+    }
+
+
+class DiffusionTensorScalarMeasurementsInputSpec(CommandLineInputSpec):
+    inputVolume = File(
+        position=-3, desc="Input DTI volume", exists=True, argstr="%s")
+    outputScalar = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc="Scalar volume derived from tensor",
+        argstr="%s")
+    enumeration = traits.Enum(
+        "Trace",
+        "Determinant",
+        "RelativeAnisotropy",
+        "FractionalAnisotropy",
+        "Mode",
+        "LinearMeasure",
+        "PlanarMeasure",
+        "SphericalMeasure",
+        "MinEigenvalue",
+        "MidEigenvalue",
+        "MaxEigenvalue",
+        "MaxEigenvalueProjectionX",
+        "MaxEigenvalueProjectionY",
+        "MaxEigenvalueProjectionZ",
+        "RAIMaxEigenvecX",
+        "RAIMaxEigenvecY",
+        "RAIMaxEigenvecZ",
+        "MaxEigenvecX",
+        "MaxEigenvecY",
+        "MaxEigenvecZ",
+        "D11",
+        "D22",
+        "D33",
+        "ParallelDiffusivity",
+        "PerpendicularDffusivity",
+        desc="An enumeration of strings",
+        argstr="--enumeration %s")
+
+
+class DiffusionTensorScalarMeasurementsOutputSpec(TraitedSpec):
+    outputScalar = File(
+        position=-1, desc="Scalar volume derived from tensor", exists=True)
+
+
+class DiffusionTensorScalarMeasurements(SEMLikeCommandLine):
+    """title: Diffusion Tensor Scalar Measurements
+
+category: Diffusion.Diffusion Tensor Images
+
+description: Compute a set of different scalar measurements from a tensor field, specially oriented for Diffusion Tensors where some rotationally invariant measurements, like Fractional Anisotropy, are widely used to describe the anisotropic behaviour of the tensor.
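For instance, a minimal sketch computing an FA map with this interface (placeholder paths; the enum value is one of those listed above):

    from nipype.interfaces.slicer.diffusion import DiffusionTensorScalarMeasurements

    fa = DiffusionTensorScalarMeasurements()
    fa.inputs.inputVolume = 'outputTensor.nii'      # e.g. from DWIToDTIEstimation
    fa.inputs.enumeration = 'FractionalAnisotropy'  # any value of the enum above
    fa.inputs.outputScalar = 'fa.nii'
    print(fa.cmdline)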
+ +version: 0.1.0.$Revision: 1892 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DiffusionTensorMathematics + +contributor: Raul San Jose (SPL, BWH) + +acknowledgements: LMI + +""" + + input_spec = DiffusionTensorScalarMeasurementsInputSpec + output_spec = DiffusionTensorScalarMeasurementsOutputSpec + _cmd = "DiffusionTensorScalarMeasurements " + _outputs_filenames = {'outputScalar': 'outputScalar.nii'} + + +class DTIexportInputSpec(CommandLineInputSpec): + inputTensor = File( + position=-2, desc="Input DTI volume", exists=True, argstr="%s") + outputFile = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output DTI file", + argstr="%s") + + +class DTIexportOutputSpec(TraitedSpec): + outputFile = File(position=-1, desc="Output DTI file", exists=True) + + +class DTIexport(SEMLikeCommandLine): + """title: DTIexport + +category: Diffusion.Diffusion Data Conversion + +description: Export DTI data to various file formats + +version: 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/DTIExport + +contributor: Sonia Pujol (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NA-MIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = DTIexportInputSpec + output_spec = DTIexportOutputSpec + _cmd = "DTIexport " + _outputs_filenames = {'outputFile': 'outputFile'} diff --git a/nipype/interfaces/slicer/diffusion/tests/__init__.py b/nipype/interfaces/slicer/diffusion/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py new file mode 100644 index 0000000000..c8d5df2ba2 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIexport.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DTIexport + + +def test_DTIexport_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputTensor=dict( + argstr='%s', + position=-2, + ), + outputFile=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = DTIexport.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTIexport_outputs(): + output_map = dict(outputFile=dict(position=-1, ), ) + outputs = DTIexport.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py new file mode 100644 index 0000000000..ec1d66bc0b --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DTIimport.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DTIimport + + +def test_DTIimport_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + 
inputFile=dict( + argstr='%s', + position=-2, + ), + outputTensor=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + testingmode=dict(argstr='--testingmode ', ), + ) + inputs = DTIimport.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DTIimport_outputs(): + output_map = dict(outputTensor=dict(position=-1, ), ) + outputs = DTIimport.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py new file mode 100644 index 0000000000..a2caa2f633 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIJointRicianLMMSEFilter.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DWIJointRicianLMMSEFilter + + +def test_DWIJointRicianLMMSEFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + compressOutput=dict(argstr='--compressOutput ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + ng=dict(argstr='--ng %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + re=dict( + argstr='--re %s', + sep=',', + ), + rf=dict( + argstr='--rf %s', + sep=',', + ), + ) + inputs = DWIJointRicianLMMSEFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIJointRicianLMMSEFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = DWIJointRicianLMMSEFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py new file mode 100644 index 0000000000..7eff851562 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIRicianLMMSEFilter.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DWIRicianLMMSEFilter + + +def test_DWIRicianLMMSEFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + compressOutput=dict(argstr='--compressOutput ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hrf=dict(argstr='--hrf %f', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + iter=dict(argstr='--iter %d', ), + maxnstd=dict(argstr='--maxnstd %d', ), + minnstd=dict(argstr='--minnstd %d', ), + mnve=dict(argstr='--mnve %d', ), + mnvf=dict(argstr='--mnvf %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + re=dict( + argstr='--re %s', + sep=',', + ), + rf=dict( + argstr='--rf %s', + sep=',', + ), + uav=dict(argstr='--uav ', ), + ) + inputs = DWIRicianLMMSEFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIRicianLMMSEFilter_outputs(): + output_map = 
dict(outputVolume=dict(position=-1, ), ) + outputs = DWIRicianLMMSEFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py new file mode 100644 index 0000000000..217f91edf0 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DWIToDTIEstimation.py @@ -0,0 +1,45 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DWIToDTIEstimation + + +def test_DWIToDTIEstimation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + enumeration=dict(argstr='--enumeration %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-3, + ), + mask=dict(argstr='--mask %s', ), + outputBaseline=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + outputTensor=dict( + argstr='%s', + hash_files=False, + position=-2, + ), + shiftNeg=dict(argstr='--shiftNeg ', ), + ) + inputs = DWIToDTIEstimation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIToDTIEstimation_outputs(): + output_map = dict( + outputBaseline=dict(position=-1, ), + outputTensor=dict(position=-2, ), + ) + outputs = DWIToDTIEstimation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py new file mode 100644 index 0000000000..b75989c349 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionTensorScalarMeasurements.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import DiffusionTensorScalarMeasurements + + +def test_DiffusionTensorScalarMeasurements_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + enumeration=dict(argstr='--enumeration %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-3, + ), + outputScalar=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = DiffusionTensorScalarMeasurements.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffusionTensorScalarMeasurements_outputs(): + output_map = dict(outputScalar=dict(position=-1, ), ) + outputs = DiffusionTensorScalarMeasurements.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py new file mode 100644 index 0000000000..5e8c39a4ac --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_DiffusionWeightedVolumeMasking.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from 
__future__ import unicode_literals +from ..diffusion import DiffusionWeightedVolumeMasking + + +def test_DiffusionWeightedVolumeMasking_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-4, + ), + otsuomegathreshold=dict(argstr='--otsuomegathreshold %f', ), + outputBaseline=dict( + argstr='%s', + hash_files=False, + position=-2, + ), + removeislands=dict(argstr='--removeislands ', ), + thresholdMask=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = DiffusionWeightedVolumeMasking.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DiffusionWeightedVolumeMasking_outputs(): + output_map = dict( + outputBaseline=dict(position=-2, ), + thresholdMask=dict(position=-1, ), + ) + outputs = DiffusionWeightedVolumeMasking.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py new file mode 100644 index 0000000000..30860d9da0 --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_ResampleDTIVolume.py @@ -0,0 +1,71 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import ResampleDTIVolume + + +def test_ResampleDTIVolume_inputs(): + input_map = dict( + Inverse_ITK_Transformation=dict( + argstr='--Inverse_ITK_Transformation ', ), + Reference=dict(argstr='--Reference %s', ), + args=dict(argstr='%s', ), + centered_transform=dict(argstr='--centered_transform ', ), + correction=dict(argstr='--correction %s', ), + defField=dict(argstr='--defField %s', ), + default_pixel_value=dict(argstr='--default_pixel_value %f', ), + direction_matrix=dict( + argstr='--direction_matrix %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict(argstr='--hfieldtype %s', ), + image_center=dict(argstr='--image_center %s', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + interpolation=dict(argstr='--interpolation %s', ), + notbulk=dict(argstr='--notbulk ', ), + number_of_thread=dict(argstr='--number_of_thread %d', ), + origin=dict(argstr='--origin %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + rotation_point=dict(argstr='--rotation_point %s', ), + size=dict( + argstr='--size %s', + sep=',', + ), + spaceChange=dict(argstr='--spaceChange ', ), + spacing=dict( + argstr='--spacing %s', + sep=',', + ), + spline_order=dict(argstr='--spline_order %d', ), + transform=dict(argstr='--transform %s', ), + transform_matrix=dict( + argstr='--transform_matrix %s', + sep=',', + ), + transform_order=dict(argstr='--transform_order %s', ), + transform_tensor_method=dict(argstr='--transform_tensor_method %s', ), + transformationFile=dict(argstr='--transformationFile %s', ), + window_function=dict(argstr='--window_function %s', ), + ) + inputs = ResampleDTIVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ResampleDTIVolume_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = 
ResampleDTIVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py new file mode 100644 index 0000000000..31fb7d5c0d --- /dev/null +++ b/nipype/interfaces/slicer/diffusion/tests/test_auto_TractographyLabelMapSeeding.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..diffusion import TractographyLabelMapSeeding + + +def test_TractographyLabelMapSeeding_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-2, + ), + OutputFibers=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + clthreshold=dict(argstr='--clthreshold %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputroi=dict(argstr='--inputroi %s', ), + integrationsteplength=dict(argstr='--integrationsteplength %f', ), + label=dict(argstr='--label %d', ), + maximumlength=dict(argstr='--maximumlength %f', ), + minimumlength=dict(argstr='--minimumlength %f', ), + name=dict(argstr='--name %s', ), + outputdirectory=dict( + argstr='--outputdirectory %s', + hash_files=False, + ), + randomgrid=dict(argstr='--randomgrid ', ), + seedspacing=dict(argstr='--seedspacing %f', ), + stoppingcurvature=dict(argstr='--stoppingcurvature %f', ), + stoppingmode=dict(argstr='--stoppingmode %s', ), + stoppingvalue=dict(argstr='--stoppingvalue %f', ), + useindexspace=dict(argstr='--useindexspace ', ), + writetofile=dict(argstr='--writetofile ', ), + ) + inputs = TractographyLabelMapSeeding.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TractographyLabelMapSeeding_outputs(): + output_map = dict( + OutputFibers=dict(position=-1, ), + outputdirectory=dict(), + ) + outputs = TractographyLabelMapSeeding.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/__init__.py b/nipype/interfaces/slicer/filtering/__init__.py new file mode 100644 index 0000000000..30ace1d2f2 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .morphology import (GrayscaleGrindPeakImageFilter, + GrayscaleFillHoleImageFilter) +from .denoising import (GradientAnisotropicDiffusion, + CurvatureAnisotropicDiffusion, GaussianBlurImageFilter, + MedianImageFilter) +from .arithmetic import (MultiplyScalarVolumes, MaskScalarVolume, + SubtractScalarVolumes, AddScalarVolumes, + CastScalarVolume) +from .extractskeleton import ExtractSkeleton +from .histogrammatching import HistogramMatching +from .thresholdscalarvolume import ThresholdScalarVolume +from .n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection +from .checkerboardfilter import CheckerBoardFilter +from .imagelabelcombine import ImageLabelCombine +from .votingbinaryholefillingimagefilter import VotingBinaryHoleFillingImageFilter +from .resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume diff --git a/nipype/interfaces/slicer/filtering/arithmetic.py 
b/nipype/interfaces/slicer/filtering/arithmetic.py
new file mode 100644
index 0000000000..22785e32e1
--- /dev/null
+++ b/nipype/interfaces/slicer/filtering/arithmetic.py
@@ -0,0 +1,268 @@
+# -*- coding: utf-8 -*-
+# -*- coding: utf8 -*-
+"""Autogenerated file - DO NOT EDIT
+If you spot a bug, please report it on the mailing list and/or change the generator."""
+
+from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
+import os
+
+
+class MultiplyScalarVolumesInputSpec(CommandLineInputSpec):
+    inputVolume1 = File(
+        position=-3, desc="Input volume 1", exists=True, argstr="%s")
+    inputVolume2 = File(
+        position=-2, desc="Input volume 2", exists=True, argstr="%s")
+    outputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc="Volume1 * Volume2",
+        argstr="%s")
+    order = traits.Enum(
+        "0",
+        "1",
+        "2",
+        "3",
+        desc=
+        "Interpolation order if two images are in different coordinate frames or have different sampling.",
+        argstr="--order %s")
+
+
+class MultiplyScalarVolumesOutputSpec(TraitedSpec):
+    outputVolume = File(position=-1, desc="Volume1 * Volume2", exists=True)
+
+
+class MultiplyScalarVolumes(SEMLikeCommandLine):
+    """title: Multiply Scalar Volumes
+
+category: Filtering.Arithmetic
+
+description: Multiplies two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.
+
+version: 0.1.0.$Revision: 8595 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Multiply
+
+contributor: Bill Lorensen (GE)
+
+acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
+
+"""
+
+    input_spec = MultiplyScalarVolumesInputSpec
+    output_spec = MultiplyScalarVolumesOutputSpec
+    _cmd = "MultiplyScalarVolumes "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
+
+
+class MaskScalarVolumeInputSpec(CommandLineInputSpec):
+    InputVolume = File(
+        position=-3,
+        desc="Input volume to be masked",
+        exists=True,
+        argstr="%s")
+    MaskVolume = File(
+        position=-2,
+        desc="Label volume containing the mask",
+        exists=True,
+        argstr="%s")
+    OutputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc=
+        "Output volume: Input Volume masked by label value from Mask Volume",
+        argstr="%s")
+    label = traits.Int(
+        desc="Label value in the Mask Volume to use as the mask",
+        argstr="--label %d")
+    replace = traits.Int(
+        desc="Value to use for the output volume outside of the mask",
+        argstr="--replace %d")
+
+
+class MaskScalarVolumeOutputSpec(TraitedSpec):
+    OutputVolume = File(
+        position=-1,
+        desc=
+        "Output volume: Input Volume masked by label value from Mask Volume",
+        exists=True)
+
+
+class MaskScalarVolume(SEMLikeCommandLine):
+    """title: Mask Scalar Volume
+
+category: Filtering.Arithmetic
+
+description: Masks two images. The output image is set to 0 everywhere except where the chosen label from the mask volume is present, at which point it will retain its original values. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions.
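A short usage sketch (the import path is confirmed by the filtering __init__ added earlier in this patch; file names and label values are placeholders):

    from nipype.interfaces.slicer.filtering import MaskScalarVolume

    masked = MaskScalarVolume()
    masked.inputs.InputVolume = 't1.nii'     # placeholder
    masked.inputs.MaskVolume = 'labels.nii'  # label map
    masked.inputs.label = 2                  # keep voxels labelled 2
    masked.inputs.replace = 0                # fill value outside the mask
    masked.inputs.OutputVolume = 'masked.nii'
    print(masked.cmdline)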
+ +version: 0.1.0.$Revision: 8595 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Mask + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = MaskScalarVolumeInputSpec + output_spec = MaskScalarVolumeOutputSpec + _cmd = "MaskScalarVolume " + _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + + +class SubtractScalarVolumesInputSpec(CommandLineInputSpec): + inputVolume1 = File( + position=-3, desc="Input volume 1", exists=True, argstr="%s") + inputVolume2 = File( + position=-2, desc="Input volume 2", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Volume1 - Volume2", + argstr="%s") + order = traits.Enum( + "0", + "1", + "2", + "3", + desc= + "Interpolation order if two images are in different coordinate frames or have different sampling.", + argstr="--order %s") + + +class SubtractScalarVolumesOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Volume1 - Volume2", exists=True) + + +class SubtractScalarVolumes(SEMLikeCommandLine): + """title: Subtract Scalar Volumes + +category: Filtering.Arithmetic + +description: Subtracts two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Subtract + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = SubtractScalarVolumesInputSpec + output_spec = SubtractScalarVolumesOutputSpec + _cmd = "SubtractScalarVolumes " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class AddScalarVolumesInputSpec(CommandLineInputSpec): + inputVolume1 = File( + position=-3, desc="Input volume 1", exists=True, argstr="%s") + inputVolume2 = File( + position=-2, desc="Input volume 2", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Volume1 + Volume2", + argstr="%s") + order = traits.Enum( + "0", + "1", + "2", + "3", + desc= + "Interpolation order if two images are in different coordinate frames or have different sampling.", + argstr="--order %s") + + +class AddScalarVolumesOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Volume1 + Volume2", exists=True) + + +class AddScalarVolumes(SEMLikeCommandLine): + """title: Add Scalar Volumes + +category: Filtering.Arithmetic + +description: Adds two images. Although all image types are supported on input, only signed types are produced. The two images do not have to have the same dimensions. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Add + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ +""" + + input_spec = AddScalarVolumesInputSpec + output_spec = AddScalarVolumesOutputSpec + _cmd = "AddScalarVolumes " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class CastScalarVolumeInputSpec(CommandLineInputSpec): + InputVolume = File( + position=-2, + desc="Input volume, the volume to cast.", + exists=True, + argstr="%s") + OutputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output volume, cast to the new type.", + argstr="%s") + type = traits.Enum( + "Char", + "UnsignedChar", + "Short", + "UnsignedShort", + "Int", + "UnsignedInt", + "Float", + "Double", + desc="Type for the new output volume.", + argstr="--type %s") + + +class CastScalarVolumeOutputSpec(TraitedSpec): + OutputVolume = File( + position=-1, desc="Output volume, cast to the new type.", exists=True) + + +class CastScalarVolume(SEMLikeCommandLine): + """title: Cast Scalar Volume + +category: Filtering.Arithmetic + +description: Cast a volume to a given data type. +Use at your own risk when casting an input volume into a lower precision type! +Allows casting to the same type as the input volume. + +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Cast + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = CastScalarVolumeInputSpec + output_spec = CastScalarVolumeOutputSpec + _cmd = "CastScalarVolume " + _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/checkerboardfilter.py b/nipype/interfaces/slicer/filtering/checkerboardfilter.py new file mode 100644 index 0000000000..e4ad85dc5e --- /dev/null +++ b/nipype/interfaces/slicer/filtering/checkerboardfilter.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class CheckerBoardFilterInputSpec(CommandLineInputSpec): + checkerPattern = InputMultiPath( + traits.Int, + desc= + "The pattern of input 1 and input 2 in the output image. The user can specify the number of checkers in each dimension. A checkerPattern of 2,2,1 means that images will alternate in every other checker in the first two dimensions. The same pattern will be used in the 3rd dimension.", + sep=",", + argstr="--checkerPattern %s") + inputVolume1 = File( + position=-3, desc="First Input volume", exists=True, argstr="%s") + inputVolume2 = File( + position=-2, desc="Second Input volume", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class CheckerBoardFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class CheckerBoardFilter(SEMLikeCommandLine): + """title: CheckerBoard Filter + +category: Filtering + +description: Create a checkerboard volume of two volumes. The output volume will show the two inputs alternating according to the user supplied checkerPattern. 
This filter is often used to compare the results of image registration. Note that the second input is resampled to the same origin, spacing and direction before it is composed with the first input. The scalar type of the output volume will be the same as the input image scalar type. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CheckerBoard + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = CheckerBoardFilterInputSpec + output_spec = CheckerBoardFilterOutputSpec + _cmd = "CheckerBoardFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/denoising.py b/nipype/interfaces/slicer/filtering/denoising.py new file mode 100644 index 0000000000..0dbaaebf74 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/denoising.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class GradientAnisotropicDiffusionInputSpec(CommandLineInputSpec): + conductance = traits.Float( + desc= + "Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", + argstr="--conductance %f") + iterations = traits.Int( + desc= + "The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", + argstr="--iterations %d") + timeStep = traits.Float( + desc= + "The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", + argstr="--timeStep %f") + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class GradientAnisotropicDiffusionOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class GradientAnisotropicDiffusion(SEMLikeCommandLine): + """title: Gradient Anisotropic Diffusion + +category: Filtering.Denoising + +description: Runs gradient anisotropic diffusion on a volume. + +Anisotropic diffusion methods reduce noise (or unwanted detail) in images while preserving specific image features, like edges. For many applications, there is an assumption that light-dark transitions (edges) are interesting. Standard isotropic diffusion methods move and blur light-dark boundaries. Anisotropic diffusion methods are formulated to specifically preserve edges. 
The conductance term for this implementation is a function of the gradient magnitude of the image at each point, reducing the strength of diffusion at edges. The numerical implementation of this equation is similar to that described in the Perona-Malik paper, but uses a more robust technique for gradient magnitude estimation and has been generalized to N-dimensions. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GradientAnisotropicDiffusion + +contributor: Bill Lorensen (GE) + +acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + +""" + + input_spec = GradientAnisotropicDiffusionInputSpec + output_spec = GradientAnisotropicDiffusionOutputSpec + _cmd = "GradientAnisotropicDiffusion " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class CurvatureAnisotropicDiffusionInputSpec(CommandLineInputSpec): + conductance = traits.Float( + desc= + "Conductance controls the sensitivity of the conductance term. As a general rule, the lower the value, the more strongly the filter preserves edges. A high value will cause diffusion (smoothing) across edges. Note that the number of iterations controls how much smoothing is done within regions bounded by edges.", + argstr="--conductance %f") + iterations = traits.Int( + desc= + "The more iterations, the more smoothing. Each iteration takes the same amount of time. If it takes 10 seconds for one iteration, then it will take 100 seconds for 10 iterations. Note that the conductance controls how much each iteration smooths across edges.", + argstr="--iterations %d") + timeStep = traits.Float( + desc= + "The time step depends on the dimensionality of the image. In Slicer the images are 3D and the default (.0625) time step will provide a stable solution.", + argstr="--timeStep %f") + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class CurvatureAnisotropicDiffusionOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class CurvatureAnisotropicDiffusion(SEMLikeCommandLine): + """title: Curvature Anisotropic Diffusion + +category: Filtering.Denoising + +description: Performs anisotropic diffusion on an image using a modified curvature diffusion equation (MCDE). + +MCDE does not exhibit the edge enhancing properties of classic anisotropic diffusion, which can under certain conditions undergo a 'negative' diffusion, which enhances the contrast of edges. Equations of the form of MCDE always undergo positive diffusion, with the conductance term only varying the strength of that diffusion. + + Qualitatively, MCDE compares well with other non-linear diffusion techniques. It is less sensitive to contrast than classic Perona-Malik style diffusion, and preserves finer detailed structures in images. There is a potential speed trade-off for using this function in place of Gradient Anisotropic Diffusion. Each iteration of the solution takes roughly twice as long. Fewer iterations, however, may be required to reach an acceptable solution. 
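The conductance/iterations/timeStep trade-off described above maps directly onto the interface inputs; a minimal sketch with illustrative parameter values (placeholder paths):

    from nipype.interfaces.slicer.filtering import CurvatureAnisotropicDiffusion

    smooth = CurvatureAnisotropicDiffusion()
    smooth.inputs.inputVolume = 't1.nii'  # placeholder
    smooth.inputs.conductance = 1.0       # lower values preserve edges more strongly
    smooth.inputs.iterations = 5          # more iterations, more smoothing
    smooth.inputs.timeStep = 0.0625       # the stable 3D default noted above
    smooth.inputs.outputVolume = 'smoothed.nii'
    print(smooth.cmdline)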
+
+version: 0.1.0.$Revision: 19608 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/CurvatureAnisotropicDiffusion
+
+contributor: Bill Lorensen (GE)
+
+acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium
+
+"""
+
+    input_spec = CurvatureAnisotropicDiffusionInputSpec
+    output_spec = CurvatureAnisotropicDiffusionOutputSpec
+    _cmd = "CurvatureAnisotropicDiffusion "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
+
+
+class GaussianBlurImageFilterInputSpec(CommandLineInputSpec):
+    sigma = traits.Float(
+        desc="Sigma value in physical units (e.g., mm) of the Gaussian kernel",
+        argstr="--sigma %f")
+    inputVolume = File(
+        position=-2, desc="Input volume", exists=True, argstr="%s")
+    outputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc="Blurred Volume",
+        argstr="%s")
+
+
+class GaussianBlurImageFilterOutputSpec(TraitedSpec):
+    outputVolume = File(position=-1, desc="Blurred Volume", exists=True)
+
+
+class GaussianBlurImageFilter(SEMLikeCommandLine):
+    """title: Gaussian Blur Image Filter
+
+category: Filtering.Denoising
+
+description: Apply a Gaussian blur to an image
+
+version: 0.1.0.$Revision: 1.1 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GaussianBlurImageFilter
+
+contributor: Julien Jomier (Kitware), Stephen Aylward (Kitware)
+
+acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
+
+"""
+
+    input_spec = GaussianBlurImageFilterInputSpec
+    output_spec = GaussianBlurImageFilterOutputSpec
+    _cmd = "GaussianBlurImageFilter "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
+
+
+class MedianImageFilterInputSpec(CommandLineInputSpec):
+    neighborhood = InputMultiPath(
+        traits.Int,
+        desc="The size of the neighborhood in each dimension",
+        sep=",",
+        argstr="--neighborhood %s")
+    inputVolume = File(
+        position=-2,
+        desc="Input volume to be filtered",
+        exists=True,
+        argstr="%s")
+    outputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc="Output filtered",
+        argstr="%s")
+
+
+class MedianImageFilterOutputSpec(TraitedSpec):
+    outputVolume = File(position=-1, desc="Output filtered", exists=True)
+
+
+class MedianImageFilter(SEMLikeCommandLine):
+    """title: Median Image Filter
+
+category: Filtering.Denoising
+
+description: The MedianImageFilter is commonly used as a robust approach for noise reduction. This filter is particularly efficient against "salt-and-pepper" noise. In other words, it is robust to the presence of gray-level outliers. MedianImageFilter computes the value of each output pixel as the statistical median of the neighborhood of values around the corresponding input pixel.
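A minimal sketch of this filter from nipype (import confirmed by the filtering __init__ above; paths and the per-dimension neighborhood are placeholders):

    from nipype.interfaces.slicer.filtering import MedianImageFilter

    median = MedianImageFilter()
    median.inputs.inputVolume = 'noisy.nii'  # placeholder
    median.inputs.neighborhood = [1, 1, 1]   # per-dimension neighborhood, as described above
    median.inputs.outputVolume = 'denoised.nii'
    print(median.cmdline)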
+ +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MedianImageFilter + +contributor: Bill Lorensen (GE) + +acknowledgements: This command module was derived from Insight/Examples/Filtering/MedianImageFilter (copyright) Insight Software Consortium + +""" + + input_spec = MedianImageFilterInputSpec + output_spec = MedianImageFilterOutputSpec + _cmd = "MedianImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/extractskeleton.py b/nipype/interfaces/slicer/filtering/extractskeleton.py new file mode 100644 index 0000000000..d7770c8f2e --- /dev/null +++ b/nipype/interfaces/slicer/filtering/extractskeleton.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ExtractSkeletonInputSpec(CommandLineInputSpec): + InputImageFileName = File( + position=-2, desc="Input image", exists=True, argstr="%s") + OutputImageFileName = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Skeleton of the input image", + argstr="%s") + type = traits.Enum( + "1D", "2D", desc="Type of skeleton to create", argstr="--type %s") + dontPrune = traits.Bool( + desc="Return the full skeleton, not just the maximal skeleton", + argstr="--dontPrune ") + numPoints = traits.Int( + desc="Number of points used to represent the skeleton", + argstr="--numPoints %d") + pointsFile = traits.Str( + desc= + "Name of the file to store the coordinates of the central (1D) skeleton points", + argstr="--pointsFile %s") + + +class ExtractSkeletonOutputSpec(TraitedSpec): + OutputImageFileName = File( + position=-1, desc="Skeleton of the input image", exists=True) + + +class ExtractSkeleton(SEMLikeCommandLine): + """title: Extract Skeleton + +category: Filtering + +description: Extract the skeleton of a binary object. The skeleton can be limited to being a 1D curve or allowed to be a full 2D manifold. The branches of the skeleton can be pruned so that only the maximal center skeleton is returned. + +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExtractSkeleton + +contributor: Pierre Seroul (UNC), Martin Styner (UNC), Guido Gerig (UNC), Stephen Aylward (Kitware) + +acknowledgements: The original implementation of this method was provided by ETH Zurich, Image Analysis Laboratory of Profs Olaf Kuebler, Gabor Szekely and Guido Gerig. Martin Styner at UNC, Chapel Hill made enhancements. Wrapping for Slicer was provided by Pierre Seroul and Stephen Aylward at Kitware, Inc. 
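A hedged usage sketch of the skeleton extraction described above (placeholder file names and point count):

    from nipype.interfaces.slicer.filtering import ExtractSkeleton

    skel = ExtractSkeleton()
    skel.inputs.InputImageFileName = 'vessels_binary.nii'  # placeholder binary image
    skel.inputs.type = '1D'                                # restrict to a 1D curve
    skel.inputs.numPoints = 100                            # points representing the skeleton
    skel.inputs.pointsFile = 'skeleton_points.txt'         # coordinates of the 1D skeleton
    skel.inputs.OutputImageFileName = 'skeleton.nii'
    print(skel.cmdline)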
+
+"""
+
+    input_spec = ExtractSkeletonInputSpec
+    output_spec = ExtractSkeletonOutputSpec
+    _cmd = "ExtractSkeleton "
+    _outputs_filenames = {'OutputImageFileName': 'OutputImageFileName.nii'}
diff --git a/nipype/interfaces/slicer/filtering/histogrammatching.py b/nipype/interfaces/slicer/filtering/histogrammatching.py
new file mode 100644
index 0000000000..1b3b26b061
--- /dev/null
+++ b/nipype/interfaces/slicer/filtering/histogrammatching.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+# -*- coding: utf8 -*-
+"""Autogenerated file - DO NOT EDIT
+If you spot a bug, please report it on the mailing list and/or change the generator."""
+
+from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
+import os
+
+
+class HistogramMatchingInputSpec(CommandLineInputSpec):
+    numberOfHistogramLevels = traits.Int(
+        desc="The number of histogram levels to use",
+        argstr="--numberOfHistogramLevels %d")
+    numberOfMatchPoints = traits.Int(
+        desc="The number of match points to use",
+        argstr="--numberOfMatchPoints %d")
+    threshold = traits.Bool(
+        desc=
+        "If on, only pixels above the mean in each volume are thresholded.",
+        argstr="--threshold ")
+    inputVolume = File(
+        position=-3,
+        desc="Input volume to be filtered",
+        exists=True,
+        argstr="%s")
+    referenceVolume = File(
+        position=-2,
+        desc="Input volume whose histogram will be matched",
+        exists=True,
+        argstr="%s")
+    outputVolume = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc=
+        "Output volume. This is the input volume with intensities matched to the reference volume.",
+        argstr="%s")
+
+
+class HistogramMatchingOutputSpec(TraitedSpec):
+    outputVolume = File(
+        position=-1,
+        desc=
+        "Output volume. This is the input volume with intensities matched to the reference volume.",
+        exists=True)
+
+
+class HistogramMatching(SEMLikeCommandLine):
+    """title: Histogram Matching
+
+category: Filtering
+
+description: Normalizes the grayscale values of a source image based on the grayscale values of a reference image. This filter uses a histogram matching technique where the histograms of the two images are matched only at a specified number of quantile values.
+
+The filter was originally designed to normalize MR images of the same MR protocol and same body part. The algorithm works best if background pixels are excluded from both the source and reference histograms. A simple background exclusion method is to exclude all pixels whose grayscale values are smaller than the mean grayscale value. ThresholdAtMeanIntensity switches on this simple background exclusion method.
+
+Number of match points governs the number of quantile values to be matched.
+
+The filter assumes that both the source and reference are of the same type and that the input and output image type have the same number of dimensions and have scalar pixel types.
+
+version: 0.1.0.$Revision: 19608 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/HistogramMatching
+
+contributor: Bill Lorensen (GE)
+
+acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
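The quantile-matching parameters described above translate into inputs as follows; a sketch with illustrative values (placeholder paths):

    from nipype.interfaces.slicer.filtering import HistogramMatching

    match = HistogramMatching()
    match.inputs.inputVolume = 'subject_t1.nii'      # placeholder source image
    match.inputs.referenceVolume = 'template_t1.nii'
    match.inputs.numberOfHistogramLevels = 128       # illustrative values
    match.inputs.numberOfMatchPoints = 10
    match.inputs.threshold = True                    # exclude sub-mean background voxels
    match.inputs.outputVolume = 'matched.nii'
    print(match.cmdline)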
+ +""" + + input_spec = HistogramMatchingInputSpec + output_spec = HistogramMatchingOutputSpec + _cmd = "HistogramMatching " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/imagelabelcombine.py b/nipype/interfaces/slicer/filtering/imagelabelcombine.py new file mode 100644 index 0000000000..067a575045 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/imagelabelcombine.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ImageLabelCombineInputSpec(CommandLineInputSpec): + InputLabelMap_A = File( + position=-3, desc="Label map image", exists=True, argstr="%s") + InputLabelMap_B = File( + position=-2, desc="Label map image", exists=True, argstr="%s") + OutputLabelMap = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Resulting Label map image", + argstr="%s") + first_overwrites = traits.Bool( + desc="Use first or second label when both are present", + argstr="--first_overwrites ") + + +class ImageLabelCombineOutputSpec(TraitedSpec): + OutputLabelMap = File( + position=-1, desc="Resulting Label map image", exists=True) + + +class ImageLabelCombine(SEMLikeCommandLine): + """title: Image Label Combine + +category: Filtering + +description: Combine two label maps into one + +version: 0.1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ImageLabelCombine + +contributor: Alex Yarmarkovich (SPL, BWH) + +""" + + input_spec = ImageLabelCombineInputSpec + output_spec = ImageLabelCombineOutputSpec + _cmd = "ImageLabelCombine " + _outputs_filenames = {'OutputLabelMap': 'OutputLabelMap.nii'} diff --git a/nipype/interfaces/slicer/filtering/morphology.py b/nipype/interfaces/slicer/filtering/morphology.py new file mode 100644 index 0000000000..913c63d5ab --- /dev/null +++ b/nipype/interfaces/slicer/filtering/morphology.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class GrayscaleGrindPeakImageFilterInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class GrayscaleGrindPeakImageFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class GrayscaleGrindPeakImageFilter(SEMLikeCommandLine): + """title: Grayscale Grind Peak Image Filter + +category: Filtering.Morphology + +description: GrayscaleGrindPeakImageFilter removes peaks in a grayscale image. Peaks are local maxima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a peak are extrapolated through the peak. + +This filter is used to smooth over local maxima without affecting the values of local minima. 
If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local maxima. + +This filter uses the GrayscaleGeodesicDilateImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the minimum pixel value in the input image. + +This filter is the dual to the GrayscaleFillholeImageFilter which implements the Fillhole algorithm. Since it is a dual, it is somewhat superfluous but is provided as a convenience. + +Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. + +A companion filter, Grayscale Fill Hole, fills holes in grayscale images. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleGrindPeakImageFilter + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = GrayscaleGrindPeakImageFilterInputSpec + output_spec = GrayscaleGrindPeakImageFilterOutputSpec + _cmd = "GrayscaleGrindPeakImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class GrayscaleFillHoleImageFilterInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class GrayscaleFillHoleImageFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class GrayscaleFillHoleImageFilter(SEMLikeCommandLine): + """title: Grayscale Fill Hole Image Filter + +category: Filtering.Morphology + +description: GrayscaleFillholeImageFilter fills holes in a grayscale image. Holes are local minima in the grayscale topography that are not connected to boundaries of the image. Gray level values adjacent to a hole are extrapolated across the hole. + +This filter is used to smooth over local minima without affecting the values of local maxima. If you take the difference between the output of this filter and the original image (and perhaps threshold the difference above a small value), you'll obtain a map of the local minima. + +This filter uses the itkGrayscaleGeodesicErodeImageFilter. It provides its own input as the "mask" input to the geodesic erosion. The "marker" image for the geodesic erosion is constructed such that boundary pixels match the boundary pixels of the input image and the interior pixels are set to the maximum pixel value in the input image. + + Geodesic morphology and the Fillhole algorithm is described in Chapter 6 of Pierre Soille's book "Morphological Image Analysis: Principles and Applications", Second Edition, Springer, 2003. + + A companion filter, Grayscale Grind Peak, removes peaks in grayscale images. 
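Both morphology filters are driven the same way; a minimal sketch for the fill-hole variant (placeholder paths):

    from nipype.interfaces.slicer.filtering import GrayscaleFillHoleImageFilter

    fill = GrayscaleFillHoleImageFilter()
    fill.inputs.inputVolume = 'image.nii'  # placeholder
    fill.inputs.outputVolume = 'filled.nii'
    print(fill.cmdline)  # GrayscaleGrindPeakImageFilter takes the same inputs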
+
+version: 0.1.0.$Revision: 19608 $(alpha)
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleFillHoleImageFilter
+
+contributor: Bill Lorensen (GE)
+
+acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
+
+"""
+
+    input_spec = GrayscaleFillHoleImageFilterInputSpec
+    output_spec = GrayscaleFillHoleImageFilterOutputSpec
+    _cmd = "GrayscaleFillHoleImageFilter "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
diff --git a/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py
new file mode 100644
index 0000000000..28f694f77e
--- /dev/null
+++ b/nipype/interfaces/slicer/filtering/n4itkbiasfieldcorrection.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+# -*- coding: utf8 -*-
+"""Autogenerated file - DO NOT EDIT
+If you spot a bug, please report it on the mailing list and/or change the generator."""
+
+from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
+import os
+
+
+class N4ITKBiasFieldCorrectionInputSpec(CommandLineInputSpec):
+    inputimage = File(
+        desc="Input image where you observe signal inhomogeneity",
+        exists=True,
+        argstr="--inputimage %s")
+    maskimage = File(
+        desc=
+        "Binary mask that defines the structure of your interest. NOTE: This parameter is OPTIONAL. If the mask is not specified, the module will use internally Otsu thresholding to define this mask. Better processing results can often be obtained when a meaningful mask is defined.",
+        exists=True,
+        argstr="--maskimage %s")
+    outputimage = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Result of processing",
+        argstr="--outputimage %s")
+    outputbiasfield = traits.Either(
+        traits.Bool,
+        File(),
+        hash_files=False,
+        desc="Recovered bias field (OPTIONAL)",
+        argstr="--outputbiasfield %s")
+    iterations = InputMultiPath(
+        traits.Int,
+        desc=
+        "Maximum number of iterations at each level of resolution. Larger values will increase execution time, but may lead to better results.",
+        sep=",",
+        argstr="--iterations %s")
+    convergencethreshold = traits.Float(
+        desc=
+        "Stopping criterion for the iterative bias estimation. Larger values will lead to smaller execution time.",
+        argstr="--convergencethreshold %f")
+    meshresolution = InputMultiPath(
+        traits.Float,
+        desc=
+        "Resolution of the initial bspline grid defined as a sequence of three numbers. The actual resolution will be defined by adding the bspline order (default is 3) to the resolution in each dimension specified here. For example, 1,1,1 will result in a 4x4x4 grid of control points. This parameter may need to be adjusted based on your input image. In the multi-resolution N4 framework, the resolution of the bspline grid at subsequent iterations will be doubled. The number of resolutions is implicitly defined by Number of iterations parameter (the size of this list is the number of resolutions)",
+        sep=",",
+        argstr="--meshresolution %s")
+    splinedistance = traits.Float(
+        desc=
+        "An alternative means to define the spline grid, by setting the distance between the control points.
This parameter is used only if the grid resolution is not specified.", + argstr="--splinedistance %f") + shrinkfactor = traits.Int( + desc= + "Defines how much the image should be upsampled before estimating the inhomogeneity field. Increase if you want to reduce the execution time. 1 corresponds to the original resolution. Larger values will significantly reduce the computation time.", + argstr="--shrinkfactor %d") + bsplineorder = traits.Int( + desc= + "Order of B-spline used in the approximation. Larger values will lead to longer execution times, may result in overfitting and poor result.", + argstr="--bsplineorder %d") + weightimage = File( + desc="Weight Image", exists=True, argstr="--weightimage %s") + histogramsharpening = InputMultiPath( + traits.Float, + desc= + "A vector of up to three values. Non-zero values correspond to Bias Field Full Width at Half Maximum, Wiener filter noise, and Number of histogram bins.", + sep=",", + argstr="--histogramsharpening %s") + + +class N4ITKBiasFieldCorrectionOutputSpec(TraitedSpec): + outputimage = File(desc="Result of processing", exists=True) + outputbiasfield = File(desc="Recovered bias field (OPTIONAL)", exists=True) + + +class N4ITKBiasFieldCorrection(SEMLikeCommandLine): + """title: N4ITK MRI Bias correction + +category: Filtering + +description: Performs image bias correction using N4 algorithm. This module is based on the ITK filters contributed in the following publication: Tustison N, Gee J "N4ITK: Nick's N3 ITK Implementation For MRI Bias Field Correction", The Insight Journal 2009 January-June, http://hdl.handle.net/10380/3053 + +version: 9 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/N4ITKBiasFieldCorrection + +contributor: Nick Tustison (UPenn), Andrey Fedorov (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: The development of this module was partially supported by NIH grants R01 AA016748-01, R01 CA111288 and U01 CA151261 as well as by NA-MIC, NAC, NCIGT and the Slicer community. 
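A hedged usage sketch of the N4 interface defined above (placeholder file names; the iteration schedule is illustrative, with one entry per resolution level as the description explains):

    from nipype.interfaces.slicer.filtering import N4ITKBiasFieldCorrection

    n4 = N4ITKBiasFieldCorrection()
    n4.inputs.inputimage = 't1.nii'          # placeholder
    n4.inputs.maskimage = 'brainmask.nii'    # optional; Otsu thresholding is used otherwise
    n4.inputs.iterations = [50, 50, 30, 20]  # one entry per resolution level
    n4.inputs.outputimage = 'corrected.nii'
    n4.inputs.outputbiasfield = 'bias.nii'
    print(n4.cmdline)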
+ +""" + + input_spec = N4ITKBiasFieldCorrectionInputSpec + output_spec = N4ITKBiasFieldCorrectionOutputSpec + _cmd = "N4ITKBiasFieldCorrection " + _outputs_filenames = { + 'outputimage': 'outputimage.nii', + 'outputbiasfield': 'outputbiasfield.nii' + } diff --git a/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py new file mode 100644 index 0000000000..6205b76b54 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/resamplescalarvectordwivolume.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ResampleScalarVectorDWIVolumeInputSpec(CommandLineInputSpec): + inputVolume = File( + position=-2, + desc="Input Volume to be resampled", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Resampled Volume", + argstr="%s") + Reference = File( + desc="Reference Volume (spacing,size,orientation,origin)", + exists=True, + argstr="--Reference %s") + transformationFile = File(exists=True, argstr="--transformationFile %s") + defField = File( + desc= + "File containing the deformation field (3D vector image containing vectors with 3 components)", + exists=True, + argstr="--defField %s") + hfieldtype = traits.Enum( + "displacement", + "h-Field", + desc="Set if the deformation field is an h-Field", + argstr="--hfieldtype %s") + interpolation = traits.Enum( + "linear", + "nn", + "ws", + "bs", + desc= + "Sampling algorithm (linear or nn (nearest neighborhoor), ws (WindowedSinc), bs (BSpline) )", + argstr="--interpolation %s") + transform_order = traits.Enum( + "input-to-output", + "output-to-input", + desc="Select in what order the transforms are read", + argstr="--transform_order %s") + notbulk = traits.Bool( + desc= + "The transform following the BSpline transform is not set as a bulk transform for the BSpline transform", + argstr="--notbulk ") + spaceChange = traits.Bool( + desc= + "Space Orientation between transform and image is different (RAS/LPS) (warning: if the transform is a Transform Node in Slicer3, do not select)", + argstr="--spaceChange ") + rotation_point = traits.List( + desc= + "Rotation Point in case of rotation around a point (otherwise useless)", + argstr="--rotation_point %s") + centered_transform = traits.Bool( + desc= + "Set the center of the transformation to the center of the input image", + argstr="--centered_transform ") + image_center = traits.Enum( + "input", + "output", + desc= + "Image to use to center the transform (used only if \'Centered Transform\' is selected)", + argstr="--image_center %s") + Inverse_ITK_Transformation = traits.Bool( + desc= + "Inverse the transformation before applying it from output image to input image", + argstr="--Inverse_ITK_Transformation ") + spacing = InputMultiPath( + traits.Float, + desc="Spacing along each dimension (0 means use input spacing)", + sep=",", + argstr="--spacing %s") + size = InputMultiPath( + traits.Float, + desc="Size along each dimension (0 means use input size)", + sep=",", + argstr="--size %s") + origin = traits.List( + desc="Origin of the output Image", argstr="--origin %s") + direction_matrix = InputMultiPath( + 
traits.Float, + desc= + "9 parameters of the direction matrix by rows (ijk to LPS if LPS transform, ijk to RAS if RAS transform)", + sep=",", + argstr="--direction_matrix %s") + number_of_thread = traits.Int( + desc="Number of thread used to compute the output image", + argstr="--number_of_thread %d") + default_pixel_value = traits.Float( + desc= + "Default pixel value for samples falling outside of the input region", + argstr="--default_pixel_value %f") + window_function = traits.Enum( + "h", + "c", + "w", + "l", + "b", + desc= + "Window Function , h = Hamming , c = Cosine , w = Welch , l = Lanczos , b = Blackman", + argstr="--window_function %s") + spline_order = traits.Int(desc="Spline Order", argstr="--spline_order %d") + transform_matrix = InputMultiPath( + traits.Float, + desc= + "12 parameters of the transform matrix by rows ( --last 3 being translation-- )", + sep=",", + argstr="--transform_matrix %s") + transform = traits.Enum( + "rt", + "a", + desc="Transform algorithm, rt = Rigid Transform, a = Affine Transform", + argstr="--transform %s") + + +class ResampleScalarVectorDWIVolumeOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Resampled Volume", exists=True) + + +class ResampleScalarVectorDWIVolume(SEMLikeCommandLine): + """title: Resample Scalar/Vector/DWI Volume + +category: Filtering + +description: This module implements image and vector-image resampling through the use of itk Transforms.It can also handle diffusion weighted MRI image resampling. "Resampling" is performed in space coordinates, not pixel/grid coordinates. It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. + +Warning: To resample DWMR Images, use nrrd input and output files. + +Warning: Do not use to resample Diffusion Tensor Images, tensors would not be reoriented + +version: 0.1 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleScalarVectorDWIVolume + +contributor: Francois Budin (UNC) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
Information on the National Centers for Biomedical Computing can be obtained from http://nihroadmap.nih.gov/bioinformatics + +""" + + input_spec = ResampleScalarVectorDWIVolumeInputSpec + output_spec = ResampleScalarVectorDWIVolumeOutputSpec + _cmd = "ResampleScalarVectorDWIVolume " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/tests/__init__.py b/nipype/interfaces/slicer/filtering/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py new file mode 100644 index 0000000000..9f2209c1eb --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_AddScalarVolumes.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..arithmetic import AddScalarVolumes + + +def test_AddScalarVolumes_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr='%s', + position=-3, + ), + inputVolume2=dict( + argstr='%s', + position=-2, + ), + order=dict(argstr='--order %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = AddScalarVolumes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AddScalarVolumes_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = AddScalarVolumes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py new file mode 100644 index 0000000000..a659aa47f7 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CastScalarVolume.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..arithmetic import CastScalarVolume + + +def test_CastScalarVolume_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-2, + ), + OutputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + type=dict(argstr='--type %s', ), + ) + inputs = CastScalarVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CastScalarVolume_outputs(): + output_map = dict(OutputVolume=dict(position=-1, ), ) + outputs = CastScalarVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py new file mode 100644 index 0000000000..ae662cf2ba --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CheckerBoardFilter.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by 
tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..checkerboardfilter import CheckerBoardFilter + + +def test_CheckerBoardFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + checkerPattern=dict( + argstr='--checkerPattern %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr='%s', + position=-3, + ), + inputVolume2=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = CheckerBoardFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CheckerBoardFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = CheckerBoardFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py new file mode 100644 index 0000000000..5613eb0c4c --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_CurvatureAnisotropicDiffusion.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import CurvatureAnisotropicDiffusion + + +def test_CurvatureAnisotropicDiffusion_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + conductance=dict(argstr='--conductance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + iterations=dict(argstr='--iterations %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + timeStep=dict(argstr='--timeStep %f', ), + ) + inputs = CurvatureAnisotropicDiffusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CurvatureAnisotropicDiffusion_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = CurvatureAnisotropicDiffusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py new file mode 100644 index 0000000000..5db6c65c7e --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ExtractSkeleton.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..extractskeleton import ExtractSkeleton + + +def test_ExtractSkeleton_inputs(): + input_map = dict( + InputImageFileName=dict( + argstr='%s', + position=-2, + ), + OutputImageFileName=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + dontPrune=dict(argstr='--dontPrune ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + numPoints=dict(argstr='--numPoints %d', ), + pointsFile=dict(argstr='--pointsFile %s', ), + type=dict(argstr='--type %s', ), + ) + inputs = ExtractSkeleton.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + 
assert getattr(inputs.traits()[key], metakey) == value +def test_ExtractSkeleton_outputs(): + output_map = dict(OutputImageFileName=dict(position=-1, ), ) + outputs = ExtractSkeleton.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py new file mode 100644 index 0000000000..ff46d6d308 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GaussianBlurImageFilter.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import GaussianBlurImageFilter + + +def test_GaussianBlurImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + sigma=dict(argstr='--sigma %f', ), + ) + inputs = GaussianBlurImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GaussianBlurImageFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = GaussianBlurImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py new file mode 100644 index 0000000000..8ec7dbb156 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GradientAnisotropicDiffusion.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import GradientAnisotropicDiffusion + + +def test_GradientAnisotropicDiffusion_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + conductance=dict(argstr='--conductance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + iterations=dict(argstr='--iterations %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + timeStep=dict(argstr='--timeStep %f', ), + ) + inputs = GradientAnisotropicDiffusion.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GradientAnisotropicDiffusion_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = GradientAnisotropicDiffusion.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py new file mode 100644 index 0000000000..062a23c293 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleFillHoleImageFilter.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ 
import unicode_literals +from ..morphology import GrayscaleFillHoleImageFilter + + +def test_GrayscaleFillHoleImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = GrayscaleFillHoleImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GrayscaleFillHoleImageFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = GrayscaleFillHoleImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py new file mode 100644 index 0000000000..edb6081ed3 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_GrayscaleGrindPeakImageFilter.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..morphology import GrayscaleGrindPeakImageFilter + + +def test_GrayscaleGrindPeakImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = GrayscaleGrindPeakImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GrayscaleGrindPeakImageFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = GrayscaleGrindPeakImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py new file mode 100644 index 0000000000..f73690586d --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_HistogramMatching.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..histogrammatching import HistogramMatching + + +def test_HistogramMatching_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-3, + ), + numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + referenceVolume=dict( + argstr='%s', + position=-2, + ), + threshold=dict(argstr='--threshold ', ), + ) + inputs = HistogramMatching.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_HistogramMatching_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = HistogramMatching.output_spec() + + for 
key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py new file mode 100644 index 0000000000..0bae22c342 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ImageLabelCombine.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..imagelabelcombine import ImageLabelCombine + + +def test_ImageLabelCombine_inputs(): + input_map = dict( + InputLabelMap_A=dict( + argstr='%s', + position=-3, + ), + InputLabelMap_B=dict( + argstr='%s', + position=-2, + ), + OutputLabelMap=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + first_overwrites=dict(argstr='--first_overwrites ', ), + ) + inputs = ImageLabelCombine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ImageLabelCombine_outputs(): + output_map = dict(OutputLabelMap=dict(position=-1, ), ) + outputs = ImageLabelCombine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py new file mode 100644 index 0000000000..cd04072890 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MaskScalarVolume.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..arithmetic import MaskScalarVolume + + +def test_MaskScalarVolume_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-3, + ), + MaskVolume=dict( + argstr='%s', + position=-2, + ), + OutputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + label=dict(argstr='--label %d', ), + replace=dict(argstr='--replace %d', ), + ) + inputs = MaskScalarVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MaskScalarVolume_outputs(): + output_map = dict(OutputVolume=dict(position=-1, ), ) + outputs = MaskScalarVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py new file mode 100644 index 0000000000..35bb9496c9 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MedianImageFilter.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import MedianImageFilter + + +def test_MedianImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + neighborhood=dict( + 
argstr='--neighborhood %s', + sep=',', + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = MedianImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MedianImageFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = MedianImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py new file mode 100644 index 0000000000..6590c4b133 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_MultiplyScalarVolumes.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..arithmetic import MultiplyScalarVolumes + + +def test_MultiplyScalarVolumes_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr='%s', + position=-3, + ), + inputVolume2=dict( + argstr='%s', + position=-2, + ), + order=dict(argstr='--order %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = MultiplyScalarVolumes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultiplyScalarVolumes_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = MultiplyScalarVolumes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py new file mode 100644 index 0000000000..c6ad0bf24d --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py @@ -0,0 +1,55 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection + + +def test_N4ITKBiasFieldCorrection_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + bsplineorder=dict(argstr='--bsplineorder %d', ), + convergencethreshold=dict(argstr='--convergencethreshold %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + histogramsharpening=dict( + argstr='--histogramsharpening %s', + sep=',', + ), + inputimage=dict(argstr='--inputimage %s', ), + iterations=dict( + argstr='--iterations %s', + sep=',', + ), + maskimage=dict(argstr='--maskimage %s', ), + meshresolution=dict( + argstr='--meshresolution %s', + sep=',', + ), + outputbiasfield=dict( + argstr='--outputbiasfield %s', + hash_files=False, + ), + outputimage=dict( + argstr='--outputimage %s', + hash_files=False, + ), + shrinkfactor=dict(argstr='--shrinkfactor %d', ), + splinedistance=dict(argstr='--splinedistance %f', ), + weightimage=dict(argstr='--weightimage %s', ), + ) + inputs = N4ITKBiasFieldCorrection.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], 
metakey) == value +def test_N4ITKBiasFieldCorrection_outputs(): + output_map = dict( + outputbiasfield=dict(), + outputimage=dict(), + ) + outputs = N4ITKBiasFieldCorrection.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py new file mode 100644 index 0000000000..ebe34fd9b3 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ResampleScalarVectorDWIVolume.py @@ -0,0 +1,69 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..resamplescalarvectordwivolume import ResampleScalarVectorDWIVolume + + +def test_ResampleScalarVectorDWIVolume_inputs(): + input_map = dict( + Inverse_ITK_Transformation=dict( + argstr='--Inverse_ITK_Transformation ', ), + Reference=dict(argstr='--Reference %s', ), + args=dict(argstr='%s', ), + centered_transform=dict(argstr='--centered_transform ', ), + defField=dict(argstr='--defField %s', ), + default_pixel_value=dict(argstr='--default_pixel_value %f', ), + direction_matrix=dict( + argstr='--direction_matrix %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + hfieldtype=dict(argstr='--hfieldtype %s', ), + image_center=dict(argstr='--image_center %s', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + interpolation=dict(argstr='--interpolation %s', ), + notbulk=dict(argstr='--notbulk ', ), + number_of_thread=dict(argstr='--number_of_thread %d', ), + origin=dict(argstr='--origin %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + rotation_point=dict(argstr='--rotation_point %s', ), + size=dict( + argstr='--size %s', + sep=',', + ), + spaceChange=dict(argstr='--spaceChange ', ), + spacing=dict( + argstr='--spacing %s', + sep=',', + ), + spline_order=dict(argstr='--spline_order %d', ), + transform=dict(argstr='--transform %s', ), + transform_matrix=dict( + argstr='--transform_matrix %s', + sep=',', + ), + transform_order=dict(argstr='--transform_order %s', ), + transformationFile=dict(argstr='--transformationFile %s', ), + window_function=dict(argstr='--window_function %s', ), + ) + inputs = ResampleScalarVectorDWIVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ResampleScalarVectorDWIVolume_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = ResampleScalarVectorDWIVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py new file mode 100644 index 0000000000..d62589881f --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_SubtractScalarVolumes.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..arithmetic import SubtractScalarVolumes + + +def test_SubtractScalarVolumes_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + 
argstr='%s', + position=-3, + ), + inputVolume2=dict( + argstr='%s', + position=-2, + ), + order=dict(argstr='--order %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = SubtractScalarVolumes.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SubtractScalarVolumes_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = SubtractScalarVolumes.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py new file mode 100644 index 0000000000..34de317104 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_ThresholdScalarVolume.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..thresholdscalarvolume import ThresholdScalarVolume + + +def test_ThresholdScalarVolume_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-2, + ), + OutputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + lower=dict(argstr='--lower %d', ), + outsidevalue=dict(argstr='--outsidevalue %d', ), + threshold=dict(argstr='--threshold %d', ), + thresholdtype=dict(argstr='--thresholdtype %s', ), + upper=dict(argstr='--upper %d', ), + ) + inputs = ThresholdScalarVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ThresholdScalarVolume_outputs(): + output_map = dict(OutputVolume=dict(position=-1, ), ) + outputs = ThresholdScalarVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py new file mode 100644 index 0000000000..8e7890de85 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/tests/test_auto_VotingBinaryHoleFillingImageFilter.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..votingbinaryholefillingimagefilter import VotingBinaryHoleFillingImageFilter + + +def test_VotingBinaryHoleFillingImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + background=dict(argstr='--background %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + foreground=dict(argstr='--foreground %d', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + majorityThreshold=dict(argstr='--majorityThreshold %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + radius=dict( + argstr='--radius %s', + sep=',', + ), + ) + inputs = VotingBinaryHoleFillingImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VotingBinaryHoleFillingImageFilter_outputs(): + output_map = 
dict(outputVolume=dict(position=-1, ), ) + outputs = VotingBinaryHoleFillingImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py new file mode 100644 index 0000000000..041ce10990 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/thresholdscalarvolume.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ThresholdScalarVolumeInputSpec(CommandLineInputSpec): + InputVolume = File( + position=-2, desc="Input volume", exists=True, argstr="%s") + OutputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Thresholded input volume", + argstr="%s") + threshold = traits.Int(desc="Threshold value", argstr="--threshold %d") + lower = traits.Int(desc="Lower threshold value", argstr="--lower %d") + upper = traits.Int(desc="Upper threshold value", argstr="--upper %d") + outsidevalue = traits.Int( + desc= + "Set the voxels to this value if they fall outside the threshold range", + argstr="--outsidevalue %d") + thresholdtype = traits.Enum( + "Below", + "Above", + "Outside", + desc= + "What kind of threshold to perform. If Outside is selected, uses Upper and Lower values. If Below is selected, uses the ThresholdValue, if Above is selected, uses the ThresholdValue.", + argstr="--thresholdtype %s") + + +class ThresholdScalarVolumeOutputSpec(TraitedSpec): + OutputVolume = File( + position=-1, desc="Thresholded input volume", exists=True) + + +class ThresholdScalarVolume(SEMLikeCommandLine): + """title: Threshold Scalar Volume + +category: Filtering + +description:

Threshold an image. Set image values to a user-specified outside value if they are below, above, or between simple threshold values. ThresholdAbove: The values greater than or equal to the threshold value are set to OutsideValue. ThresholdBelow: The values less than or equal to the threshold value are set to OutsideValue. ThresholdOutside: The values outside the range Lower-Upper are set to OutsideValue. Although all image types are supported on input, only signed types are produced.
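Because the three threshold modes are easy to misread, here is an editor-added sketch (values and paths are illustrative, and the input file must exist for the trait assignment to validate) showing how the Outside mode maps onto the command line:

    from nipype.interfaces.slicer.filtering.thresholdscalarvolume import ThresholdScalarVolume

    thresh = ThresholdScalarVolume()
    thresh.inputs.InputVolume = 'labels.nii'        # hypothetical existing file
    thresh.inputs.OutputVolume = 'labels_windowed.nii'
    thresh.inputs.thresholdtype = 'Outside'         # values outside [lower, upper] get outsidevalue
    thresh.inputs.lower = 100
    thresh.inputs.upper = 200
    thresh.inputs.outsidevalue = 0
    print(thresh.cmdline)
    # roughly: ThresholdScalarVolume --lower 100 --outsidevalue 0 --thresholdtype Outside --upper 200 labels.nii labels_windowed.nii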

+ +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/Threshold + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = ThresholdScalarVolumeInputSpec + output_spec = ThresholdScalarVolumeOutputSpec + _cmd = "ThresholdScalarVolume " + _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} diff --git a/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py new file mode 100644 index 0000000000..9c19799d04 --- /dev/null +++ b/nipype/interfaces/slicer/filtering/votingbinaryholefillingimagefilter.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class VotingBinaryHoleFillingImageFilterInputSpec(CommandLineInputSpec): + radius = InputMultiPath( + traits.Int, + desc="The radius of a hole to be filled", + sep=",", + argstr="--radius %s") + majorityThreshold = traits.Int( + desc= + "The number of pixels over 50% that will decide whether an OFF pixel will become ON or not. For example, if the neighborhood of a pixel has 124 pixels (excluding itself), the 50% will be 62, and if you set a Majority threshold of 5, that means that the filter will require 67 or more neighbor pixels to be ON in order to switch the current OFF pixel to ON.", + argstr="--majorityThreshold %d") + background = traits.Int( + desc="The value associated with the background (not object)", + argstr="--background %d") + foreground = traits.Int( + desc="The value associated with the foreground (object)", + argstr="--foreground %d") + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class VotingBinaryHoleFillingImageFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class VotingBinaryHoleFillingImageFilter(SEMLikeCommandLine): + """title: Voting Binary Hole Filling Image Filter + +category: Filtering + +description: Applies a voting operation in order to fill-in cavities. This can be used for smoothing contours and for filling holes in binary images. This technique is used frequently when segmenting complete organs that may have ducts or vasculature that may not have been included in the initial segmentation, e.g. lungs, kidneys, liver. 
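The majorityThreshold rule quoted in the input spec is easier to see with small numbers. This editor-added sketch (radius and threshold values are illustrative) reproduces its arithmetic:

    # An OFF voxel flips ON when at least 50% of its neighborhood, plus
    # majorityThreshold extra voxels, are ON.
    radius = (1, 1, 1)                      # --radius 1,1,1
    neighbors = ((2 * radius[0] + 1) *
                 (2 * radius[1] + 1) *
                 (2 * radius[2] + 1)) - 1   # 26 voxels, center excluded
    majority = neighbors // 2               # 13
    majority_threshold = 2                  # --majorityThreshold 2
    print(majority + majority_threshold)    # 15 ON neighbors required to flip the voxel

The same formula reproduces the 124-neighbor example given in the description: 62 + 5 = 67 neighbors required.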
+ +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/VotingBinaryHoleFillingImageFilter + +contributor: Bill Lorensen (GE) + +acknowledgements: This command module was derived from Insight/Examples/Filtering/VotingBinaryHoleFillingImageFilter (copyright) Insight Software Consortium + +""" + + input_spec = VotingBinaryHoleFillingImageFilterInputSpec + output_spec = VotingBinaryHoleFillingImageFilterOutputSpec + _cmd = "VotingBinaryHoleFillingImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py new file mode 100644 index 0000000000..6fe3ae927f --- /dev/null +++ b/nipype/interfaces/slicer/generate_classes.py @@ -0,0 +1,555 @@ +# -*- coding: utf-8 -*- +"""This script generates Slicer Interfaces based on the CLI modules XML. CLI +modules are selected from the hardcoded list below and generated code is placed +in the cli_modules.py file (and imported in __init__.py). For this to work +correctly you must have your CLI executabes in $PATH""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, bytes, open +import xml.dom.minidom +import subprocess +import os +from shutil import rmtree + +import keyword +python_keywords = keyword.kwlist # If c++ SEM module uses one of these key words as a command line parameter, we need to modify variable + + +def force_to_valid_python_variable_name(old_name): + """ Valid c++ names are not always valid in python, so + provide alternate naming + + >>> force_to_valid_python_variable_name('lambda') + 'opt_lambda' + >>> force_to_valid_python_variable_name('inputVolume') + 'inputVolume' + """ + new_name = old_name + new_name = new_name.lstrip().rstrip() + if old_name in python_keywords: + new_name = 'opt_' + old_name + return new_name + + +def add_class_to_package(class_codes, class_names, module_name, package_dir): + module_python_filename = os.path.join(package_dir, "%s.py" % module_name) + f_m = open(module_python_filename, 'w') + f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') + f_m.write("""# -*- coding: utf-8 -*- +\"\"\"Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.\"\"\"\n\n""" + ) + imports = """from __future__ import (print_function, division, unicode_literals, + absolute_import) +from ..base import (CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, + File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath) +import os\n\n\n""" + f_m.write(imports) + f_m.write("\n\n".join(class_codes)) + f_i.write("from %s import %s\n" % (module_name, ", ".join(class_names))) + f_m.close() + f_i.close() + + +def crawl_code_struct(code_struct, package_dir): + subpackages = [] + for k, v in code_struct.items(): + if isinstance(v, str) or isinstance(v, (str, bytes)): + module_name = k.lower() + class_name = k + class_code = v + add_class_to_package([class_code], [class_name], module_name, + package_dir) + else: + l1 = {} + l2 = {} + for key in list(v.keys()): + if (isinstance(v[key], str) + or isinstance(v[key], (str, bytes))): + l1[key] = v[key] + else: + l2[key] = v[key] + if l2: + v = l2 + subpackages.append(k.lower()) + f_i = open(os.path.join(package_dir, "__init__.py"), 'a+') + f_i.write("from %s import *\n" % k.lower()) + f_i.close() + new_pkg_dir = os.path.join(package_dir, k.lower()) + if 
os.path.exists(new_pkg_dir): + rmtree(new_pkg_dir) + os.mkdir(new_pkg_dir) + crawl_code_struct(v, new_pkg_dir) + if l1: + for ik, iv in l1.items(): + crawl_code_struct({ik: {ik: iv}}, new_pkg_dir) + elif l1: + v = l1 + module_name = k.lower() + add_class_to_package( + list(v.values()), list(v.keys()), module_name, package_dir) + if subpackages: + f = open(os.path.join(package_dir, "setup.py"), 'w') + f.write( + """# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +def configuration(parent_package='',top_path=None): + from numpy.distutils.misc_util import Configuration + + config = Configuration('{pkg_name}', parent_package, top_path) + + {sub_pks} + + return config + +if __name__ == '__main__': + from numpy.distutils.core import setup + setup(**configuration(top_path='').todict()) +""".format(pkg_name=package_dir.split("/")[-1], + sub_pks="\n ".join([ + "config.add_data_dir('%s')" % sub_pkg for sub_pkg in subpackages + ]))) + f.close() + + +def generate_all_classes(modules_list=[], + launcher=[], + redirect_x=False, + mipav_hacks=False): + """ modules_list contains all the SEM compliant tools that should have wrappers created for them. + launcher containtains the command line prefix wrapper arugments needed to prepare + a proper environment for each of the modules. + """ + all_code = {} + for module in modules_list: + print("=" * 80) + print("Generating Definition for module {0}".format(module)) + print("^" * 80) + package, code, module = generate_class( + module, launcher, redirect_x=redirect_x, mipav_hacks=mipav_hacks) + cur_package = all_code + module_name = package.strip().split(" ")[0].split(".")[-1] + for package in package.strip().split(" ")[0].split(".")[:-1]: + if package not in cur_package: + cur_package[package] = {} + cur_package = cur_package[package] + if module_name not in cur_package: + cur_package[module_name] = {} + cur_package[module_name][module] = code + if os.path.exists("__init__.py"): + os.unlink("__init__.py") + crawl_code_struct(all_code, os.getcwd()) + + +def generate_class(module, + launcher, + strip_module_name_prefix=True, + redirect_x=False, + mipav_hacks=False): + dom = grab_xml(module, launcher, mipav_hacks=mipav_hacks) + if strip_module_name_prefix: + module_name = module.split(".")[-1] + else: + module_name = module + inputTraits = [] + outputTraits = [] + outputs_filenames = {} + + # self._outputs_nodes = [] + + class_string = "\"\"\"" + + for desc_str in [ + 'title', 'category', 'description', 'version', 'documentation-url', + 'license', 'contributor', 'acknowledgements' + ]: + el = dom.getElementsByTagName(desc_str) + if el and el[0].firstChild and el[0].firstChild.nodeValue.strip(): + class_string += desc_str + ": " + el[0].firstChild.nodeValue.strip( + ) + "\n\n" + if desc_str == 'category': + category = el[0].firstChild.nodeValue.strip() + class_string += "\"\"\"" + + for paramGroup in dom.getElementsByTagName("parameters"): + indices = paramGroup.getElementsByTagName('index') + max_index = 0 + for index in indices: + if int(index.firstChild.nodeValue) > max_index: + max_index = int(index.firstChild.nodeValue) + for param in paramGroup.childNodes: + if param.nodeName in ['label', 'description', '#text', '#comment']: + continue + traitsParams = {} + + longFlagNode = param.getElementsByTagName('longflag') + if longFlagNode: + # Prefer to use longFlag as name if it is given, rather than the parameter name + longFlagName = longFlagNode[0].firstChild.nodeValue + # SEM automatically strips 
prefixed "--" or "-" from from xml before processing + # we need to replicate that behavior here The following + # two nodes in xml have the same behavior in the program + # --test + # test + longFlagName = longFlagName.lstrip(" -").rstrip(" ") + name = longFlagName + name = force_to_valid_python_variable_name(name) + traitsParams["argstr"] = "--" + longFlagName + " " + else: + name = param.getElementsByTagName('name')[ + 0].firstChild.nodeValue + name = force_to_valid_python_variable_name(name) + if param.getElementsByTagName('index'): + traitsParams["argstr"] = "" + else: + traitsParams["argstr"] = "--" + name + " " + + if param.getElementsByTagName( + 'description') and param.getElementsByTagName( + 'description')[0].firstChild: + traitsParams["desc"] = param.getElementsByTagName( + 'description')[0].firstChild.nodeValue.replace( + '"', "\\\"").replace("\n", ", ") + + argsDict = { + 'directory': '%s', + 'file': '%s', + 'integer': "%d", + 'double': "%f", + 'float': "%f", + 'image': "%s", + 'transform': "%s", + 'boolean': '', + 'string-enumeration': '%s', + 'string': "%s", + 'integer-enumeration': '%s', + 'table': '%s', + 'point': '%s', + 'region': '%s', + 'geometry': '%s' + } + + if param.nodeName.endswith('-vector'): + traitsParams["argstr"] += "%s" + else: + traitsParams["argstr"] += argsDict[param.nodeName] + + index = param.getElementsByTagName('index') + if index: + traitsParams["position"] = int( + index[0].firstChild.nodeValue) - (max_index + 1) + + desc = param.getElementsByTagName('description') + if index: + traitsParams["desc"] = desc[0].firstChild.nodeValue + + typesDict = { + 'integer': "traits.Int", + 'double': "traits.Float", + 'float': "traits.Float", + 'image': "File", + 'transform': "File", + 'boolean': "traits.Bool", + 'string': "traits.Str", + 'file': "File", + 'geometry': "File", + 'directory': "Directory", + 'table': "File", + 'point': "traits.List", + 'region': "traits.List" + } + + if param.nodeName.endswith('-enumeration'): + type = "traits.Enum" + values = [ + '"%s"' % str(el.firstChild.nodeValue).replace('"', '') + for el in param.getElementsByTagName('element') + ] + elif param.nodeName.endswith('-vector'): + type = "InputMultiPath" + if param.nodeName in [ + 'file', 'directory', 'image', 'geometry', 'transform', + 'table' + ]: + values = [ + "%s(exists=True)" % typesDict[param.nodeName.replace( + '-vector', '')] + ] + else: + values = [typesDict[param.nodeName.replace('-vector', '')]] + if mipav_hacks is True: + traitsParams["sep"] = ";" + else: + traitsParams["sep"] = ',' + elif param.getAttribute('multiple') == "true": + type = "InputMultiPath" + if param.nodeName in [ + 'file', 'directory', 'image', 'geometry', 'transform', + 'table' + ]: + values = ["%s(exists=True)" % typesDict[param.nodeName]] + elif param.nodeName in ['point', 'region']: + values = [ + "%s(traits.Float(), minlen=3, maxlen=3)" % + typesDict[param.nodeName] + ] + else: + values = [typesDict[param.nodeName]] + traitsParams["argstr"] += "..." + else: + values = [] + type = typesDict[param.nodeName] + + if param.nodeName in [ + 'file', 'directory', 'image', 'geometry', 'transform', + 'table' + ]: + if not param.getElementsByTagName('channel'): + raise RuntimeError( + "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field.\n{0}". 
+ format(traitsParams)) + elif param.getElementsByTagName('channel')[ + 0].firstChild.nodeValue == 'output': + traitsParams["hash_files"] = False + inputTraits.append( + "%s = traits.Either(traits.Bool, %s(%s), %s)" % + (name, type, + parse_values(values).replace("exists=True", ""), + parse_params(traitsParams))) + traitsParams["exists"] = True + traitsParams.pop("argstr") + traitsParams.pop("hash_files") + outputTraits.append("%s = %s(%s%s)" % + (name, type.replace("Input", "Output"), + parse_values(values), + parse_params(traitsParams))) + + outputs_filenames[name] = gen_filename_from_param( + param, name) + elif param.getElementsByTagName('channel')[ + 0].firstChild.nodeValue == 'input': + if param.nodeName in [ + 'file', 'directory', 'image', 'geometry', + 'transform', 'table' + ] and type not in ["InputMultiPath", "traits.List"]: + traitsParams["exists"] = True + inputTraits.append("%s = %s(%s%s)" % + (name, type, parse_values(values), + parse_params(traitsParams))) + else: + raise RuntimeError( + "Insufficient XML specification: each element of type 'file', 'directory', 'image', 'geometry', 'transform', or 'table' requires 'channel' field to be in ['input','output'].\n{0}". + format(traitsParams)) + else: # For all other parameter types, they are implicitly only input types + inputTraits.append("%s = %s(%s%s)" % + (name, type, parse_values(values), + parse_params(traitsParams))) + + if mipav_hacks: + blacklisted_inputs = ["maxMemoryUsage"] + inputTraits = [ + trait for trait in inputTraits + if trait.split()[0] not in blacklisted_inputs + ] + + compulsory_inputs = [ + 'xDefaultMem = traits.Int(desc="Set default maximum heap size", argstr="-xDefaultMem %d")', + 'xMaxProcess = traits.Int(1, desc="Set default maximum number of processes.", argstr="-xMaxProcess %d", usedefault=True)' + ] + inputTraits += compulsory_inputs + + input_spec_code = "class " + module_name + "InputSpec(CommandLineInputSpec):\n" + for trait in inputTraits: + input_spec_code += " " + trait + "\n" + + output_spec_code = "class " + module_name + "OutputSpec(TraitedSpec):\n" + if not outputTraits: + output_spec_code += " pass\n" + else: + for trait in outputTraits: + output_spec_code += " " + trait + "\n" + + output_filenames_code = "_outputs_filenames = {" + output_filenames_code += ",".join([ + "'%s':'%s'" % (key, value) for key, value in outputs_filenames.items() + ]) + output_filenames_code += "}" + + input_spec_code += "\n\n" + output_spec_code += "\n\n" + + template = """class %module_name%(SEMLikeCommandLine): + %class_str% + + input_spec = %module_name%InputSpec + output_spec = %module_name%OutputSpec + _cmd = "%launcher% %name% " + %output_filenames_code%\n""" + template += " _redirect_x = {0}\n".format(str(redirect_x)) + + main_class = template.replace('%class_str%', class_string).replace( + "%module_name%", module_name).replace("%name%", module).replace( + "%output_filenames_code%", output_filenames_code).replace( + "%launcher%", " ".join(launcher)) + + return category, input_spec_code + output_spec_code + main_class, module_name + + +def grab_xml(module, launcher, mipav_hacks=False): + # cmd = CommandLine(command = "Slicer3", args="--launch %s --xml"%module) + # ret = cmd.run() + command_list = launcher[:] # force copy to preserve original + command_list.extend([module, "--xml"]) + final_command = " ".join(command_list) + xmlReturnValue = subprocess.Popen( + final_command, stdout=subprocess.PIPE, shell=True).communicate()[0] + if mipav_hacks: + # workaround for a jist bug 
https://www.nitrc.org/tracker/index.php?func=detail&aid=7234&group_id=228&atid=942 + new_xml = "" + replace_closing_tag = False + for line in xmlReturnValue.splitlines(): + if line.strip() == "": + new_xml += "\n" + replace_closing_tag = True + elif replace_closing_tag and line.strip() == "": + new_xml += "\n" + replace_closing_tag = False + else: + new_xml += line + "\n" + + xmlReturnValue = new_xml + + # workaround for a JIST bug https://www.nitrc.org/tracker/index.php?func=detail&aid=7233&group_id=228&atid=942 + if xmlReturnValue.strip().endswith("XML"): + xmlReturnValue = xmlReturnValue.strip()[:-3] + if xmlReturnValue.strip().startswith( + "Error: Unable to set default atlas"): + xmlReturnValue = xmlReturnValue.strip()[len( + "Error: Unable to set default atlas"):] + try: + dom = xml.dom.minidom.parseString(xmlReturnValue.strip()) + except Exception as e: + print(xmlReturnValue.strip()) + raise e + return dom + + +# if ret.runtime.returncode == 0: +# return xml.dom.minidom.parseString(ret.runtime.stdout) +# else: +# raise Exception(cmd.cmdline + " failed:\n%s"%ret.runtime.stderr) + + +def parse_params(params): + list = [] + for key, value in params.items(): + if isinstance(value, (str, bytes)): + list.append('%s="%s"' % (key, value.replace('"', "'"))) + else: + list.append('%s=%s' % (key, value)) + + return ", ".join(list) + + +def parse_values(values): + values = ['%s' % value for value in values] + if len(values) > 0: + retstr = ", ".join(values) + ", " + else: + retstr = "" + return retstr + + +def gen_filename_from_param(param, base): + fileExtensions = param.getAttribute("fileExtensions") + if fileExtensions: + # It is possible that multiple file extensions can be specified in a + # comma separated list, This will extract just the first extension + firstFileExtension = fileExtensions.split(',')[0] + ext = firstFileExtension + else: + ext = { + 'image': '.nii', + 'transform': '.mat', + 'file': '', + 'directory': '', + 'geometry': '.vtk' + }[param.nodeName] + return base + ext + + +if __name__ == "__main__": + # NOTE: For now either the launcher needs to be found on the default path, or + # every tool in the modules list must be found on the default path + # AND calling the module with --xml must be supported and compliant. 
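    # Editor-added illustration, not part of the original patch: the same entry
    # point can be exercised for a single tool. Assuming MedianImageFilter is on
    # $PATH and answers `MedianImageFilter --xml`, a call such as
    #
    #     generate_all_classes(modules_list=['MedianImageFilter'], launcher=[])
    #
    # writes medianimagefilter.py (plus the import in __init__.py) to the current
    # directory, mirroring how the wrappers added by this patch were produced.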
+ modules_list = [ + 'MedianImageFilter', + 'CheckerBoardFilter', + 'EMSegmentCommandLine', + 'GrayscaleFillHoleImageFilter', + # 'CreateDICOMSeries', #missing channel + 'TractographyLabelMapSeeding', + 'IntensityDifferenceMetric', + 'DWIToDTIEstimation', + 'MaskScalarVolume', + 'ImageLabelCombine', + 'DTIimport', + 'OtsuThresholdImageFilter', + 'ExpertAutomatedRegistration', + 'ThresholdScalarVolume', + 'DWIUnbiasedNonLocalMeansFilter', + 'BRAINSFit', + 'MergeModels', + 'ResampleDTIVolume', + 'MultiplyScalarVolumes', + 'LabelMapSmoothing', + 'RigidRegistration', + 'VotingBinaryHoleFillingImageFilter', + 'BRAINSROIAuto', + 'RobustStatisticsSegmenter', + 'GradientAnisotropicDiffusion', + 'ProbeVolumeWithModel', + 'ModelMaker', + 'ExtractSkeleton', + 'GrayscaleGrindPeakImageFilter', + 'N4ITKBiasFieldCorrection', + 'BRAINSResample', + 'DTIexport', + 'VBRAINSDemonWarp', + 'ResampleScalarVectorDWIVolume', + 'ResampleScalarVolume', + 'OtsuThresholdSegmentation', + # 'ExecutionModelTour', + 'HistogramMatching', + 'BRAINSDemonWarp', + 'ModelToLabelMap', + 'GaussianBlurImageFilter', + 'DiffusionWeightedVolumeMasking', + 'GrayscaleModelMaker', + 'CastScalarVolume', + 'DicomToNrrdConverter', + 'AffineRegistration', + 'AddScalarVolumes', + 'LinearRegistration', + 'SimpleRegionGrowingSegmentation', + 'DWIJointRicianLMMSEFilter', + 'MultiResolutionAffineRegistration', + 'SubtractScalarVolumes', + 'DWIRicianLMMSEFilter', + 'OrientScalarVolume', + 'FiducialRegistration', + 'BSplineDeformableRegistration', + 'CurvatureAnisotropicDiffusion', + 'PETStandardUptakeValueComputation', + 'DiffusionTensorScalarMeasurements', + 'ACPCTransform', + 'EMSegmentTransformToNewFormat', + 'BSplineToDeformationField' + ] + + # SlicerExecutionModel compliant tools that are usually statically built, and don't need the Slicer3 --launcher + generate_all_classes(modules_list=modules_list, launcher=[]) + # Tools compliant with SlicerExecutionModel called from the Slicer environment (for shared lib compatibility) + # launcher = ['/home/raid3/gorgolewski/software/slicer/Slicer', '--launch'] + # generate_all_classes(modules_list=modules_list, launcher=launcher) + # generate_all_classes(modules_list=['BRAINSABC'], launcher=[] ) diff --git a/nipype/interfaces/slicer/legacy/__init__.py b/nipype/interfaces/slicer/legacy/__init__.py new file mode 100644 index 0000000000..75c6b9d327 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import * +from .segmentation import OtsuThresholdSegmentation +from .filtering import OtsuThresholdImageFilter, ResampleScalarVolume +from .converters import BSplineToDeformationField +from .registration import (BSplineDeformableRegistration, AffineRegistration, + MultiResolutionAffineRegistration, + RigidRegistration, LinearRegistration, + ExpertAutomatedRegistration) diff --git a/nipype/interfaces/slicer/legacy/converters.py b/nipype/interfaces/slicer/legacy/converters.py new file mode 100644 index 0000000000..f5af1ad29b --- /dev/null +++ b/nipype/interfaces/slicer/legacy/converters.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class 
BSplineToDeformationFieldInputSpec(CommandLineInputSpec): + tfm = File(exists=True, argstr="--tfm %s") + refImage = File(exists=True, argstr="--refImage %s") + defImage = traits.Either( + traits.Bool, File(), hash_files=False, argstr="--defImage %s") + + +class BSplineToDeformationFieldOutputSpec(TraitedSpec): + defImage = File(exists=True) + + +class BSplineToDeformationField(SEMLikeCommandLine): + """title: BSpline to deformation field + +category: Legacy.Converters + +description: Create a dense deformation field from a bspline+bulk transform. + +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineToDeformationField + +contributor: Andrey Fedorov (SPL, BWH) + +acknowledgements: This work is funded by NIH grants R01 CA111288 and U01 CA151261. + +""" + + input_spec = BSplineToDeformationFieldInputSpec + output_spec = BSplineToDeformationFieldOutputSpec + _cmd = "BSplineToDeformationField " + _outputs_filenames = {'defImage': 'defImage.nii'} diff --git a/nipype/interfaces/slicer/legacy/diffusion/__init__.py b/nipype/interfaces/slicer/legacy/diffusion/__init__.py new file mode 100644 index 0000000000..f66daabb5b --- /dev/null +++ b/nipype/interfaces/slicer/legacy/diffusion/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .denoising import DWIUnbiasedNonLocalMeansFilter diff --git a/nipype/interfaces/slicer/legacy/diffusion/denoising.py b/nipype/interfaces/slicer/legacy/diffusion/denoising.py new file mode 100644 index 0000000000..0cc8cce0f6 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/diffusion/denoising.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class DWIUnbiasedNonLocalMeansFilterInputSpec(CommandLineInputSpec): + rs = InputMultiPath( + traits.Int, + desc= + "The algorithm searches for similar voxels in a neighborhood of this size (larger sizes than the default one are extremely slow).", + sep=",", + argstr="--rs %s") + rc = InputMultiPath( + traits.Int, + desc= + "Similarity between blocks is measured using windows of this size.", + sep=",", + argstr="--rc %s") + hp = traits.Float( + desc= + "This parameter is related to noise; the larger the parameter, the more aggressive the filtering.
Should be near 1, and only values between 0.8 and 1.2 are allowed", + argstr="--hp %f") + ng = traits.Int( + desc= + "The number of the closest gradients that are used to jointly filter a given gradient direction (a maximum of 5 is allowed).", + argstr="--ng %d") + re = InputMultiPath( + traits.Int, + desc= + "A neighborhood of this size is used to compute the statistics for noise estimation.", + sep=",", + argstr="--re %s") + inputVolume = File( + position=-2, desc="Input DWI volume.", exists=True, argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output DWI volume.", + argstr="%s") + + +class DWIUnbiasedNonLocalMeansFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output DWI volume.", exists=True) + + +class DWIUnbiasedNonLocalMeansFilter(SEMLikeCommandLine): + """title: DWI Unbiased Non Local Means Filter + +category: Legacy.Diffusion.Denoising + +description: This module reduces noise (or unwanted detail) on a set of diffusion weighted images. For this, it filters the images using an Unbiased Non Local Means for Rician noise algorithm. It exploits not only the spatial redundancy, but the redundancy in similar gradient directions as well; it takes into account the N closest gradient directions to the direction being processed (a maximum of 5 gradient directions is allowed to keep a reasonable computational load, since we use neither similarity maps nor a block-wise implementation). +The noise parameter is automatically estimated in the same way as in the jointLMMSE module. +A complete description of the algorithm may be found in: +Antonio Tristan-Vega and Santiago Aja-Fernandez, DWI filtering using joint information for DTI and HARDI, Medical Image Analysis, Volume 14, Issue 2, Pages 205-218. 2010. +Please note that the execution of this filter is extremely slow, so only very conservative parameters (block size and search size as small as possible) should be used. Even so, its execution may take several hours. The advantage of this filter over joint LMMSE is its better preservation of edges and fine structures. + +version: 0.0.1.$Revision: 1 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/UnbiasedNonLocalMeansFilterForDWI + +contributor: Antonio Tristan Vega (UVa), Santiago Aja Fernandez (UVa) + +acknowledgements: Partially funded by grant number TEC2007-67073/TCM from the Comision Interministerial de Ciencia y Tecnologia (Spain).
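+
+example: a minimal nipype sketch (the filename and radii below are assumed, illustrative values, and the input file must exist on disk):
+
+    from nipype.interfaces.slicer.legacy.diffusion.denoising import DWIUnbiasedNonLocalMeansFilter
+    dwi_denoise = DWIUnbiasedNonLocalMeansFilter()
+    dwi_denoise.inputs.inputVolume = 'dwi.nhdr'  # placeholder DWI volume
+    dwi_denoise.inputs.rs = [3, 3, 3]            # small search radius keeps runtime manageable
+    dwi_denoise.inputs.rc = [1, 1, 1]            # comparison window radius
+    dwi_denoise.inputs.outputVolume = True       # let nipype write outputVolume.nii
+    dwi_denoise.run()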
+ +""" + + input_spec = DWIUnbiasedNonLocalMeansFilterInputSpec + output_spec = DWIUnbiasedNonLocalMeansFilterOutputSpec + _cmd = "DWIUnbiasedNonLocalMeansFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/__init__.py b/nipype/interfaces/slicer/legacy/diffusion/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py new file mode 100644 index 0000000000..0e25ffad9a --- /dev/null +++ b/nipype/interfaces/slicer/legacy/diffusion/tests/test_auto_DWIUnbiasedNonLocalMeansFilter.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..denoising import DWIUnbiasedNonLocalMeansFilter + + +def test_DWIUnbiasedNonLocalMeansFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + hp=dict(argstr='--hp %f', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + ng=dict(argstr='--ng %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + rc=dict( + argstr='--rc %s', + sep=',', + ), + re=dict( + argstr='--re %s', + sep=',', + ), + rs=dict( + argstr='--rs %s', + sep=',', + ), + ) + inputs = DWIUnbiasedNonLocalMeansFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DWIUnbiasedNonLocalMeansFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = DWIUnbiasedNonLocalMeansFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/filtering.py b/nipype/interfaces/slicer/legacy/filtering.py new file mode 100644 index 0000000000..aaed2350e0 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/filtering.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class OtsuThresholdImageFilterInputSpec(CommandLineInputSpec): + insideValue = traits.Int( + desc= + "The value assigned to pixels that are inside the computed threshold", + argstr="--insideValue %d") + outsideValue = traits.Int( + desc= + "The value assigned to pixels that are outside the computed threshold", + argstr="--outsideValue %d") + numberOfBins = traits.Int( + desc= + "This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. 
Experimentation is the only way to see the effect of varying this parameter.", + argstr="--numberOfBins %d") + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class OtsuThresholdImageFilterOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class OtsuThresholdImageFilter(SEMLikeCommandLine): + """title: Otsu Threshold Image Filter + +category: Legacy.Filtering + +description: This filter creates a binary thresholded image that separates an image into foreground and background components. The filter calculates the optimum threshold separating those two classes so that their combined spread (intra-class variance) is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter applies that threshold to the input image using the itkBinaryThresholdImageFilter. The numberOfHistogram bins can be set for the Otsu Calculator. The insideValue and outsideValue can be set for the BinaryThresholdImageFilter. The filter produces a labeled volume. + +The original reference is: + +N.Otsu, A threshold selection method from gray level histograms, IEEE Trans.Syst.ManCybern.SMC-9,62–66 1979. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdImageFilter + +contributor: Bill Lorensen (GE) + +acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium + +""" + + input_spec = OtsuThresholdImageFilterInputSpec + output_spec = OtsuThresholdImageFilterOutputSpec + _cmd = "OtsuThresholdImageFilter " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} + + +class ResampleScalarVolumeInputSpec(CommandLineInputSpec): + spacing = InputMultiPath( + traits.Float, + desc="Spacing along each dimension (0 means use input spacing)", + sep=",", + argstr="--spacing %s") + interpolation = traits.Enum( + "linear", + "nearestNeighbor", + "bspline", + "hamming", + "cosine", + "welch", + "lanczos", + "blackman", + desc= + "Sampling algorithm (linear, nearest neighbor, bspline(cubic) or windowed sinc). There are several sinc algorithms available as described in the following publication: Erik H. W. Meijering, Wiro J. Niessen, Josien P. W. Pluim, Max A. Viergever: Quantitative Comparison of Sinc-Approximating Kernels for Medical Image Interpolation. MICCAI 1999, pp. 210-217. Each window has a radius of 3;", + argstr="--interpolation %s") + InputVolume = File( + position=-2, + desc="Input volume to be resampled", + exists=True, + argstr="%s") + OutputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Resampled Volume", + argstr="%s") + + +class ResampleScalarVolumeOutputSpec(TraitedSpec): + OutputVolume = File(position=-1, desc="Resampled Volume", exists=True) + + +class ResampleScalarVolume(SEMLikeCommandLine): + """title: Resample Scalar Volume + +category: Legacy.Filtering + +description: Resampling an image is an important task in image analysis. It is especially important in the frame of image registration. This module implements image resampling through the use of itk Transforms. This module uses an Identity Transform. The resampling is controlled by the Output Spacing. "Resampling" is performed in space coordinates, not pixel/grid coordinates. 
It is quite important to ensure that image spacing is properly set on the images involved. The interpolator is required since the mapping from one space to the other will often require evaluation of the intensity of the image at non-grid positions. Several interpolators are available: linear, nearest neighbor, bspline and five flavors of sinc. The sinc interpolators, although more precise, are much slower than the linear and nearest neighbor interpolator. To resample label volumes, nearest neighbor interpolation should be used exclusively. + +version: 0.1.0.$Revision: 20594 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ResampleVolume + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = ResampleScalarVolumeInputSpec + output_spec = ResampleScalarVolumeOutputSpec + _cmd = "ResampleScalarVolume " + _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} diff --git a/nipype/interfaces/slicer/legacy/registration.py b/nipype/interfaces/slicer/legacy/registration.py new file mode 100644 index 0000000000..7f73d85d82 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/registration.py @@ -0,0 +1,655 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class BSplineDeformableRegistrationInputSpec(CommandLineInputSpec): + iterations = traits.Int( + desc="Number of iterations", argstr="--iterations %d") + gridSize = traits.Int( + desc= + "Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", + argstr="--gridSize %d") + histogrambins = traits.Int( + desc= + "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d") + spatialsamples = traits.Int( + desc= + "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d") + constrain = traits.Bool( + desc= + "Constrain the deformation to the amount specified in Maximum Deformation", + argstr="--constrain ") + maximumDeformation = traits.Float( + desc= + "If Constrain Deformation is checked, limit the deformation to this amount.", + argstr="--maximumDeformation %f") + default = traits.Int( + desc= + "Default pixel value used if resampling a pixel outside of the volume.", + argstr="--default %d") + initialtransform = File( + desc= + "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used as a bulk transform for the BSpline.
Optional.", + exists=True, + argstr="--initialtransform %s") + FixedImageFileName = File( + position=-2, + desc="Fixed image to which to register", + exists=True, + argstr="%s") + MovingImageFileName = File( + position=-1, desc="Moving image", exists=True, argstr="%s") + outputtransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s") + outputwarp = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", + argstr="--outputwarp %s") + resampledmovingfilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s") + + +class BSplineDeformableRegistrationOutputSpec(TraitedSpec): + outputtransform = File( + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + outputwarp = File( + desc= + "Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", + exists=True) + resampledmovingfilename = File( + desc= + "Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + + +class BSplineDeformableRegistration(SEMLikeCommandLine): + """title: BSpline Deformable Registration + +category: Legacy.Registration + +description: Registers two images together using BSpline transform and mutual information. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineDeformableRegistration + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = BSplineDeformableRegistrationInputSpec + output_spec = BSplineDeformableRegistrationOutputSpec + _cmd = "BSplineDeformableRegistration " + _outputs_filenames = { + 'resampledmovingfilename': 'resampledmovingfilename.nii', + 'outputtransform': 'outputtransform.txt', + 'outputwarp': 'outputwarp.nrrd' + } + + +class AffineRegistrationInputSpec(CommandLineInputSpec): + fixedsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d") + movingsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). 
Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d") + histogrambins = traits.Int( + desc= + "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d") + spatialsamples = traits.Int( + desc= + "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d") + iterations = traits.Int( + desc="Number of iterations", argstr="--iterations %d") + translationscale = traits.Float( + desc= + "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f") + initialtransform = File( + desc= + "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", + exists=True, + argstr="--initialtransform %s") + FixedImageFileName = File( + position=-2, + desc="Fixed image to which to register", + exists=True, + argstr="%s") + MovingImageFileName = File( + position=-1, desc="Moving image", exists=True, argstr="%s") + outputtransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s") + resampledmovingfilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s") + + +class AffineRegistrationOutputSpec(TraitedSpec): + outputtransform = File( + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + resampledmovingfilename = File( + desc= + "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + + +class AffineRegistration(SEMLikeCommandLine): + """title: Affine Registration + +category: Legacy.Registration + +description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities. + +This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. 
+ + + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/AffineRegistration + +contributor: Daniel Blezek (GE) + +acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. + +This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = AffineRegistrationInputSpec + output_spec = AffineRegistrationOutputSpec + _cmd = "AffineRegistration " + _outputs_filenames = { + 'resampledmovingfilename': 'resampledmovingfilename.nii', + 'outputtransform': 'outputtransform.txt' + } + + +class MultiResolutionAffineRegistrationInputSpec(CommandLineInputSpec): + fixedImage = File( + position=-2, + desc= + "Image which defines the space into which the moving image is registered", + exists=True, + argstr="%s") + movingImage = File( + position=-1, + desc= + "The transform goes from the fixed image's space into the moving image's space", + exists=True, + argstr="%s") + resampledImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Registration results", + argstr="--resampledImage %s") + saveTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Save the output transform from the registration", + argstr="--saveTransform %s") + fixedImageMask = File( + desc="Label image which defines a mask of interest for the fixed image", + exists=True, + argstr="--fixedImageMask %s") + fixedImageROI = traits.List( + desc="Label image which defines a ROI of interest for the fixed image", + argstr="--fixedImageROI %s") + numIterations = traits.Int( + desc="Number of iterations to run at each resolution level.", + argstr="--numIterations %d") + numLineIterations = traits.Int( + desc="Number of iterations to run at each resolution level.", + argstr="--numLineIterations %d") + stepSize = traits.Float( + desc="The maximum step size of the optimizer in voxels", + argstr="--stepSize %f") + stepTolerance = traits.Float( + desc="The maximum step size of the optimizer in voxels", + argstr="--stepTolerance %f") + metricTolerance = traits.Float(argstr="--metricTolerance %f") + + +class MultiResolutionAffineRegistrationOutputSpec(TraitedSpec): + resampledImage = File(desc="Registration results", exists=True) + saveTransform = File( + desc="Save the output transform from the registration", exists=True) + + +class MultiResolutionAffineRegistration(SEMLikeCommandLine): + """title: Robust Multiresolution Affine Registration + +category: Legacy.Registration + +description: Provides affine registration using multiple resolution levels and decomposed affine transforms. + +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MultiResolutionAffineRegistration + +contributor: Casey B Goodlett (Utah) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
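+
+example: a minimal nipype sketch (the image filenames are assumed placeholders and must exist on disk):
+
+    from nipype.interfaces.slicer.legacy.registration import MultiResolutionAffineRegistration
+    reg = MultiResolutionAffineRegistration()
+    reg.inputs.fixedImage = 'fixed.nii'    # space to register into
+    reg.inputs.movingImage = 'moving.nii'  # image to be aligned
+    reg.inputs.resampledImage = True       # write resampledImage.nii
+    reg.inputs.saveTransform = True        # write saveTransform.txt
+    reg.run()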
+ +""" + + input_spec = MultiResolutionAffineRegistrationInputSpec + output_spec = MultiResolutionAffineRegistrationOutputSpec + _cmd = "MultiResolutionAffineRegistration " + _outputs_filenames = { + 'resampledImage': 'resampledImage.nii', + 'saveTransform': 'saveTransform.txt' + } + + +class RigidRegistrationInputSpec(CommandLineInputSpec): + fixedsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d") + movingsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d") + testingmode = traits.Bool( + desc= + "Enable testing mode. Input transform will be used to construct floating image. The floating image will be ignored if passed.", + argstr="--testingmode ") + histogrambins = traits.Int( + desc= + "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d") + spatialsamples = traits.Int( + desc= + "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d") + iterations = InputMultiPath( + traits.Int, + desc= + "Comma separated list of iterations. Must have the same number of elements as the learning rate.", + sep=",", + argstr="--iterations %s") + learningrate = InputMultiPath( + traits.Float, + desc= + "Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", + sep=",", + argstr="--learningrate %s") + translationscale = traits.Float( + desc= + "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f") + initialtransform = File( + desc= + "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. 
Optional.", + exists=True, + argstr="--initialtransform %s") + FixedImageFileName = File( + position=-2, + desc="Fixed image to which to register", + exists=True, + argstr="%s") + MovingImageFileName = File( + position=-1, desc="Moving image", exists=True, argstr="%s") + outputtransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s") + resampledmovingfilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s") + + +class RigidRegistrationOutputSpec(TraitedSpec): + outputtransform = File( + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + resampledmovingfilename = File( + desc= + "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + + +class RigidRegistration(SEMLikeCommandLine): + """title: Rigid Registration + +category: Legacy.Registration + +description: Registers two images together using a rigid transform and mutual information. + +This module was originally distributed as "Linear registration" but has been renamed to eliminate confusion with the "Affine registration" module. + +This module is often used to align images of different subjects or images of the same subject from different modalities. + +This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. + + + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RigidRegistration + +contributor: Daniel Blezek (GE) + +acknowledgements: This module was developed by Daniel Blezek while at GE Research with contributions from Jim Miller. + +This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = RigidRegistrationInputSpec + output_spec = RigidRegistrationOutputSpec + _cmd = "RigidRegistration " + _outputs_filenames = { + 'resampledmovingfilename': 'resampledmovingfilename.nii', + 'outputtransform': 'outputtransform.txt' + } + + +class LinearRegistrationInputSpec(CommandLineInputSpec): + fixedsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--fixedsmoothingfactor %d") + movingsmoothingfactor = traits.Int( + desc= + "Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). 
Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", + argstr="--movingsmoothingfactor %d") + histogrambins = traits.Int( + desc= + "Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", + argstr="--histogrambins %d") + spatialsamples = traits.Int( + desc= + "Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", + argstr="--spatialsamples %d") + iterations = InputMultiPath( + traits.Int, + desc= + "Comma separated list of iterations. Must have the same number of elements as the learning rate.", + sep=",", + argstr="--iterations %s") + learningrate = InputMultiPath( + traits.Float, + desc= + "Comma separated list of learning rates. Learning rate is a scale factor on the gradient of the registration objective function (gradient with respect to the parameters of the transformation) used to update the parameters of the transformation during optimization. Smaller values cause the optimizer to take smaller steps through the parameter space. Larger values are typically used early in the registration process to take large jumps in parameter space followed by smaller values to home in on the optimum value of the registration objective function. Default is: 0.01, 0.005, 0.0005, 0.0002. Must have the same number of elements as iterations.", + sep=",", + argstr="--learningrate %s") + translationscale = traits.Float( + desc= + "Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used 1/(TranslationScale^2)). This parameter is used to \'weight\' or \'standardized\' the transform parameters and their effect on the registration objective function.", + argstr="--translationscale %f") + initialtransform = File( + desc= + "Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", + exists=True, + argstr="--initialtransform %s") + FixedImageFileName = File( + position=-2, + desc="Fixed image to which to register", + exists=True, + argstr="%s") + MovingImageFileName = File( + position=-1, desc="Moving image", exists=True, argstr="%s") + outputtransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--outputtransform %s") + resampledmovingfilename = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", + argstr="--resampledmovingfilename %s") + + +class LinearRegistrationOutputSpec(TraitedSpec): + outputtransform = File( + desc= + "Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", + exists=True) + resampledmovingfilename = File( + desc= + "Resampled moving image to the fixed image coordinate frame. 
Optional (specify an output transform or an output volume or both).", + exists=True) + + +class LinearRegistration(SEMLikeCommandLine): + """title: Linear Registration + +category: Legacy.Registration + +description: Registers two images together using a rigid transform and mutual information. + +version: 0.1.0.$Revision: 19608 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LinearRegistration + +contributor: Daniel Blezek (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = LinearRegistrationInputSpec + output_spec = LinearRegistrationOutputSpec + _cmd = "LinearRegistration " + _outputs_filenames = { + 'resampledmovingfilename': 'resampledmovingfilename.nii', + 'outputtransform': 'outputtransform.txt' + } + + +class ExpertAutomatedRegistrationInputSpec(CommandLineInputSpec): + fixedImage = File( + position=-2, + desc= + "Image which defines the space into which the moving image is registered", + exists=True, + argstr="%s") + movingImage = File( + position=-1, + desc= + "The transform goes from the fixed image's space into the moving image's space", + exists=True, + argstr="%s") + resampledImage = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Registration results", + argstr="--resampledImage %s") + loadTransform = File( + desc="Load a transform that is immediately applied to the moving image", + exists=True, + argstr="--loadTransform %s") + saveTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Save the transform that results from registration", + argstr="--saveTransform %s") + initialization = traits.Enum( + "None", + "Landmarks", + "ImageCenters", + "CentersOfMass", + "SecondMoments", + desc="Method to prime the registration process", + argstr="--initialization %s") + registration = traits.Enum( + "None", + "Initial", + "Rigid", + "Affine", + "BSpline", + "PipelineRigid", + "PipelineAffine", + "PipelineBSpline", + desc="Method for the registration process", + argstr="--registration %s") + metric = traits.Enum( + "MattesMI", + "NormCorr", + "MeanSqrd", + desc="Method to quantify image match", + argstr="--metric %s") + expectedOffset = traits.Float( + desc="Expected misalignment after initialization", + argstr="--expectedOffset %f") + expectedRotation = traits.Float( + desc="Expected misalignment after initialization", + argstr="--expectedRotation %f") + expectedScale = traits.Float( + desc="Expected misalignment after initialization", + argstr="--expectedScale %f") + expectedSkew = traits.Float( + desc="Expected misalignment after initialization", + argstr="--expectedSkew %f") + verbosityLevel = traits.Enum( + "Silent", + "Standard", + "Verbose", + desc="Level of detail of reporting progress", + argstr="--verbosityLevel %s") + sampleFromOverlap = traits.Bool( + desc= + "Limit metric evaluation to the fixed image region overlapped by the moving image", + argstr="--sampleFromOverlap ") + fixedImageMask = File( + desc="Image which defines a mask for the fixed image", + exists=True, + argstr="--fixedImageMask %s") + randomNumberSeed = traits.Int( + desc="Seed to generate a consistent random number sequence", + argstr="--randomNumberSeed %d") + numberOfThreads = traits.Int( + desc="Number of CPU threads to use", argstr="--numberOfThreads %d") + minimizeMemory = traits.Bool( + desc= + "Reduce the amount of 
memory required at the cost of increased computation time", + argstr="--minimizeMemory ") + interpolation = traits.Enum( + "NearestNeighbor", + "Linear", + "BSpline", + desc="Method for interpolation within the optimization process", + argstr="--interpolation %s") + fixedLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the fixed image", + argstr="--fixedLandmarks %s...") + movingLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the moving image", + argstr="--movingLandmarks %s...") + rigidMaxIterations = traits.Int( + desc="Maximum number of rigid optimization iterations", + argstr="--rigidMaxIterations %d") + rigidSamplingRatio = traits.Float( + desc= + "Portion of the image to use in computing the metric during rigid registration", + argstr="--rigidSamplingRatio %f") + affineMaxIterations = traits.Int( + desc="Maximum number of affine optimization iterations", + argstr="--affineMaxIterations %d") + affineSamplingRatio = traits.Float( + desc= + "Portion of the image to use in computing the metric during affine registration", + argstr="--affineSamplingRatio %f") + bsplineMaxIterations = traits.Int( + desc="Maximum number of bspline optimization iterations", + argstr="--bsplineMaxIterations %d") + bsplineSamplingRatio = traits.Float( + desc= + "Portion of the image to use in computing the metric during BSpline registration", + argstr="--bsplineSamplingRatio %f") + controlPointSpacing = traits.Int( + desc="Number of pixels between control points", + argstr="--controlPointSpacing %d") + + +class ExpertAutomatedRegistrationOutputSpec(TraitedSpec): + resampledImage = File(desc="Registration results", exists=True) + saveTransform = File( + desc="Save the transform that results from registration", exists=True) + + +class ExpertAutomatedRegistration(SEMLikeCommandLine): + """title: Expert Automated Registration + +category: Legacy.Registration + +description: Provides rigid, affine, and BSpline registration methods via a simple GUI + +version: 0.1.0.$Revision: 2104 $(alpha) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ExpertAutomatedRegistration + +contributor: Stephen R Aylward (Kitware), Casey B Goodlett (Kitware) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
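+
+example: a minimal nipype sketch (the filenames, initialization, and pipeline choice are assumed, illustrative values):
+
+    from nipype.interfaces.slicer.legacy.registration import ExpertAutomatedRegistration
+    reg = ExpertAutomatedRegistration()
+    reg.inputs.fixedImage = 'fixed.nii'          # placeholder fixed image
+    reg.inputs.movingImage = 'moving.nii'        # placeholder moving image
+    reg.inputs.initialization = 'CentersOfMass'  # prime with a center-of-mass alignment
+    reg.inputs.registration = 'PipelineAffine'   # initial -> rigid -> affine sequence
+    reg.inputs.saveTransform = True              # write saveTransform.txt
+    reg.run()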
+ +""" + + input_spec = ExpertAutomatedRegistrationInputSpec + output_spec = ExpertAutomatedRegistrationOutputSpec + _cmd = "ExpertAutomatedRegistration " + _outputs_filenames = { + 'resampledImage': 'resampledImage.nii', + 'saveTransform': 'saveTransform.txt' + } diff --git a/nipype/interfaces/slicer/legacy/segmentation.py b/nipype/interfaces/slicer/legacy/segmentation.py new file mode 100644 index 0000000000..3500d50d50 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/segmentation.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class OtsuThresholdSegmentationInputSpec(CommandLineInputSpec): + brightObjects = traits.Bool( + desc= + "Segmenting bright objects on a dark background or dark objects on a bright background.", + argstr="--brightObjects ") + numberOfBins = traits.Int( + desc= + "This is an advanced parameter. The number of bins in the histogram used to model the probability mass function of the two intensity distributions. Small numbers of bins may result in a more conservative threshold. The default should suffice for most applications. Experimentation is the only way to see the effect of varying this parameter.", + argstr="--numberOfBins %d") + faceConnected = traits.Bool( + desc= + "This is an advanced parameter. Adjacent voxels are face connected. This affects the connected component algorithm. If this parameter is false, more regions are likely to be identified.", + argstr="--faceConnected ") + minimumObjectSize = traits.Int( + desc= + "Minimum size of object to retain. This parameter can be used to get rid of small regions in noisy images.", + argstr="--minimumObjectSize %d") + inputVolume = File( + position=-2, + desc="Input volume to be segmented", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class OtsuThresholdSegmentationOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class OtsuThresholdSegmentation(SEMLikeCommandLine): + """title: Otsu Threshold Segmentation + +category: Legacy.Segmentation + +description: This filter creates a labeled image from a grayscale image. First, it calculates an optimal threshold that separates the image into foreground and background. This threshold separates those two classes so that their intra-class variance is minimal (see http://en.wikipedia.org/wiki/Otsu%27s_method). Then the filter runs a connected component algorithm to generate unique labels for each connected region of the foreground. Finally, the resulting image is relabeled to provide consecutive numbering. + +version: 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/OtsuThresholdSegmentation + +contributor: Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
+ +""" + + input_spec = OtsuThresholdSegmentationInputSpec + output_spec = OtsuThresholdSegmentationOutputSpec + _cmd = "OtsuThresholdSegmentation " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/legacy/tests/__init__.py b/nipype/interfaces/slicer/legacy/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py new file mode 100644 index 0000000000..949cdc7f61 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_AffineRegistration.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import AffineRegistration + + +def test_AffineRegistration_inputs(): + input_map = dict( + FixedImageFileName=dict( + argstr='%s', + position=-2, + ), + MovingImageFileName=dict( + argstr='%s', + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), + histogrambins=dict(argstr='--histogrambins %d', ), + initialtransform=dict(argstr='--initialtransform %s', ), + iterations=dict(argstr='--iterations %d', ), + movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), + outputtransform=dict( + argstr='--outputtransform %s', + hash_files=False, + ), + resampledmovingfilename=dict( + argstr='--resampledmovingfilename %s', + hash_files=False, + ), + spatialsamples=dict(argstr='--spatialsamples %d', ), + translationscale=dict(argstr='--translationscale %f', ), + ) + inputs = AffineRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_AffineRegistration_outputs(): + output_map = dict( + outputtransform=dict(), + resampledmovingfilename=dict(), + ) + outputs = AffineRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py new file mode 100644 index 0000000000..17339f8859 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineDeformableRegistration.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import BSplineDeformableRegistration + + +def test_BSplineDeformableRegistration_inputs(): + input_map = dict( + FixedImageFileName=dict( + argstr='%s', + position=-2, + ), + MovingImageFileName=dict( + argstr='%s', + position=-1, + ), + args=dict(argstr='%s', ), + constrain=dict(argstr='--constrain ', ), + default=dict(argstr='--default %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSize=dict(argstr='--gridSize %d', ), + histogrambins=dict(argstr='--histogrambins %d', ), + initialtransform=dict(argstr='--initialtransform %s', ), + iterations=dict(argstr='--iterations %d', ), + maximumDeformation=dict(argstr='--maximumDeformation %f', ), + outputtransform=dict( + argstr='--outputtransform %s', + hash_files=False, + ), + 
outputwarp=dict( + argstr='--outputwarp %s', + hash_files=False, + ), + resampledmovingfilename=dict( + argstr='--resampledmovingfilename %s', + hash_files=False, + ), + spatialsamples=dict(argstr='--spatialsamples %d', ), + ) + inputs = BSplineDeformableRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BSplineDeformableRegistration_outputs(): + output_map = dict( + outputtransform=dict(), + outputwarp=dict(), + resampledmovingfilename=dict(), + ) + outputs = BSplineDeformableRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py new file mode 100644 index 0000000000..8335515c13 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_BSplineToDeformationField.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..converters import BSplineToDeformationField + + +def test_BSplineToDeformationField_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + defImage=dict( + argstr='--defImage %s', + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + refImage=dict(argstr='--refImage %s', ), + tfm=dict(argstr='--tfm %s', ), + ) + inputs = BSplineToDeformationField.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BSplineToDeformationField_outputs(): + output_map = dict(defImage=dict(), ) + outputs = BSplineToDeformationField.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py new file mode 100644 index 0000000000..7fa8b77d63 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ExpertAutomatedRegistration.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import ExpertAutomatedRegistration + + +def test_ExpertAutomatedRegistration_inputs(): + input_map = dict( + affineMaxIterations=dict(argstr='--affineMaxIterations %d', ), + affineSamplingRatio=dict(argstr='--affineSamplingRatio %f', ), + args=dict(argstr='%s', ), + bsplineMaxIterations=dict(argstr='--bsplineMaxIterations %d', ), + bsplineSamplingRatio=dict(argstr='--bsplineSamplingRatio %f', ), + controlPointSpacing=dict(argstr='--controlPointSpacing %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedOffset=dict(argstr='--expectedOffset %f', ), + expectedRotation=dict(argstr='--expectedRotation %f', ), + expectedScale=dict(argstr='--expectedScale %f', ), + expectedSkew=dict(argstr='--expectedSkew %f', ), + fixedImage=dict( + argstr='%s', + position=-2, + ), + fixedImageMask=dict(argstr='--fixedImageMask %s', ), + fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), + initialization=dict(argstr='--initialization %s', ), + interpolation=dict(argstr='--interpolation %s', 
), + loadTransform=dict(argstr='--loadTransform %s', ), + metric=dict(argstr='--metric %s', ), + minimizeMemory=dict(argstr='--minimizeMemory ', ), + movingImage=dict( + argstr='%s', + position=-1, + ), + movingLandmarks=dict(argstr='--movingLandmarks %s...', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + randomNumberSeed=dict(argstr='--randomNumberSeed %d', ), + registration=dict(argstr='--registration %s', ), + resampledImage=dict( + argstr='--resampledImage %s', + hash_files=False, + ), + rigidMaxIterations=dict(argstr='--rigidMaxIterations %d', ), + rigidSamplingRatio=dict(argstr='--rigidSamplingRatio %f', ), + sampleFromOverlap=dict(argstr='--sampleFromOverlap ', ), + saveTransform=dict( + argstr='--saveTransform %s', + hash_files=False, + ), + verbosityLevel=dict(argstr='--verbosityLevel %s', ), + ) + inputs = ExpertAutomatedRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ExpertAutomatedRegistration_outputs(): + output_map = dict( + resampledImage=dict(), + saveTransform=dict(), + ) + outputs = ExpertAutomatedRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py new file mode 100644 index 0000000000..a55432f9b0 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_LinearRegistration.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import LinearRegistration + + +def test_LinearRegistration_inputs(): + input_map = dict( + FixedImageFileName=dict( + argstr='%s', + position=-2, + ), + MovingImageFileName=dict( + argstr='%s', + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), + histogrambins=dict(argstr='--histogrambins %d', ), + initialtransform=dict(argstr='--initialtransform %s', ), + iterations=dict( + argstr='--iterations %s', + sep=',', + ), + learningrate=dict( + argstr='--learningrate %s', + sep=',', + ), + movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), + outputtransform=dict( + argstr='--outputtransform %s', + hash_files=False, + ), + resampledmovingfilename=dict( + argstr='--resampledmovingfilename %s', + hash_files=False, + ), + spatialsamples=dict(argstr='--spatialsamples %d', ), + translationscale=dict(argstr='--translationscale %f', ), + ) + inputs = LinearRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LinearRegistration_outputs(): + output_map = dict( + outputtransform=dict(), + resampledmovingfilename=dict(), + ) + outputs = LinearRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py new file mode 100644 index 0000000000..7903fd1d5c --- /dev/null +++ 
b/nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import MultiResolutionAffineRegistration + + +def test_MultiResolutionAffineRegistration_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedImage=dict( + argstr='%s', + position=-2, + ), + fixedImageMask=dict(argstr='--fixedImageMask %s', ), + fixedImageROI=dict(argstr='--fixedImageROI %s', ), + metricTolerance=dict(argstr='--metricTolerance %f', ), + movingImage=dict( + argstr='%s', + position=-1, + ), + numIterations=dict(argstr='--numIterations %d', ), + numLineIterations=dict(argstr='--numLineIterations %d', ), + resampledImage=dict( + argstr='--resampledImage %s', + hash_files=False, + ), + saveTransform=dict( + argstr='--saveTransform %s', + hash_files=False, + ), + stepSize=dict(argstr='--stepSize %f', ), + stepTolerance=dict(argstr='--stepTolerance %f', ), + ) + inputs = MultiResolutionAffineRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultiResolutionAffineRegistration_outputs(): + output_map = dict( + resampledImage=dict(), + saveTransform=dict(), + ) + outputs = MultiResolutionAffineRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py new file mode 100644 index 0000000000..a1af6c71b3 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdImageFilter.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..filtering import OtsuThresholdImageFilter + + +def test_OtsuThresholdImageFilter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + insideValue=dict(argstr='--insideValue %d', ), + numberOfBins=dict(argstr='--numberOfBins %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + outsideValue=dict(argstr='--outsideValue %d', ), + ) + inputs = OtsuThresholdImageFilter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OtsuThresholdImageFilter_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = OtsuThresholdImageFilter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py new file mode 100644 index 0000000000..02beeee464 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_OtsuThresholdSegmentation.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..segmentation import OtsuThresholdSegmentation + + +def 
test_OtsuThresholdSegmentation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + brightObjects=dict(argstr='--brightObjects ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + faceConnected=dict(argstr='--faceConnected ', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + minimumObjectSize=dict(argstr='--minimumObjectSize %d', ), + numberOfBins=dict(argstr='--numberOfBins %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = OtsuThresholdSegmentation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OtsuThresholdSegmentation_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = OtsuThresholdSegmentation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py new file mode 100644 index 0000000000..baa698246c --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_ResampleScalarVolume.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..filtering import ResampleScalarVolume + + +def test_ResampleScalarVolume_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-2, + ), + OutputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + interpolation=dict(argstr='--interpolation %s', ), + spacing=dict( + argstr='--spacing %s', + sep=',', + ), + ) + inputs = ResampleScalarVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ResampleScalarVolume_outputs(): + output_map = dict(OutputVolume=dict(position=-1, ), ) + outputs = ResampleScalarVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py new file mode 100644 index 0000000000..6c2a0eb072 --- /dev/null +++ b/nipype/interfaces/slicer/legacy/tests/test_auto_RigidRegistration.py @@ -0,0 +1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..registration import RigidRegistration + + +def test_RigidRegistration_inputs(): + input_map = dict( + FixedImageFileName=dict( + argstr='%s', + position=-2, + ), + MovingImageFileName=dict( + argstr='%s', + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedsmoothingfactor=dict(argstr='--fixedsmoothingfactor %d', ), + histogrambins=dict(argstr='--histogrambins %d', ), + initialtransform=dict(argstr='--initialtransform %s', ), + iterations=dict( + argstr='--iterations %s', + sep=',', + ), + learningrate=dict( + argstr='--learningrate %s', + sep=',', + ), + movingsmoothingfactor=dict(argstr='--movingsmoothingfactor %d', ), + outputtransform=dict( + argstr='--outputtransform %s', + hash_files=False, + ), + 
resampledmovingfilename=dict( + argstr='--resampledmovingfilename %s', + hash_files=False, + ), + spatialsamples=dict(argstr='--spatialsamples %d', ), + testingmode=dict(argstr='--testingmode ', ), + translationscale=dict(argstr='--translationscale %f', ), + ) + inputs = RigidRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RigidRegistration_outputs(): + output_map = dict( + outputtransform=dict(), + resampledmovingfilename=dict(), + ) + outputs = RigidRegistration.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/quantification/__init__.py b/nipype/interfaces/slicer/quantification/__init__.py new file mode 100644 index 0000000000..6054dddd59 --- /dev/null +++ b/nipype/interfaces/slicer/quantification/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .changequantification import IntensityDifferenceMetric +from .petstandarduptakevaluecomputation import PETStandardUptakeValueComputation diff --git a/nipype/interfaces/slicer/quantification/changequantification.py b/nipype/interfaces/slicer/quantification/changequantification.py new file mode 100644 index 0000000000..5abf1b1287 --- /dev/null +++ b/nipype/interfaces/slicer/quantification/changequantification.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class IntensityDifferenceMetricInputSpec(CommandLineInputSpec): + sensitivityThreshold = traits.Float( + desc= + "This parameter should be between 0 and 1, and defines how sensitive the metric should be to the intensity changes.", + argstr="--sensitivityThreshold %f") + changingBandSize = traits.Int( + desc= + "How far (in mm) from the boundary of the segmentation should the intensity changes be considered.", + argstr="--changingBandSize %d") + baselineVolume = File( + position=-4, + desc="Baseline volume to be compared to", + exists=True, + argstr="%s") + baselineSegmentationVolume = File( + position=-3, + desc= + "Label volume that contains segmentation of the structure of interest in the baseline volume.", + exists=True, + argstr="%s") + followupVolume = File( + position=-2, + desc="Followup volume to be compare to the baseline", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output volume to keep the results of change quantification.", + argstr="%s") + reportFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Report file name", + argstr="--reportFileName %s") + + +class IntensityDifferenceMetricOutputSpec(TraitedSpec): + outputVolume = File( + position=-1, + desc="Output volume to keep the results of change quantification.", + exists=True) + reportFileName = File(desc="Report file name", exists=True) + + +class IntensityDifferenceMetric(SEMLikeCommandLine): + """title: + Intensity Difference Change Detection (FAST) + + +category: + Quantification.ChangeQuantification + + +description: + Quantifies the 
changes between two spatially aligned images based on the pixel-wise difference of image intensities. + + +version: 0.1 + +contributor: Andrey Fedorov + +acknowledgements: + + +""" + + input_spec = IntensityDifferenceMetricInputSpec + output_spec = IntensityDifferenceMetricOutputSpec + _cmd = "IntensityDifferenceMetric " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'reportFileName': 'reportFileName' + } diff --git a/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py new file mode 100644 index 0000000000..0edfca3fbb --- /dev/null +++ b/nipype/interfaces/slicer/quantification/petstandarduptakevaluecomputation.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class PETStandardUptakeValueComputationInputSpec(CommandLineInputSpec): + petDICOMPath = Directory( + desc= + "Input path to a directory containing a PET volume containing DICOM header information for SUV computation", + exists=True, + argstr="--petDICOMPath %s") + petVolume = File( + desc= + "Input PET volume for SUVbw computation (must be the same volume as pointed to by the DICOM path!).", + exists=True, + argstr="--petVolume %s") + labelMap = File( + desc="Input label volume containing the volumes of interest", + exists=True, + argstr="--labelMap %s") + color = File( + desc="Color table to to map labels to colors and names", + exists=True, + argstr="--color %s") + csvFile = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "A file holding the output SUV values in comma separated lines, one per label. Optional.", + argstr="--csvFile %s") + OutputLabel = traits.Str( + desc="List of labels for which SUV values were computed", + argstr="--OutputLabel %s") + OutputLabelValue = traits.Str( + desc="List of label values for which SUV values were computed", + argstr="--OutputLabelValue %s") + SUVMax = traits.Str(desc="SUV max for each label", argstr="--SUVMax %s") + SUVMean = traits.Str(desc="SUV mean for each label", argstr="--SUVMean %s") + SUVMin = traits.Str( + desc="SUV minimum for each label", argstr="--SUVMin %s") + + +class PETStandardUptakeValueComputationOutputSpec(TraitedSpec): + csvFile = File( + desc= + "A file holding the output SUV values in comma separated lines, one per label. Optional.", + exists=True) + + +class PETStandardUptakeValueComputation(SEMLikeCommandLine): + """title: PET Standard Uptake Value Computation + +category: Quantification + +description: Computes the standardized uptake value based on body weight. Takes an input PET image in DICOM and NRRD format (DICOM header must contain Radiopharmaceutical parameters). Produces a CSV file that contains patientID, studyDate, dose, labelID, suvmin, suvmax, suvmean, labelName for each volume of interest. It also displays some of the information as output strings in the GUI, the CSV file is optional in that case. The CSV file is appended to on each execution of the CLI. 
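+
+Example (editor's illustrative sketch, not part of the generated module
+definition; the input paths below are hypothetical):
+
+>>> from nipype.interfaces.slicer.quantification.petstandarduptakevaluecomputation import PETStandardUptakeValueComputation
+>>> suv = PETStandardUptakeValueComputation()
+>>> suv.inputs.petDICOMPath = 'pet_dicom_dir'  # hypothetical DICOM directory
+>>> suv.inputs.petVolume = 'pet.nrrd'          # hypothetical PET volume
+>>> suv.inputs.labelMap = 'labels.nrrd'        # hypothetical label volume
+>>> suv.inputs.csvFile = True                  # True -> default output name csvFile.csv
+>>> suv.run()  # doctest: +SKIP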
+ +version: 0.1.0.$Revision: 8595 $(alpha) + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ComputeSUVBodyWeight + +contributor: Wendy Plesniak (SPL, BWH), Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is funded by the Harvard Catalyst, and the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = PETStandardUptakeValueComputationInputSpec + output_spec = PETStandardUptakeValueComputationOutputSpec + _cmd = "PETStandardUptakeValueComputation " + _outputs_filenames = {'csvFile': 'csvFile.csv'} diff --git a/nipype/interfaces/slicer/quantification/tests/__init__.py b/nipype/interfaces/slicer/quantification/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/quantification/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py new file mode 100644 index 0000000000..e46f046add --- /dev/null +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_IntensityDifferenceMetric.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..changequantification import IntensityDifferenceMetric + + +def test_IntensityDifferenceMetric_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + baselineSegmentationVolume=dict( + argstr='%s', + position=-3, + ), + baselineVolume=dict( + argstr='%s', + position=-4, + ), + changingBandSize=dict(argstr='--changingBandSize %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + followupVolume=dict( + argstr='%s', + position=-2, + ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + reportFileName=dict( + argstr='--reportFileName %s', + hash_files=False, + ), + sensitivityThreshold=dict(argstr='--sensitivityThreshold %f', ), + ) + inputs = IntensityDifferenceMetric.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_IntensityDifferenceMetric_outputs(): + output_map = dict( + outputVolume=dict(position=-1, ), + reportFileName=dict(), + ) + outputs = IntensityDifferenceMetric.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py new file mode 100644 index 0000000000..61141f65db --- /dev/null +++ b/nipype/interfaces/slicer/quantification/tests/test_auto_PETStandardUptakeValueComputation.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..petstandarduptakevaluecomputation import PETStandardUptakeValueComputation + + +def test_PETStandardUptakeValueComputation_inputs(): + input_map = dict( + OutputLabel=dict(argstr='--OutputLabel %s', ), + OutputLabelValue=dict(argstr='--OutputLabelValue %s', ), + SUVMax=dict(argstr='--SUVMax %s', ), + SUVMean=dict(argstr='--SUVMean %s', ), + SUVMin=dict(argstr='--SUVMin 
%s', ), + args=dict(argstr='%s', ), + color=dict(argstr='--color %s', ), + csvFile=dict( + argstr='--csvFile %s', + hash_files=False, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + labelMap=dict(argstr='--labelMap %s', ), + petDICOMPath=dict(argstr='--petDICOMPath %s', ), + petVolume=dict(argstr='--petVolume %s', ), + ) + inputs = PETStandardUptakeValueComputation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PETStandardUptakeValueComputation_outputs(): + output_map = dict(csvFile=dict(), ) + outputs = PETStandardUptakeValueComputation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/__init__.py b/nipype/interfaces/slicer/registration/__init__.py new file mode 100644 index 0000000000..375b9b5416 --- /dev/null +++ b/nipype/interfaces/slicer/registration/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .specialized import (ACPCTransform, FiducialRegistration, + VBRAINSDemonWarp, BRAINSDemonWarp) +from .brainsresample import BRAINSResample +from .brainsfit import BRAINSFit diff --git a/nipype/interfaces/slicer/registration/brainsfit.py b/nipype/interfaces/slicer/registration/brainsfit.py new file mode 100644 index 0000000000..adbd733976 --- /dev/null +++ b/nipype/interfaces/slicer/registration/brainsfit.py @@ -0,0 +1,380 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class BRAINSFitInputSpec(CommandLineInputSpec): + fixedVolume = File( + desc= + "The fixed image for registration by mutual information optimization.", + exists=True, + argstr="--fixedVolume %s") + movingVolume = File( + desc= + "The moving image for registration by mutual information optimization.", + exists=True, + argstr="--movingVolume %s") + bsplineTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", + argstr="--bsplineTransform %s") + linearTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", + argstr="--linearTransform %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputVolume %s") + initialTransform = File( + desc= + "Filename of transform used to initialize the registration. 
This CAN NOT be used with either CenterOfHeadLAlign, MomentsAlign, GeometryAlign, or initialTransform file.", + exists=True, + argstr="--initialTransform %s") + initializeTransformMode = traits.Enum( + "Off", + "useMomentsAlign", + "useCenterOfHeadAlign", + "useGeometryAlign", + "useCenterOfROIAlign", + desc= + "Determine how to initialize the transform center. GeometryAlign on assumes that the center of the voxel lattice of the images represent similar structures. MomentsAlign assumes that the center of mass of the images represent similar structures. useCenterOfHeadAlign attempts to use the top of head and shape of neck to drive a center of mass estimate. Off assumes that the physical space of the images are close, and that centering in terms of the image Origins is a good starting point. This flag is mutually exclusive with the initialTransform flag.", + argstr="--initializeTransformMode %s") + useRigid = traits.Bool( + desc= + "Perform a rigid registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useRigid ") + useScaleVersor3D = traits.Bool( + desc= + "Perform a ScaleVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useScaleVersor3D ") + useScaleSkewVersor3D = traits.Bool( + desc= + "Perform a ScaleSkewVersor3D registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useScaleSkewVersor3D ") + useAffine = traits.Bool( + desc= + "Perform an Affine registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useAffine ") + useBSpline = traits.Bool( + desc= + "Perform a BSpline registration as part of the sequential registration steps. This family of options superceeds the use of transformType if any of them are set.", + argstr="--useBSpline ") + numberOfSamples = traits.Int( + desc= + "The number of voxels sampled for mutual information computation. Increase this for a slower, more careful fit. You can also limit the sampling focus with ROI masks and ROIAUTO mask generation.", + argstr="--numberOfSamples %d") + splineGridSize = InputMultiPath( + traits.Int, + desc= + "The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", + sep=",", + argstr="--splineGridSize %s") + numberOfIterations = InputMultiPath( + traits.Int, + desc= + "The maximum number of iterations to try before failing to converge. Use an explicit limit like 500 or 1000 to manage risk of divergence", + sep=",", + argstr="--numberOfIterations %s") + maskProcessingMode = traits.Enum( + "NOMASK", + "ROIAUTO", + "ROI", + desc= + "What mode to use for using the masks. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. 
The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", + argstr="--maskProcessingMode %s") + fixedBinaryVolume = File( + desc="Fixed Image binary mask volume, ONLY FOR MANUAL ROI mode.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc="Moving Image binary mask volume, ONLY FOR MANUAL ROI mode.", + exists=True, + argstr="--movingBinaryVolume %s") + outputFixedVolumeROI = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", + argstr="--outputFixedVolumeROI %s") + outputMovingVolumeROI = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", + argstr="--outputMovingVolumeROI %s") + outputVolumePixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + desc= + "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s") + backgroundFillValue = traits.Float( + desc="Background fill value for output image.", + argstr="--backgroundFillValue %f") + maskInferiorCutOffFromCenter = traits.Float( + desc= + "For use with --useCenterOfHeadAlign (and --maskProcessingMode ROIAUTO): the cut-off below the image centers, in millimeters, ", + argstr="--maskInferiorCutOffFromCenter %f") + scaleOutputValues = traits.Bool( + desc= + "If true, and the voxel values do not fit within the minimum and maximum values of the desired outputVolumePixelType, then linearly scale the min/max output image voxel values to fit within the min/max range of the outputVolumePixelType.", + argstr="--scaleOutputValues ") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, NearestNeighbor, BSpline, WindowedSinc, or ResampleInPlace. The ResampleInPlace option will create an image with the same discrete voxel values and will adjust the origin and direction of the physical space interpretation.", + argstr="--interpolationMode %s") + minimumStepLength = InputMultiPath( + traits.Float, + desc= + "Each step in the optimization takes steps at least this big. When none are possible, registration is complete.", + sep=",", + argstr="--minimumStepLength %s") + translationScale = traits.Float( + desc= + "How much to scale up changes in position compared to unit rotational changes in radians -- decrease this to put more rotation in the search pattern.", + argstr="--translationScale %f") + reproportionScale = traits.Float( + desc= + "ScaleVersor3D 'Scale' compensation factor. Increase this to put more rescaling in a ScaleVersor3D or ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--reproportionScale %f") + skewScale = traits.Float( + desc= + "ScaleSkewVersor3D Skew compensation factor. Increase this to put more skew in a ScaleSkewVersor3D search pattern. 1.0 works well with a translationScale of 1000.0", + argstr="--skewScale %f") + maxBSplineDisplacement = traits.Float( + desc= + " Sets the maximum allowed displacements in image physical coordinates for BSpline control grid along each axis. A value of 0.0 indicates that the problem should be unbounded. 
NOTE: This only constrains the BSpline portion, and does not limit the displacement from the associated bulk transform. This can lead to a substantial reduction in computation time in the BSpline optimizer., ", + argstr="--maxBSplineDisplacement %f") + histogramMatch = traits.Bool( + desc= + "Histogram Match the input images. This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile. Do NOT use if registering images from different modailties.", + argstr="--histogramMatch ") + numberOfHistogramBins = traits.Int( + desc="The number of histogram levels", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc="the number of match points", argstr="--numberOfMatchPoints %d") + strippedOutputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", + argstr="--strippedOutputTransform %s") + transformType = InputMultiPath( + traits.Str, + desc= + "Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifiying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", + sep=",", + argstr="--transformType %s") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "(optional) Filename to which save the (optional) estimated transform. NOTE: You must select either the outputTransform or the outputVolume option.", + argstr="--outputTransform %s") + fixedVolumeTimeIndex = traits.Int( + desc= + "The index in the time series for the 3D fixed image to fit, if 4-dimensional.", + argstr="--fixedVolumeTimeIndex %d") + movingVolumeTimeIndex = traits.Int( + desc= + "The index in the time series for the 3D moving image to fit, if 4-dimensional.", + argstr="--movingVolumeTimeIndex %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "The radius for the optional MedianImageFilter preprocessing in all 3 directions.", + sep=",", + argstr="--medianFilterSize %s") + removeIntensityOutliers = traits.Float( + desc= + "The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the moduel will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", + argstr="--removeIntensityOutliers %f") + useCachingOfBSplineWeightsMode = traits.Enum( + "ON", + "OFF", + desc= + "This is a 5x speed advantage at the expense of requiring much more memory. Only relevant when transformType is BSpline.", + argstr="--useCachingOfBSplineWeightsMode %s") + useExplicitPDFDerivativesMode = traits.Enum( + "AUTO", + "ON", + "OFF", + desc= + "Using mode AUTO means OFF for BSplineDeformableTransforms and ON for the linear transforms. The ON alternative uses more memory to sometimes do a better job.", + argstr="--useExplicitPDFDerivativesMode %s") + ROIAutoDilateSize = traits.Float( + desc= + "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. 
At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f") + ROIAutoClosingSize = traits.Float( + desc= + "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", + argstr="--ROIAutoClosingSize %f") + relaxationFactor = traits.Float( + desc= + "Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", + argstr="--relaxationFactor %f") + maximumStepLength = traits.Float( + desc= + "Internal debugging parameter, and should probably never be used from the command line. This will be removed in the future.", + argstr="--maximumStepLength %f") + failureExitCode = traits.Int( + desc= + "If the fit fails, exit with this status code. (It can be used to force a successfult exit status of (0) if the registration fails due to reaching the maximum number of iterations.", + argstr="--failureExitCode %d") + writeTransformOnFailure = traits.Bool( + desc= + "Flag to save the final transform even if the numberOfIterations are reached without convergence. (Intended for use when --failureExitCode 0 )", + argstr="--writeTransformOnFailure ") + numberOfThreads = traits.Int( + desc= + "Explicitly specify the maximum number of threads to use. (default is auto-detected)", + argstr="--numberOfThreads %d") + forceMINumberOfThreads = traits.Int( + desc= + "Force the the maximum number of threads to use for non thread safe MI metric. CAUTION: Inconsistent results my arise!", + argstr="--forceMINumberOfThreads %d") + debugLevel = traits.Int( + desc= + "Display debug messages, and produce debug intermediate results. 0=OFF, 1=Minimal, 10=Maximum debugging.", + argstr="--debugLevel %d") + costFunctionConvergenceFactor = traits.Float( + desc= + " From itkLBFGSBOptimizer.h: Set/Get the CostFunctionConvergenceFactor. Algorithm terminates when the reduction in cost function is less than (factor * epsmcj) where epsmch is the machine precision. Typical values for factor: 1e+12 for low accuracy; 1e+7 for moderate accuracy and 1e+1 for extremely high accuracy. 1e+9 seems to work well., ", + argstr="--costFunctionConvergenceFactor %f") + projectedGradientTolerance = traits.Float( + desc= + " From itkLBFGSBOptimizer.h: Set/Get the ProjectedGradientTolerance. Algorithm terminates when the project gradient is below the tolerance. Default lbfgsb value is 1e-5, but 1e-4 seems to work well., ", + argstr="--projectedGradientTolerance %f") + gui = traits.Bool( + desc= + "Display intermediate image volumes for debugging. 
NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 = traits.Bool( + desc="DO NOT USE THIS FLAG", + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ") + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 = traits.Bool( + desc="DO NOT USE THIS FLAG", + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ") + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 = traits.Bool( + desc="DO NOT USE THIS FLAG", + argstr="--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ") + permitParameterVariation = InputMultiPath( + traits.Int, + desc= + "A bit vector to permit linear transform parameters to vary under optimization. The vector order corresponds with transform parameters, and beyond the end ones fill in as a default. For instance, you can choose to rotate only in x (pitch) with 1,0,0; this is mostly for expert use in turning on and off individual degrees of freedom in rotation, translation or scaling without multiplying the number of transform representations; this trick is probably meaningless when tried with the general affine transform.", + sep=",", + argstr="--permitParameterVariation %s") + costMetric = traits.Enum( + "MMI", + "MSE", + "NC", + "MC", + desc= + "The cost metric to be used during fitting. Defaults to MMI. Options are MMI (Mattes Mutual Information), MSE (Mean Square Error), NC (Normalized Correlation), MC (Match Cardinality for binary images)", + argstr="--costMetric %s") + writeOutputTransformInFloat = traits.Bool( + desc= + "By default, the output registration transforms (either the output composite transform or each transform component) are written to the disk in double precision. If this flag is ON, the output transforms will be written in single (float) precision. It is especially important if the output transform is a displacement field transform, or it is a composite transform that includes several displacement fields.", + argstr="--writeOutputTransformInFloat ") + + +class BRAINSFitOutputSpec(TraitedSpec): + bsplineTransform = File( + desc= + "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS BSpline", + exists=True) + linearTransform = File( + desc= + "(optional) Filename to which save the estimated transform. NOTE: You must set at least one output object (either a deformed image or a transform. NOTE: USE THIS ONLY IF THE FINAL TRANSFORM IS ---NOT--- BSpline", + exists=True) + outputVolume = File( + desc= + "(optional) Output image for registration. NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True) + outputFixedVolumeROI = File( + desc= + "The ROI automatically found in fixed image, ONLY FOR ROIAUTO mode.", + exists=True) + outputMovingVolumeROI = File( + desc= + "The ROI automatically found in moving image, ONLY FOR ROIAUTO mode.", + exists=True) + strippedOutputTransform = File( + desc= + "File name for the rigid component of the estimated affine transform. Can be used to rigidly register the moving image to the fixed image. NOTE: This value is overwritten if either bsplineTransform or linearTransform is set.", + exists=True) + outputTransform = File( + desc= + "(optional) Filename to which save the (optional) estimated transform. 
NOTE: You must select either the outputTransform or the outputVolume option.", + exists=True) + + +class BRAINSFit(SEMLikeCommandLine): + """title: General Registration (BRAINS) + +category: Registration + +description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 + +version: 3.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSFit + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5) 1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard + +""" + + input_spec = BRAINSFitInputSpec + output_spec = BRAINSFitOutputSpec + _cmd = "BRAINSFit " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'bsplineTransform': 'bsplineTransform.mat', + 'outputTransform': 'outputTransform.mat', + 'outputFixedVolumeROI': 'outputFixedVolumeROI.nii', + 'strippedOutputTransform': 'strippedOutputTransform.mat', + 'outputMovingVolumeROI': 'outputMovingVolumeROI.nii', + 'linearTransform': 'linearTransform.mat' + } diff --git a/nipype/interfaces/slicer/registration/brainsresample.py b/nipype/interfaces/slicer/registration/brainsresample.py new file mode 100644 index 0000000000..a3b79681fd --- /dev/null +++ b/nipype/interfaces/slicer/registration/brainsresample.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class BRAINSResampleInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="Image To Warp", exists=True, argstr="--inputVolume %s") + referenceVolume = File( + desc= + "Reference image used only to define the output space. If not specified, the warping is done in the same space as the image to warp.", + exists=True, + argstr="--referenceVolume %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Resulting deformed image", + argstr="--outputVolume %s") + pixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + "binary", + desc= + "Specifies the pixel type for the input/output images. 
The \'binary\' pixel type uses a modified algorithm whereby the image is read in as unsigned char, a signed distance map is created, signed distance map is resampled, and then a thresholded image of type unsigned char is written to disk.", + argstr="--pixelType %s") + deformationVolume = File( + desc="Displacement Field to be used to warp the image", + exists=True, + argstr="--deformationVolume %s") + warpTransform = File( + desc= + "Filename for the BRAINSFit transform used in place of the deformation field", + exists=True, + argstr="--warpTransform %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + inverseTransform = traits.Bool( + desc= + "True/False is to compute inverse of given transformation. Default is false", + argstr="--inverseTransform ") + defaultValue = traits.Float( + desc="Default voxel value", argstr="--defaultValue %f") + gridSpacing = InputMultiPath( + traits.Int, + desc= + "Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). This is useful for makeing a 2D image of grid lines from the 3D space ", + sep=",", + argstr="--gridSpacing %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSResampleOutputSpec(TraitedSpec): + outputVolume = File(desc="Resulting deformed image", exists=True) + + +class BRAINSResample(SEMLikeCommandLine): + """title: Resample Image (BRAINS) + +category: Registration + +description: + This program resamples an image image using a deformation field or a transform (BSpline, Affine, Rigid, etc.). + + +version: 3.0.0 + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Modules:BRAINSResample + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Vincent Magnotta, Greg Harris, and Hans Johnson. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
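+
+Example (editor's illustrative sketch; the file names are hypothetical):
+
+>>> from nipype.interfaces.slicer.registration.brainsresample import BRAINSResample
+>>> resample = BRAINSResample()
+>>> resample.inputs.inputVolume = 'moving.nii'  # hypothetical image to warp
+>>> resample.inputs.warpTransform = 'fit.mat'   # hypothetical BRAINSFit transform
+>>> resample.inputs.interpolationMode = 'Linear'
+>>> resample.inputs.outputVolume = True         # True -> default output name outputVolume.nii
+>>> resample.run()  # doctest: +SKIP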
+ +""" + + input_spec = BRAINSResampleInputSpec + output_spec = BRAINSResampleOutputSpec + _cmd = "BRAINSResample " + _outputs_filenames = {'outputVolume': 'outputVolume.nii'} diff --git a/nipype/interfaces/slicer/registration/specialized.py b/nipype/interfaces/slicer/registration/specialized.py new file mode 100644 index 0000000000..9c6c3f5f20 --- /dev/null +++ b/nipype/interfaces/slicer/registration/specialized.py @@ -0,0 +1,615 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class ACPCTransformInputSpec(CommandLineInputSpec): + acpc = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc= + "ACPC line, two fiducial points, one at the anterior commissure and one at the posterior commissure.", + argstr="--acpc %s...") + midline = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc= + "The midline is a series of points defining the division between the hemispheres of the brain (the mid sagittal plane).", + argstr="--midline %s...") + outputTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "A transform filled in from the ACPC and Midline registration calculation", + argstr="--outputTransform %s") + debugSwitch = traits.Bool( + desc="Click if wish to see debugging output", argstr="--debugSwitch ") + + +class ACPCTransformOutputSpec(TraitedSpec): + outputTransform = File( + desc= + "A transform filled in from the ACPC and Midline registration calculation", + exists=True) + + +class ACPCTransform(SEMLikeCommandLine): + """title: ACPC Transform + +category: Registration.Specialized + +description:

Calculate a transformation from two lists of fiducial points.

The ACPC line is defined by two fiducial points, one at the anterior commissure and one at the posterior commissure. The resulting transform will bring the line connecting them horizontal to the AP axis.

The midline is a series of points defining the division between the hemispheres of the brain (the mid-sagittal plane). The resulting transform will align the mid-sagittal plane of the output volume with the AS plane.

Use the Filtering module 'Resample Scalar/Vector/DWI Volume' to apply the transformation to a volume.
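+
+Example (editor's illustrative sketch; the fiducial coordinates are made
+up for illustration):
+
+>>> from nipype.interfaces.slicer.registration.specialized import ACPCTransform
+>>> acpc = ACPCTransform()
+>>> acpc.inputs.acpc = [[0.0, 20.0, 10.0], [0.0, -24.0, 9.0]]  # hypothetical AC and PC points
+>>> acpc.inputs.outputTransform = True  # True -> default output name outputTransform.mat
+>>> acpc.run()  # doctest: +SKIP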

+ +version: 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ACPCTransform + +license: slicer3 + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = ACPCTransformInputSpec + output_spec = ACPCTransformOutputSpec + _cmd = "ACPCTransform " + _outputs_filenames = {'outputTransform': 'outputTransform.mat'} + + +class FiducialRegistrationInputSpec(CommandLineInputSpec): + fixedLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the fixed image", + argstr="--fixedLandmarks %s...") + movingLandmarks = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Ordered list of landmarks in the moving image", + argstr="--movingLandmarks %s...") + saveTransform = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="Save the transform that results from registration", + argstr="--saveTransform %s") + transformType = traits.Enum( + "Translation", + "Rigid", + "Similarity", + desc="Type of transform to produce", + argstr="--transformType %s") + rms = traits.Float(desc="Display RMS Error.", argstr="--rms %f") + outputMessage = traits.Str( + desc="Provides more information on the output", + argstr="--outputMessage %s") + + +class FiducialRegistrationOutputSpec(TraitedSpec): + saveTransform = File( + desc="Save the transform that results from registration", exists=True) + + +class FiducialRegistration(SEMLikeCommandLine): + """title: Fiducial Registration + +category: Registration.Specialized + +description: Computes a rigid, similarity or affine transform from a matched list of fiducials + +version: 0.1.0.$Revision$ + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/TransformFromFiducials + +contributor: Casey B Goodlett (Kitware), Dominik Meier (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
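+
+Example (editor's illustrative sketch; the landmark coordinates are made
+up for illustration):
+
+>>> from nipype.interfaces.slicer.registration.specialized import FiducialRegistration
+>>> fidreg = FiducialRegistration()
+>>> fidreg.inputs.fixedLandmarks = [[10.0, 5.0, 2.0], [30.0, 6.0, 1.0]]   # hypothetical
+>>> fidreg.inputs.movingLandmarks = [[11.0, 4.5, 2.2], [29.5, 6.5, 0.8]]  # hypothetical
+>>> fidreg.inputs.transformType = 'Rigid'
+>>> fidreg.inputs.saveTransform = True  # True -> default output name saveTransform.txt
+>>> fidreg.run()  # doctest: +SKIP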
+ +""" + + input_spec = FiducialRegistrationInputSpec + output_spec = FiducialRegistrationOutputSpec + _cmd = "FiducialRegistration " + _outputs_filenames = {'saveTransform': 'saveTransform.txt'} + + +class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): + movingVolume = InputMultiPath( + File(exists=True), + desc="Required: input moving image", + argstr="--movingVolume %s...") + fixedVolume = InputMultiPath( + File(exists=True), + desc="Required: input fixed (target) image", + argstr="--fixedVolume %s...") + inputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s") + outputDisplacementFieldVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s") + outputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + registrationFilterType = traits.Enum( + "Demons", + "FastSymmetricForces", + "Diffeomorphic", + "LogDemons", + "SymmetricLogDemons", + desc= + "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic|LogDemons|SymmetricLogDemons", + argstr="--registrationFilterType %s") + smoothDisplacementFieldSigma = traits.Float( + desc= + "A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f") + numberOfPyramidLevels = traits.Int( + desc= + "Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d") + minimumFixedPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumFixedPyramid %s") + minimumMovingPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumMovingPyramid %s") + arrayOfPyramidLevelIterations = InputMultiPath( + traits.Int, + desc="The number of iterations for each pyramid level", + sep=",", + argstr="--arrayOfPyramidLevelIterations %s") + histogramMatch = traits.Bool( + desc= + "Histogram Match the input images. 
This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ") + numberOfHistogramBins = traits.Int( + desc="The number of histogram levels", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc="The number of match points for histrogramMatch", + argstr="--numberOfMatchPoints %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + sep=",", + argstr="--medianFilterSize %s") + initializeWithDisplacementField = File( + desc="Initial deformation field vector image file name", + exists=True, + argstr="--initializeWithDisplacementField %s") + initializeWithTransform = File( + desc="Initial Transform filename", + exists=True, + argstr="--initializeWithTransform %s") + makeBOBF = traits.Bool( + desc= + "Flag to make Brain-Only Background-Filled versions of the input and target volumes.", + argstr="--makeBOBF ") + fixedBinaryVolume = File( + desc="Mask filename for desired region of interest in the Fixed image.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc= + "Mask filename for desired region of interest in the Moving image.", + exists=True, + argstr="--movingBinaryVolume %s") + lowerThresholdForBOBF = traits.Int( + desc="Lower threshold for performing BOBF", + argstr="--lowerThresholdForBOBF %d") + upperThresholdForBOBF = traits.Int( + desc="Upper threshold for performing BOBF", + argstr="--upperThresholdForBOBF %d") + backgroundFillValue = traits.Int( + desc="Replacement value to overwrite background when performing BOBF", + argstr="--backgroundFillValue %d") + seedForBOBF = InputMultiPath( + traits.Int, + desc="coordinates in all 3 directions for Seed when performing BOBF", + sep=",", + argstr="--seedForBOBF %s") + neighborhoodForBOBF = InputMultiPath( + traits.Int, + desc= + "neighborhood in all 3 directions to be included when performing BOBF", + sep=",", + argstr="--neighborhoodForBOBF %s") + outputDisplacementFieldPrefix = traits.Str( + desc= + "Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s") + outputCheckerboardVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s") + checkerboardPatternSubdivisions = InputMultiPath( + traits.Int, + desc="Number of Checkerboard subdivisions in all 3 directions", + sep=",", + argstr="--checkerboardPatternSubdivisions %s") + outputNormalized = traits.Bool( + desc= + "Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ") + outputDebug = traits.Bool( + desc="Flag to write debugging images after each step.", + argstr="--outputDebug ") + weightFactors = InputMultiPath( + traits.Float, + desc="Weight fatctors for each input images", + sep=",", + argstr="--weightFactors %s") + gradient_type = traits.Enum( + "0", + "1", + "2", + desc= + "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s") + upFieldSmoothing = traits.Float( + desc="Smoothing sigma for the update field at each iteration", + argstr="--upFieldSmoothing %f") + max_step_length = traits.Float( + desc="Maximum length of an update vector (0: no restriction)", + argstr="--max_step_length %f") + use_vanilla_dem = traits.Bool( + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + gui = traits.Bool( + desc="Display intermediate image volumes for debugging", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + numberOfBCHApproximationTerms = traits.Int( + desc="Number of terms in the BCH expansion", + argstr="--numberOfBCHApproximationTerms %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class VBRAINSDemonWarpOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True) + outputDisplacementFieldVolume = File( + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True) + outputCheckerboardVolume = File( + desc= + "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True) + + +class VBRAINSDemonWarp(SEMLikeCommandLine): + """title: Vector Demon Registration (BRAINS) + +category: Registration.Specialized + +description: + This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + + + +version: 3.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Hans J. Johnson and Greg Harris. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
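+
+Example (editor's illustrative sketch; the file names are hypothetical):
+
+>>> from nipype.interfaces.slicer.registration.specialized import VBRAINSDemonWarp
+>>> vdemons = VBRAINSDemonWarp()
+>>> vdemons.inputs.movingVolume = ['moving_t1.nii', 'moving_t2.nii']  # hypothetical multi-channel input
+>>> vdemons.inputs.fixedVolume = ['fixed_t1.nii', 'fixed_t2.nii']     # hypothetical multi-channel target
+>>> vdemons.inputs.weightFactors = [1.0, 1.0]
+>>> vdemons.inputs.outputVolume = True  # True -> default output name outputVolume.nii
+>>> vdemons.run()  # doctest: +SKIP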
+ +""" + + input_spec = VBRAINSDemonWarpInputSpec + output_spec = VBRAINSDemonWarpOutputSpec + _cmd = "VBRAINSDemonWarp " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', + 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + } + + +class BRAINSDemonWarpInputSpec(CommandLineInputSpec): + movingVolume = File( + desc="Required: input moving image", + exists=True, + argstr="--movingVolume %s") + fixedVolume = File( + desc="Required: input fixed (target) image", + exists=True, + argstr="--fixedVolume %s") + inputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "Input volumes will be typecast to this format: float|short|ushort|int|uchar", + argstr="--inputPixelType %s") + outputVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + argstr="--outputVolume %s") + outputDisplacementFieldVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + argstr="--outputDisplacementFieldVolume %s") + outputPixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uchar", + desc= + "outputVolume will be typecast to this format: float|short|ushort|int|uchar", + argstr="--outputPixelType %s") + interpolationMode = traits.Enum( + "NearestNeighbor", + "Linear", + "ResampleInPlace", + "BSpline", + "WindowedSinc", + "Hamming", + "Cosine", + "Welch", + "Lanczos", + "Blackman", + desc= + "Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", + argstr="--interpolationMode %s") + registrationFilterType = traits.Enum( + "Demons", + "FastSymmetricForces", + "Diffeomorphic", + desc= + "Registration Filter Type: Demons|FastSymmetricForces|Diffeomorphic", + argstr="--registrationFilterType %s") + smoothDisplacementFieldSigma = traits.Float( + desc= + "A gaussian smoothing value to be applied to the deformation feild at each iteration.", + argstr="--smoothDisplacementFieldSigma %f") + numberOfPyramidLevels = traits.Int( + desc= + "Number of image pyramid levels to use in the multi-resolution registration.", + argstr="--numberOfPyramidLevels %d") + minimumFixedPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the fixed image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumFixedPyramid %s") + minimumMovingPyramid = InputMultiPath( + traits.Int, + desc= + "The shrink factor for the first level of the moving image pyramid. (i.e. start at 1/16 scale, then 1/8, then 1/4, then 1/2, and finally full scale)", + sep=",", + argstr="--minimumMovingPyramid %s") + arrayOfPyramidLevelIterations = InputMultiPath( + traits.Int, + desc="The number of iterations for each pyramid level", + sep=",", + argstr="--arrayOfPyramidLevelIterations %s") + histogramMatch = traits.Bool( + desc= + "Histogram Match the input images. 
This is suitable for images of the same modality that may have different absolute scales, but the same overall intensity profile.", + argstr="--histogramMatch ") + numberOfHistogramBins = traits.Int( + desc="The number of histogram levels", + argstr="--numberOfHistogramBins %d") + numberOfMatchPoints = traits.Int( + desc="The number of match points for histrogramMatch", + argstr="--numberOfMatchPoints %d") + medianFilterSize = InputMultiPath( + traits.Int, + desc= + "Median filter radius in all 3 directions. When images have a lot of salt and pepper noise, this step can improve the registration.", + sep=",", + argstr="--medianFilterSize %s") + initializeWithDisplacementField = File( + desc="Initial deformation field vector image file name", + exists=True, + argstr="--initializeWithDisplacementField %s") + initializeWithTransform = File( + desc="Initial Transform filename", + exists=True, + argstr="--initializeWithTransform %s") + maskProcessingMode = traits.Enum( + "NOMASK", + "ROIAUTO", + "ROI", + "BOBF", + desc= + "What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", + argstr="--maskProcessingMode %s") + fixedBinaryVolume = File( + desc="Mask filename for desired region of interest in the Fixed image.", + exists=True, + argstr="--fixedBinaryVolume %s") + movingBinaryVolume = File( + desc= + "Mask filename for desired region of interest in the Moving image.", + exists=True, + argstr="--movingBinaryVolume %s") + lowerThresholdForBOBF = traits.Int( + desc="Lower threshold for performing BOBF", + argstr="--lowerThresholdForBOBF %d") + upperThresholdForBOBF = traits.Int( + desc="Upper threshold for performing BOBF", + argstr="--upperThresholdForBOBF %d") + backgroundFillValue = traits.Int( + desc="Replacement value to overwrite background when performing BOBF", + argstr="--backgroundFillValue %d") + seedForBOBF = InputMultiPath( + traits.Int, + desc="coordinates in all 3 directions for Seed when performing BOBF", + sep=",", + argstr="--seedForBOBF %s") + neighborhoodForBOBF = InputMultiPath( + traits.Int, + desc= + "neighborhood in all 3 directions to be included when performing BOBF", + sep=",", + argstr="--neighborhoodForBOBF %s") + outputDisplacementFieldPrefix = traits.Str( + desc= + "Displacement field filename prefix for writing separate x, y, and z component images", + argstr="--outputDisplacementFieldPrefix %s") + outputCheckerboardVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + argstr="--outputCheckerboardVolume %s") + checkerboardPatternSubdivisions = InputMultiPath( + traits.Int, + desc="Number of Checkerboard subdivisions in all 3 directions", + sep=",", + argstr="--checkerboardPatternSubdivisions %s") + outputNormalized = traits.Bool( + desc= + "Flag to warp and write the normalized images to output. 
In normalized images the image values are fit-scaled to be between 0 and the maximum storage type value.", + argstr="--outputNormalized ") + outputDebug = traits.Bool( + desc="Flag to write debugging images after each step.", + argstr="--outputDebug ") + gradient_type = traits.Enum( + "0", + "1", + "2", + desc= + "Type of gradient used for computing the demons force (0 is symmetrized, 1 is fixed image, 2 is moving image)", + argstr="--gradient_type %s") + upFieldSmoothing = traits.Float( + desc="Smoothing sigma for the update field at each iteration", + argstr="--upFieldSmoothing %f") + max_step_length = traits.Float( + desc="Maximum length of an update vector (0: no restriction)", + argstr="--max_step_length %f") + use_vanilla_dem = traits.Bool( + desc="Run vanilla demons algorithm", argstr="--use_vanilla_dem ") + gui = traits.Bool( + desc="Display intermediate image volumes for debugging", + argstr="--gui ") + promptUser = traits.Bool( + desc= + "Prompt the user to hit enter each time an image is sent to the DebugImageViewer", + argstr="--promptUser ") + numberOfBCHApproximationTerms = traits.Int( + desc="Number of terms in the BCH expansion", + argstr="--numberOfBCHApproximationTerms %d") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSDemonWarpOutputSpec(TraitedSpec): + outputVolume = File( + desc= + "Required: output resampled moving image (will have the same physical space as the fixedVolume).", + exists=True) + outputDisplacementFieldVolume = File( + desc= + "Output deformation field vector image (will have the same physical space as the fixedVolume).", + exists=True) + outputCheckerboardVolume = File( + desc= + "Genete a checkerboard image volume between the fixedVolume and the deformed movingVolume.", + exists=True) + + +class BRAINSDemonWarp(SEMLikeCommandLine): + """title: Demon Registration (BRAINS) + +category: Registration.Specialized + +description: + This program finds a deformation field to warp a moving image onto a fixed image. The images must be of the same signal kind, and contain an image of the same kind of object. This program uses the Thirion Demons warp software in ITK, the Insight Toolkit. Additional information is available at: http://www.nitrc.org/projects/brainsdemonwarp. + + + +version: 3.0.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Modules:BRAINSDemonWarp + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: This tool was developed by Hans J. Johnson and Greg Harris. + +acknowledgements: The development of this tool was supported by funding from grants NS050568 and NS40068 from the National Institute of Neurological Disorders and Stroke and grants MH31593, MH40856, from the National Institute of Mental Health. 
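+
+example: The snippet below is an illustrative sketch, not part of the
+autogenerated module metadata; the file names 'fixed.nii' and 'moving.nii'
+are placeholder assumptions. Passing True for an output lets nipype fill in
+the default name from _outputs_filenames.
+
+    >>> from nipype.interfaces.slicer.registration.specialized import BRAINSDemonWarp
+    >>> warp = BRAINSDemonWarp()
+    >>> warp.inputs.movingVolume = 'moving.nii'  # placeholder moving image
+    >>> warp.inputs.fixedVolume = 'fixed.nii'    # placeholder fixed (target) image
+    >>> warp.inputs.outputVolume = True          # default: outputVolume.nii
+    >>> warp.run()  # doctest: +SKIP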
+ +""" + + input_spec = BRAINSDemonWarpInputSpec + output_spec = BRAINSDemonWarpOutputSpec + _cmd = "BRAINSDemonWarp " + _outputs_filenames = { + 'outputVolume': 'outputVolume.nii', + 'outputCheckerboardVolume': 'outputCheckerboardVolume.nii', + 'outputDisplacementFieldVolume': 'outputDisplacementFieldVolume.nrrd' + } diff --git a/nipype/interfaces/slicer/registration/tests/__init__.py b/nipype/interfaces/slicer/registration/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py new file mode 100644 index 0000000000..454e290102 --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_ACPCTransform.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import ACPCTransform + + +def test_ACPCTransform_inputs(): + input_map = dict( + acpc=dict(argstr='--acpc %s...', ), + args=dict(argstr='%s', ), + debugSwitch=dict(argstr='--debugSwitch ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + midline=dict(argstr='--midline %s...', ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + ) + inputs = ACPCTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ACPCTransform_outputs(): + output_map = dict(outputTransform=dict(), ) + outputs = ACPCTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py new file mode 100644 index 0000000000..c631f9b96e --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSDemonWarp.py @@ -0,0 +1,103 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSDemonWarp + + +def test_BRAINSDemonWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + arrayOfPyramidLevelIterations=dict( + argstr='--arrayOfPyramidLevelIterations %s', + sep=',', + ), + backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + checkerboardPatternSubdivisions=dict( + argstr='--checkerboardPatternSubdivisions %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s', ), + gradient_type=dict(argstr='--gradient_type %s', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initializeWithDisplacementField=dict( + argstr='--initializeWithDisplacementField %s', ), + initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + inputPixelType=dict(argstr='--inputPixelType %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), + maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), + max_step_length=dict(argstr='--max_step_length %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + 
), + minimumFixedPyramid=dict( + argstr='--minimumFixedPyramid %s', + sep=',', + ), + minimumMovingPyramid=dict( + argstr='--minimumMovingPyramid %s', + sep=',', + ), + movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s', ), + neighborhoodForBOBF=dict( + argstr='--neighborhoodForBOBF %s', + sep=',', + ), + numberOfBCHApproximationTerms=dict( + argstr='--numberOfBCHApproximationTerms %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputCheckerboardVolume=dict( + argstr='--outputCheckerboardVolume %s', + hash_files=False, + ), + outputDebug=dict(argstr='--outputDebug ', ), + outputDisplacementFieldPrefix=dict( + argstr='--outputDisplacementFieldPrefix %s', ), + outputDisplacementFieldVolume=dict( + argstr='--outputDisplacementFieldVolume %s', + hash_files=False, + ), + outputNormalized=dict(argstr='--outputNormalized ', ), + outputPixelType=dict(argstr='--outputPixelType %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + promptUser=dict(argstr='--promptUser ', ), + registrationFilterType=dict(argstr='--registrationFilterType %s', ), + seedForBOBF=dict( + argstr='--seedForBOBF %s', + sep=',', + ), + smoothDisplacementFieldSigma=dict( + argstr='--smoothDisplacementFieldSigma %f', ), + upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), + upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), + use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + ) + inputs = BRAINSDemonWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSDemonWarp_outputs(): + output_map = dict( + outputCheckerboardVolume=dict(), + outputDisplacementFieldVolume=dict(), + outputVolume=dict(), + ) + outputs = BRAINSDemonWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py new file mode 100644 index 0000000000..bb62633d94 --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSFit.py @@ -0,0 +1,142 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsfit import BRAINSFit + + +def test_BRAINSFit_inputs(): + input_map = dict( + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00=dict( + argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_00 ', ), + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01=dict( + argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_01 ', ), + NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02=dict( + argstr='--NEVER_USE_THIS_FLAG_IT_IS_OUTDATED_02 ', ), + ROIAutoClosingSize=dict(argstr='--ROIAutoClosingSize %f', ), + ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), + args=dict(argstr='%s', ), + backgroundFillValue=dict(argstr='--backgroundFillValue %f', ), + bsplineTransform=dict( + argstr='--bsplineTransform %s', + hash_files=False, + ), + costFunctionConvergenceFactor=dict( + argstr='--costFunctionConvergenceFactor %f', ), + costMetric=dict(argstr='--costMetric %s', ), + debugLevel=dict(argstr='--debugLevel %d', ), + environ=dict( + 
nohash=True, + usedefault=True, + ), + failureExitCode=dict(argstr='--failureExitCode %d', ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s', ), + fixedVolumeTimeIndex=dict(argstr='--fixedVolumeTimeIndex %d', ), + forceMINumberOfThreads=dict(argstr='--forceMINumberOfThreads %d', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initialTransform=dict(argstr='--initialTransform %s', ), + initializeTransformMode=dict(argstr='--initializeTransformMode %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + linearTransform=dict( + argstr='--linearTransform %s', + hash_files=False, + ), + maskInferiorCutOffFromCenter=dict( + argstr='--maskInferiorCutOffFromCenter %f', ), + maskProcessingMode=dict(argstr='--maskProcessingMode %s', ), + maxBSplineDisplacement=dict(argstr='--maxBSplineDisplacement %f', ), + maximumStepLength=dict(argstr='--maximumStepLength %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + minimumStepLength=dict( + argstr='--minimumStepLength %s', + sep=',', + ), + movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s', ), + movingVolumeTimeIndex=dict(argstr='--movingVolumeTimeIndex %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfIterations=dict( + argstr='--numberOfIterations %s', + sep=',', + ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfSamples=dict(argstr='--numberOfSamples %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputFixedVolumeROI=dict( + argstr='--outputFixedVolumeROI %s', + hash_files=False, + ), + outputMovingVolumeROI=dict( + argstr='--outputMovingVolumeROI %s', + hash_files=False, + ), + outputTransform=dict( + argstr='--outputTransform %s', + hash_files=False, + ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), + permitParameterVariation=dict( + argstr='--permitParameterVariation %s', + sep=',', + ), + projectedGradientTolerance=dict( + argstr='--projectedGradientTolerance %f', ), + promptUser=dict(argstr='--promptUser ', ), + relaxationFactor=dict(argstr='--relaxationFactor %f', ), + removeIntensityOutliers=dict(argstr='--removeIntensityOutliers %f', ), + reproportionScale=dict(argstr='--reproportionScale %f', ), + scaleOutputValues=dict(argstr='--scaleOutputValues ', ), + skewScale=dict(argstr='--skewScale %f', ), + splineGridSize=dict( + argstr='--splineGridSize %s', + sep=',', + ), + strippedOutputTransform=dict( + argstr='--strippedOutputTransform %s', + hash_files=False, + ), + transformType=dict( + argstr='--transformType %s', + sep=',', + ), + translationScale=dict(argstr='--translationScale %f', ), + useAffine=dict(argstr='--useAffine ', ), + useBSpline=dict(argstr='--useBSpline ', ), + useCachingOfBSplineWeightsMode=dict( + argstr='--useCachingOfBSplineWeightsMode %s', ), + useExplicitPDFDerivativesMode=dict( + argstr='--useExplicitPDFDerivativesMode %s', ), + useRigid=dict(argstr='--useRigid ', ), + useScaleSkewVersor3D=dict(argstr='--useScaleSkewVersor3D ', ), + useScaleVersor3D=dict(argstr='--useScaleVersor3D ', ), + writeOutputTransformInFloat=dict( + argstr='--writeOutputTransformInFloat ', ), + writeTransformOnFailure=dict(argstr='--writeTransformOnFailure ', ), + ) + inputs = BRAINSFit.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, 
value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSFit_outputs(): + output_map = dict( + bsplineTransform=dict(), + linearTransform=dict(), + outputFixedVolumeROI=dict(), + outputMovingVolumeROI=dict(), + outputTransform=dict(), + outputVolume=dict(), + strippedOutputTransform=dict(), + ) + outputs = BRAINSFit.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py new file mode 100644 index 0000000000..98ec5f4ff3 --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_BRAINSResample.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..brainsresample import BRAINSResample + + +def test_BRAINSResample_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + defaultValue=dict(argstr='--defaultValue %f', ), + deformationVolume=dict(argstr='--deformationVolume %s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gridSpacing=dict( + argstr='--gridSpacing %s', + sep=',', + ), + inputVolume=dict(argstr='--inputVolume %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + inverseTransform=dict(argstr='--inverseTransform ', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + pixelType=dict(argstr='--pixelType %s', ), + referenceVolume=dict(argstr='--referenceVolume %s', ), + warpTransform=dict(argstr='--warpTransform %s', ), + ) + inputs = BRAINSResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSResample_outputs(): + output_map = dict(outputVolume=dict(), ) + outputs = BRAINSResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py new file mode 100644 index 0000000000..36d42fe8df --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_FiducialRegistration.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import FiducialRegistration + + +def test_FiducialRegistration_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedLandmarks=dict(argstr='--fixedLandmarks %s...', ), + movingLandmarks=dict(argstr='--movingLandmarks %s...', ), + outputMessage=dict(argstr='--outputMessage %s', ), + rms=dict(argstr='--rms %f', ), + saveTransform=dict( + argstr='--saveTransform %s', + hash_files=False, + ), + transformType=dict(argstr='--transformType %s', ), + ) + inputs = FiducialRegistration.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FiducialRegistration_outputs(): + output_map = dict(saveTransform=dict(), ) + outputs = FiducialRegistration.output_spec() + + for 
key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py new file mode 100644 index 0000000000..b3255da1d3 --- /dev/null +++ b/nipype/interfaces/slicer/registration/tests/test_auto_VBRAINSDemonWarp.py @@ -0,0 +1,107 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import VBRAINSDemonWarp + + +def test_VBRAINSDemonWarp_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + arrayOfPyramidLevelIterations=dict( + argstr='--arrayOfPyramidLevelIterations %s', + sep=',', + ), + backgroundFillValue=dict(argstr='--backgroundFillValue %d', ), + checkerboardPatternSubdivisions=dict( + argstr='--checkerboardPatternSubdivisions %s', + sep=',', + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fixedBinaryVolume=dict(argstr='--fixedBinaryVolume %s', ), + fixedVolume=dict(argstr='--fixedVolume %s...', ), + gradient_type=dict(argstr='--gradient_type %s', ), + gui=dict(argstr='--gui ', ), + histogramMatch=dict(argstr='--histogramMatch ', ), + initializeWithDisplacementField=dict( + argstr='--initializeWithDisplacementField %s', ), + initializeWithTransform=dict(argstr='--initializeWithTransform %s', ), + inputPixelType=dict(argstr='--inputPixelType %s', ), + interpolationMode=dict(argstr='--interpolationMode %s', ), + lowerThresholdForBOBF=dict(argstr='--lowerThresholdForBOBF %d', ), + makeBOBF=dict(argstr='--makeBOBF ', ), + max_step_length=dict(argstr='--max_step_length %f', ), + medianFilterSize=dict( + argstr='--medianFilterSize %s', + sep=',', + ), + minimumFixedPyramid=dict( + argstr='--minimumFixedPyramid %s', + sep=',', + ), + minimumMovingPyramid=dict( + argstr='--minimumMovingPyramid %s', + sep=',', + ), + movingBinaryVolume=dict(argstr='--movingBinaryVolume %s', ), + movingVolume=dict(argstr='--movingVolume %s...', ), + neighborhoodForBOBF=dict( + argstr='--neighborhoodForBOBF %s', + sep=',', + ), + numberOfBCHApproximationTerms=dict( + argstr='--numberOfBCHApproximationTerms %d', ), + numberOfHistogramBins=dict(argstr='--numberOfHistogramBins %d', ), + numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ), + numberOfPyramidLevels=dict(argstr='--numberOfPyramidLevels %d', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + outputCheckerboardVolume=dict( + argstr='--outputCheckerboardVolume %s', + hash_files=False, + ), + outputDebug=dict(argstr='--outputDebug ', ), + outputDisplacementFieldPrefix=dict( + argstr='--outputDisplacementFieldPrefix %s', ), + outputDisplacementFieldVolume=dict( + argstr='--outputDisplacementFieldVolume %s', + hash_files=False, + ), + outputNormalized=dict(argstr='--outputNormalized ', ), + outputPixelType=dict(argstr='--outputPixelType %s', ), + outputVolume=dict( + argstr='--outputVolume %s', + hash_files=False, + ), + promptUser=dict(argstr='--promptUser ', ), + registrationFilterType=dict(argstr='--registrationFilterType %s', ), + seedForBOBF=dict( + argstr='--seedForBOBF %s', + sep=',', + ), + smoothDisplacementFieldSigma=dict( + argstr='--smoothDisplacementFieldSigma %f', ), + upFieldSmoothing=dict(argstr='--upFieldSmoothing %f', ), + upperThresholdForBOBF=dict(argstr='--upperThresholdForBOBF %d', ), + use_vanilla_dem=dict(argstr='--use_vanilla_dem ', ), + weightFactors=dict( + argstr='--weightFactors %s', + 
sep=',', + ), + ) + inputs = VBRAINSDemonWarp.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VBRAINSDemonWarp_outputs(): + output_map = dict( + outputCheckerboardVolume=dict(), + outputDisplacementFieldVolume=dict(), + outputVolume=dict(), + ) + outputs = VBRAINSDemonWarp.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/segmentation/__init__.py b/nipype/interfaces/slicer/segmentation/__init__.py new file mode 100644 index 0000000000..d966f07e27 --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .specialized import (RobustStatisticsSegmenter, EMSegmentCommandLine, + BRAINSROIAuto) +from .simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation diff --git a/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py new file mode 100644 index 0000000000..d466ccc1ac --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/simpleregiongrowingsegmentation.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class SimpleRegionGrowingSegmentationInputSpec(CommandLineInputSpec): + smoothingIterations = traits.Int( + desc="Number of smoothing iterations", + argstr="--smoothingIterations %d") + timestep = traits.Float( + desc="Timestep for curvature flow", argstr="--timestep %f") + iterations = traits.Int( + desc="Number of iterations of region growing", + argstr="--iterations %d") + multiplier = traits.Float( + desc="Number of standard deviations to include in intensity model", + argstr="--multiplier %f") + neighborhood = traits.Int( + desc= + "The radius of the neighborhood over which to calculate intensity model", + argstr="--neighborhood %d") + labelvalue = traits.Int( + desc= + "The integer value (0-255) to use for the segmentation results. This will determine the color of the segmentation that will be generated by the Region growing algorithm", + argstr="--labelvalue %d") + seed = InputMultiPath( + traits.List(traits.Float(), minlen=3, maxlen=3), + desc="Seed point(s) for region growing", + argstr="--seed %s...") + inputVolume = File( + position=-2, + desc="Input volume to be filtered", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output filtered", + argstr="%s") + + +class SimpleRegionGrowingSegmentationOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Output filtered", exists=True) + + +class SimpleRegionGrowingSegmentation(SEMLikeCommandLine): + """title: Simple Region Growing Segmentation + +category: Segmentation + +description: A simple region growing segmentation algorithm based on intensity statistics. 
To create a list of fiducials (Seeds) for this algorithm, click on the tool bar icon of an arrow pointing to a starburst fiducial to enter the 'place a new object mode' and then use the fiducials module. This module uses the Slicer Command Line Interface (CLI) and the ITK filters CurvatureFlowImageFilter and ConfidenceConnectedImageFilter.
+
+version: 0.1.0.$Revision: 19904 $(alpha)
+
+documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/SimpleRegionGrowingSegmentation
+
+contributor: Jim Miller (GE)
+
+acknowledgements: This command module was derived from Insight/Examples (copyright) Insight Software Consortium
+
+"""
+
+    input_spec = SimpleRegionGrowingSegmentationInputSpec
+    output_spec = SimpleRegionGrowingSegmentationOutputSpec
+    _cmd = "SimpleRegionGrowingSegmentation "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
diff --git a/nipype/interfaces/slicer/segmentation/specialized.py b/nipype/interfaces/slicer/segmentation/specialized.py
new file mode 100644
index 0000000000..fdfeb74e37
--- /dev/null
+++ b/nipype/interfaces/slicer/segmentation/specialized.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+# -*- coding: utf8 -*-
+"""Autogenerated file - DO NOT EDIT
+If you spot a bug, please report it on the mailing list and/or change the generator."""
+
+from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
+import os
+
+
+class RobustStatisticsSegmenterInputSpec(CommandLineInputSpec):
+    expectedVolume = traits.Float(
+        desc="The approximate volume of the object, in mL.",
+        argstr="--expectedVolume %f")
+    intensityHomogeneity = traits.Float(
+        desc=
+        "What is the homogeneity of intensity within the object? Give constant intensity a score of 1.0 and extremely fluctuating intensity a score of 0.",
+        argstr="--intensityHomogeneity %f")
+    curvatureWeight = traits.Float(
+        desc=
+        "Given a sphere a score of 1.0 and an extremely rough boundary/surface a score of 0, what is the expected smoothness of the object?",
+        argstr="--curvatureWeight %f")
+    labelValue = traits.Int(
+        desc="Label value of the output image", argstr="--labelValue %d")
+    maxRunningTime = traits.Float(
+        desc="The program will stop if this time is reached.",
+        argstr="--maxRunningTime %f")
+    originalImageFileName = File(
+        position=-3,
+        desc="Original image to be segmented",
+        exists=True,
+        argstr="%s")
+    labelImageFileName = File(
+        position=-2,
+        desc="Label image for initialization",
+        exists=True,
+        argstr="%s")
+    segmentedImageFileName = traits.Either(
+        traits.Bool,
+        File(),
+        position=-1,
+        hash_files=False,
+        desc="Segmented image",
+        argstr="%s")
+
+
+class RobustStatisticsSegmenterOutputSpec(TraitedSpec):
+    segmentedImageFileName = File(
+        position=-1, desc="Segmented image", exists=True)
+
+
+class RobustStatisticsSegmenter(SEMLikeCommandLine):
+    """title: Robust Statistics Segmenter
+
+category: Segmentation.Specialized
+
+description: Active contour segmentation using robust statistics.
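+
+example: An illustrative sketch only, not generated metadata; the file names
+and the expectedVolume value are placeholder assumptions.
+
+    >>> from nipype.interfaces.slicer.segmentation.specialized import RobustStatisticsSegmenter
+    >>> seg = RobustStatisticsSegmenter()
+    >>> seg.inputs.originalImageFileName = 'grayscale.nii'  # placeholder image to segment
+    >>> seg.inputs.labelImageFileName = 'init_label.nii'    # placeholder initialization label map
+    >>> seg.inputs.expectedVolume = 30.0                    # approximate object volume in mL (assumed)
+    >>> seg.inputs.segmentedImageFileName = True            # default: segmentedImageFileName.nii
+    >>> seg.run()  # doctest: +SKIP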
+ +version: 1.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/RobustStatisticsSegmenter + +contributor: Yi Gao (gatech), Allen Tannenbaum (gatech), Ron Kikinis (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health + +""" + + input_spec = RobustStatisticsSegmenterInputSpec + output_spec = RobustStatisticsSegmenterOutputSpec + _cmd = "RobustStatisticsSegmenter " + _outputs_filenames = { + 'segmentedImageFileName': 'segmentedImageFileName.nii' + } + + +class EMSegmentCommandLineInputSpec(CommandLineInputSpec): + mrmlSceneFileName = File( + desc="Active MRML scene that contains EMSegment algorithm parameters.", + exists=True, + argstr="--mrmlSceneFileName %s") + resultVolumeFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "The file name that the segmentation result volume will be written to.", + argstr="--resultVolumeFileName %s") + targetVolumeFileNames = InputMultiPath( + File(exists=True), + desc= + "File names of target volumes (to be segmented). The number of target images must be equal to the number of target images specified in the parameter set, and these images must be spatially aligned.", + argstr="--targetVolumeFileNames %s...") + intermediateResultsDirectory = Directory( + desc= + "Directory where EMSegmenter will write intermediate data (e.g., aligned atlas data).", + exists=True, + argstr="--intermediateResultsDirectory %s") + parametersMRMLNodeName = traits.Str( + desc= + "The name of the EMSegment parameters node within the active MRML scene. Leave blank for default.", + argstr="--parametersMRMLNodeName %s") + disableMultithreading = traits.Int( + desc= + "Disable multithreading for the EMSegmenter algorithm only! Preprocessing might still run in multi-threaded mode. -1: Do not overwrite default value. 0: Disable. 1: Enable.", + argstr="--disableMultithreading %d") + dontUpdateIntermediateData = traits.Int( + desc= + "Disable update of intermediate results. -1: Do not overwrite default value. 0: Disable. 1: Enable.", + argstr="--dontUpdateIntermediateData %d") + verbose = traits.Bool(desc="Enable verbose output.", argstr="--verbose ") + loadTargetCentered = traits.Bool( + desc="Read target files centered.", argstr="--loadTargetCentered ") + loadAtlasNonCentered = traits.Bool( + desc="Read atlas files non-centered.", + argstr="--loadAtlasNonCentered ") + taskPreProcessingSetting = traits.Str( + desc="Specifies the different task parameter. Leave blank for default.", + argstr="--taskPreProcessingSetting %s") + keepTempFiles = traits.Bool( + desc= + "If flag is set then at the end of command the temporary files are not removed", + argstr="--keepTempFiles ") + resultStandardVolumeFileName = File( + desc= + "Used for testing. Compare segmentation results to this image and return EXIT_FAILURE if they do not match.", + exists=True, + argstr="--resultStandardVolumeFileName %s") + dontWriteResults = traits.Bool( + desc= + "Used for testing. Don't actually write the resulting labelmap to disk.", + argstr="--dontWriteResults ") + generateEmptyMRMLSceneAndQuit = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Used for testing. 
Only write a scene with default mrml parameters.", + argstr="--generateEmptyMRMLSceneAndQuit %s") + resultMRMLSceneFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Write out the MRML scene after command line substitutions have been made.", + argstr="--resultMRMLSceneFileName %s") + disableCompression = traits.Bool( + desc="Don't use compression when writing result image to disk.", + argstr="--disableCompression ") + atlasVolumeFileNames = InputMultiPath( + File(exists=True), + desc= + "Use an alternative atlas to the one that is specified by the mrml file - note the order matters ! ", + argstr="--atlasVolumeFileNames %s...") + registrationPackage = traits.Str( + desc= + "specify the registration package for preprocessing (CMTK or BRAINS or PLASTIMATCH or DEMONS)", + argstr="--registrationPackage %s") + registrationAffineType = traits.Int( + desc= + "specify the accuracy of the affine registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", + argstr="--registrationAffineType %d") + registrationDeformableType = traits.Int( + desc= + "specify the accuracy of the deformable registration. -2: Do not overwrite default, -1: Test, 0: Disable, 1: Fast, 2: Accurate", + argstr="--registrationDeformableType %d") + + +class EMSegmentCommandLineOutputSpec(TraitedSpec): + resultVolumeFileName = File( + desc= + "The file name that the segmentation result volume will be written to.", + exists=True) + generateEmptyMRMLSceneAndQuit = File( + desc= + "Used for testing. Only write a scene with default mrml parameters.", + exists=True) + resultMRMLSceneFileName = File( + desc= + "Write out the MRML scene after command line substitutions have been made.", + exists=True) + + +class EMSegmentCommandLine(SEMLikeCommandLine): + """title: + EMSegment Command-line + + +category: + Segmentation.Specialized + + +description: + This module is used to simplify the process of segmenting large collections of images by providing a command line interface to the EMSegment algorithm for script and batch processing. + + +documentation-url: http://www.slicer.org/slicerWiki/index.php/Documentation/4.0/EMSegment_Command-line + +contributor: Sebastien Barre, Brad Davis, Kilian Pohl, Polina Golland, Yumin Yuan, Daniel Haehn + +acknowledgements: Many people and organizations have contributed to the funding, design, and development of the EMSegment algorithm and its various implementations. 
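+
+example: An illustrative sketch only; 'parameters.mrml', 't1.nii' and
+'t2.nii' are placeholder assumptions, and the target volumes must match the
+parameter set stored in the MRML scene.
+
+    >>> from nipype.interfaces.slicer.segmentation.specialized import EMSegmentCommandLine
+    >>> em = EMSegmentCommandLine()
+    >>> em.inputs.mrmlSceneFileName = 'parameters.mrml'         # placeholder EMSegment scene
+    >>> em.inputs.targetVolumeFileNames = ['t1.nii', 't2.nii']  # placeholder target volumes
+    >>> em.inputs.resultVolumeFileName = True                   # default: resultVolumeFileName.mhd
+    >>> em.run()  # doctest: +SKIP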
+ + +""" + + input_spec = EMSegmentCommandLineInputSpec + output_spec = EMSegmentCommandLineOutputSpec + _cmd = "EMSegmentCommandLine " + _outputs_filenames = { + 'generateEmptyMRMLSceneAndQuit': 'generateEmptyMRMLSceneAndQuit', + 'resultMRMLSceneFileName': 'resultMRMLSceneFileName', + 'resultVolumeFileName': 'resultVolumeFileName.mhd' + } + + +class BRAINSROIAutoInputSpec(CommandLineInputSpec): + inputVolume = File( + desc="The input image for finding the largest region filled mask.", + exists=True, + argstr="--inputVolume %s") + outputROIMaskVolume = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The ROI automatically found from the input image.", + argstr="--outputROIMaskVolume %s") + outputClippedVolumeROI = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc="The inputVolume clipped to the region of the brain mask.", + argstr="--outputClippedVolumeROI %s") + otsuPercentileThreshold = traits.Float( + desc="Parameter to the Otsu threshold algorithm.", + argstr="--otsuPercentileThreshold %f") + thresholdCorrectionFactor = traits.Float( + desc= + "A factor to scale the Otsu algorithm's result threshold, in case clipping mangles the image.", + argstr="--thresholdCorrectionFactor %f") + closingSize = traits.Float( + desc= + "The Closing Size (in millimeters) for largest connected filled mask. This value is divided by image spacing and rounded to the next largest voxel number.", + argstr="--closingSize %f") + ROIAutoDilateSize = traits.Float( + desc= + "This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + argstr="--ROIAutoDilateSize %f") + outputVolumePixelType = traits.Enum( + "float", + "short", + "ushort", + "int", + "uint", + "uchar", + desc= + "The output image Pixel Type is the scalar datatype for representation of the Output Volume.", + argstr="--outputVolumePixelType %s") + numberOfThreads = traits.Int( + desc="Explicitly specify the maximum number of threads to use.", + argstr="--numberOfThreads %d") + + +class BRAINSROIAutoOutputSpec(TraitedSpec): + outputROIMaskVolume = File( + desc="The ROI automatically found from the input image.", exists=True) + outputClippedVolumeROI = File( + desc="The inputVolume clipped to the region of the brain mask.", + exists=True) + + +class BRAINSROIAuto(SEMLikeCommandLine): + """title: Foreground masking (BRAINS) + +category: Segmentation.Specialized + +description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominant foreground region in an image. + + +version: 2.4.1 + +license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt + +contributor: Hans J. 
Johnson, hans-johnson -at- uiowa.edu, http://wwww.psychiatry.uiowa.edu + +acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); Gregory Harris(1), Vincent Magnotta(1,2,3); Andriy Fedorov(5), fedorov -at- bwh.harvard.edu (Slicer integration); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering, 5=Surgical Planning Lab, Harvard) + +""" + + input_spec = BRAINSROIAutoInputSpec + output_spec = BRAINSROIAutoOutputSpec + _cmd = "BRAINSROIAuto " + _outputs_filenames = { + 'outputROIMaskVolume': 'outputROIMaskVolume.nii', + 'outputClippedVolumeROI': 'outputClippedVolumeROI.nii' + } diff --git a/nipype/interfaces/slicer/segmentation/tests/__init__.py b/nipype/interfaces/slicer/segmentation/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py new file mode 100644 index 0000000000..89863fb730 --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_BRAINSROIAuto.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import BRAINSROIAuto + + +def test_BRAINSROIAuto_inputs(): + input_map = dict( + ROIAutoDilateSize=dict(argstr='--ROIAutoDilateSize %f', ), + args=dict(argstr='%s', ), + closingSize=dict(argstr='--closingSize %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict(argstr='--inputVolume %s', ), + numberOfThreads=dict(argstr='--numberOfThreads %d', ), + otsuPercentileThreshold=dict(argstr='--otsuPercentileThreshold %f', ), + outputClippedVolumeROI=dict( + argstr='--outputClippedVolumeROI %s', + hash_files=False, + ), + outputROIMaskVolume=dict( + argstr='--outputROIMaskVolume %s', + hash_files=False, + ), + outputVolumePixelType=dict(argstr='--outputVolumePixelType %s', ), + thresholdCorrectionFactor=dict( + argstr='--thresholdCorrectionFactor %f', ), + ) + inputs = BRAINSROIAuto.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_BRAINSROIAuto_outputs(): + output_map = dict( + outputClippedVolumeROI=dict(), + outputROIMaskVolume=dict(), + ) + outputs = BRAINSROIAuto.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py new file mode 100644 index 0000000000..09b0b1300f --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_EMSegmentCommandLine.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import EMSegmentCommandLine + + +def test_EMSegmentCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + atlasVolumeFileNames=dict(argstr='--atlasVolumeFileNames %s...', ), + disableCompression=dict(argstr='--disableCompression ', ), + disableMultithreading=dict(argstr='--disableMultithreading %d', ), 
+ dontUpdateIntermediateData=dict( + argstr='--dontUpdateIntermediateData %d', ), + dontWriteResults=dict(argstr='--dontWriteResults ', ), + environ=dict( + nohash=True, + usedefault=True, + ), + generateEmptyMRMLSceneAndQuit=dict( + argstr='--generateEmptyMRMLSceneAndQuit %s', + hash_files=False, + ), + intermediateResultsDirectory=dict( + argstr='--intermediateResultsDirectory %s', ), + keepTempFiles=dict(argstr='--keepTempFiles ', ), + loadAtlasNonCentered=dict(argstr='--loadAtlasNonCentered ', ), + loadTargetCentered=dict(argstr='--loadTargetCentered ', ), + mrmlSceneFileName=dict(argstr='--mrmlSceneFileName %s', ), + parametersMRMLNodeName=dict(argstr='--parametersMRMLNodeName %s', ), + registrationAffineType=dict(argstr='--registrationAffineType %d', ), + registrationDeformableType=dict( + argstr='--registrationDeformableType %d', ), + registrationPackage=dict(argstr='--registrationPackage %s', ), + resultMRMLSceneFileName=dict( + argstr='--resultMRMLSceneFileName %s', + hash_files=False, + ), + resultStandardVolumeFileName=dict( + argstr='--resultStandardVolumeFileName %s', ), + resultVolumeFileName=dict( + argstr='--resultVolumeFileName %s', + hash_files=False, + ), + targetVolumeFileNames=dict(argstr='--targetVolumeFileNames %s...', ), + taskPreProcessingSetting=dict( + argstr='--taskPreProcessingSetting %s', ), + verbose=dict(argstr='--verbose ', ), + ) + inputs = EMSegmentCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EMSegmentCommandLine_outputs(): + output_map = dict( + generateEmptyMRMLSceneAndQuit=dict(), + resultMRMLSceneFileName=dict(), + resultVolumeFileName=dict(), + ) + outputs = EMSegmentCommandLine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py new file mode 100644 index 0000000000..ed46177df0 --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_RobustStatisticsSegmenter.py @@ -0,0 +1,43 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..specialized import RobustStatisticsSegmenter + + +def test_RobustStatisticsSegmenter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + curvatureWeight=dict(argstr='--curvatureWeight %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + expectedVolume=dict(argstr='--expectedVolume %f', ), + intensityHomogeneity=dict(argstr='--intensityHomogeneity %f', ), + labelImageFileName=dict( + argstr='%s', + position=-2, + ), + labelValue=dict(argstr='--labelValue %d', ), + maxRunningTime=dict(argstr='--maxRunningTime %f', ), + originalImageFileName=dict( + argstr='%s', + position=-3, + ), + segmentedImageFileName=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = RobustStatisticsSegmenter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_RobustStatisticsSegmenter_outputs(): + output_map = dict(segmentedImageFileName=dict(position=-1, ), ) + outputs = RobustStatisticsSegmenter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value 
in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py new file mode 100644 index 0000000000..3c5e2124d0 --- /dev/null +++ b/nipype/interfaces/slicer/segmentation/tests/test_auto_SimpleRegionGrowingSegmentation.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..simpleregiongrowingsegmentation import SimpleRegionGrowingSegmentation + + +def test_SimpleRegionGrowingSegmentation_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + iterations=dict(argstr='--iterations %d', ), + labelvalue=dict(argstr='--labelvalue %d', ), + multiplier=dict(argstr='--multiplier %f', ), + neighborhood=dict(argstr='--neighborhood %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + seed=dict(argstr='--seed %s...', ), + smoothingIterations=dict(argstr='--smoothingIterations %d', ), + timestep=dict(argstr='--timestep %f', ), + ) + inputs = SimpleRegionGrowingSegmentation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SimpleRegionGrowingSegmentation_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = SimpleRegionGrowingSegmentation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/surface.py b/nipype/interfaces/slicer/surface.py new file mode 100644 index 0000000000..6a1dfe2cc0 --- /dev/null +++ b/nipype/interfaces/slicer/surface.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class MergeModelsInputSpec(CommandLineInputSpec): + Model1 = File(position=-3, desc="Model", exists=True, argstr="%s") + Model2 = File(position=-2, desc="Model", exists=True, argstr="%s") + ModelOutput = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Model", + argstr="%s") + + +class MergeModelsOutputSpec(TraitedSpec): + ModelOutput = File(position=-1, desc="Model", exists=True) + + +class MergeModels(SEMLikeCommandLine): + """title: Merge Models + +category: Surface Models + +description: Merge the polydata from two input models and output a new model with the added polydata. Uses the vtkAppendPolyData filter. Works on .vtp and .vtk surface files. + +version: $Revision$ + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/MergeModels + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Daniel Haehn (SPL, BWH) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
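+
+example: An illustrative sketch only; 'lh.vtk' and 'rh.vtk' are placeholder
+assumptions for two existing surface models.
+
+    >>> from nipype.interfaces.slicer.surface import MergeModels
+    >>> merge = MergeModels()
+    >>> merge.inputs.Model1 = 'lh.vtk'   # placeholder input model
+    >>> merge.inputs.Model2 = 'rh.vtk'   # placeholder input model
+    >>> merge.inputs.ModelOutput = True  # default: ModelOutput.vtk
+    >>> merge.run()  # doctest: +SKIP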
+ +""" + + input_spec = MergeModelsInputSpec + output_spec = MergeModelsOutputSpec + _cmd = "MergeModels " + _outputs_filenames = {'ModelOutput': 'ModelOutput.vtk'} + + +class ModelToLabelMapInputSpec(CommandLineInputSpec): + distance = traits.Float(desc="Sample distance", argstr="--distance %f") + InputVolume = File( + position=-3, desc="Input volume", exists=True, argstr="%s") + surface = File(position=-2, desc="Model", exists=True, argstr="%s") + OutputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="The label volume", + argstr="%s") + + +class ModelToLabelMapOutputSpec(TraitedSpec): + OutputVolume = File(position=-1, desc="The label volume", exists=True) + + +class ModelToLabelMap(SEMLikeCommandLine): + """title: Model To Label Map + +category: Surface Models + +description: Intersects an input model with an reference volume and produces an output label map. + +version: 0.1.0.$Revision: 8643 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/PolyDataToLabelMap + +contributor: Nicole Aucoin (SPL, BWH), Xiaodong Tao (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = ModelToLabelMapInputSpec + output_spec = ModelToLabelMapOutputSpec + _cmd = "ModelToLabelMap " + _outputs_filenames = {'OutputVolume': 'OutputVolume.nii'} + + +class GrayscaleModelMakerInputSpec(CommandLineInputSpec): + InputVolume = File( + position=-2, + desc="Volume containing the input grayscale data.", + exists=True, + argstr="%s") + OutputGeometry = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output that contains geometry model.", + argstr="%s") + threshold = traits.Float( + desc= + "Grayscale threshold of isosurface. The resulting surface of triangles separates the volume into voxels that lie above (inside) and below (outside) the threshold.", + argstr="--threshold %f") + name = traits.Str(desc="Name to use for this model.", argstr="--name %s") + smooth = traits.Int( + desc="Number of smoothing iterations. If 0, no smoothing will be done.", + argstr="--smooth %d") + decimate = traits.Float( + desc= + "Target reduction during decimation, as a decimal percentage reduction in the number of polygons. If 0, no decimation will be done.", + argstr="--decimate %f") + splitnormals = traits.Bool( + desc= + "Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affect measurements", + argstr="--splitnormals ") + pointnormals = traits.Bool( + desc= + "Calculate the point normals? Calculated point normals make the surface appear smooth. Without point normals, the surface will appear faceted.", + argstr="--pointnormals ") + + +class GrayscaleModelMakerOutputSpec(TraitedSpec): + OutputGeometry = File( + position=-1, desc="Output that contains geometry model.", exists=True) + + +class GrayscaleModelMaker(SEMLikeCommandLine): + """title: Grayscale Model Maker + +category: Surface Models + +description: Create 3D surface models from grayscale data. This module uses Marching Cubes to create an isosurface at a given threshold. The resulting surface consists of triangles that separate a volume into regions below and above the threshold. The resulting surface can be smoothed and decimated. 
This model works on continuous data while the module Model Maker works on labeled (or discrete) data. + +version: 3.0 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/GrayscaleModelMaker + +license: slicer3 + +contributor: Nicole Aucoin (SPL, BWH), Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = GrayscaleModelMakerInputSpec + output_spec = GrayscaleModelMakerOutputSpec + _cmd = "GrayscaleModelMaker " + _outputs_filenames = {'OutputGeometry': 'OutputGeometry.vtk'} + + +class ProbeVolumeWithModelInputSpec(CommandLineInputSpec): + InputVolume = File( + position=-3, + desc="Volume to use to 'paint' the model", + exists=True, + argstr="%s") + InputModel = File( + position=-2, desc="Input model", exists=True, argstr="%s") + OutputModel = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Output 'painted' model", + argstr="%s") + + +class ProbeVolumeWithModelOutputSpec(TraitedSpec): + OutputModel = File(position=-1, desc="Output 'painted' model", exists=True) + + +class ProbeVolumeWithModel(SEMLikeCommandLine): + """title: Probe Volume With Model + +category: Surface Models + +description: Paint a model by a volume (using vtkProbeFilter). + +version: 0.1.0.$Revision: 1892 $(alpha) + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ProbeVolumeWithModel + +contributor: Lauren O'Donnell (SPL, BWH) + +acknowledgements: BWH, NCIGT/LMI + +""" + + input_spec = ProbeVolumeWithModelInputSpec + output_spec = ProbeVolumeWithModelOutputSpec + _cmd = "ProbeVolumeWithModel " + _outputs_filenames = {'OutputModel': 'OutputModel.vtk'} + + +class LabelMapSmoothingInputSpec(CommandLineInputSpec): + labelToSmooth = traits.Int( + desc= + "The label to smooth. All others will be ignored. If no label is selected by the user, the maximum label in the image is chosen by default.", + argstr="--labelToSmooth %d") + numberOfIterations = traits.Int( + desc="The number of iterations of the level set AntiAliasing algorithm", + argstr="--numberOfIterations %d") + maxRMSError = traits.Float( + desc="The maximum RMS error.", argstr="--maxRMSError %f") + gaussianSigma = traits.Float( + desc="The standard deviation of the Gaussian kernel", + argstr="--gaussianSigma %f") + inputVolume = File( + position=-2, + desc="Input label map to smooth", + exists=True, + argstr="%s") + outputVolume = traits.Either( + traits.Bool, + File(), + position=-1, + hash_files=False, + desc="Smoothed label map", + argstr="%s") + + +class LabelMapSmoothingOutputSpec(TraitedSpec): + outputVolume = File(position=-1, desc="Smoothed label map", exists=True) + + +class LabelMapSmoothing(SEMLikeCommandLine): + """title: Label Map Smoothing + +category: Surface Models + +description: This filter smoothes a binary label map. With a label map as input, this filter runs an anti-alising algorithm followed by a Gaussian smoothing algorithm. The output is a smoothed label map. 
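+
+example: An illustrative sketch only; the file name and parameter values
+are placeholder assumptions.
+
+    >>> from nipype.interfaces.slicer.surface import LabelMapSmoothing
+    >>> smooth = LabelMapSmoothing()
+    >>> smooth.inputs.inputVolume = 'label_map.nii'  # placeholder binary label map
+    >>> smooth.inputs.labelToSmooth = 1              # assumed label value
+    >>> smooth.inputs.gaussianSigma = 2.0            # assumed kernel width
+    >>> smooth.inputs.outputVolume = True            # default: outputVolume.nii
+    >>> smooth.run()  # doctest: +SKIP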
+
+version: 1.0
+
+documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/LabelMapSmoothing
+
+contributor: Dirk Padfield (GE), Josh Cates (Utah), Ross Whitaker (Utah)
+
+acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. This filter is based on work developed at the University of Utah, and implemented at GE Research.
+
+"""
+
+    input_spec = LabelMapSmoothingInputSpec
+    output_spec = LabelMapSmoothingOutputSpec
+    _cmd = "LabelMapSmoothing "
+    _outputs_filenames = {'outputVolume': 'outputVolume.nii'}
+
+
+class ModelMakerInputSpec(CommandLineInputSpec):
+    InputVolume = File(
+        position=-1,
+        desc=
+        "Input label map. The Input Volume drop down menu is populated with the label map volumes that are present in the scene; select one from which to generate models.",
+        exists=True,
+        argstr="%s")
+    color = File(
+        desc="Color table to make labels to colors and objects",
+        exists=True,
+        argstr="--color %s")
+    modelSceneFile = traits.Either(
+        traits.Bool,
+        InputMultiPath(File(), ),
+        hash_files=False,
+        desc=
+        "Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you.",
+        argstr="--modelSceneFile %s...")
+    name = traits.Str(
+        desc=
+        "Name to use for this model. Any text entered in the entry box will be the starting string for the created model file names. The label number and the color name will also be part of the file name. If making multiple models, use this as a prefix to the label and color name.",
+        argstr="--name %s")
+    generateAll = traits.Bool(
+        desc=
+        "Generate models for all labels in the input volume. Select this option if you want to create all models that correspond to all values in a labelmap volume (using the Joint Smoothing option below is useful with this option). Ignores Labels, Start Label, End Label settings. Skips label 0.",
+        argstr="--generateAll ")
+    labels = InputMultiPath(
+        traits.Int,
+        desc=
+        "A comma separated list of label values from which to make models. If you specify a list of Labels, it will override any start/end label settings. If you click Generate All Models it will override the list of labels and any start/end label settings.",
+        sep=",",
+        argstr="--labels %s")
+    start = traits.Int(
+        desc=
+        "If you want to specify a continuous range of labels from which to generate models, enter the lower label here. Voxel value from which to start making models. Used instead of the label list to specify a range (make sure the label list is empty or it will override this).",
+        argstr="--start %d")
+    end = traits.Int(
+        desc=
+        "If you want to specify a continuous range of labels from which to generate models, enter the higher label here. Voxel value up to which to continue making models. Skip any values with zero voxels.",
+        argstr="--end %d")
+    skipUnNamed = traits.Bool(
+        desc=
+        "Select this to not generate models from labels that do not have names defined in the color look up table associated with the input label map. If true, only models which have an entry in the color table will be generated. If false, generate all models that exist within the label range.",
+        argstr="--skipUnNamed ")
+    jointsmooth = traits.Bool(
+        desc=
+        "This will ensure that all resulting models fit together smoothly, like jigsaw puzzle pieces. Otherwise the models will be smoothed independently and may overlap.",
+        argstr="--jointsmooth ")
+    smooth = traits.Int(
+        desc=
+        "Here you can set the number of smoothing iterations for Laplacian smoothing, or the degree of the polynomial approximating the windowed Sinc function. Use 0 if you wish no smoothing. ",
+        argstr="--smooth %d")
+    filtertype = traits.Enum(
+        "Sinc",
+        "Laplacian",
+        desc=
+        "You can control the type of smoothing done on the models by selecting a filter type of either Sinc or Laplacian.",
+        argstr="--filtertype %s")
+    decimate = traits.Float(
+        desc=
+        "Choose the target reduction in number of polygons as a decimal percentage (between 0 and 1) of the number of polygons. Specifies the percentage of triangles to be removed. For example, 0.1 means 10% reduction and 0.9 means 90% reduction.",
+        argstr="--decimate %f")
+    splitnormals = traits.Bool(
+        desc=
+        "Splitting normals is useful for visualizing sharp features. However it creates holes in surfaces which affects measurements.",
+        argstr="--splitnormals ")
+    pointnormals = traits.Bool(
+        desc=
+        "Turn this flag on if you wish to calculate the normal vectors for the points.",
+        argstr="--pointnormals ")
+    pad = traits.Bool(
+        desc=
+        "Pad the input volume with zero value voxels on all 6 faces in order to ensure the production of closed surfaces. Sets the origin translation and extent translation so that the models still line up with the unpadded input volume.",
+        argstr="--pad ")
+    saveIntermediateModels = traits.Bool(
+        desc=
+        "You can save a copy of the models after each of the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation). These intermediate models are not saved in the mrml file; you have to load them manually after turning off deleting temporary files in the Python console (View -> Python Interactor) using the following command slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff().",
+        argstr="--saveIntermediateModels ")
+    debug = traits.Bool(
+        desc=
+        "Turn this flag on in order to see debugging output (look in the Error Log window that is accessed via the View menu)",
+        argstr="--debug ")
+
+
+class ModelMakerOutputSpec(TraitedSpec):
+    modelSceneFile = OutputMultiPath(
+        File(exists=True),
+        desc=
+        "Generated models, under a model hierarchy node. Models are imported into Slicer under a model hierarchy node, and their colors are set by the color table associated with the input label map volume. The model hierarchy node must be created before running the model maker, by selecting Create New ModelHierarchy from the Models drop down menu. If you're running from the command line, a model hierarchy node in a new mrml scene will be created for you."
+    )
+
+
+class ModelMaker(SEMLikeCommandLine):
+    """title: Model Maker
+
+category: Surface Models
+
+description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in an MRML scene. The model colors are set by the color table associated with the input volume (these colors will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will override any start/end label settings.

If you click Generate All, it will override the list of labels and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations and the target reduction in the number of polygons (as a decimal percentage). Use 0 and 1, respectively, if you want no smoothing and no decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after the intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file. To keep them, first turn off deleting temporary files in the python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

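Example (a minimal, hedged sketch of driving this module through nipype; 'label_map.nii' is a hypothetical file, so the lines that touch the filesystem or the Slicer CLI are skipped):

>>> from nipype.interfaces.slicer.surface import ModelMaker
>>> maker = ModelMaker()
>>> maker.inputs.InputVolume = 'label_map.nii'  # hypothetical label map  # doctest: +SKIP
>>> maker.inputs.generateAll = True
>>> maker.inputs.smooth = 15
>>> maker.run()  # doctest: +SKIP
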
+ +version: 4.1 + +documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ModelMaker + +license: slicer4 + +contributor: Nicole Aucoin (SPL, BWH), Ron Kikinis (SPL, BWH), Bill Lorensen (GE) + +acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. + +""" + + input_spec = ModelMakerInputSpec + output_spec = ModelMakerOutputSpec + _cmd = "ModelMaker " + _outputs_filenames = {'modelSceneFile': 'modelSceneFile.mrml'} diff --git a/nipype/interfaces/slicer/tests/__init__.py b/nipype/interfaces/slicer/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/slicer/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py new file mode 100644 index 0000000000..2997e805f9 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_DicomToNrrdConverter.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..converters import DicomToNrrdConverter + + +def test_DicomToNrrdConverter_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputDicomDirectory=dict(argstr='--inputDicomDirectory %s', ), + outputDirectory=dict( + argstr='--outputDirectory %s', + hash_files=False, + ), + outputVolume=dict(argstr='--outputVolume %s', ), + smallGradientThreshold=dict(argstr='--smallGradientThreshold %f', ), + useBMatrixGradientDirections=dict( + argstr='--useBMatrixGradientDirections ', ), + useIdentityMeaseurementFrame=dict( + argstr='--useIdentityMeaseurementFrame ', ), + writeProtocolGradientsFile=dict( + argstr='--writeProtocolGradientsFile ', ), + ) + inputs = DicomToNrrdConverter.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DicomToNrrdConverter_outputs(): + output_map = dict(outputDirectory=dict(), ) + outputs = DicomToNrrdConverter.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py new file mode 100644 index 0000000000..279d68e0ab --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_EMSegmentTransformToNewFormat.py @@ -0,0 +1,31 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utilities import EMSegmentTransformToNewFormat + + +def test_EMSegmentTransformToNewFormat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputMRMLFileName=dict(argstr='--inputMRMLFileName %s', ), + outputMRMLFileName=dict( + argstr='--outputMRMLFileName %s', + hash_files=False, + ), + templateFlag=dict(argstr='--templateFlag ', ), + ) + inputs = EMSegmentTransformToNewFormat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EMSegmentTransformToNewFormat_outputs(): + 
output_map = dict(outputMRMLFileName=dict(), ) + outputs = EMSegmentTransformToNewFormat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py new file mode 100644 index 0000000000..7ad8cac8e9 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_GrayscaleModelMaker.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import GrayscaleModelMaker + + +def test_GrayscaleModelMaker_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-2, + ), + OutputGeometry=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + decimate=dict(argstr='--decimate %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + name=dict(argstr='--name %s', ), + pointnormals=dict(argstr='--pointnormals ', ), + smooth=dict(argstr='--smooth %d', ), + splitnormals=dict(argstr='--splitnormals ', ), + threshold=dict(argstr='--threshold %f', ), + ) + inputs = GrayscaleModelMaker.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GrayscaleModelMaker_outputs(): + output_map = dict(OutputGeometry=dict(position=-1, ), ) + outputs = GrayscaleModelMaker.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py new file mode 100644 index 0000000000..bb3780495b --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_LabelMapSmoothing.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import LabelMapSmoothing + + +def test_LabelMapSmoothing_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + gaussianSigma=dict(argstr='--gaussianSigma %f', ), + inputVolume=dict( + argstr='%s', + position=-2, + ), + labelToSmooth=dict(argstr='--labelToSmooth %d', ), + maxRMSError=dict(argstr='--maxRMSError %f', ), + numberOfIterations=dict(argstr='--numberOfIterations %d', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = LabelMapSmoothing.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LabelMapSmoothing_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = LabelMapSmoothing.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_MergeModels.py b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py new file mode 100644 index 0000000000..6453957a79 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_MergeModels.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import MergeModels 
+ + +def test_MergeModels_inputs(): + input_map = dict( + Model1=dict( + argstr='%s', + position=-3, + ), + Model2=dict( + argstr='%s', + position=-2, + ), + ModelOutput=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = MergeModels.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MergeModels_outputs(): + output_map = dict(ModelOutput=dict(position=-1, ), ) + outputs = MergeModels.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py new file mode 100644 index 0000000000..ed182137cf --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_ModelMaker.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import ModelMaker + + +def test_ModelMaker_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-1, + ), + args=dict(argstr='%s', ), + color=dict(argstr='--color %s', ), + debug=dict(argstr='--debug ', ), + decimate=dict(argstr='--decimate %f', ), + end=dict(argstr='--end %d', ), + environ=dict( + nohash=True, + usedefault=True, + ), + filtertype=dict(argstr='--filtertype %s', ), + generateAll=dict(argstr='--generateAll ', ), + jointsmooth=dict(argstr='--jointsmooth ', ), + labels=dict( + argstr='--labels %s', + sep=',', + ), + modelSceneFile=dict( + argstr='--modelSceneFile %s...', + hash_files=False, + ), + name=dict(argstr='--name %s', ), + pad=dict(argstr='--pad ', ), + pointnormals=dict(argstr='--pointnormals ', ), + saveIntermediateModels=dict(argstr='--saveIntermediateModels ', ), + skipUnNamed=dict(argstr='--skipUnNamed ', ), + smooth=dict(argstr='--smooth %d', ), + splitnormals=dict(argstr='--splitnormals ', ), + start=dict(argstr='--start %d', ), + ) + inputs = ModelMaker.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ModelMaker_outputs(): + output_map = dict(modelSceneFile=dict(), ) + outputs = ModelMaker.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py new file mode 100644 index 0000000000..efd11f1040 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_ModelToLabelMap.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import ModelToLabelMap + + +def test_ModelToLabelMap_inputs(): + input_map = dict( + InputVolume=dict( + argstr='%s', + position=-3, + ), + OutputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + distance=dict(argstr='--distance %f', ), + environ=dict( + nohash=True, + usedefault=True, + ), + surface=dict( + argstr='%s', + position=-2, + ), + ) + inputs = ModelToLabelMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ModelToLabelMap_outputs(): + output_map = dict(OutputVolume=dict(position=-1, ), ) + outputs = ModelToLabelMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py new file mode 100644 index 0000000000..f3d1908cd0 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_OrientScalarVolume.py @@ -0,0 +1,35 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..converters import OrientScalarVolume + + +def test_OrientScalarVolume_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + inputVolume1=dict( + argstr='%s', + position=-2, + ), + orientation=dict(argstr='--orientation %s', ), + outputVolume=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + ) + inputs = OrientScalarVolume.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OrientScalarVolume_outputs(): + output_map = dict(outputVolume=dict(position=-1, ), ) + outputs = OrientScalarVolume.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py new file mode 100644 index 0000000000..32a2fc2139 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_ProbeVolumeWithModel.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..surface import ProbeVolumeWithModel + + +def test_ProbeVolumeWithModel_inputs(): + input_map = dict( + InputModel=dict( + argstr='%s', + position=-2, + ), + InputVolume=dict( + argstr='%s', + position=-3, + ), + OutputModel=dict( + argstr='%s', + hash_files=False, + position=-1, + ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = ProbeVolumeWithModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ProbeVolumeWithModel_outputs(): + output_map = dict(OutputModel=dict(position=-1, ), ) + outputs = ProbeVolumeWithModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py new file mode 100644 index 0000000000..b0e1e2c3b0 --- /dev/null +++ b/nipype/interfaces/slicer/tests/test_auto_SlicerCommandLine.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import SlicerCommandLine + + +def test_SlicerCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = SlicerCommandLine.input_spec() + + for key, metadata in 
list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/slicer/utilities.py b/nipype/interfaces/slicer/utilities.py new file mode 100644 index 0000000000..5faf640570 --- /dev/null +++ b/nipype/interfaces/slicer/utilities.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# -*- coding: utf8 -*- +"""Autogenerated file - DO NOT EDIT +If you spot a bug, please report it on the mailing list and/or change the generator.""" + +from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath +import os + + +class EMSegmentTransformToNewFormatInputSpec(CommandLineInputSpec): + inputMRMLFileName = File( + desc= + "Active MRML scene that contains EMSegment algorithm parameters in the format before 3.6.3 - please include absolute file name in path.", + exists=True, + argstr="--inputMRMLFileName %s") + outputMRMLFileName = traits.Either( + traits.Bool, + File(), + hash_files=False, + desc= + "Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", + argstr="--outputMRMLFileName %s") + templateFlag = traits.Bool( + desc= + "Set to true if the transformed mrml file should be used as template file ", + argstr="--templateFlag ") + + +class EMSegmentTransformToNewFormatOutputSpec(TraitedSpec): + outputMRMLFileName = File( + desc= + "Write out the MRML scene after transformation to format 3.6.3 has been made. - has to be in the same directory as the input MRML file due to Slicer Core bug - please include absolute file name in path ", + exists=True) + + +class EMSegmentTransformToNewFormat(SEMLikeCommandLine): + """title: + Transform MRML Files to New EMSegmenter Standard + + +category: + Utilities + + +description: + Transform MRML Files to New EMSegmenter Standard + + +""" + + input_spec = EMSegmentTransformToNewFormatInputSpec + output_spec = EMSegmentTransformToNewFormatOutputSpec + _cmd = "EMSegmentTransformToNewFormat " + _outputs_filenames = {'outputMRMLFileName': 'outputMRMLFileName.mrml'} diff --git a/nipype/interfaces/spm/__init__.py b/nipype/interfaces/spm/__init__.py new file mode 100644 index 0000000000..c4120db124 --- /dev/null +++ b/nipype/interfaces/spm/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Top-level namespace for spm.""" + +from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname, + scans_for_fnames) +from .preprocess import (FieldMap, SliceTiming, Realign, Coregister, Normalize, + Normalize12, Segment, Smooth, NewSegment, DARTEL, + DARTELNorm2MNI, CreateWarped, VBMSegment) +from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold, + OneSampleTTestDesign, TwoSampleTTestDesign, + PairedTTestDesign, MultipleRegressionDesign) +from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice, + ApplyInverseDeformation, ResliceToReference, DicomImport) diff --git a/nipype/interfaces/spm/base.py b/nipype/interfaces/spm/base.py new file mode 100644 index 0000000000..214a6e7a2f --- /dev/null +++ b/nipype/interfaces/spm/base.py @@ -0,0 +1,626 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 
et: +"""The spm module provides basic functions for interfacing with SPM tools. + +In order to use the standalone MCR version of spm, you need to ensure that +the following commands are executed at the beginning of your script:: + + from nipype.interfaces import spm + matlab_cmd = '/path/to/run_spm8.sh /path/to/Compiler_Runtime/v713/ script' + spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True) + +you can test by calling:: + + spm.SPMCommand().version +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, object, str, bytes + +# Standard library imports +import os +from copy import deepcopy + +# Third-party imports +from nibabel import load +import numpy as np +from scipy.io import savemat + +# Local imports +from ... import logging +from ...utils import spm_docs as sd, NUMPY_MMAP +from ..base import (BaseInterface, traits, isdefined, InputMultiPath, + BaseInterfaceInputSpec, Directory, Undefined, ImageFile, + PackageInfo) +from ..matlab import MatlabCommand +from ...external.due import due, Doi, BibTeX + +__docformat__ = 'restructuredtext' +logger = logging.getLogger('nipype.interface') + + +def func_is_3d(in_file): + """Checks if input functional files are 3d.""" + + if isinstance(in_file, list): + return func_is_3d(in_file[0]) + else: + img = load(in_file, mmap=NUMPY_MMAP) + shape = img.shape + if len(shape) == 3 or (len(shape) == 4 and shape[3] == 1): + return True + else: + return False + + +def get_first_3dfile(in_files): + if not func_is_3d(in_files): + return None + if isinstance(in_files[0], list): + return in_files[0] + return in_files + + +def scans_for_fname(fname): + """Reads a nifti file and converts it to a numpy array storing + individual nifti volumes. + + Opens images so will fail if they are not found. + + """ + if isinstance(fname, list): + scans = np.zeros((len(fname), ), dtype=object) + for sno, f in enumerate(fname): + scans[sno] = '%s,1' % f + return scans + img = load(fname, mmap=NUMPY_MMAP) + if len(img.shape) == 3: + return np.array(('%s,1' % fname, ), dtype=object) + else: + n_scans = img.shape[3] + scans = np.zeros((n_scans, ), dtype=object) + for sno in range(n_scans): + scans[sno] = '%s,%d' % (fname, sno + 1) + return scans + + +def scans_for_fnames(fnames, keep4d=False, separate_sessions=False): + """Converts a list of files to a concatenated numpy array for each + volume. + + keep4d : boolean + keeps the entries of the numpy array as 4d files instead of + extracting the individual volumes. + separate_sessions: boolean + if 4d nifti files are being used, then separate_sessions + ensures a cell array per session is created in the structure. 
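Example (a sketch; 'f.nii' is a hypothetical 4D NIfTI file on disk, so the call is skipped in doctests):

>>> scans_for_fnames(['f.nii'], keep4d=True)  # doctest: +SKIP
array(['f.nii'], dtype=object)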
+ + """ + flist = None + if not isinstance(fnames[0], list): + if func_is_3d(fnames[0]): + fnames = [fnames] + if separate_sessions or keep4d: + flist = np.zeros((len(fnames), ), dtype=object) + for i, f in enumerate(fnames): + if separate_sessions: + if keep4d: + if isinstance(f, list): + flist[i] = np.array(f, dtype=object) + else: + flist[i] = np.array([f], dtype=object) + else: + flist[i] = scans_for_fname(f) + else: + if keep4d: + flist[i] = f + else: + scans = scans_for_fname(f) + if flist is None: + flist = scans + else: + flist = np.concatenate((flist, scans)) + return flist + + +class Info(PackageInfo): + """Handles SPM version information + + If you use `SPMCommand.set_mlab_paths` to set alternate entries for + matlab_cmd, paths, and use_mcr, then you will need to use the same entries + to any call in the Info class to maintain memoization. Otherwise, it will + default to the parameters in the `getinfo` function below. + """ + _path = None + _name = None + _command = None + _paths = None + _version = None + + @classmethod + def path(klass, matlab_cmd=None, paths=None, use_mcr=None): + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._path + + @classmethod + def version(klass, matlab_cmd=None, paths=None, use_mcr=None): + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._version + + @classmethod + def name(klass, matlab_cmd=None, paths=None, use_mcr=None): + klass.getinfo(matlab_cmd, paths, use_mcr) + return klass._name + + @classmethod + def getinfo(klass, matlab_cmd=None, paths=None, use_mcr=None): + """ + Returns the path to the SPM directory in the Matlab path + If path not found, returns None. + + Parameters + ---------- + matlab_cmd: str + Sets the default matlab command. If None, the value of the + environment variable SPMMCRCMD will be used if set and use_mcr + is True or the environment variable FORCE_SPMMCR is set. + If one of FORCE_SPMMCR or SPMMCRCMD is not set, the existence + of the environment variable MATLABCMD is checked and its value + is used as the matlab command if possible. + If none of the above was successful, the fallback value of + 'matlab -nodesktop -nosplash' will be used. + paths : str + Add paths to matlab session + use_mcr : bool + Whether to use the MATLAB Common Runtime. In this case, the + matlab_cmd is expected to be a valid MCR call. + + Returns + ------- + spm_path : string representing path to SPM directory + + returns None of path not found + """ + + use_mcr = use_mcr or 'FORCE_SPMMCR' in os.environ + matlab_cmd = matlab_cmd or ((use_mcr and os.getenv('SPMMCRCMD')) + or os.getenv('MATLABCMD', 'matlab -nodesktop -nosplash')) + + if klass._name and klass._path and klass._version and \ + klass._command == matlab_cmd and klass._paths == paths: + + return { + 'name': klass._name, + 'path': klass._path, + 'release': klass._version + } + logger.debug('matlab command or path has changed. 
recomputing version.') + mlab = MatlabCommand(matlab_cmd=matlab_cmd, resource_monitor=False) + mlab.inputs.mfile = False + if paths: + mlab.inputs.paths = paths + if use_mcr: + mlab.inputs.nodesktop = Undefined + mlab.inputs.nosplash = Undefined + mlab.inputs.single_comp_thread = Undefined + mlab.inputs.mfile = True + mlab.inputs.uses_mcr = True + mlab.inputs.script = """ +if isempty(which('spm')), +throw(MException('SPMCheck:NotFound','SPM not in matlab path')); +end; +spm_path = spm('dir'); +[name, version] = spm('ver'); +fprintf(1, 'NIPYPE path:%s|name:%s|release:%s', spm_path, name, version); +exit; + """ + try: + out = mlab.run() + except (IOError, RuntimeError) as e: + # if no Matlab at all -- exception could be raised + # No Matlab -- no spm + logger.debug('%s', e) + klass._version = None + klass._path = None + klass._name = None + klass._command = matlab_cmd + klass._paths = paths + return None + + out = sd._strip_header(out.runtime.stdout) + out_dict = {} + for part in out.split('|'): + key, val = part.split(':') + out_dict[key] = val + + klass._version = out_dict['release'] + klass._path = out_dict['path'] + klass._name = out_dict['name'] + klass._command = matlab_cmd + klass._paths = paths + return out_dict + + +def no_spm(): + """ Checks if SPM is NOT installed + used with pytest.mark.skipif decorator to skip tests + that will fail if spm is not installed""" + + if 'NIPYPE_NO_MATLAB' in os.environ or Info.version() is None: + return True + else: + return False + + +class SPMCommandInputSpec(BaseInterfaceInputSpec): + matlab_cmd = traits.Str(desc='matlab command to use') + paths = InputMultiPath(Directory(), desc='Paths to add to matlabpath') + mfile = traits.Bool(True, desc='Run m-code using m-file', usedefault=True) + use_mcr = traits.Bool(desc='Run m-code using SPM MCR') + use_v8struct = traits.Bool( + True, + min_ver='8', + usedefault=True, + desc=('Generate SPM8 and higher ' + 'compatible jobs')) + + +class SPMCommand(BaseInterface): + """Extends `BaseInterface` class to implement SPM specific interfaces. + + WARNING: Pseudo prototype class, meant to be subclassed + """ + input_spec = SPMCommandInputSpec + _additional_metadata = ['field'] + + _jobtype = 'basetype' + _jobname = 'basename' + + _matlab_cmd = None + _paths = None + _use_mcr = None + + references_ = [{ + 'entry': + BibTeX( + "@book{FrackowiakFristonFrithDolanMazziotta1997," + "author={R.S.J. Frackowiak, K.J. Friston, C.D. Frith, R.J. Dolan, and J.C. 
Mazziotta}," + "title={Human Brain Function}," + "publisher={Academic Press USA}," + "year={1997}," + "}"), + 'description': + 'The fundamental text on Statistical Parametric Mapping (SPM)', + # 'path': "nipype.interfaces.spm", + 'tags': ['implementation'], + }] + + def __init__(self, **inputs): + super(SPMCommand, self).__init__(**inputs) + self.inputs.on_trait_change( + self._matlab_cmd_update, + ['matlab_cmd', 'mfile', 'paths', 'use_mcr']) + self._find_mlab_cmd_defaults() + self._check_mlab_inputs() + self._matlab_cmd_update() + + @classmethod + def set_mlab_paths(cls, matlab_cmd=None, paths=None, use_mcr=None): + cls._matlab_cmd = matlab_cmd + cls._paths = paths + cls._use_mcr = use_mcr + info_dict = Info.getinfo( + matlab_cmd=matlab_cmd, + paths=paths, + use_mcr=use_mcr) + + def _find_mlab_cmd_defaults(self): + # check if the user has set environment variables to enforce + # the standalone (MCR) version of SPM + if self._use_mcr or 'FORCE_SPMMCR' in os.environ: + self._use_mcr = True + if self._matlab_cmd is None: + try: + self._matlab_cmd = os.environ['SPMMCRCMD'] + except KeyError: + pass + + def _matlab_cmd_update(self): + # MatlabCommand has to be created here, + # because matlab_cmd is not a proper input + # and can be set only during init + self.mlab = MatlabCommand( + matlab_cmd=self.inputs.matlab_cmd, + mfile=self.inputs.mfile, + paths=self.inputs.paths, + resource_monitor=False) + self.mlab.inputs.script_file = 'pyscript_%s.m' % \ + self.__class__.__name__.split('.')[-1].lower() + if isdefined(self.inputs.use_mcr) and self.inputs.use_mcr: + self.mlab.inputs.nodesktop = Undefined + self.mlab.inputs.nosplash = Undefined + self.mlab.inputs.single_comp_thread = Undefined + self.mlab.inputs.uses_mcr = True + self.mlab.inputs.mfile = True + + @property + def version(self): + info_dict = Info.getinfo( + matlab_cmd=self.inputs.matlab_cmd, + paths=self.inputs.paths, + use_mcr=self.inputs.use_mcr) + if info_dict: + return '%s.%s' % (info_dict['name'].split('SPM')[-1], + info_dict['release']) + + @property + def jobtype(self): + return self._jobtype + + @property + def jobname(self): + return self._jobname + + def _check_mlab_inputs(self): + if not isdefined(self.inputs.matlab_cmd) and self._matlab_cmd: + self.inputs.matlab_cmd = self._matlab_cmd + if not isdefined(self.inputs.paths) and self._paths: + self.inputs.paths = self._paths + if not isdefined(self.inputs.use_mcr) and self._use_mcr: + self.inputs.use_mcr = self._use_mcr + + def _run_interface(self, runtime): + """Executes the SPM function using MATLAB.""" + self.mlab.inputs.script = self._make_matlab_command( + deepcopy(self._parse_inputs())) + results = self.mlab.run() + runtime.returncode = results.runtime.returncode + if self.mlab.inputs.uses_mcr: + if 'Skipped' in results.runtime.stdout: + self.raise_exception(runtime) + runtime.stdout = results.runtime.stdout + runtime.stderr = results.runtime.stderr + runtime.merged = results.runtime.merged + return runtime + + def _list_outputs(self): + """Determine the expected outputs based on inputs.""" + + raise NotImplementedError + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for SPM.""" + if spec.is_trait_type(traits.Bool): + return int(val) + elif spec.is_trait_type(traits.Tuple): + return list(val) + else: + return val + + def _parse_inputs(self, skip=()): + spmdict = {} + metadata = dict(field=lambda t: t is not None) + for name, spec in list(self.inputs.traits(**metadata).items()): + if skip and name in skip: + continue + value = 
getattr(self.inputs, name) + if not isdefined(value): + continue + field = spec.field + if '.' in field: + fields = field.split('.') + dictref = spmdict + for f in fields[:-1]: + if f not in list(dictref.keys()): + dictref[f] = {} + dictref = dictref[f] + dictref[fields[-1]] = self._format_arg(name, spec, value) + else: + spmdict[field] = self._format_arg(name, spec, value) + return [spmdict] + + def _reformat_dict_for_savemat(self, contents): + """Encloses a dict representation within hierarchical lists. + + In order to create an appropriate SPM job structure, a Python + dict storing the job needs to be modified so that each dict + embedded in dict needs to be enclosed as a list element. + + Examples + -------- + >>> a = SPMCommand()._reformat_dict_for_savemat(dict(a=1, + ... b=dict(c=2, d=3))) + >>> a == [{'a': 1, 'b': [{'c': 2, 'd': 3}]}] + True + + """ + newdict = {} + try: + for key, value in list(contents.items()): + if isinstance(value, dict): + if value: + newdict[key] = self._reformat_dict_for_savemat(value) + # if value is None, skip + else: + newdict[key] = value + + return [newdict] + except TypeError: + print('Requires dict input') + + def _generate_job(self, prefix='', contents=None): + """Recursive function to generate spm job specification as a string + + Parameters + ---------- + prefix : string + A string that needs to get + contents : dict + A non-tuple Python structure containing spm job + information gets converted to an appropriate sequence of + matlab commands. + + """ + jobstring = '' + if contents is None: + return jobstring + if isinstance(contents, list): + for i, value in enumerate(contents): + if prefix.endswith(")"): + newprefix = "%s,%d)" % (prefix[:-1], i + 1) + else: + newprefix = "%s(%d)" % (prefix, i + 1) + jobstring += self._generate_job(newprefix, value) + return jobstring + if isinstance(contents, dict): + for key, value in list(contents.items()): + newprefix = "%s.%s" % (prefix, key) + jobstring += self._generate_job(newprefix, value) + return jobstring + if isinstance(contents, np.ndarray): + if contents.dtype == np.dtype(object): + if prefix: + jobstring += "%s = {...\n" % (prefix) + else: + jobstring += "{...\n" + for i, val in enumerate(contents): + if isinstance(val, np.ndarray): + jobstring += self._generate_job( + prefix=None, contents=val) + elif isinstance(val, list): + items_format = [] + for el in val: + items_format += [ + '{}' if not isinstance(el, (str, bytes)) else + '\'{}\'' + ] + val_format = ', '.join(items_format).format + jobstring += '[{}];...\n'.format(val_format(*val)) + elif isinstance(val, (str, bytes)): + jobstring += '\'{}\';...\n'.format(val) + else: + jobstring += '%s;...\n' % str(val) + jobstring += '};\n' + else: + for i, val in enumerate(contents): + for field in val.dtype.fields: + if prefix: + newprefix = "%s(%d).%s" % (prefix, i + 1, field) + else: + newprefix = "(%d).%s" % (i + 1, field) + jobstring += self._generate_job(newprefix, val[field]) + return jobstring + if isinstance(contents, (str, bytes)): + jobstring += "%s = '%s';\n" % (prefix, contents) + return jobstring + jobstring += "%s = %s;\n" % (prefix, str(contents)) + return jobstring + + def _make_matlab_command(self, contents, postscript=None): + """Generates a mfile to build job structure + Parameters + ---------- + + contents : list + a list of dicts generated by _parse_inputs + in each subclass + + cwd : string + default os.getcwd() + + Returns + ------- + mscript : string + contents of a script called by matlab + + """ + cwd = os.getcwd() + mscript = 
""" + %% Generated by nipype.interfaces.spm + if isempty(which('spm')), + throw(MException('SPMCheck:NotFound', 'SPM not in matlab path')); + end + [name, version] = spm('ver'); + fprintf('SPM version: %s Release: %s\\n',name, version); + fprintf('SPM path: %s\\n', which('spm')); + spm('Defaults','fMRI'); + + if strcmp(name, 'SPM8') || strcmp(name(1:5), 'SPM12'), + spm_jobman('initcfg'); + spm_get_defaults('cmdline', 1); + end\n + """ + if self.mlab.inputs.mfile: + if (isdefined(self.inputs.use_v8struct) + and self.inputs.use_v8struct): + mscript += self._generate_job('jobs{1}.spm.%s.%s' % + (self.jobtype, + self.jobname), contents[0]) + else: + if self.jobname in [ + 'st', 'smooth', 'preproc', 'preproc8', 'fmri_spec', + 'fmri_est', 'factorial_design', 'defs' + ]: + # parentheses + mscript += self._generate_job('jobs{1}.%s{1}.%s(1)' % + (self.jobtype, + self.jobname), contents[0]) + else: + # curly brackets + mscript += self._generate_job('jobs{1}.%s{1}.%s{1}' % + (self.jobtype, + self.jobname), contents[0]) + else: + jobdef = { + 'jobs': [{ + self.jobtype: [{ + self.jobname: + self.reformat_dict_for_savemat(contents[0]) + }] + }] + } + savemat(os.path.join(cwd, 'pyjobs_%s.mat' % self.jobname), jobdef) + mscript += "load pyjobs_%s;\n\n" % self.jobname + mscript += """ + spm_jobman(\'run\', jobs);\n + """ + if self.inputs.use_mcr: + mscript += """ + if strcmp(name, 'SPM8') || strcmp(name(1:5), 'SPM12'), + close(\'all\', \'force\'); + end; + """ + if postscript is not None: + mscript += postscript + return mscript + + +class ImageFileSPM(ImageFile): + """ + Defines an ImageFile trait specific to SPM interfaces. + """ + + def __init__(self, + value='', + filter=None, + auto_set=False, + entries=0, + exists=False, + types=['nifti1', 'nifti2'], + allow_compressed=False, + **metadata): + """ Trait handles neuroimaging files. + + Parameters + ---------- + types : list + Strings of file format types accepted + compressed : boolean + Indicates whether the file format can compressed + """ + self.types = types + self.allow_compressed = allow_compressed + super(ImageFileSPM, + self).__init__(value, filter, auto_set, entries, exists, types, + allow_compressed, **metadata) diff --git a/nipype/interfaces/spm/model.py b/nipype/interfaces/spm/model.py new file mode 100644 index 0000000000..3e26ab6e2a --- /dev/null +++ b/nipype/interfaces/spm/model.py @@ -0,0 +1,1123 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The spm module provides basic functions for interfacing with matlab +and spm to access spm tools. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, bytes + +# Standard library imports +import os +from glob import glob + +# Third-party imports +import numpy as np +import scipy.io as sio + +# Local imports +from ... 
import logging +from ...utils.filemanip import (ensure_list, simplify_list, + split_filename) +from ..base import (Bunch, traits, TraitedSpec, File, Directory, + OutputMultiPath, InputMultiPath, isdefined) +from .base import (SPMCommand, SPMCommandInputSpec, scans_for_fnames, + ImageFileSPM) + +__docformat__ = 'restructuredtext' +iflogger = logging.getLogger('nipype.interface') + + +class Level1DesignInputSpec(SPMCommandInputSpec): + spm_mat_dir = Directory( + exists=True, field='dir', desc='directory to store SPM.mat file (opt)') + timing_units = traits.Enum( + 'secs', + 'scans', + field='timing.units', + desc='units for specification of onsets', + mandatory=True) + interscan_interval = traits.Float( + field='timing.RT', desc='Interscan interval in secs', mandatory=True) + microtime_resolution = traits.Int( + field='timing.fmri_t', + desc=('Number of time-bins per scan ' + 'in secs (opt)')) + microtime_onset = traits.Float( + field='timing.fmri_t0', + desc=('The onset/time-bin in seconds for ' + 'alignment (opt)')) + session_info = traits.Any( + field='sess', + desc=('Session specific information generated ' + 'by ``modelgen.SpecifyModel``'), + mandatory=True) + factor_info = traits.List( + traits.Dict(traits.Enum('name', 'levels')), + field='fact', + desc=('Factor specific information ' + 'file (opt)')) + bases = traits.Dict( + traits.Enum('hrf', 'fourier', 'fourier_han', 'gamma', 'fir'), + field='bases', + desc=""" + dict {'name':{'basesparam1':val,...}} + name : string + Name of basis function (hrf, fourier, fourier_han, + gamma, fir) + + hrf : + derivs : 2-element list + Model HRF Derivatives. No derivatives: [0,0], + Time derivatives : [1,0], Time and Dispersion + derivatives: [1,1] + fourier, fourier_han, gamma, fir: + length : int + Post-stimulus window length (in seconds) + order : int + Number of basis functions +""", + mandatory=True) + volterra_expansion_order = traits.Enum( + 1, 2, field='volt', desc=('Model interactions - ' + 'yes:1, no:2')) + global_intensity_normalization = traits.Enum( + 'none', + 'scaling', + field='global', + desc=('Global intensity ' + 'normalization - ' + 'scaling or none')) + mask_image = File( + exists=True, + field='mask', + desc='Image for explicitly masking the analysis') + mask_threshold = traits.Either( + traits.Enum('-Inf'), + traits.Float(), + desc="Thresholding for the mask", + default='-Inf', + usedefault=True) + model_serial_correlations = traits.Enum( + 'AR(1)', + 'FAST', + 'none', + field='cvi', + desc=('Model serial correlations ' + 'AR(1), FAST or none. 
FAST ' + 'is available in SPM12')) + + +class Level1DesignOutputSpec(TraitedSpec): + spm_mat_file = File(exists=True, desc='SPM mat file') + + +class Level1Design(SPMCommand): + """Generate an SPM design matrix + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=59 + + Examples + -------- + + >>> level1design = Level1Design() + >>> level1design.inputs.timing_units = 'secs' + >>> level1design.inputs.interscan_interval = 2.5 + >>> level1design.inputs.bases = {'hrf':{'derivs': [0,0]}} + >>> level1design.inputs.session_info = 'session_info.npz' + >>> level1design.run() # doctest: +SKIP + + """ + + input_spec = Level1DesignInputSpec + output_spec = Level1DesignOutputSpec + + _jobtype = 'stats' + _jobname = 'fmri_spec' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['spm_mat_dir', 'mask_image']: + return np.array([str(val)], dtype=object) + if opt in ['session_info']: # , 'factor_info']: + if isinstance(val, dict): + return [val] + else: + return val + return super(Level1Design, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm realign options if set to None ignore + """ + einputs = super(Level1Design, + self)._parse_inputs(skip=('mask_threshold')) + for sessinfo in einputs[0]['sess']: + sessinfo['scans'] = scans_for_fnames( + ensure_list(sessinfo['scans']), keep4d=False) + if not isdefined(self.inputs.spm_mat_dir): + einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) + return einputs + + def _make_matlab_command(self, content): + """validates spm options and generates job structure + if mfile is True uses matlab .m file + else generates a job structure and saves in .mat + """ + if isdefined(self.inputs.mask_image): + # SPM doesn't handle explicit masking properly, especially + # when you want to use the entire mask image + postscript = "load SPM;\n" + postscript += ("SPM.xM.VM = spm_vol('%s');\n" % simplify_list( + self.inputs.mask_image)) + postscript += "SPM.xM.I = 0;\n" + postscript += "SPM.xM.T = [];\n" + postscript += ("SPM.xM.TH = ones(size(SPM.xM.TH))*(%s);\n" % + self.inputs.mask_threshold) + postscript += ("SPM.xM.xs = struct('Masking', " + "'explicit masking only');\n") + postscript += "save SPM SPM;\n" + else: + postscript = None + return super(Level1Design, self)._make_matlab_command( + content, postscript=postscript) + + def _list_outputs(self): + outputs = self._outputs().get() + spm = os.path.join(os.getcwd(), 'SPM.mat') + outputs['spm_mat_file'] = spm + return outputs + + +class EstimateModelInputSpec(SPMCommandInputSpec): + spm_mat_file = File( + exists=True, + field='spmmat', + copyfile=True, + mandatory=True, + desc='Absolute path to SPM.mat') + estimation_method = traits.Dict( + traits.Enum('Classical', 'Bayesian2', 'Bayesian'), + field='method', + mandatory=True, + desc=('Dictionary of either Classical: 1, Bayesian: 1, ' + 'or Bayesian2: 1 (dict)')) + write_residuals = traits.Bool( + field='write_residuals', desc="Write individual residual images") + flags = traits.Dict(desc='Additional arguments') + + +class EstimateModelOutputSpec(TraitedSpec): + mask_image = ImageFileSPM( + exists=True, desc='binary mask to constrain estimation') + beta_images = OutputMultiPath( + ImageFileSPM(exists=True), desc='design parameter estimates') + residual_image = ImageFileSPM( + exists=True, desc='Mean-squared image of the residuals') + residual_images = OutputMultiPath( + ImageFileSPM(exists=True), + desc="individual residual images (requires `write_residuals`") + RPVimage = 
ImageFileSPM(exists=True, desc='Resels per voxel image') + spm_mat_file = File(exists=True, desc='Updated SPM mat file') + labels = ImageFileSPM(exists=True, desc="label file") + SDerror = OutputMultiPath( + ImageFileSPM(exists=True), + desc="Images of the standard deviation of the error") + ARcoef = OutputMultiPath( + ImageFileSPM(exists=True), desc="Images of the AR coefficient") + Cbetas = OutputMultiPath( + ImageFileSPM(exists=True), desc="Images of the parameter posteriors") + SDbetas = OutputMultiPath( + ImageFileSPM(exists=True), + desc="Images of the standard deviation of parameter posteriors") + + +class EstimateModel(SPMCommand): + """Use spm_spm to estimate the parameters of a model + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=69 + + Examples + -------- + >>> est = EstimateModel() + >>> est.inputs.spm_mat_file = 'SPM.mat' + >>> est.inputs.estimation_method = {'Classical': 1} + >>> est.run() # doctest: +SKIP + """ + input_spec = EstimateModelInputSpec + output_spec = EstimateModelOutputSpec + _jobtype = 'stats' + _jobname = 'fmri_est' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt == 'spm_mat_file': + return np.array([str(val)], dtype=object) + if opt == 'estimation_method': + if isinstance(val, (str, bytes)): + return {'{}'.format(val): 1} + else: + return val + return super(EstimateModel, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm realign options if set to None ignore + """ + einputs = super(EstimateModel, self)._parse_inputs(skip=('flags')) + if isdefined(self.inputs.flags): + einputs[0].update( + {flag: val + for (flag, val) in self.inputs.flags.items()}) + return einputs + + def _list_outputs(self): + outputs = self._outputs().get() + pth = os.path.dirname(self.inputs.spm_mat_file) + outtype = 'nii' if '12' in self.version.split('.')[0] else 'img' + spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) + + betas = [vbeta.fname[0] for vbeta in spm['SPM'][0, 0].Vbeta[0]] + if ('Bayesian' in self.inputs.estimation_method.keys() + or 'Bayesian2' in self.inputs.estimation_method.keys()): + outputs['labels'] = os.path.join(pth, 'labels.{}'.format(outtype)) + outputs['SDerror'] = glob(os.path.join(pth, 'Sess*_SDerror*')) + outputs['ARcoef'] = glob(os.path.join(pth, 'Sess*_AR_*')) + if betas: + outputs['Cbetas'] = [ + os.path.join(pth, 'C{}'.format(beta)) for beta in betas + ] + outputs['SDbetas'] = [ + os.path.join(pth, 'SD{}'.format(beta)) for beta in betas + ] + + if 'Classical' in self.inputs.estimation_method.keys(): + outputs['residual_image'] = os.path.join( + pth, 'ResMS.{}'.format(outtype)) + outputs['RPVimage'] = os.path.join(pth, 'RPV.{}'.format(outtype)) + if self.inputs.write_residuals: + outputs['residual_images'] = glob(os.path.join(pth, 'Res_*')) + if betas: + outputs['beta_images'] = [ + os.path.join(pth, beta) for beta in betas + ] + + outputs['mask_image'] = os.path.join(pth, 'mask.{}'.format(outtype)) + outputs['spm_mat_file'] = os.path.join(pth, 'SPM.mat') + return outputs + + +class EstimateContrastInputSpec(SPMCommandInputSpec): + spm_mat_file = File( + exists=True, + field='spmmat', + desc='Absolute path to SPM.mat', + copyfile=True, + mandatory=True) + contrasts = traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), traits.List(traits.Str), + traits.List(traits.Float), traits.List(traits.Float)), + 
traits.Tuple(traits.Str, traits.Enum('F'), + traits.List( + traits.Either( + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float)), + traits.Tuple(traits.Str, traits.Enum('T'), + traits.List(traits.Str), + traits.List(traits.Float), + traits.List(traits.Float)))))), + desc="""List of contrasts with each contrast being a list of the form: + [('name', 'stat', [condition list], [weight list], [session list])] + If session list is None or not provided, all sessions are used. For + F contrasts, the condition list should contain previously defined + T-contrasts.""", + mandatory=True) + beta_images = InputMultiPath( + File(exists=True), + desc=('Parameter estimates of the ' + 'design matrix'), + copyfile=False, + mandatory=True) + residual_image = File( + exists=True, + desc='Mean-squared image of the residuals', + copyfile=False, + mandatory=True) + use_derivs = traits.Bool( + desc='use derivatives for estimation', xor=['group_contrast']) + group_contrast = traits.Bool( + desc='higher level contrast', xor=['use_derivs']) + + +class EstimateContrastOutputSpec(TraitedSpec): + con_images = OutputMultiPath( + File(exists=True), desc='contrast images from a t-contrast') + spmT_images = OutputMultiPath( + File(exists=True), desc='stat images from a t-contrast') + ess_images = OutputMultiPath( + File(exists=True), desc='contrast images from an F-contrast') + spmF_images = OutputMultiPath( + File(exists=True), desc='stat images from an F-contrast') + spm_mat_file = File(exists=True, desc='Updated SPM mat file') + + +class EstimateContrast(SPMCommand): + """Use spm_contrasts to estimate contrasts of interest + + Examples + -------- + >>> import nipype.interfaces.spm as spm + >>> est = spm.EstimateContrast() + >>> est.inputs.spm_mat_file = 'SPM.mat' + >>> cont1 = ('Task>Baseline','T', ['Task-Odd','Task-Even'],[0.5,0.5]) + >>> cont2 = ('Task-Odd>Task-Even','T', ['Task-Odd','Task-Even'],[1,-1]) + >>> contrasts = [cont1,cont2] + >>> est.inputs.contrasts = contrasts + >>> est.run() # doctest: +SKIP + + """ + + input_spec = EstimateContrastInputSpec + output_spec = EstimateContrastOutputSpec + _jobtype = 'stats' + _jobname = 'con' + + def _make_matlab_command(self, _): + """validates spm options and generates job structure + """ + contrasts = [] + cname = [] + for i, cont in enumerate(self.inputs.contrasts): + cname.insert(i, cont[0]) + contrasts.insert(i, + Bunch( + name=cont[0], + stat=cont[1], + conditions=cont[2], + weights=None, + sessions=None)) + if len(cont) >= 4: + contrasts[i].weights = cont[3] + if len(cont) >= 5: + contrasts[i].sessions = cont[4] + script = "% generated by nipype.interfaces.spm\n" + script += "spm_defaults;\n" + script += ("jobs{1}.stats{1}.con.spmmat = {'%s'};\n" % + self.inputs.spm_mat_file) + script += "load(jobs{1}.stats{1}.con.spmmat{:});\n" + script += "SPM.swd = '%s';\n" % os.getcwd() + script += "save(jobs{1}.stats{1}.con.spmmat{:},'SPM');\n" + script += "names = SPM.xX.name;\n" + # get names for columns + if (isdefined(self.inputs.group_contrast) + and self.inputs.group_contrast): + script += "condnames=names;\n" + else: + if self.inputs.use_derivs: + script += "pat = 'Sn\([0-9]*\) (.*)';\n" + else: + script += ("pat = 'Sn\([0-9]*\) (.*)\*bf\(1\)|Sn\([0-9]*\) " + ".*\*bf\([2-9]\)|Sn\([0-9]*\) (.*)';\n") + script += "t = regexp(names,pat,'tokens');\n" + # get sessidx for columns + script += "pat1 = 'Sn\(([0-9].*)\)\s.*';\n" + script += "t1 = regexp(names,pat1,'tokens');\n" + script += ("for 
i0=1:numel(t),condnames{i0}='';condsess(i0)=0;if " + "~isempty(t{i0}{1}),condnames{i0} = t{i0}{1}{1};" + "condsess(i0)=str2num(t1{i0}{1}{1});end;end;\n") + # BUILD CONTRAST SESSION STRUCTURE + for i, contrast in enumerate(contrasts): + if contrast.stat == 'T': + script += ("consess{%d}.tcon.name = '%s';\n" % + (i + 1, contrast.name)) + script += ( + "consess{%d}.tcon.convec = zeros(1,numel(names));\n" % + (i + 1)) + for c0, cond in enumerate(contrast.conditions): + script += ("idx = strmatch('%s',condnames,'exact');\n" % + (cond)) + script += (("if isempty(idx), throw(MException(" + "'CondName:Chk', sprintf('Condition %%s not " + "found in design','%s'))); end;\n") % cond) + if contrast.sessions: + for sno, sw in enumerate(contrast.sessions): + script += ("sidx = find(condsess(idx)==%d);\n" % + (sno + 1)) + script += (("consess{%d}.tcon.convec(idx(sidx)) " + "= %f;\n") % + (i + 1, sw * contrast.weights[c0])) + else: + script += ("consess{%d}.tcon.convec(idx) = %f;\n" % + (i + 1, contrast.weights[c0])) + for i, contrast in enumerate(contrasts): + if contrast.stat == 'F': + script += ("consess{%d}.fcon.name = '%s';\n" % + (i + 1, contrast.name)) + for cl0, fcont in enumerate(contrast.conditions): + try: + tidx = cname.index(fcont[0]) + except: + Exception("Contrast Estimate: could not get index of" + " T contrast. probably not defined prior " + "to the F contrasts") + script += (("consess{%d}.fcon.convec{%d} = " + "consess{%d}.tcon.convec;\n") % + (i + 1, cl0 + 1, tidx + 1)) + script += "jobs{1}.stats{1}.con.consess = consess;\n" + script += ("if strcmp(spm('ver'),'SPM8'), spm_jobman('initcfg');" + "jobs=spm_jobman('spm5tospm8',{jobs});end\n") + script += "spm_jobman('run',jobs);" + return script + + def _list_outputs(self): + outputs = self._outputs().get() + pth, _ = os.path.split(self.inputs.spm_mat_file) + spm = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False) + con_images = [] + spmT_images = [] + for con in spm['SPM'][0, 0].xCon[0]: + con_images.append(str(os.path.join(pth, con.Vcon[0, 0].fname[0]))) + spmT_images.append(str(os.path.join(pth, con.Vspm[0, 0].fname[0]))) + if con_images: + outputs['con_images'] = con_images + outputs['spmT_images'] = spmT_images + spm12 = '12' in self.version.split('.')[0] + if spm12: + ess = glob(os.path.join(pth, 'ess*.nii')) + else: + ess = glob(os.path.join(pth, 'ess*.img')) + if len(ess) > 0: + outputs['ess_images'] = sorted(ess) + if spm12: + spmf = glob(os.path.join(pth, 'spmF*.nii')) + else: + spmf = glob(os.path.join(pth, 'spmF*.img')) + if len(spmf) > 0: + outputs['spmF_images'] = sorted(spmf) + outputs['spm_mat_file'] = self.inputs.spm_mat_file + return outputs + + +class ThresholdInputSpec(SPMCommandInputSpec): + spm_mat_file = File( + exists=True, + desc='absolute path to SPM.mat', + copyfile=True, + mandatory=True) + stat_image = File( + exists=True, desc='stat image', copyfile=False, mandatory=True) + contrast_index = traits.Int( + mandatory=True, desc='which contrast in the SPM.mat to use') + use_fwe_correction = traits.Bool( + True, + usedefault=True, + desc=('whether to use FWE (Bonferroni) ' + 'correction for initial threshold ' + '(height_threshold_type has to be ' + 'set to p-value)')) + use_topo_fdr = traits.Bool( + True, + usedefault=True, + desc=('whether to use FDR over cluster extent ' + 'probabilities')) + height_threshold = traits.Float( + 0.05, + usedefault=True, + desc=('value for initial thresholding ' + '(defining clusters)')) + height_threshold_type = traits.Enum( + 'p-value', + 'stat', + usedefault=True, + 
desc=('Is the cluster forming ' + 'threshold a stat value or ' + 'p-value?')) + extent_fdr_p_threshold = traits.Float( + 0.05, + usedefault=True, + desc=('p threshold on FDR corrected ' + 'cluster size probabilities')) + extent_threshold = traits.Int( + 0, usedefault=True, desc='Minimum cluster size in voxels') + force_activation = traits.Bool( + False, + usedefault=True, + desc=('In case no clusters survive the ' + 'topological inference step this ' + 'will pick a culster with the highes ' + 'sum of t-values. Use with care.')) + + +class ThresholdOutputSpec(TraitedSpec): + thresholded_map = File(exists=True) + n_clusters = traits.Int() + pre_topo_fdr_map = File(exists=True) + pre_topo_n_clusters = traits.Int() + activation_forced = traits.Bool() + cluster_forming_thr = traits.Float() + + +class Threshold(SPMCommand): + """Topological FDR thresholding based on cluster extent/size. Smoothness is + estimated from GLM residuals but is assumed to be the same for all of the + voxels. + + Examples + -------- + + >>> thresh = Threshold() + >>> thresh.inputs.spm_mat_file = 'SPM.mat' + >>> thresh.inputs.stat_image = 'spmT_0001.img' + >>> thresh.inputs.contrast_index = 1 + >>> thresh.inputs.extent_fdr_p_threshold = 0.05 + >>> thresh.run() # doctest: +SKIP + """ + input_spec = ThresholdInputSpec + output_spec = ThresholdOutputSpec + + def _gen_thresholded_map_filename(self): + _, fname, ext = split_filename(self.inputs.stat_image) + return os.path.abspath(fname + "_thr" + ext) + + def _gen_pre_topo_map_filename(self): + _, fname, ext = split_filename(self.inputs.stat_image) + return os.path.abspath(fname + "_pre_topo_thr" + ext) + + def _make_matlab_command(self, _): + script = "con_index = %d;\n" % self.inputs.contrast_index + script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold + if self.inputs.use_fwe_correction: + script += "thresDesc = 'FWE';\n" + else: + script += "thresDesc = 'none';\n" + + if self.inputs.use_topo_fdr: + script += "use_topo_fdr = 1;\n" + else: + script += "use_topo_fdr = 0;\n" + + if self.inputs.force_activation: + script += "force_activation = 1;\n" + else: + script += "force_activation = 0;\n" + script += ("cluster_extent_p_fdr_thr = %f;\n" % + self.inputs.extent_fdr_p_threshold) + script += "stat_filename = '%s';\n" % self.inputs.stat_image + script += ("height_threshold_type = '%s';\n" % + self.inputs.height_threshold_type) + script += "extent_threshold = %d;\n" % self.inputs.extent_threshold + + script += "load %s;\n" % self.inputs.spm_mat_file + script += """ +FWHM = SPM.xVol.FWHM; +df = [SPM.xCon(con_index).eidf SPM.xX.erdf]; +STAT = SPM.xCon(con_index).STAT; +R = SPM.xVol.R; +S = SPM.xVol.S; +n = 1; + +switch thresDesc + case 'FWE' + cluster_forming_thr = spm_uc(cluster_forming_thr,df,STAT,R,n,S); + + case 'none' + if strcmp(height_threshold_type, 'p-value') + cluster_forming_thr = spm_u(cluster_forming_thr^(1/n),df,STAT); + end +end + +stat_map_vol = spm_vol(stat_filename); +[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol); + +Z = stat_map_data(:)'; +[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))'); +XYZ = cat(1, x', y', z'); + +XYZth = XYZ(:, Z >= cluster_forming_thr); +Zth = Z(Z >= cluster_forming_thr); + +""" + script += (("spm_write_filtered(Zth,XYZth,stat_map_vol.dim'," + "stat_map_vol.mat,'thresholded map', '%s');\n") % + self._gen_pre_topo_map_filename()) + script += """ +max_size = 0; +max_size_index = 0; +th_nclusters = 0; +nclusters = 0; +if isempty(XYZth) + thresholded_XYZ = []; + thresholded_Z = []; +else + if 
use_topo_fdr + V2R = 1/prod(FWHM(stat_map_vol.dim > 1)); + [uc,Pc,ue] = spm_uc_clusterFDR(cluster_extent_p_fdr_thr,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr); + end + + voxel_labels = spm_clusters(XYZth); + nclusters = max(voxel_labels); + + thresholded_XYZ = []; + thresholded_Z = []; + + for i = 1:nclusters + cluster_size = sum(voxel_labels==i); + if cluster_size > extent_threshold && (~use_topo_fdr || (cluster_size - uc) > -1) + thresholded_XYZ = cat(2, thresholded_XYZ, XYZth(:,voxel_labels == i)); + thresholded_Z = cat(2, thresholded_Z, Zth(voxel_labels == i)); + th_nclusters = th_nclusters + 1; + end + if force_activation + cluster_sum = sum(Zth(voxel_labels == i)); + if cluster_sum > max_size + max_size = cluster_sum; + max_size_index = i; + end + end + end +end + +activation_forced = 0; +if isempty(thresholded_XYZ) + if force_activation && max_size ~= 0 + thresholded_XYZ = XYZth(:,voxel_labels == max_size_index); + thresholded_Z = Zth(voxel_labels == max_size_index); + th_nclusters = 1; + activation_forced = 1; + else + thresholded_Z = [0]; + thresholded_XYZ = [1 1 1]'; + th_nclusters = 0; + end +end + +fprintf('activation_forced = %d\\n',activation_forced); +fprintf('pre_topo_n_clusters = %d\\n',nclusters); +fprintf('n_clusters = %d\\n',th_nclusters); +fprintf('cluster_forming_thr = %f\\n',cluster_forming_thr); + +""" + script += (("spm_write_filtered(thresholded_Z,thresholded_XYZ," + "stat_map_vol.dim',stat_map_vol.mat,'thresholded map'," + " '%s');\n") % self._gen_thresholded_map_filename()) + + return script + + def aggregate_outputs(self, runtime=None): + outputs = self._outputs() + setattr(outputs, 'thresholded_map', + self._gen_thresholded_map_filename()) + setattr(outputs, 'pre_topo_fdr_map', self._gen_pre_topo_map_filename()) + for line in runtime.stdout.split('\n'): + if line.startswith("activation_forced = "): + setattr(outputs, 'activation_forced', + line[len("activation_forced = "):].strip() == "1") + elif line.startswith("n_clusters = "): + setattr(outputs, 'n_clusters', + int(line[len("n_clusters = "):].strip())) + elif line.startswith("pre_topo_n_clusters = "): + setattr(outputs, 'pre_topo_n_clusters', + int(line[len("pre_topo_n_clusters = "):].strip())) + elif line.startswith("cluster_forming_thr = "): + setattr(outputs, 'cluster_forming_thr', + float(line[len("cluster_forming_thr = "):].strip())) + return outputs + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['thresholded_map'] = self._gen_thresholded_map_filename() + outputs['pre_topo_fdr_map'] = self._gen_pre_topo_map_filename() + return outputs + + +class ThresholdStatisticsInputSpec(SPMCommandInputSpec): + spm_mat_file = File( + exists=True, + desc='absolute path to SPM.mat', + copyfile=True, + mandatory=True) + stat_image = File( + exists=True, desc='stat image', copyfile=False, mandatory=True) + contrast_index = traits.Int( + mandatory=True, desc='which contrast in the SPM.mat to use') + height_threshold = traits.Float( + desc=('stat value for initial ' + 'thresholding (defining clusters)'), + mandatory=True) + extent_threshold = traits.Int( + 0, usedefault=True, desc="Minimum cluster size in voxels") + + +class ThresholdStatisticsOutputSpec(TraitedSpec): + voxelwise_P_Bonf = traits.Float() + voxelwise_P_RF = traits.Float() + voxelwise_P_uncor = traits.Float() + voxelwise_P_FDR = traits.Float() + clusterwise_P_RF = traits.Float() + clusterwise_P_FDR = traits.Float() + + +class ThresholdStatistics(SPMCommand): + """Given height and cluster size threshold calculate theoretical + 
probabilities concerning false positives + + Examples + -------- + + >>> thresh = ThresholdStatistics() + >>> thresh.inputs.spm_mat_file = 'SPM.mat' + >>> thresh.inputs.stat_image = 'spmT_0001.img' + >>> thresh.inputs.contrast_index = 1 + >>> thresh.inputs.height_threshold = 4.56 + >>> thresh.run() # doctest: +SKIP + """ + input_spec = ThresholdStatisticsInputSpec + output_spec = ThresholdStatisticsOutputSpec + + def _make_matlab_command(self, _): + script = "con_index = %d;\n" % self.inputs.contrast_index + script += "cluster_forming_thr = %f;\n" % self.inputs.height_threshold + script += "stat_filename = '%s';\n" % self.inputs.stat_image + script += "extent_threshold = %d;\n" % self.inputs.extent_threshold + script += "load '%s'\n" % self.inputs.spm_mat_file + script += """ +FWHM = SPM.xVol.FWHM; +df = [SPM.xCon(con_index).eidf SPM.xX.erdf]; +STAT = SPM.xCon(con_index).STAT; +R = SPM.xVol.R; +S = SPM.xVol.S; +n = 1; + +voxelwise_P_Bonf = spm_P_Bonf(cluster_forming_thr,df,STAT,S,n) +voxelwise_P_RF = spm_P_RF(1,0,cluster_forming_thr,df,STAT,R,n) + +stat_map_vol = spm_vol(stat_filename); +[stat_map_data, stat_map_XYZmm] = spm_read_vols(stat_map_vol); + +Z = stat_map_data(:); +Zum = Z; + + switch STAT + case 'Z' + VPs = (1-spm_Ncdf(Zum)).^n; + voxelwise_P_uncor = (1-spm_Ncdf(cluster_forming_thr)).^n + case 'T' + VPs = (1 - spm_Tcdf(Zum,df(2))).^n; + voxelwise_P_uncor = (1 - spm_Tcdf(cluster_forming_thr,df(2))).^n + case 'X' + VPs = (1-spm_Xcdf(Zum,df(2))).^n; + voxelwise_P_uncor = (1-spm_Xcdf(cluster_forming_thr,df(2))).^n + case 'F' + VPs = (1 - spm_Fcdf(Zum,df)).^n; + voxelwise_P_uncor = (1 - spm_Fcdf(cluster_forming_thr,df)).^n + end + VPs = sort(VPs); + +voxelwise_P_FDR = spm_P_FDR(cluster_forming_thr,df,STAT,n,VPs) + +V2R = 1/prod(FWHM(stat_map_vol.dim > 1)); + +clusterwise_P_RF = spm_P_RF(1,extent_threshold*V2R,cluster_forming_thr,df,STAT,R,n) + +[x,y,z] = ind2sub(size(stat_map_data),(1:numel(stat_map_data))'); +XYZ = cat(1, x', y', z'); + +[u, CPs, ue] = spm_uc_clusterFDR(0.05,df,STAT,R,n,Z,XYZ,V2R,cluster_forming_thr); + +clusterwise_P_FDR = spm_P_clusterFDR(extent_threshold*V2R,df,STAT,R,n,cluster_forming_thr,CPs') +""" + return script + + def aggregate_outputs(self, runtime=None, needed_outputs=None): + outputs = self._outputs() + cur_output = "" + for line in runtime.stdout.split('\n'): + if cur_output != "" and len(line.split()) != 0: + setattr(outputs, cur_output, float(line)) + cur_output = "" + continue + if (len(line.split()) != 0 and line.split()[0] in [ + "clusterwise_P_FDR", "clusterwise_P_RF", + "voxelwise_P_Bonf", "voxelwise_P_FDR", "voxelwise_P_RF", + "voxelwise_P_uncor" + ]): + cur_output = line.split()[0] + continue + + return outputs + + +class FactorialDesignInputSpec(SPMCommandInputSpec): + spm_mat_dir = Directory( + exists=True, field='dir', desc='directory to store SPM.mat file (opt)') + # Need to make an alias of InputMultiPath; the inputs below are not Path + covariates = InputMultiPath( + traits.Dict( + key_trait=traits.Enum('vector', 'name', 'interaction', + 'centering')), + field='cov', + desc=('covariate dictionary {vector, name, ' + 'interaction, centering}')) + threshold_mask_none = traits.Bool( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + desc='do not use threshold masking') + threshold_mask_absolute = traits.Float( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + desc='use an absolute threshold') + threshold_mask_relative = traits.Float( + 
field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + desc=('threshold using a ' + 'proportion of the global ' + 'value')) + use_implicit_threshold = traits.Bool( + field='masking.im', + desc=('use implicit mask NaNs or ' + 'zeros to threshold')) + explicit_mask_file = File( + field='masking.em', # requires cell + desc='use an explicit mask file to threshold') + global_calc_omit = traits.Bool( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + desc='omit global calculation') + global_calc_mean = traits.Bool( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + desc='use mean for global calculation') + global_calc_values = traits.List( + traits.Float, + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + desc='list of values for global calculation') + no_grand_mean_scaling = traits.Bool( + field='globalm.gmsca.gmsca_no', + desc=('do not perform grand mean ' + 'scaling')) + global_normalization = traits.Enum( + 1, + 2, + 3, + field='globalm.glonorm', + desc=('global normalization None-1, ' + 'Proportional-2, ANCOVA-3')) + + +class FactorialDesignOutputSpec(TraitedSpec): + spm_mat_file = File(exists=True, desc='SPM mat file') + + +class FactorialDesign(SPMCommand): + """Base class for factorial designs + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=77 + + """ + + input_spec = FactorialDesignInputSpec + output_spec = FactorialDesignOutputSpec + _jobtype = 'stats' + _jobname = 'factorial_design' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['spm_mat_dir', 'explicit_mask_file']: + return np.array([str(val)], dtype=object) + if opt in ['covariates']: + outlist = [] + mapping = { + 'name': 'cname', + 'vector': 'c', + 'interaction': 'iCFI', + 'centering': 'iCC' + } + for dictitem in val: + outdict = {} + for key, keyval in list(dictitem.items()): + outdict[mapping[key]] = keyval + outlist.append(outdict) + return outlist + return super(FactorialDesign, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm factorial design options if set to None ignore + """ + einputs = super(FactorialDesign, self)._parse_inputs() + if not isdefined(self.inputs.spm_mat_dir): + einputs[0]['dir'] = np.array([str(os.getcwd())], dtype=object) + return einputs + + def _list_outputs(self): + outputs = self._outputs().get() + spm = os.path.join(os.getcwd(), 'SPM.mat') + outputs['spm_mat_file'] = spm + return outputs + + +class OneSampleTTestDesignInputSpec(FactorialDesignInputSpec): + in_files = traits.List( + File(exists=True), + field='des.t1.scans', + mandatory=True, + minlen=2, + desc='input files') + + +class OneSampleTTestDesign(FactorialDesign): + """Create SPM design for one sample t-test + + Examples + -------- + + >>> ttest = OneSampleTTestDesign() + >>> ttest.inputs.in_files = ['cont1.nii', 'cont2.nii'] + >>> ttest.run() # doctest: +SKIP + """ + + input_spec = OneSampleTTestDesignInputSpec + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['in_files']: + return np.array(val, dtype=object) + return super(OneSampleTTestDesign, self)._format_arg(opt, spec, val) + + +class TwoSampleTTestDesignInputSpec(FactorialDesignInputSpec): + # very unlikely that you will have a single image in one group, so setting + # parameters to require at least two files in each group [SG] + group1_files = traits.List( + File(exists=True), + field='des.t2.scans1', + 
mandatory=True, + minlen=2, + desc='Group 1 input files') + group2_files = traits.List( + File(exists=True), + field='des.t2.scans2', + mandatory=True, + minlen=2, + desc='Group 2 input files') + dependent = traits.Bool( + field='des.t2.dept', + desc=('Are the measurements dependent between ' + 'levels')) + unequal_variance = traits.Bool( + field='des.t2.variance', + desc=('Are the variances equal or unequal ' + 'between groups')) + + +class TwoSampleTTestDesign(FactorialDesign): + """Create SPM design for two sample t-test + + Examples + -------- + + >>> ttest = TwoSampleTTestDesign() + >>> ttest.inputs.group1_files = ['cont1.nii', 'cont2.nii'] + >>> ttest.inputs.group2_files = ['cont1a.nii', 'cont2a.nii'] + >>> ttest.run() # doctest: +SKIP + """ + + input_spec = TwoSampleTTestDesignInputSpec + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['group1_files', 'group2_files']: + return np.array(val, dtype=object) + return super(TwoSampleTTestDesign, self)._format_arg(opt, spec, val) + + +class PairedTTestDesignInputSpec(FactorialDesignInputSpec): + paired_files = traits.List( + traits.List(File(exists=True), minlen=2, maxlen=2), + field='des.pt.pair', + mandatory=True, + minlen=2, + desc='List of paired files') + grand_mean_scaling = traits.Bool( + field='des.pt.gmsca', desc='Perform grand mean scaling') + ancova = traits.Bool( + field='des.pt.ancova', desc='Specify ancova-by-factor regressors') + + +class PairedTTestDesign(FactorialDesign): + """Create SPM design for paired t-test + + Examples + -------- + + >>> pttest = PairedTTestDesign() + >>> pttest.inputs.paired_files = [['cont1.nii','cont1a.nii'],['cont2.nii','cont2a.nii']] + >>> pttest.run() # doctest: +SKIP + """ + + input_spec = PairedTTestDesignInputSpec + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['paired_files']: + return [dict(scans=np.array(files, dtype=object)) for files in val] + return super(PairedTTestDesign, self)._format_arg(opt, spec, val) + + +class MultipleRegressionDesignInputSpec(FactorialDesignInputSpec): + in_files = traits.List( + File(exists=True), + field='des.mreg.scans', + mandatory=True, + minlen=2, + desc='List of files') + include_intercept = traits.Bool( + True, + field='des.mreg.incint', + usedefault=True, + desc='Include intercept in design') + user_covariates = InputMultiPath( + traits.Dict(key_trait=traits.Enum('vector', 'name', 'centering')), + field='des.mreg.mcov', + desc=('covariate dictionary {vector, ' + 'name, centering}')) + + +class MultipleRegressionDesign(FactorialDesign): + """Create SPM design for multiple regression + + Examples + -------- + + >>> mreg = MultipleRegressionDesign() + >>> mreg.inputs.in_files = ['cont1.nii','cont2.nii'] + >>> mreg.run() # doctest: +SKIP + """ + + input_spec = MultipleRegressionDesignInputSpec + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['in_files']: + return np.array(val, dtype=object) + if opt in ['user_covariates']: + outlist = [] + mapping = {'name': 'cname', 'vector': 'c', 'centering': 'iCC'} + for dictitem in val: + outdict = {} + for key, keyval in list(dictitem.items()): + outdict[mapping[key]] = keyval + outlist.append(outdict) + return outlist + return (super(MultipleRegressionDesign, self)._format_arg( + opt, spec, val)) diff --git a/nipype/interfaces/spm/preprocess.py b/nipype/interfaces/spm/preprocess.py new file mode 100644 index 0000000000..cdf7a6e0e7 
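For orientation, a minimal sketch of how the interfaces defined in the new preprocess.py below are typically chained in a nipype pipeline; the wiring itself is illustrative, and only the interface names and the 'realigned_files'/'in_files' fields are taken from the file that follows:

    import nipype.pipeline.engine as pe
    import nipype.interfaces.spm as spm

    # Realign a functional run, then smooth the resliced output.
    realign = pe.Node(spm.Realign(register_to_mean=True), name='realign')
    smooth = pe.Node(spm.Smooth(fwhm=[8, 8, 8]), name='smooth')

    wf = pe.Workflow(name='spm_preproc')
    wf.connect(realign, 'realigned_files', smooth, 'in_files')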
--- /dev/null +++ b/nipype/interfaces/spm/preprocess.py @@ -0,0 +1,2223 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""SPM wrappers for preprocessing data +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +import os +from copy import deepcopy + +# Third-party imports +import numpy as np + +# Local imports +from ...utils.filemanip import (fname_presuffix, ensure_list, + simplify_list, split_filename) +from ..base import (OutputMultiPath, TraitedSpec, isdefined, + traits, InputMultiPath, File, Str) +from .base import (SPMCommand, scans_for_fname, func_is_3d, + scans_for_fnames, SPMCommandInputSpec, ImageFileSPM) + +__docformat__ = 'restructuredtext' + + +class FieldMapInputSpec(SPMCommandInputSpec): + jobtype = traits.Enum('calculatevdm', 'applyvdm', usedefault=True, + desc='one of: calculatevdm, applyvdm') + phase_file = File(mandatory=True, exists=True, copyfile=False, + field='subj.data.presubphasemag.phase', + desc='presubtracted phase file') + magnitude_file = File(mandatory=True, exists=True, copyfile=False, + field='subj.data.presubphasemag.magnitude', + desc='presubtracted magnitude file') + echo_times = traits.Tuple(traits.Float, traits.Float, mandatory=True, + field='subj.defaults.defaultsval.et', + desc='short and long echo times') + maskbrain = traits.Bool(True, usedefault=True, + field='subj.defaults.defaultsval.maskbrain', + desc='masking or no masking of the brain') + blip_direction = traits.Enum(1, -1, mandatory=True, + field='subj.defaults.defaultsval.blipdir', + desc='polarity of the phase-encode blips') + total_readout_time = traits.Float(mandatory=True, + field='subj.defaults.defaultsval.tert', + desc='total EPI readout time') + epifm = traits.Bool(False, usedefault=True, + field='subj.defaults.defaultsval.epifm', + desc='epi-based field map') + jacobian_modulation = traits.Bool(False, usedefault=True, + field='subj.defaults.defaultsval.ajm', + desc='jacobian modulation') + # Unwarping defaults parameters + method = traits.Enum('Mark3D', 'Mark2D', 'Huttonish', usedefault=True, + desc='One of: Mark3D, Mark2D, Huttonish', + field='subj.defaults.defaultsval.uflags.method') + unwarp_fwhm = traits.Range(low=0, value=10, usedefault=True, + field='subj.defaults.defaultsval.uflags.fwhm', + desc='gaussian smoothing kernel width') + pad = traits.Range(low=0, value=0, usedefault=True, + field='subj.defaults.defaultsval.uflags.pad', + desc='padding kernel width') + ws = traits.Bool(True, usedefault=True, + field='subj.defaults.defaultsval.uflags.ws', + desc='weighted smoothing') + # Brain mask defaults parameters + template = File(copyfile=False, exists=True, + field='subj.defaults.defaultsval.mflags.template', + desc='template image for brain masking') + mask_fwhm = traits.Range(low=0, value=5, usedefault=True, + field='subj.defaults.defaultsval.mflags.fwhm', + desc='gaussian smoothing kernel width') + nerode = traits.Range(low=0, value=2, usedefault=True, + field='subj.defaults.defaultsval.mflags.nerode', + desc='number of erosions') + ndilate = traits.Range(low=0, value=4, usedefault=True, + field='subj.defaults.defaultsval.mflags.ndilate', + desc='number of dilations') + thresh = traits.Float(0.5, usedefault=True, + field='subj.defaults.defaultsval.mflags.thresh', + desc='threshold used to create brain mask from segmented data') + reg = traits.Float(0.02, usedefault=True, + field='subj.defaults.defaultsval.mflags.reg', + desc='regularization value used in the segmentation') + # EPI unwarping for quality check + epi_file = File(copyfile=False, exists=True, mandatory=True, + field='subj.session.epi', + desc='EPI to unwarp') + matchvdm = traits.Bool(True, usedefault=True, + field='subj.matchvdm', + desc='match VDM to EPI') + sessname = Str('_run-', usedefault=True, + field='subj.sessname', + desc='VDM filename extension') + writeunwarped = traits.Bool(False, usedefault=True, + field='subj.writeunwarped', + desc='write unwarped EPI') + anat_file = File(copyfile=False, exists=True, + field='subj.anat', + desc='anatomical image for comparison') + matchanat = traits.Bool(True, usedefault=True, + field='subj.matchanat', + desc='match anatomical image to EPI') + + +class FieldMapOutputSpec(TraitedSpec): + vdm = File(exists=True, desc='voxel displacement map') + + +class FieldMap(SPMCommand): + """Use the fieldmap toolbox from spm to calculate the voxel displacement map (VDM). + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=173 + + To do + ----- + Deal with real/imag magnitude images and with the two phase files case. + + Examples + -------- + >>> from nipype.interfaces.spm import FieldMap + >>> fm = FieldMap() + >>> fm.inputs.phase_file = 'phase.nii' + >>> fm.inputs.magnitude_file = 'magnitude.nii' + >>> fm.inputs.echo_times = (5.19, 7.65) + >>> fm.inputs.blip_direction = 1 + >>> fm.inputs.total_readout_time = 15.6 + >>> fm.inputs.epi_file = 'epi.nii' + >>> fm.run() # doctest: +SKIP + + """ + + input_spec = FieldMapInputSpec + output_spec = FieldMapOutputSpec + _jobtype = 'tools' + _jobname = 'fieldmap' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['phase_file', 'magnitude_file', 'anat_file', 'epi_file']: + return scans_for_fname(ensure_list(val)) + + return super(FieldMap, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm fieldmap options if set to None ignore + """ + einputs = super(FieldMap, self)._parse_inputs() + return [{self.inputs.jobtype: einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + jobtype = self.inputs.jobtype + if jobtype == "calculatevdm": + outputs['vdm'] = fname_presuffix(self.inputs.phase_file, prefix='vdm5_sc') + + return outputs + + +class SliceTimingInputSpec(SPMCommandInputSpec): + in_files = InputMultiPath( + traits.Either( + traits.List(ImageFileSPM(exists=True)), ImageFileSPM(exists=True)), + field='scans', + desc='list of filenames to apply slice timing', + mandatory=True, + copyfile=False) + num_slices = traits.Int( + field='nslices', desc='number of slices in a volume', mandatory=True) + time_repetition = traits.Float( + field='tr', + desc=('time between volume acquisitions ' + '(start to start time)'), + mandatory=True) + time_acquisition = traits.Float( + field='ta', + desc=('time of volume acquisition. 
usually' + 'calculated as TR-(TR/num_slices)'), + mandatory=True) + slice_order = traits.List( + traits.Float(), + field='so', + desc=('1-based order or onset (in ms) in which ' + 'slices are acquired'), + mandatory=True) + ref_slice = traits.Int( + field='refslice', + desc='1-based Number of the reference slice or ' + 'reference time point if slice_order is in ' + 'onsets (ms)', + mandatory=True) + out_prefix = traits.String( + 'a', field='prefix', usedefault=True, desc='slicetimed output prefix') + + +class SliceTimingOutputSpec(TraitedSpec): + timecorrected_files = OutputMultiPath( + traits.Either(traits.List(File(exists=True)), File(exists=True)), + desc='slice time corrected files') + + +class SliceTiming(SPMCommand): + """Use spm to perform slice timing correction. + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=19 + + Examples + -------- + + >>> from nipype.interfaces.spm import SliceTiming + >>> st = SliceTiming() + >>> st.inputs.in_files = 'functional.nii' + >>> st.inputs.num_slices = 32 + >>> st.inputs.time_repetition = 6.0 + >>> st.inputs.time_acquisition = 6. - 6./32. + >>> st.inputs.slice_order = list(range(32,0,-1)) + >>> st.inputs.ref_slice = 1 + >>> st.run() # doctest: +SKIP + + """ + + input_spec = SliceTimingInputSpec + output_spec = SliceTimingOutputSpec + + _jobtype = 'temporal' + _jobname = 'st' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt == 'in_files': + return scans_for_fnames( + ensure_list(val), keep4d=False, separate_sessions=True) + return super(SliceTiming, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['timecorrected_files'] = [] + + filelist = ensure_list(self.inputs.in_files) + for f in filelist: + if isinstance(f, list): + run = [ + fname_presuffix(in_f, prefix=self.inputs.out_prefix) + for in_f in f + ] + else: + run = fname_presuffix(f, prefix=self.inputs.out_prefix) + outputs['timecorrected_files'].append(run) + return outputs + + +class RealignInputSpec(SPMCommandInputSpec): + in_files = InputMultiPath( + traits.Either(ImageFileSPM(exists=True), + traits.List(ImageFileSPM(exists=True))), + field='data', + mandatory=True, + copyfile=True, + desc='list of filenames to realign') + jobtype = traits.Enum( + 'estwrite', + 'estimate', + 'write', + desc='one of: estimate, write, estwrite', + usedefault=True) + quality = traits.Range( + low=0.0, + high=1.0, + field='eoptions.quality', + desc='0.1 = fast, 1.0 = precise') + fwhm = traits.Range( + low=0.0, field='eoptions.fwhm', desc='gaussian smoothing kernel width') + separation = traits.Range( + low=0.0, field='eoptions.sep', desc='sampling separation in mm') + register_to_mean = traits.Bool( + field='eoptions.rtm', + desc=('Indicate whether realignment is ' + 'done to the mean image')) + weight_img = File( + exists=True, + field='eoptions.weight', + desc='filename of weighting image') + interp = traits.Range( + low=0, + high=7, + field='eoptions.interp', + desc='degree of b-spline used for interpolation') + wrap = traits.List( + traits.Int(), + minlen=3, + maxlen=3, + field='eoptions.wrap', + desc='Check if interpolation should wrap in [x,y,z]') + write_which = traits.ListInt( + [2, 1], + field='roptions.which', + minlen=2, + maxlen=2, + usedefault=True, + desc='determines which images to reslice') + write_interp = traits.Range( + low=0, + high=7, + field='roptions.interp', + desc=('degree of b-spline used for ' + 'interpolation')) + write_wrap = traits.List( + traits.Int(), + 
minlen=3, + maxlen=3, + field='roptions.wrap', + desc=('Check if interpolation should wrap in ' + '[x,y,z]')) + write_mask = traits.Bool( + field='roptions.mask', desc='True/False mask output image') + out_prefix = traits.String( + 'r', + field='roptions.prefix', + usedefault=True, + desc='realigned output prefix') + + +class RealignOutputSpec(TraitedSpec): + mean_image = File(exists=True, desc='Mean image file from the realignment') + modified_in_files = OutputMultiPath( + traits.Either(traits.List(File(exists=True)), File(exists=True)), + desc=('Copies of all files passed to ' + 'in_files. Headers will have ' + 'been modified to align all ' + 'images with the first, or ' + 'optionally to first do that, ' + 'extract a mean image, and ' + 're-align to that mean image.')) + realigned_files = OutputMultiPath( + traits.Either(traits.List(File(exists=True)), File(exists=True)), + desc=('If jobtype is write or estwrite, ' + 'these will be the resliced files.' + ' Otherwise, they will be copies ' + 'of in_files that have had their ' + 'headers rewritten.')) + realignment_parameters = OutputMultiPath( + File(exists=True), + desc=('Estimated translation and ' + 'rotation parameters')) + + +class Realign(SPMCommand): + """Use spm_realign for estimating within modality rigid body alignment + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=25 + + Examples + -------- + + >>> import nipype.interfaces.spm as spm + >>> realign = spm.Realign() + >>> realign.inputs.in_files = 'functional.nii' + >>> realign.inputs.register_to_mean = True + >>> realign.run() # doctest: +SKIP + + """ + + input_spec = RealignInputSpec + output_spec = RealignOutputSpec + + _jobtype = 'spatial' + _jobname = 'realign' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt == 'in_files': + if self.inputs.jobtype == "write": + separate_sessions = False + else: + separate_sessions = True + return scans_for_fnames( + val, keep4d=False, separate_sessions=separate_sessions) + return super(Realign, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm realign options if set to None ignore + """ + einputs = super(Realign, self)._parse_inputs() + return [{'%s' % (self.inputs.jobtype): einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + resliced_all = self.inputs.write_which[0] > 0 + resliced_mean = self.inputs.write_which[1] > 0 + + if self.inputs.jobtype != "write": + if isdefined(self.inputs.in_files): + outputs['realignment_parameters'] = [] + for imgf in self.inputs.in_files: + if isinstance(imgf, list): + tmp_imgf = imgf[0] + else: + tmp_imgf = imgf + outputs['realignment_parameters'].append( + fname_presuffix( + tmp_imgf, prefix='rp_', suffix='.txt', use_ext=False)) + if not isinstance(imgf, list) and func_is_3d(imgf): + break + if self.inputs.jobtype == "estimate": + outputs['realigned_files'] = self.inputs.in_files + if (self.inputs.jobtype == "estimate" + or self.inputs.jobtype == "estwrite"): + outputs['modified_in_files'] = self.inputs.in_files + if self.inputs.jobtype == "write" or self.inputs.jobtype == "estwrite": + if isinstance(self.inputs.in_files[0], list): + first_image = self.inputs.in_files[0][0] + else: + first_image = self.inputs.in_files[0] + + if resliced_mean: + outputs['mean_image'] = fname_presuffix( + first_image, prefix='mean') + + if resliced_all: + outputs['realigned_files'] = [] + for idx, imgf in enumerate( + ensure_list(self.inputs.in_files)): + realigned_run = [] + if isinstance(imgf, list): + 
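# A 4D run supplied as a list of 3D frames is renamed frame by frame below; + # each frame gets out_prefix ('r' by default), e.g. 'vol0001.nii' -> + # 'rvol0001.nii' (illustrative filename). +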
for i, inner_imgf in enumerate(ensure_list(imgf)): + newfile = fname_presuffix( + inner_imgf, prefix=self.inputs.out_prefix) + realigned_run.append(newfile) + else: + realigned_run = fname_presuffix( + imgf, prefix=self.inputs.out_prefix) + outputs['realigned_files'].append(realigned_run) + return outputs + + +class CoregisterInputSpec(SPMCommandInputSpec): + target = ImageFileSPM( + exists=True, + mandatory=True, + field='ref', + desc='reference file to register to', + copyfile=False) + source = InputMultiPath( + ImageFileSPM(exists=True), + field='source', + desc='file to register to target', + copyfile=True, + mandatory=True) + jobtype = traits.Enum( + 'estwrite', + 'estimate', + 'write', + desc='one of: estimate, write, estwrite', + usedefault=True) + apply_to_files = InputMultiPath( + File(exists=True), + field='other', + desc='files to apply transformation to', + copyfile=True) + cost_function = traits.Enum( + 'mi', + 'nmi', + 'ecc', + 'ncc', + field='eoptions.cost_fun', + desc="""cost function, one of: + 'mi' - Mutual Information, + 'nmi' - Normalised Mutual Information, + 'ecc' - Entropy Correlation Coefficient, + 'ncc' - Normalised Cross Correlation""") + fwhm = traits.List( + traits.Float(), + minlen=2, + maxlen=2, + field='eoptions.fwhm', + desc='gaussian smoothing kernel width (mm)') + separation = traits.List( + traits.Float(), field='eoptions.sep', desc='sampling separation in mm') + tolerance = traits.List( + traits.Float(), + field='eoptions.tol', + desc='acceptable tolerance for each of 12 params') + write_interp = traits.Range( + low=0, + high=7, + field='roptions.interp', + desc=('degree of b-spline used for ' + 'interpolation')) + write_wrap = traits.List( + traits.Int(), + minlen=3, + maxlen=3, + field='roptions.wrap', + desc=('Check if interpolation should wrap in ' + '[x,y,z]')) + write_mask = traits.Bool( + field='roptions.mask', desc='True/False mask output image') + out_prefix = traits.String( + 'r', + field='roptions.prefix', + usedefault=True, + desc='coregistered output prefix') + + +class CoregisterOutputSpec(TraitedSpec): + coregistered_source = OutputMultiPath( + File(exists=True), desc='Coregistered source files') + coregistered_files = OutputMultiPath( + File(exists=True), desc='Coregistered other files') + + +class Coregister(SPMCommand): + """Use spm_coreg for estimating cross-modality rigid body alignment + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=39 + + Examples + -------- + + >>> import nipype.interfaces.spm as spm + >>> coreg = spm.Coregister() + >>> coreg.inputs.target = 'functional.nii' + >>> coreg.inputs.source = 'structural.nii' + >>> coreg.run() # doctest: +SKIP + + """ + + input_spec = CoregisterInputSpec + output_spec = CoregisterOutputSpec + _jobtype = 'spatial' + _jobname = 'coreg' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if (opt == 'target' + or (opt == 'source' and self.inputs.jobtype != "write")): + return scans_for_fnames(ensure_list(val), keep4d=True) + if opt == 'apply_to_files': + return np.array(ensure_list(val), dtype=object) + if opt == 'source' and self.inputs.jobtype == "write": + if isdefined(self.inputs.apply_to_files): + return scans_for_fnames(val + self.inputs.apply_to_files) + else: + return scans_for_fnames(val) + return super(Coregister, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """validate spm coregister options if set to None ignore + """ + if self.inputs.jobtype == "write": + einputs = (super(Coregister, self) + 
._parse_inputs(skip=('jobtype', 'apply_to_files'))) + else: + einputs = super(Coregister, self)._parse_inputs(skip=('jobtype',)) + jobtype = self.inputs.jobtype + return [{'%s' % (jobtype): einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + + if self.inputs.jobtype == "estimate": + if isdefined(self.inputs.apply_to_files): + outputs['coregistered_files'] = self.inputs.apply_to_files + outputs['coregistered_source'] = self.inputs.source + elif (self.inputs.jobtype == "write" + or self.inputs.jobtype == "estwrite"): + if isdefined(self.inputs.apply_to_files): + outputs['coregistered_files'] = [] + for imgf in ensure_list(self.inputs.apply_to_files): + (outputs['coregistered_files'].append( + fname_presuffix(imgf, prefix=self.inputs.out_prefix))) + + outputs['coregistered_source'] = [] + for imgf in ensure_list(self.inputs.source): + (outputs['coregistered_source'].append( + fname_presuffix(imgf, prefix=self.inputs.out_prefix))) + + return outputs + + +class NormalizeInputSpec(SPMCommandInputSpec): + template = File( + exists=True, + field='eoptions.template', + desc='template file to normalize to', + mandatory=True, + xor=['parameter_file'], + copyfile=False) + source = InputMultiPath( + ImageFileSPM(exists=True), + field='subj.source', + xor=['parameter_file'], + desc='file to normalize to template', + mandatory=True, + copyfile=True) + jobtype = traits.Enum( + 'estwrite', + 'est', + 'write', + usedefault=True, + desc='Estimate, Write or do both') + apply_to_files = InputMultiPath( + traits.Either(File(exists=True), traits.List(File(exists=True))), + field='subj.resample', + desc='files to apply transformation to', + copyfile=True) + parameter_file = File( + field='subj.matname', + mandatory=True, + xor=['source', 'template'], + desc='normalization parameter file *_sn.mat', + copyfile=False) + source_weight = File( + field='subj.wtsrc', + desc='name of weighting image for source', + copyfile=False) + template_weight = File( + field='eoptions.weight', + desc='name of weighting image for template', + copyfile=False) + source_image_smoothing = traits.Float( + field='eoptions.smosrc', desc='source smoothing') + template_image_smoothing = traits.Float( + field='eoptions.smoref', desc='template smoothing') + affine_regularization_type = traits.Enum( + 'mni', + 'size', + 'none', + field='eoptions.regtype', + desc='mni, size, none') + DCT_period_cutoff = traits.Float( + field='eoptions.cutoff', desc='Cutoff of DCT bases') + nonlinear_iterations = traits.Int( + field='eoptions.nits', + desc=('Number of iterations of ' + 'nonlinear warping')) + nonlinear_regularization = traits.Float( + field='eoptions.reg', + desc=('the amount of the ' + 'regularization for the ' + 'nonlinear part of the ' + 'normalization')) + write_preserve = traits.Bool( + field='roptions.preserve', + desc='True/False warped images are modulated') + write_bounding_box = traits.List( + traits.List(traits.Float(), minlen=3, maxlen=3), + field='roptions.bb', + minlen=2, + maxlen=2, + desc='3x2-element list of lists') + write_voxel_sizes = traits.List( + traits.Float(), + field='roptions.vox', + minlen=3, + maxlen=3, + desc='3-element list') + write_interp = traits.Range( + low=0, + high=7, + field='roptions.interp', + desc=('degree of b-spline used for ' + 'interpolation')) + write_wrap = traits.List( + traits.Int(), + field='roptions.wrap', + desc=('Check if interpolation should wrap in ' + '[x,y,z] - list of bools')) + out_prefix = traits.String( + 'w', + field='roptions.prefix', + usedefault=True, + 
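# NOTE: when write_preserve is set, _list_outputs below adds an extra 'm', + # so written files are prefixed 'mw' instead of 'w'. +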
desc='normalized output prefix') + + +class NormalizeOutputSpec(TraitedSpec): + normalization_parameters = OutputMultiPath( + File(exists=True), + desc=('MAT files containing ' + 'the normalization ' + 'parameters')) + normalized_source = OutputMultiPath( + File(exists=True), desc='Normalized source files') + normalized_files = OutputMultiPath( + File(exists=True), desc='Normalized other files') + + +class Normalize(SPMCommand): + """use spm_normalise for warping an image to a template + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=203 + + Examples + -------- + >>> import nipype.interfaces.spm as spm + >>> norm = spm.Normalize() + >>> norm.inputs.source = 'functional.nii' + >>> norm.run() # doctest: +SKIP + + """ + + input_spec = NormalizeInputSpec + output_spec = NormalizeOutputSpec + _jobtype = 'spatial' + _jobname = 'normalise' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt == 'template': + return scans_for_fname(ensure_list(val)) + if opt == 'source': + return scans_for_fname(ensure_list(val)) + if opt == 'apply_to_files': + return scans_for_fnames(ensure_list(val)) + if opt == 'parameter_file': + return np.array([simplify_list(val)], dtype=object) + if opt in ['write_wrap']: + if len(val) != 3: + raise ValueError('%s must have 3 elements' % opt) + return super(Normalize, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + """Validate spm normalize options if set to None ignore + """ + einputs = super( + Normalize, self)._parse_inputs(skip=('jobtype', 'apply_to_files')) + if isdefined(self.inputs.apply_to_files): + inputfiles = deepcopy(self.inputs.apply_to_files) + if isdefined(self.inputs.source): + inputfiles.extend(self.inputs.source) + einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles) + jobtype = self.inputs.jobtype + if jobtype in ['estwrite', 'write']: + if not isdefined(self.inputs.apply_to_files): + if isdefined(self.inputs.source): + einputs[0]['subj']['resample'] = scans_for_fname( + self.inputs.source) + return [{'%s' % (jobtype): einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + + jobtype = self.inputs.jobtype + if jobtype.startswith('est'): + outputs['normalization_parameters'] = [] + for imgf in ensure_list(self.inputs.source): + outputs['normalization_parameters'].append( + fname_presuffix(imgf, suffix='_sn.mat', use_ext=False)) + outputs['normalization_parameters'] = simplify_list( + outputs['normalization_parameters']) + + if self.inputs.jobtype == "estimate": + if isdefined(self.inputs.apply_to_files): + outputs['normalized_files'] = self.inputs.apply_to_files + outputs['normalized_source'] = self.inputs.source + elif 'write' in self.inputs.jobtype: + if (isdefined(self.inputs.write_preserve) + and self.inputs.write_preserve): + prefixNorm = ''.join(['m', self.inputs.out_prefix]) + else: + prefixNorm = self.inputs.out_prefix + outputs['normalized_files'] = [] + if isdefined(self.inputs.apply_to_files): + filelist = ensure_list(self.inputs.apply_to_files) + for f in filelist: + if isinstance(f, list): + run = [ + fname_presuffix(in_f, prefix=prefixNorm) + for in_f in f + ] + else: + run = [fname_presuffix(f, prefix=prefixNorm)] + outputs['normalized_files'].extend(run) + if isdefined(self.inputs.source): + outputs['normalized_source'] = [] + for imgf in ensure_list(self.inputs.source): + outputs['normalized_source'].append( + fname_presuffix(imgf, prefix=prefixNorm)) + + return outputs + + +class Normalize12InputSpec(SPMCommandInputSpec): + 
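# Each trait's `field` below addresses the SPM12 batch tree; with the default + # jobtype ('estwrite'), e.g. 'subj.vol' resolves to + # spatial.normalise.estwrite.subj.vol (batch path shown for illustration). +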
image_to_align = ImageFileSPM( + exists=True, + field='subj.vol', + desc=('file to estimate normalization parameters ' + 'with'), + xor=['deformation_file'], + mandatory=True, + copyfile=True) + apply_to_files = InputMultiPath( + traits.Either( + ImageFileSPM(exists=True), traits.List(ImageFileSPM(exists=True))), + field='subj.resample', + desc='files to apply transformation to', + copyfile=True) + deformation_file = ImageFileSPM( + field='subj.def', + mandatory=True, + xor=['image_to_align', 'tpm'], + copyfile=False, + desc=('file y_*.nii containing 3 deformation ' + 'fields for the deformation in x, y and z ' + 'dimension')) + jobtype = traits.Enum( + 'estwrite', + 'est', + 'write', + usedefault=True, + desc='Estimate, Write or do both') + bias_regularization = traits.Enum( + 0, + 0.00001, + 0.0001, + 0.001, + 0.01, + 0.1, + 1, + 10, + field='eoptions.biasreg', + desc='no(0) - extremely heavy (10)') + bias_fwhm = traits.Enum( + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 110, + 120, + 130, + 140, + 150, + 'Inf', + field='eoptions.biasfwhm', + desc='FWHM of Gaussian smoothness of bias') + tpm = File( + exists=True, + field='eoptions.tpm', + desc=('template in form of tissue probability maps to ' + 'normalize to'), + xor=['deformation_file'], + copyfile=False) + affine_regularization_type = traits.Enum( + 'mni', 'size', 'none', field='eoptions.affreg', desc='mni, size, none') + warping_regularization = traits.List( + traits.Float(), + field='eoptions.reg', + minlen=5, + maxlen=5, + desc=('controls balance between ' + 'parameters and data')) + smoothness = traits.Float( + field='eoptions.fwhm', + desc=('value (in mm) to smooth the data before ' + 'normalization')) + sampling_distance = traits.Float( + field='eoptions.samp', + desc=('Sampling distance on data for ' + 'parameter estimation')) + write_bounding_box = traits.List( + traits.List(traits.Float(), minlen=3, maxlen=3), + field='woptions.bb', + minlen=2, + maxlen=2, + desc=('3x2-element list of lists ' + 'representing the bounding box ' + '(in mm) to be written')) + write_voxel_sizes = traits.List( + traits.Float(), + field='woptions.vox', + minlen=3, + maxlen=3, + desc=('3-element list representing the ' + 'voxel sizes (in mm) of the written ' + 'normalised images')) + write_interp = traits.Range( + low=0, + high=7, + field='woptions.interp', + desc=('degree of b-spline used for ' + 'interpolation')) + out_prefix = traits.String( + 'w', + field='woptions.prefix', + usedefault=True, + desc='Normalized output prefix') + + +class Normalize12OutputSpec(TraitedSpec): + deformation_field = OutputMultiPath( + File(exists=True), + desc=('NIfTI file containing 3 ' + 'deformation fields for the ' + 'deformation in x, y and z ' + 'dimension')) + normalized_image = OutputMultiPath( + File(exists=True), + desc=('Normalized file that needed to ' + 'be aligned')) + normalized_files = OutputMultiPath( + File(exists=True), desc='Normalized other files') + + +class Normalize12(SPMCommand): + """uses SPM12's new Normalise routine for warping an image to a template. + Spatial normalisation is now done via the segmentation routine (which was + known as ``New Segment`` in SPM8). Note that the normalisation in SPM12 + is done towards a file containing multiple tissue probability maps, which + was not the case in SPM8. 
+ + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=49 + + Examples + -------- + >>> import nipype.interfaces.spm as spm + >>> norm12 = spm.Normalize12() + >>> norm12.inputs.image_to_align = 'structural.nii' + >>> norm12.inputs.apply_to_files = 'functional.nii' + >>> norm12.run() # doctest: +SKIP + + """ + + input_spec = Normalize12InputSpec + output_spec = Normalize12OutputSpec + _jobtype = 'spatial' + _jobname = 'normalise' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt == 'tpm': + return scans_for_fname(ensure_list(val)) + if opt == 'image_to_align': + return scans_for_fname(ensure_list(val)) + if opt == 'apply_to_files': + return scans_for_fnames(ensure_list(val)) + if opt == 'deformation_file': + return np.array([simplify_list(val)], dtype=object) + if opt in ['nonlinear_regularization']: + if len(val) != 5: + raise ValueError('%s must have 5 elements' % opt) + return super(Normalize12, self)._format_arg(opt, spec, val) + + def _parse_inputs(self, skip=()): + """validate spm normalize options if set to None ignore + """ + einputs = super( + Normalize12, + self)._parse_inputs(skip=('jobtype', 'apply_to_files')) + if isdefined(self.inputs.apply_to_files): + inputfiles = deepcopy(self.inputs.apply_to_files) + if isdefined(self.inputs.image_to_align): + inputfiles.extend([self.inputs.image_to_align]) + einputs[0]['subj']['resample'] = scans_for_fnames(inputfiles) + jobtype = self.inputs.jobtype + if jobtype in ['estwrite', 'write']: + if not isdefined(self.inputs.apply_to_files): + if isdefined(self.inputs.image_to_align): + einputs[0]['subj']['resample'] = scans_for_fname( + self.inputs.image_to_align) + return [{'%s' % (jobtype): einputs[0]}] + + def _list_outputs(self): + outputs = self._outputs().get() + + jobtype = self.inputs.jobtype + if jobtype.startswith('est'): + outputs['deformation_field'] = [] + for imgf in ensure_list(self.inputs.image_to_align): + outputs['deformation_field'].append( + fname_presuffix(imgf, prefix='y_')) + outputs['deformation_field'] = simplify_list( + outputs['deformation_field']) + + if self.inputs.jobtype == "estimate": + if isdefined(self.inputs.apply_to_files): + outputs['normalized_files'] = self.inputs.apply_to_files + outputs['normalized_image'] = fname_presuffix( + self.inputs.image_to_align, prefix='w') + elif 'write' in self.inputs.jobtype: + outputs['normalized_files'] = [] + if isdefined(self.inputs.apply_to_files): + filelist = ensure_list(self.inputs.apply_to_files) + for f in filelist: + if isinstance(f, list): + run = [fname_presuffix(in_f, prefix='w') for in_f in f] + else: + run = [fname_presuffix(f, prefix='w')] + outputs['normalized_files'].extend(run) + if isdefined(self.inputs.image_to_align): + outputs['normalized_image'] = fname_presuffix( + self.inputs.image_to_align, prefix='w') + + return outputs + + +class SegmentInputSpec(SPMCommandInputSpec): + data = InputMultiPath( + ImageFileSPM(exists=True), + field='data', + desc='one scan per subject', + copyfile=False, + mandatory=True) + gm_output_type = traits.List( + traits.Bool(), + minlen=3, + maxlen=3, + field='output.GM', + desc= + """Options to produce grey matter images: c1*.img, wc1*.img and mwc1*.img. 
+ None: [False,False,False], + Native Space: [False,False,True], + Unmodulated Normalised: [False,True,False], + Modulated Normalised: [True,False,False], + Native + Unmodulated Normalised: [False,True,True], + Native + Modulated Normalised: [True,False,True], + Native + Modulated + Unmodulated: [True,True,True], + Modulated + Unmodulated Normalised: [True,True,False]""") + wm_output_type = traits.List( + traits.Bool(), + minlen=3, + maxlen=3, + field='output.WM', + desc=""" + Options to produce white matter images: c2*.img, wc2*.img and mwc2*.img. + None: [False,False,False], + Native Space: [False,False,True], + Unmodulated Normalised: [False,True,False], + Modulated Normalised: [True,False,False], + Native + Unmodulated Normalised: [False,True,True], + Native + Modulated Normalised: [True,False,True], + Native + Modulated + Unmodulated: [True,True,True], + Modulated + Unmodulated Normalised: [True,True,False]""") + csf_output_type = traits.List( + traits.Bool(), + minlen=3, + maxlen=3, + field='output.CSF', + desc=""" + Options to produce CSF images: c3*.img, wc3*.img and mwc3*.img. + None: [False,False,False], + Native Space: [False,False,True], + Unmodulated Normalised: [False,True,False], + Modulated Normalised: [True,False,False], + Native + Unmodulated Normalised: [False,True,True], + Native + Modulated Normalised: [True,False,True], + Native + Modulated + Unmodulated: [True,True,True], + Modulated + Unmodulated Normalised: [True,True,False]""") + save_bias_corrected = traits.Bool( + field='output.biascor', + desc=('True/False produce a bias ' + 'corrected image')) + clean_masks = traits.Enum( + 'no', + 'light', + 'thorough', + field='output.cleanup', + desc=("clean using estimated brain mask " + "('no','light','thorough')")) + tissue_prob_maps = traits.List( + File(exists=True), + field='opts.tpm', + desc=('list of gray, white & csf prob. 
' + '(opt,)')) + gaussians_per_class = traits.List( + traits.Int(), + field='opts.ngaus', + desc=('num Gaussians capture intensity ' + 'distribution')) + affine_regularization = traits.Enum( + 'mni', + 'eastern', + 'subj', + 'none', + '', + field='opts.regtype', + desc=('Possible options: "mni", ' + '"eastern", "subj", "none" ' + '(no regularisation), "" ' + '(no affine registration)')) + warping_regularization = traits.Float( + field='opts.warpreg', + desc=('Controls balance between ' + 'parameters and data')) + warp_frequency_cutoff = traits.Float( + field='opts.warpco', desc='Cutoff of DCT bases') + bias_regularization = traits.Enum( + 0, + 0.00001, + 0.0001, + 0.001, + 0.01, + 0.1, + 1, + 10, + field='opts.biasreg', + desc='no(0) - extremely heavy (10)') + bias_fwhm = traits.Enum( + 30, + 40, + 50, + 60, + 70, + 80, + 90, + 100, + 110, + 120, + 130, + 'Inf', + field='opts.biasfwhm', + desc='FWHM of Gaussian smoothness of bias') + sampling_distance = traits.Float( + field='opts.samp', + desc=('Sampling distance on data for ' + 'parameter estimation')) + mask_image = File( + exists=True, + field='opts.msk', + desc='Binary image to restrict parameter estimation') + + +class SegmentOutputSpec(TraitedSpec): + native_gm_image = File(desc='native space grey probability map') + normalized_gm_image = File(desc='normalized grey probability map') + modulated_gm_image = File( + desc=('modulated, normalized grey ' + 'probability map')) + native_wm_image = File(desc='native space white probability map') + normalized_wm_image = File(desc='normalized white probability map') + modulated_wm_image = File( + desc=('modulated, normalized white ' + 'probability map')) + native_csf_image = File(desc='native space csf probability map') + normalized_csf_image = File(desc='normalized csf probability map') + modulated_csf_image = File( + desc=('modulated, normalized csf ' + 'probability map')) + modulated_input_image = File( + deprecated='0.10', + new_name='bias_corrected_image', + desc='bias-corrected version of input image') + bias_corrected_image = File(desc='bias-corrected version of input image') + transformation_mat = File(exists=True, desc='Normalization transformation') + inverse_transformation_mat = File( + exists=True, desc='Inverse normalization info') + + +class Segment(SPMCommand): + """use spm_segment to separate structural images into different + tissue classes. + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=209 + + Examples + -------- + >>> import nipype.interfaces.spm as spm + >>> seg = spm.Segment() + >>> seg.inputs.data = 'structural.nii' + >>> seg.run() # doctest: +SKIP + + """ + + input_spec = SegmentInputSpec + output_spec = SegmentOutputSpec + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and '12.' in _local_version: + self._jobtype = 'tools' + self._jobname = 'oldseg' + else: + self._jobtype = 'spatial' + self._jobname = 'preproc' + + SPMCommand.__init__(self, **inputs) + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + clean_masks_dict = {'no': 0, 'light': 1, 'thorough': 2} + + if opt in ['data', 'tissue_prob_maps']: + if isinstance(val, list): + return scans_for_fnames(val) + else: + return scans_for_fname(val) + if 'output_type' in opt: + return [int(v) for v in val] + if opt == 'mask_image': + return scans_for_fname(val) + if opt == 'clean_masks': + return clean_masks_dict[val] + return super(Segment, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + f = self.inputs.data[0] + + for tidx, tissue in enumerate(['gm', 'wm', 'csf']): + outtype = '%s_output_type' % tissue + if isdefined(getattr(self.inputs, outtype)): + for idx, (image, prefix) in enumerate([('modulated', 'mw'), + ('normalized', + 'w'), ('native', '')]): + if getattr(self.inputs, outtype)[idx]: + outfield = '%s_%s_image' % (image, tissue) + outputs[outfield] = fname_presuffix( + f, prefix='%sc%d' % (prefix, tidx + 1)) + if (isdefined(self.inputs.save_bias_corrected) + and self.inputs.save_bias_corrected): + outputs['bias_corrected_image'] = fname_presuffix(f, prefix='m') + t_mat = fname_presuffix(f, suffix='_seg_sn.mat', use_ext=False) + outputs['transformation_mat'] = t_mat + invt_mat = fname_presuffix(f, suffix='_seg_inv_sn.mat', use_ext=False) + outputs['inverse_transformation_mat'] = invt_mat + return outputs + + +class NewSegmentInputSpec(SPMCommandInputSpec): + channel_files = InputMultiPath( + ImageFileSPM(exists=True), + mandatory=True, + desc="A list of files to be segmented", + field='channel', + copyfile=False) + channel_info = traits.Tuple( + traits.Float(), + traits.Float(), + traits.Tuple(traits.Bool, traits.Bool), + desc="""A tuple with the following fields: + - bias regularisation (0-10) + - FWHM of Gaussian smoothness of bias + - which maps to save (Corrected, Field) - a tuple of two boolean values""", + field='channel') + tissues = traits.List( + traits.Tuple( + traits.Tuple(ImageFileSPM(exists=True), traits.Int()), + traits.Int(), traits.Tuple(traits.Bool, traits.Bool), + traits.Tuple(traits.Bool, traits.Bool)), + desc="""A list of tuples (one per tissue) with the following fields: + - tissue probability map (4D), 1-based index to frame + - number of gaussians + - which maps to save [Native, DARTEL] - a tuple of two boolean values + - which maps to save [Unmodulated, Modulated] - a tuple of two boolean values""", + field='tissue') + affine_regularization = traits.Enum( + 'mni', + 'eastern', + 'subj', + 'none', + field='warp.affreg', + desc='mni, eastern, subj, none') + warping_regularization = traits.Either( + traits.List(traits.Float(), minlen=5, maxlen=5), + traits.Float(), + field='warp.reg', + desc=('Warping regularization ' + 'parameter(s). 
Accepts float ' + 'or list of floats (the ' + 'latter is required by ' + 'SPM12)')) + sampling_distance = traits.Float( + field='warp.samp', + desc=('Sampling distance on data for ' + 'parameter estimation')) + write_deformation_fields = traits.List( + traits.Bool(), + minlen=2, + maxlen=2, + field='warp.write', + desc=("Which deformation fields to " + "write:[Inverse, Forward]")) + + +class NewSegmentOutputSpec(TraitedSpec): + native_class_images = traits.List( + traits.List(File(exists=True)), desc='native space probability maps') + dartel_input_images = traits.List( + traits.List(File(exists=True)), desc='dartel imported class images') + normalized_class_images = traits.List( + traits.List(File(exists=True)), desc='normalized class images') + modulated_class_images = traits.List( + traits.List(File(exists=True)), + desc=('modulated+normalized class ' + 'images')) + transformation_mat = OutputMultiPath( + File(exists=True), desc='Normalization transformation') + bias_corrected_images = OutputMultiPath( + File(exists=True), desc='bias corrected images') + bias_field_images = OutputMultiPath( + File(exists=True), desc='bias field images') + forward_deformation_field = OutputMultiPath(File(exists=True)) + inverse_deformation_field = OutputMultiPath(File(exists=True)) + + +class NewSegment(SPMCommand): + """Use spm_preproc8 (New Segment) to separate structural images into + different tissue classes. Supports multiple modalities. + + NOTE: This interface currently supports single channel input only + + http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=43 + + Examples + -------- + >>> import nipype.interfaces.spm as spm + >>> seg = spm.NewSegment() + >>> seg.inputs.channel_files = 'structural.nii' + >>> seg.inputs.channel_info = (0.0001, 60, (True, True)) + >>> seg.run() # doctest: +SKIP + + For VBM pre-processing [http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf], + TPM.nii should be replaced by /path/to/spm8/toolbox/Seg/TPM.nii + + >>> seg = NewSegment() + >>> seg.inputs.channel_files = 'structural.nii' + >>> tissue1 = (('TPM.nii', 1), 2, (True,True), (False, False)) + >>> tissue2 = (('TPM.nii', 2), 2, (True,True), (False, False)) + >>> tissue3 = (('TPM.nii', 3), 2, (True,False), (False, False)) + >>> tissue4 = (('TPM.nii', 4), 2, (False,False), (False, False)) + >>> tissue5 = (('TPM.nii', 5), 2, (False,False), (False, False)) + >>> seg.inputs.tissues = [tissue1, tissue2, tissue3, tissue4, tissue5] + >>> seg.run() # doctest: +SKIP + + """ + + input_spec = NewSegmentInputSpec + output_spec = NewSegmentOutputSpec + + def __init__(self, **inputs): + _local_version = SPMCommand().version + if _local_version and '12.' 
in _local_version: + self._jobtype = 'spatial' + self._jobname = 'preproc' + else: + self._jobtype = 'tools' + self._jobname = 'preproc8' + + SPMCommand.__init__(self, **inputs) + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + + if opt in ['channel_files', 'channel_info']: + # structure have to be recreated because of some weird traits error + new_channel = {} + new_channel['vols'] = scans_for_fnames(self.inputs.channel_files) + if isdefined(self.inputs.channel_info): + info = self.inputs.channel_info + new_channel['biasreg'] = info[0] + new_channel['biasfwhm'] = info[1] + new_channel['write'] = [int(info[2][0]), int(info[2][1])] + return [new_channel] + elif opt == 'tissues': + new_tissues = [] + for tissue in val: + new_tissue = {} + new_tissue['tpm'] = np.array( + [','.join([tissue[0][0], str(tissue[0][1])])], + dtype=object) + new_tissue['ngaus'] = tissue[1] + new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])] + new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])] + new_tissues.append(new_tissue) + return new_tissues + elif opt == 'write_deformation_fields': + return super(NewSegment, self)._format_arg( + opt, spec, [int(val[0]), int(val[1])]) + else: + return super(NewSegment, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['native_class_images'] = [] + outputs['dartel_input_images'] = [] + outputs['normalized_class_images'] = [] + outputs['modulated_class_images'] = [] + outputs['transformation_mat'] = [] + outputs['bias_corrected_images'] = [] + outputs['bias_field_images'] = [] + outputs['inverse_deformation_field'] = [] + outputs['forward_deformation_field'] = [] + + n_classes = 5 + if isdefined(self.inputs.tissues): + n_classes = len(self.inputs.tissues) + for i in range(n_classes): + outputs['native_class_images'].append([]) + outputs['dartel_input_images'].append([]) + outputs['normalized_class_images'].append([]) + outputs['modulated_class_images'].append([]) + + for filename in self.inputs.channel_files: + pth, base, ext = split_filename(filename) + if isdefined(self.inputs.tissues): + for i, tissue in enumerate(self.inputs.tissues): + if tissue[2][0]: + outputs['native_class_images'][i].append( + os.path.join(pth, "c%d%s.nii" % (i + 1, base))) + if tissue[2][1]: + outputs['dartel_input_images'][i].append( + os.path.join(pth, "rc%d%s.nii" % (i + 1, base))) + if tissue[3][0]: + outputs['normalized_class_images'][i].append( + os.path.join(pth, "wc%d%s.nii" % (i + 1, base))) + if tissue[3][1]: + outputs['modulated_class_images'][i].append( + os.path.join(pth, "mwc%d%s.nii" % (i + 1, base))) + else: + for i in range(n_classes): + outputs['native_class_images'][i].append( + os.path.join(pth, "c%d%s.nii" % (i + 1, base))) + outputs['transformation_mat'].append( + os.path.join(pth, "%s_seg8.mat" % base)) + + if isdefined(self.inputs.write_deformation_fields): + if self.inputs.write_deformation_fields[0]: + outputs['inverse_deformation_field'].append( + os.path.join(pth, "iy_%s.nii" % base)) + if self.inputs.write_deformation_fields[1]: + outputs['forward_deformation_field'].append( + os.path.join(pth, "y_%s.nii" % base)) + + if isdefined(self.inputs.channel_info): + if self.inputs.channel_info[2][0]: + outputs['bias_corrected_images'].append( + os.path.join(pth, "m%s.nii" % (base))) + if self.inputs.channel_info[2][1]: + outputs['bias_field_images'].append( + os.path.join(pth, "BiasField_%s.nii" % (base))) + return outputs + + +class 
SmoothInputSpec(SPMCommandInputSpec):
+    in_files = InputMultiPath(
+        ImageFileSPM(exists=True),
+        field='data',
+        desc='list of files to smooth',
+        mandatory=True,
+        copyfile=False)
+    fwhm = traits.Either(
+        traits.List(traits.Float(), minlen=3, maxlen=3),
+        traits.Float(),
+        field='fwhm',
+        desc=('fwhm (in mm) of the smoothing kernel: a single value or a '
+              '3-list, one value per dimension'))
+    data_type = traits.Int(
+        field='dtype', desc='Data type of the output images')
+    implicit_masking = traits.Bool(
+        field='im', desc=('A mask implied by a particular '
+                          'voxel value'))
+    out_prefix = traits.String(
+        's', field='prefix', usedefault=True, desc='smoothed output prefix')
+
+
+class SmoothOutputSpec(TraitedSpec):
+    smoothed_files = OutputMultiPath(File(exists=True), desc='smoothed files')
+
+
+class Smooth(SPMCommand):
+    """Use spm_smooth for 3D Gaussian smoothing of image volumes.
+
+    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=55
+
+    Examples
+    --------
+    >>> import nipype.interfaces.spm as spm
+    >>> smooth = spm.Smooth()
+    >>> smooth.inputs.in_files = 'functional.nii'
+    >>> smooth.inputs.fwhm = [4, 4, 4]
+    >>> smooth.run() # doctest: +SKIP
+    """
+
+    input_spec = SmoothInputSpec
+    output_spec = SmoothOutputSpec
+    _jobtype = 'spatial'
+    _jobname = 'smooth'
+
+    def _format_arg(self, opt, spec, val):
+        if opt in ['in_files']:
+            return scans_for_fnames(ensure_list(val))
+        if opt == 'fwhm':
+            if not isinstance(val, list):
+                return [val, val, val]
+            if len(val) == 1:
+                return [val[0], val[0], val[0]]
+            return val
+
+        return super(Smooth, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['smoothed_files'] = []
+
+        for imgf in ensure_list(self.inputs.in_files):
+            outputs['smoothed_files'].append(
+                fname_presuffix(imgf, prefix=self.inputs.out_prefix))
+        return outputs
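+
+
+# Editorial sketch, not upstream nipype code: the fwhm shorthand above is
+# expanded by Smooth._format_arg, so a bare float or a 1-list is broadcast
+# to the per-dimension triple that SPM expects. For example:
+#
+#     smooth = Smooth()
+#     smooth.inputs.fwhm = 6.0        # sent to SPM as [6.0, 6.0, 6.0]
+#     smooth.inputs.fwhm = [8.0]      # sent to SPM as [8.0, 8.0, 8.0]
+#     smooth.inputs.fwhm = [4, 6, 8]  # passed through unchanged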
+
+
+class DARTELInputSpec(SPMCommandInputSpec):
+    image_files = traits.List(
+        traits.List(ImageFileSPM(exists=True)),
+        desc=("A list of lists of DARTEL-imported tissue class images "
+              "(one inner list per class, e.g. rc1*, rc2*)"),
+        field='warp.images',
+        copyfile=False,
+        mandatory=True)
+    template_prefix = traits.Str(
+        'Template',
+        usedefault=True,
+        field='warp.settings.template',
+        desc='Prefix for template')
+    regularization_form = traits.Enum(
+        'Linear',
+        'Membrane',
+        'Bending',
+        field='warp.settings.rform',
+        desc=('Form of regularization energy '
+              'term'))
+    iteration_parameters = traits.List(
+        traits.Tuple(
+            traits.Range(1, 10),
+            traits.Tuple(traits.Float, traits.Float, traits.Float),
+            traits.Enum(1, 2, 4, 8, 16, 32, 64, 128, 256, 512),
+            traits.Enum(0, 0.5, 1, 2, 4, 8, 16, 32)),
+        minlen=3,
+        maxlen=12,
+        field='warp.settings.param',
+        desc="""List of tuples, one per iteration:
+            - Inner iterations
+            - Regularization parameters
+            - Time points for deformation model
+            - Smoothing parameter
+            """)
+    optimization_parameters = traits.Tuple(
+        traits.Float,
+        traits.Range(1, 8),
+        traits.Range(1, 8),
+        field='warp.settings.optim',
+        desc="""Optimization settings, a tuple:
+            - LM regularization
+            - cycles of multigrid solver
+            - relaxation iterations
+            """)
+
+
+class DARTELOutputSpec(TraitedSpec):
+    final_template_file = File(exists=True, desc='final DARTEL template')
+    template_files = traits.List(
+        File(exists=True),
+        desc=('Templates from different stages of '
+              'iteration'))
+    dartel_flow_fields = traits.List(
+        File(exists=True), desc='DARTEL flow fields')
+
+
+class DARTEL(SPMCommand):
+    """Use spm DARTEL to create a template and flow fields
+
+    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=185
+
+    Examples
+    --------
+    >>> import nipype.interfaces.spm as spm
+    >>> dartel = spm.DARTEL()
+    >>> dartel.inputs.image_files = [['rc1s1.nii','rc1s2.nii'],['rc2s1.nii', 'rc2s2.nii']]
+    >>> dartel.run() # doctest: +SKIP
+
+    """
+
+    input_spec = DARTELInputSpec
+    output_spec = DARTELOutputSpec
+    _jobtype = 'tools'
+    _jobname = 'dartel'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+
+        if opt in ['image_files']:
+            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
+        elif opt == 'regularization_form':
+            mapper = {'Linear': 0, 'Membrane': 1, 'Bending': 2}
+            return mapper[val]
+        elif opt == 'iteration_parameters':
+            params = []
+            for param in val:
+                new_param = {}
+                new_param['its'] = param[0]
+                new_param['rparam'] = list(param[1])
+                new_param['K'] = param[2]
+                new_param['slam'] = param[3]
+                params.append(new_param)
+            return params
+        elif opt == 'optimization_parameters':
+            new_param = {}
+            new_param['lmreg'] = val[0]
+            new_param['cyc'] = val[1]
+            new_param['its'] = val[2]
+            return [new_param]
+        else:
+            return super(DARTEL, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['template_files'] = []
+        for i in range(6):
+            outputs['template_files'].append(
+                os.path.realpath('%s_%d.nii' % (self.inputs.template_prefix,
+                                                i + 1)))
+        outputs['final_template_file'] = os.path.realpath(
+            '%s_6.nii' % self.inputs.template_prefix)
+        outputs['dartel_flow_fields'] = []
+        for filename in self.inputs.image_files[0]:
+            pth, base, ext = split_filename(filename)
+            outputs['dartel_flow_fields'].append(
+                os.path.realpath('u_%s_%s%s' %
+                                 (base, self.inputs.template_prefix, ext)))
+        return outputs
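+
+
+# Editorial sketch, not upstream nipype code: each iteration_parameters
+# tuple is (inner iterations, regularization parameters, time points K,
+# smoothing parameter), matching the trait definition above. An
+# illustrative three-level schedule (the values are placeholders only):
+#
+#     dartel = DARTEL()
+#     dartel.inputs.iteration_parameters = [
+#         (3, (4.0, 2.0, 1e-06), 1, 16),
+#         (3, (2.0, 1.0, 1e-06), 2, 8),
+#         (3, (1.0, 0.5, 1e-06), 4, 2)]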
+
+
+class DARTELNorm2MNIInputSpec(SPMCommandInputSpec):
+    template_file = ImageFileSPM(
+        exists=True,
+        copyfile=False,
+        mandatory=True,
+        desc="DARTEL template",
+        field='mni_norm.template')
+    flowfield_files = InputMultiPath(
+        ImageFileSPM(exists=True),
+        mandatory=True,
+        desc="DARTEL flow fields u_rc1*",
+        field='mni_norm.data.subjs.flowfields')
+    apply_to_files = InputMultiPath(
+        ImageFileSPM(exists=True),
+        desc="Files to apply the transform to",
+        field='mni_norm.data.subjs.images',
+        mandatory=True,
+        copyfile=False)
+    voxel_size = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc="Voxel sizes for output file",
+        field='mni_norm.vox')
+    bounding_box = traits.Tuple(
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        traits.Float,
+        desc="Bounding box for output file",
+        field='mni_norm.bb')
+    modulate = traits.Bool(
+        field='mni_norm.preserve',
+        desc=("Modulate output images - no modulation "
+              "preserves concentrations"))
+    fwhm = traits.Either(
+        traits.List(traits.Float(), minlen=3, maxlen=3),
+        traits.Float(),
+        field='mni_norm.fwhm',
+        desc='3-list of fwhm for each dimension')
+
+
+class DARTELNorm2MNIOutputSpec(TraitedSpec):
+    normalized_files = OutputMultiPath(
+        File(exists=True), desc='Normalized files in MNI space')
+    normalization_parameter_file = File(
+        exists=True, desc=('Transform parameters to MNI '
+                           'space'))
+
+
+class DARTELNorm2MNI(SPMCommand):
+    """Use spm DARTEL to normalize data to MNI space
+
+    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=188
+
+    Examples
+    --------
+    >>> import nipype.interfaces.spm as spm
+    >>> nm = spm.DARTELNorm2MNI()
+    >>> nm.inputs.template_file = 'Template_6.nii'
+    >>> nm.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s3_Template.nii']
+    >>> nm.inputs.apply_to_files = ['c1s1.nii', 'c1s3.nii']
+    >>> nm.inputs.modulate = True
+    >>> nm.run() # doctest: +SKIP
+
+    """
+
+    input_spec = DARTELNorm2MNIInputSpec
+    output_spec = DARTELNorm2MNIOutputSpec
+    _jobtype = 'tools'
+    _jobname = 'dartel'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+        if opt in ['template_file']:
+            return np.array([val], dtype=object)
+        elif opt in ['flowfield_files']:
+            return scans_for_fnames(val, keep4d=True)
+        elif opt in ['apply_to_files']:
+            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
+        elif opt == 'voxel_size':
+            return list(val)
+        elif opt == 'bounding_box':
+            return list(val)
+        elif opt == 'fwhm':
+            if isinstance(val, list):
+                return val
+            else:
+                return [val, val, val]
+        else:
+            return super(DARTELNorm2MNI, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        pth, base, ext = split_filename(self.inputs.template_file)
+        outputs['normalization_parameter_file'] = os.path.realpath(
+            base + '_2mni.mat')
+        outputs['normalized_files'] = []
+        prefix = "w"
+        if isdefined(self.inputs.modulate) and self.inputs.modulate:
+            prefix = 'm' + prefix
+        if not isdefined(self.inputs.fwhm) or self.inputs.fwhm > 0:
+            prefix = 's' + prefix
+        for filename in self.inputs.apply_to_files:
+            pth, base, ext = split_filename(filename)
+            outputs['normalized_files'].append(
+                os.path.realpath('%s%s%s' % (prefix, base, ext)))
+
+        return outputs
+
+
+class CreateWarpedInputSpec(SPMCommandInputSpec):
+    image_files = InputMultiPath(
+        ImageFileSPM(exists=True),
+        mandatory=True,
+        desc="A list of files to be warped",
+        field='crt_warped.images',
+        copyfile=False)
+    flowfield_files = InputMultiPath(
+        ImageFileSPM(exists=True),
+        copyfile=False,
+        desc="DARTEL flow fields u_rc1*",
+        field='crt_warped.flowfields',
+        mandatory=True)
+    iterations = traits.Range(
+        low=0,
+        high=9,
+        desc=("The number of iterations: log2(number of "
+              "time steps)"),
+        field='crt_warped.K')
+    interp = traits.Range(
+        low=0,
+        high=7,
+        field='crt_warped.interp',
+        desc='degree of b-spline used for interpolation')
+    modulate = traits.Bool(
+        field='crt_warped.jactransf', desc="Modulate images")
+
+
+class CreateWarpedOutputSpec(TraitedSpec):
+    warped_files = traits.List(File(exists=True), desc='final warped files')
+
+
+class CreateWarped(SPMCommand):
+    """Apply a flow field estimated by DARTEL to create warped images
+
+    http://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf#page=190
+
+    Examples
+    --------
+    >>> import nipype.interfaces.spm as spm
+    >>> create_warped = spm.CreateWarped()
+    >>> create_warped.inputs.image_files = ['rc1s1.nii', 'rc1s2.nii']
+    >>> create_warped.inputs.flowfield_files = ['u_rc1s1_Template.nii', 'u_rc1s2_Template.nii']
+    >>> create_warped.run() # doctest: +SKIP
+
+    """
+
+    input_spec = CreateWarpedInputSpec
+    output_spec = CreateWarpedOutputSpec
+    _jobtype = 'tools'
+    _jobname = 'dartel'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+
+        if opt in ['image_files']:
+            return scans_for_fnames(val, keep4d=True, separate_sessions=True)
+        if opt in ['flowfield_files']:
+            return scans_for_fnames(val, keep4d=True)
+        else:
+            return super(CreateWarped, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['warped_files'] = []
+        for filename in self.inputs.image_files:
+            pth, base, ext = split_filename(filename)
+            if isdefined(self.inputs.modulate) and self.inputs.modulate:
+                outputs['warped_files'].append(
+                    os.path.realpath('mw%s%s'
% (base, ext))) + else: + outputs['warped_files'].append( + os.path.realpath('w%s%s' % (base, ext))) + return outputs + + +class ApplyDeformationFieldInputSpec(SPMCommandInputSpec): + in_files = InputMultiPath( + ImageFileSPM(exists=True), mandatory=True, field='fnames') + deformation_field = File(exists=True, mandatory=True, field='comp{1}.def') + reference_volume = ImageFileSPM( + exists=True, mandatory=True, field='comp{2}.id.space') + interp = traits.Range( + low=0, + high=7, + field='interp', + desc='degree of b-spline used for interpolation') + + +class ApplyDeformationFieldOutputSpec(TraitedSpec): + out_files = OutputMultiPath(File(exists=True)) + + +class ApplyDeformations(SPMCommand): + input_spec = ApplyDeformationFieldInputSpec + output_spec = ApplyDeformationFieldOutputSpec + + _jobtype = 'util' + _jobname = 'defs' + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['deformation_field', 'reference_volume']: + val = [val] + + if opt in ['deformation_field']: + return scans_for_fnames(val, keep4d=True, separate_sessions=False) + if opt in ['in_files', 'reference_volume']: + return scans_for_fnames(val, keep4d=False, separate_sessions=False) + + else: + return super(ApplyDeformations, self)._format_arg(opt, spec, val) + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['out_files'] = [] + for filename in self.inputs.in_files: + _, fname = os.path.split(filename) + outputs['out_files'].append(os.path.realpath('w%s' % fname)) + return outputs + + +class VBMSegmentInputSpec(SPMCommandInputSpec): + + in_files = InputMultiPath( + ImageFileSPM(exists=True), + desc="A list of files to be segmented", + field='estwrite.data', + copyfile=False, + mandatory=True) + + tissues = ImageFileSPM( + exists=True, field='estwrite.tpm', desc='tissue probability map') + gaussians_per_class = traits.Tuple( + (2, 2, 2, 3, 4, 2), + *([traits.Int()] * 6), + usedefault=True, + desc='number of gaussians for each tissue class') + bias_regularization = traits.Enum( + 0.0001, (0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1, 10), + field='estwrite.opts.biasreg', + usedefault=True, + desc='no(0) - extremely heavy (10)') + + bias_fwhm = traits.Enum( + 60, (30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 'Inf'), + field='estwrite.opts.biasfwhm', + usedefault=True, + desc='FWHM of Gaussian smoothness of bias') + sampling_distance = traits.Float( + 3, + usedefault=True, + field='estwrite.opts.samp', + desc='Sampling distance on data for parameter estimation') + warping_regularization = traits.Float( + 4, + usedefault=True, + field='estwrite.opts.warpreg', + desc='Controls balance between parameters and data') + + spatial_normalization = traits.Enum( + 'high', + 'low', + usedefault=True, + ) + dartel_template = ImageFileSPM( + exists=True, field='estwrite.extopts.dartelwarp.normhigh.darteltpm') + use_sanlm_denoising_filter = traits.Range( + 0, + 2, + 2, + usedefault=True, + field='estwrite.extopts.sanlm', + desc="0=No denoising, 1=denoising,2=denoising multi-threaded") + mrf_weighting = traits.Float( + 0.15, usedefault=True, field='estwrite.extopts.mrf') + cleanup_partitions = traits.Int( + 1, + usedefault=True, + field='estwrite.extopts.cleanup', + desc="0=None,1=light,2=thorough") + display_results = traits.Bool( + True, usedefault=True, field='estwrite.extopts.print') + + gm_native = traits.Bool( + False, + usedefault=True, + field='estwrite.output.GM.native', + ) + gm_normalized = traits.Bool( + False, + usedefault=True, + 
field='estwrite.output.GM.warped', + ) + gm_modulated_normalized = traits.Range( + 0, + 2, + 2, + usedefault=True, + field='estwrite.output.GM.modulated', + desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + gm_dartel = traits.Range( + 0, + 2, + 0, + usedefault=True, + field='estwrite.output.GM.dartel', + desc="0=None,1=rigid(SPM8 default),2=affine") + + wm_native = traits.Bool( + False, + usedefault=True, + field='estwrite.output.WM.native', + ) + wm_normalized = traits.Bool( + False, + usedefault=True, + field='estwrite.output.WM.warped', + ) + wm_modulated_normalized = traits.Range( + 0, + 2, + 2, + usedefault=True, + field='estwrite.output.WM.modulated', + desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + wm_dartel = traits.Range( + 0, + 2, + 0, + usedefault=True, + field='estwrite.output.WM.dartel', + desc="0=None,1=rigid(SPM8 default),2=affine") + + csf_native = traits.Bool( + False, + usedefault=True, + field='estwrite.output.CSF.native', + ) + csf_normalized = traits.Bool( + False, + usedefault=True, + field='estwrite.output.CSF.warped', + ) + csf_modulated_normalized = traits.Range( + 0, + 2, + 2, + usedefault=True, + field='estwrite.output.CSF.modulated', + desc='0=none,1=affine+non-linear(SPM8 default),2=non-linear only') + csf_dartel = traits.Range( + 0, + 2, + 0, + usedefault=True, + field='estwrite.output.CSF.dartel', + desc="0=None,1=rigid(SPM8 default),2=affine") + + bias_corrected_native = traits.Bool( + False, + usedefault=True, + field='estwrite.output.bias.native', + ) + bias_corrected_normalized = traits.Bool( + True, + usedefault=True, + field='estwrite.output.bias.warped', + ) + bias_corrected_affine = traits.Bool( + False, + usedefault=True, + field='estwrite.output.bias.affine', + ) + + pve_label_native = traits.Bool( + False, usedefault=True, field='estwrite.output.label.native') + pve_label_normalized = traits.Bool( + False, usedefault=True, field='estwrite.output.label.warped') + pve_label_dartel = traits.Range( + 0, + 2, + 0, + usedefault=True, + field='estwrite.output.label.dartel', + desc="0=None,1=rigid(SPM8 default),2=affine") + + jacobian_determinant = traits.Bool( + False, usedefault=True, field='estwrite.jacobian.warped') + + deformation_field = traits.Tuple( + (0, 0), + traits.Bool, + traits.Bool, + usedefault=True, + field='estwrite.output.warps', + desc='forward and inverse field') + + +class VBMSegmentOuputSpec(TraitedSpec): + + native_class_images = traits.List( + traits.List(File(exists=True)), desc='native space probability maps') + dartel_input_images = traits.List( + traits.List(File(exists=True)), desc='dartel imported class images') + normalized_class_images = traits.List( + traits.List(File(exists=True)), desc='normalized class images') + modulated_class_images = traits.List( + traits.List(File(exists=True)), + desc=('modulated+normalized class ' + 'images')) + transformation_mat = OutputMultiPath( + File(exists=True), desc='Normalization transformation') + + bias_corrected_images = OutputMultiPath( + File(exists=True), desc='bias corrected images') + normalized_bias_corrected_images = OutputMultiPath( + File(exists=True), desc='bias corrected images') + + pve_label_native_images = OutputMultiPath(File(exists=True)) + pve_label_normalized_images = OutputMultiPath(File(exists=True)) + pve_label_registered_images = OutputMultiPath(File(exists=True)) + + forward_deformation_field = OutputMultiPath(File(exists=True)) + inverse_deformation_field = OutputMultiPath(File(exists=True)) + + 
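+    # Jacobian determinant maps (jac_wrp1*.nii); reported by _list_outputs
+    # below when the jacobian_determinant input is enabled.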
jacobian_determinant_images = OutputMultiPath(File(exists=True)) + + +class VBMSegment(SPMCommand): + """Use VBM8 toolbox to separate structural images into different + tissue classes. + + Example + ------- + >>> import nipype.interfaces.spm as spm + >>> seg = spm.VBMSegment() + >>> seg.inputs.tissues = 'TPM.nii' + >>> seg.inputs.dartel_template = 'Template_1_IXI550_MNI152.nii' + >>> seg.inputs.bias_corrected_native = True + >>> seg.inputs.gm_native = True + >>> seg.inputs.wm_native = True + >>> seg.inputs.csf_native = True + >>> seg.inputs.pve_label_native = True + >>> seg.inputs.deformation_field = (True, False) + >>> seg.run() # doctest: +SKIP + """ + + input_spec = VBMSegmentInputSpec + output_spec = VBMSegmentOuputSpec + + _jobtype = 'tools' + _jobname = 'vbm8' + + def _list_outputs(self): + outputs = self._outputs().get() + + do_dartel = self.inputs.spatial_normalization + dartel_px = '' + if do_dartel: + dartel_px = 'r' + + outputs['native_class_images'] = [[], [], []] + outputs['dartel_input_images'] = [[], [], []] + outputs['normalized_class_images'] = [[], [], []] + outputs['modulated_class_images'] = [[], [], []] + + outputs['transformation_mat'] = [] + + outputs['bias_corrected_images'] = [] + outputs['normalized_bias_corrected_images'] = [] + + outputs['inverse_deformation_field'] = [] + outputs['forward_deformation_field'] = [] + outputs['jacobian_determinant_images'] = [] + + outputs['pve_label_native_images'] = [] + outputs['pve_label_normalized_images'] = [] + outputs['pve_label_registered_images'] = [] + + for filename in self.inputs.in_files: + pth, base, ext = split_filename(filename) + + outputs['transformation_mat'].append( + os.path.join(pth, "%s_seg8.mat" % base)) + + for i, tis in enumerate(['gm', 'wm', 'csf']): + # native space + + if getattr(self.inputs, '%s_native' % tis): + outputs['native_class_images'][i].append( + os.path.join(pth, "p%d%s.nii" % (i + 1, base))) + if getattr(self.inputs, '%s_dartel' % tis) == 1: + outputs['dartel_input_images'][i].append( + os.path.join(pth, "rp%d%s.nii" % (i + 1, base))) + elif getattr(self.inputs, '%s_dartel' % tis) == 2: + outputs['dartel_input_images'][i].append( + os.path.join(pth, "rp%d%s_affine.nii" % (i + 1, base))) + + # normalized space + if getattr(self.inputs, '%s_normalized' % tis): + outputs['normalized_class_images'][i].append( + os.path.join(pth, "w%sp%d%s.nii" % (dartel_px, i + 1, + base))) + + if getattr(self.inputs, '%s_modulated_normalized' % tis) == 1: + outputs['modulated_class_images'][i].append( + os.path.join(pth, "mw%sp%d%s.nii" % (dartel_px, i + 1, + base))) + elif getattr(self.inputs, + '%s_modulated_normalized' % tis) == 2: + outputs['normalized_class_images'][i].append( + os.path.join(pth, "m0w%sp%d%s.nii" % (dartel_px, i + 1, + base))) + + if self.inputs.pve_label_native: + outputs['pve_label_native_images'].append( + os.path.join(pth, "p0%s.nii" % (base))) + if self.inputs.pve_label_normalized: + outputs['pve_label_normalized_images'].append( + os.path.join(pth, "w%sp0%s.nii" % (dartel_px, base))) + if self.inputs.pve_label_dartel == 1: + outputs['pve_label_registered_images'].append( + os.path.join(pth, "rp0%s.nii" % (base))) + elif self.inputs.pve_label_dartel == 2: + outputs['pve_label_registered_images'].append( + os.path.join(pth, "rp0%s_affine.nii" % (base))) + + if self.inputs.bias_corrected_native: + outputs['bias_corrected_images'].append( + os.path.join(pth, "m%s.nii" % (base))) + if self.inputs.bias_corrected_normalized: + outputs['normalized_bias_corrected_images'].append( + 
os.path.join(pth, "wm%s%s.nii" % (dartel_px, base))) + + if self.inputs.deformation_field[0]: + outputs['forward_deformation_field'].append( + os.path.join(pth, "y_%s%s.nii" % (dartel_px, base))) + if self.inputs.deformation_field[1]: + outputs['inverse_deformation_field'].append( + os.path.join(pth, "iy_%s%s.nii" % (dartel_px, base))) + + if self.inputs.jacobian_determinant and do_dartel: + outputs['jacobian_determinant_images'].append( + os.path.join(pth, "jac_wrp1%s.nii" % (base))) + return outputs + + def _format_arg(self, opt, spec, val): + """Convert input to appropriate format for spm + """ + if opt in ['in_files']: + return scans_for_fnames(val, keep4d=True) + elif opt in ['spatial_normalization']: + if val == 'low': + return {'normlow': []} + elif opt in ['dartel_template']: + return np.array([val], dtype=object) + elif opt in ['deformation_field']: + return super(VBMSegment, self)._format_arg( + opt, spec, [int(val[0]), int(val[1])]) + else: + return super(VBMSegment, self)._format_arg(opt, spec, val) + + def _parse_inputs(self): + if self.inputs.spatial_normalization == 'low': + einputs = super(VBMSegment, self)._parse_inputs( + skip=('spatial_normalization', 'dartel_template')) + einputs[0]['estwrite']['extopts']['dartelwarp'] = {'normlow': 1} + return einputs + else: + return super(VBMSegment, + self)._parse_inputs(skip=('spatial_normalization')) diff --git a/nipype/interfaces/spm/tests/__init__.py b/nipype/interfaces/spm/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/spm/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py new file mode 100644 index 0000000000..1b1aa6edcd --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Analyze2nii.py @@ -0,0 +1,39 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Analyze2nii + + +def test_Analyze2nii_inputs(): + input_map = dict( + analyze_file=dict(mandatory=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = Analyze2nii.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Analyze2nii_outputs(): + output_map = dict( + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + nifti_file=dict(), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + outputs = Analyze2nii.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py new file mode 100644 index 0000000000..bbc925ec9e --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ApplyDeformations.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import ApplyDeformations + + +def test_ApplyDeformations_inputs(): + input_map = dict( + deformation_field=dict( + field='comp{1}.def', + mandatory=True, + ), + in_files=dict( + field='fnames', + mandatory=True, + ), + interp=dict(field='interp', ), + matlab_cmd=dict(), + 
mfile=dict(usedefault=True, ), + paths=dict(), + reference_volume=dict( + field='comp{2}.id.space', + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = ApplyDeformations.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyDeformations_outputs(): + output_map = dict(out_files=dict(), ) + outputs = ApplyDeformations.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py new file mode 100644 index 0000000000..d1c35dbcd2 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ApplyInverseDeformation.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ApplyInverseDeformation + + +def test_ApplyInverseDeformation_inputs(): + input_map = dict( + bounding_box=dict(field='comp{1}.inv.comp{1}.sn2def.bb', ), + deformation=dict( + field='comp{1}.inv.comp{1}.sn2def.matname', + xor=['deformation_field'], + ), + deformation_field=dict( + field='comp{1}.inv.comp{1}.def', + xor=['deformation'], + ), + in_files=dict( + field='fnames', + mandatory=True, + ), + interpolation=dict(field='interp', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + target=dict(field='comp{1}.inv.space', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + voxel_sizes=dict(field='comp{1}.inv.comp{1}.sn2def.vox', ), + ) + inputs = ApplyInverseDeformation.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyInverseDeformation_outputs(): + output_map = dict(out_files=dict(), ) + outputs = ApplyInverseDeformation.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py new file mode 100644 index 0000000000..a686e95485 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ApplyTransform.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ApplyTransform + + +def test_ApplyTransform_inputs(): + input_map = dict( + in_file=dict( + copyfile=True, + mandatory=True, + ), + mat=dict(mandatory=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_file=dict(genfile=True, ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = ApplyTransform.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ApplyTransform_outputs(): + output_map = dict(out_file=dict(), ) + outputs = ApplyTransform.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py new file mode 100644 index 0000000000..69695ca7b5 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_CalcCoregAffine.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import CalcCoregAffine + + +def test_CalcCoregAffine_inputs(): + input_map = dict( + invmat=dict(), + mat=dict(), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + moving=dict( + copyfile=False, + mandatory=True, + ), + paths=dict(), + target=dict(mandatory=True, ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = CalcCoregAffine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CalcCoregAffine_outputs(): + output_map = dict( + invmat=dict(), + mat=dict(), + ) + outputs = CalcCoregAffine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Coregister.py b/nipype/interfaces/spm/tests/test_auto_Coregister.py new file mode 100644 index 0000000000..957a2c84a9 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Coregister.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Coregister + + +def test_Coregister_inputs(): + input_map = dict( + apply_to_files=dict( + copyfile=True, + field='other', + ), + cost_function=dict(field='eoptions.cost_fun', ), + fwhm=dict(field='eoptions.fwhm', ), + jobtype=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_prefix=dict( + field='roptions.prefix', + usedefault=True, + ), + paths=dict(), + separation=dict(field='eoptions.sep', ), + source=dict( + copyfile=True, + field='source', + mandatory=True, + ), + target=dict( + copyfile=False, + field='ref', + mandatory=True, + ), + tolerance=dict(field='eoptions.tol', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + write_interp=dict(field='roptions.interp', ), + write_mask=dict(field='roptions.mask', ), + write_wrap=dict(field='roptions.wrap', ), + ) + inputs = Coregister.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Coregister_outputs(): + output_map = dict( + coregistered_files=dict(), + coregistered_source=dict(), + ) + outputs = Coregister.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_CreateWarped.py b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py new file mode 100644 index 0000000000..dd98d30e37 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_CreateWarped.py @@ -0,0 +1,41 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import CreateWarped + + +def test_CreateWarped_inputs(): + input_map = dict( + flowfield_files=dict( + copyfile=False, + field='crt_warped.flowfields', + mandatory=True, + ), + image_files=dict( + copyfile=False, + field='crt_warped.images', 
+ mandatory=True, + ), + interp=dict(field='crt_warped.interp', ), + iterations=dict(field='crt_warped.K', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + modulate=dict(field='crt_warped.jactransf', ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = CreateWarped.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CreateWarped_outputs(): + output_map = dict(warped_files=dict(), ) + outputs = CreateWarped.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_DARTEL.py b/nipype/interfaces/spm/tests/test_auto_DARTEL.py new file mode 100644 index 0000000000..cc1b9eee1b --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_DARTEL.py @@ -0,0 +1,44 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DARTEL + + +def test_DARTEL_inputs(): + input_map = dict( + image_files=dict( + copyfile=False, + field='warp.images', + mandatory=True, + ), + iteration_parameters=dict(field='warp.settings.param', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + optimization_parameters=dict(field='warp.settings.optim', ), + paths=dict(), + regularization_form=dict(field='warp.settings.rform', ), + template_prefix=dict( + field='warp.settings.template', + usedefault=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = DARTEL.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DARTEL_outputs(): + output_map = dict( + dartel_flow_fields=dict(), + final_template_file=dict(), + template_files=dict(), + ) + outputs = DARTEL.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py new file mode 100644 index 0000000000..d25bde2e5d --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_DARTELNorm2MNI.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import DARTELNorm2MNI + + +def test_DARTELNorm2MNI_inputs(): + input_map = dict( + apply_to_files=dict( + copyfile=False, + field='mni_norm.data.subjs.images', + mandatory=True, + ), + bounding_box=dict(field='mni_norm.bb', ), + flowfield_files=dict( + field='mni_norm.data.subjs.flowfields', + mandatory=True, + ), + fwhm=dict(field='mni_norm.fwhm', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + modulate=dict(field='mni_norm.preserve', ), + paths=dict(), + template_file=dict( + copyfile=False, + field='mni_norm.template', + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + voxel_size=dict(field='mni_norm.vox', ), + ) + inputs = DARTELNorm2MNI.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DARTELNorm2MNI_outputs(): + output_map = dict( + 
normalization_parameter_file=dict(), + normalized_files=dict(), + ) + outputs = DARTELNorm2MNI.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_DicomImport.py b/nipype/interfaces/spm/tests/test_auto_DicomImport.py new file mode 100644 index 0000000000..fec62ddeeb --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_DicomImport.py @@ -0,0 +1,48 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import DicomImport + + +def test_DicomImport_inputs(): + input_map = dict( + format=dict( + field='convopts.format', + usedefault=True, + ), + icedims=dict( + field='convopts.icedims', + usedefault=True, + ), + in_files=dict( + field='data', + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + output_dir=dict( + field='outdir', + usedefault=True, + ), + output_dir_struct=dict( + field='root', + usedefault=True, + ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = DicomImport.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DicomImport_outputs(): + output_map = dict(out_files=dict(), ) + outputs = DicomImport.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py new file mode 100644 index 0000000000..bc9bb9006e --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_EstimateContrast.py @@ -0,0 +1,50 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import EstimateContrast + + +def test_EstimateContrast_inputs(): + input_map = dict( + beta_images=dict( + copyfile=False, + mandatory=True, + ), + contrasts=dict(mandatory=True, ), + group_contrast=dict(xor=['use_derivs'], ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + residual_image=dict( + copyfile=False, + mandatory=True, + ), + spm_mat_file=dict( + copyfile=True, + field='spmmat', + mandatory=True, + ), + use_derivs=dict(xor=['group_contrast'], ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = EstimateContrast.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateContrast_outputs(): + output_map = dict( + con_images=dict(), + ess_images=dict(), + spmF_images=dict(), + spmT_images=dict(), + spm_mat_file=dict(), + ) + outputs = EstimateContrast.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_EstimateModel.py b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py new file mode 100644 index 0000000000..05f511a0bd --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_EstimateModel.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import 
EstimateModel + + +def test_EstimateModel_inputs(): + input_map = dict( + estimation_method=dict( + field='method', + mandatory=True, + ), + flags=dict(), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + spm_mat_file=dict( + copyfile=True, + field='spmmat', + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + write_residuals=dict(field='write_residuals', ), + ) + inputs = EstimateModel.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_EstimateModel_outputs(): + output_map = dict( + ARcoef=dict(), + Cbetas=dict(), + RPVimage=dict(), + SDbetas=dict(), + SDerror=dict(), + beta_images=dict(), + labels=dict(), + mask_image=dict(), + residual_image=dict(), + residual_images=dict(), + spm_mat_file=dict(), + ) + outputs = EstimateModel.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py new file mode 100644 index 0000000000..38d2b1c6fb --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_FactorialDesign.py @@ -0,0 +1,58 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import FactorialDesign + + +def test_FactorialDesign_inputs(): + input_map = dict( + covariates=dict(field='cov', ), + explicit_mask_file=dict(field='masking.em', ), + global_calc_mean=dict( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + ), + global_calc_omit=dict( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + ), + global_calc_values=dict( + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + ), + global_normalization=dict(field='globalm.glonorm', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + paths=dict(), + spm_mat_dir=dict(field='dir', ), + threshold_mask_absolute=dict( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + ), + threshold_mask_none=dict( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + ), + threshold_mask_relative=dict( + field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + ), + use_implicit_threshold=dict(field='masking.im', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = FactorialDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FactorialDesign_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = FactorialDesign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_FieldMap.py b/nipype/interfaces/spm/tests/test_auto_FieldMap.py new file mode 100644 index 0000000000..43fbbcb8f4 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_FieldMap.py @@ -0,0 +1,128 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals 
+from ..preprocess import FieldMap + + +def test_FieldMap_inputs(): + input_map = dict( + anat_file=dict( + copyfile=False, + field='subj.anat', + ), + blip_direction=dict( + field='subj.defaults.defaultsval.blipdir', + mandatory=True, + ), + echo_times=dict( + field='subj.defaults.defaultsval.et', + mandatory=True, + ), + epi_file=dict( + copyfile=False, + field='subj.session.epi', + mandatory=True, + ), + epifm=dict( + field='subj.defaults.defaultsval.epifm', + usedefault=True, + ), + jacobian_modulation=dict( + field='subj.defaults.defaultsval.ajm', + usedefault=True, + ), + jobtype=dict(usedefault=True, ), + magnitude_file=dict( + copyfile=False, + field='subj.data.presubphasemag.magnitude', + mandatory=True, + ), + mask_fwhm=dict( + field='subj.defaults.defaultsval.mflags.fwhm', + usedefault=True, + ), + maskbrain=dict( + field='subj.defaults.defaultsval.maskbrain', + usedefault=True, + ), + matchanat=dict( + field='subj.matchanat', + usedefault=True, + ), + matchvdm=dict( + field='subj.matchvdm', + usedefault=True, + ), + matlab_cmd=dict(), + method=dict( + field='subj.defaults.defaultsval.uflags.method', + usedefault=True, + ), + mfile=dict(usedefault=True, ), + ndilate=dict( + field='subj.defaults.defaultsval.mflags.ndilate', + usedefault=True, + ), + nerode=dict( + field='subj.defaults.defaultsval.mflags.nerode', + usedefault=True, + ), + pad=dict( + field='subj.defaults.defaultsval.uflags.pad', + usedefault=True, + ), + paths=dict(), + phase_file=dict( + copyfile=False, + field='subj.data.presubphasemag.phase', + mandatory=True, + ), + reg=dict( + field='subj.defaults.defaultsval.mflags.reg', + usedefault=True, + ), + sessname=dict( + field='subj.sessname', + usedefault=True, + ), + template=dict( + copyfile=False, + field='subj.defaults.defaultsval.mflags.template', + ), + thresh=dict( + field='subj.defaults.defaultsval.mflags.thresh', + usedefault=True, + ), + total_readout_time=dict( + field='subj.defaults.defaultsval.tert', + mandatory=True, + ), + unwarp_fwhm=dict( + field='subj.defaults.defaultsval.uflags.fwhm', + usedefault=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + writeunwarped=dict( + field='subj.writeunwarped', + usedefault=True, + ), + ws=dict( + field='subj.defaults.defaultsval.uflags.ws', + usedefault=True, + ), + ) + inputs = FieldMap.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FieldMap_outputs(): + output_map = dict(vdm=dict(), ) + outputs = FieldMap.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Level1Design.py b/nipype/interfaces/spm/tests/test_auto_Level1Design.py new file mode 100644 index 0000000000..7ad8ab8195 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Level1Design.py @@ -0,0 +1,53 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Level1Design + + +def test_Level1Design_inputs(): + input_map = dict( + bases=dict( + field='bases', + mandatory=True, + ), + factor_info=dict(field='fact', ), + global_intensity_normalization=dict(field='global', ), + interscan_interval=dict( + field='timing.RT', + mandatory=True, + ), + mask_image=dict(field='mask', ), + mask_threshold=dict(usedefault=True, ), + matlab_cmd=dict(), + 
mfile=dict(usedefault=True, ), + microtime_onset=dict(field='timing.fmri_t0', ), + microtime_resolution=dict(field='timing.fmri_t', ), + model_serial_correlations=dict(field='cvi', ), + paths=dict(), + session_info=dict( + field='sess', + mandatory=True, + ), + spm_mat_dir=dict(field='dir', ), + timing_units=dict( + field='timing.units', + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + volterra_expansion_order=dict(field='volt', ), + ) + inputs = Level1Design.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Level1Design_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = Level1Design.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py new file mode 100644 index 0000000000..26957f2fbb --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_MultipleRegressionDesign.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import MultipleRegressionDesign + + +def test_MultipleRegressionDesign_inputs(): + input_map = dict( + covariates=dict(field='cov', ), + explicit_mask_file=dict(field='masking.em', ), + global_calc_mean=dict( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + ), + global_calc_omit=dict( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + ), + global_calc_values=dict( + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + ), + global_normalization=dict(field='globalm.glonorm', ), + in_files=dict( + field='des.mreg.scans', + mandatory=True, + ), + include_intercept=dict( + field='des.mreg.incint', + usedefault=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + paths=dict(), + spm_mat_dir=dict(field='dir', ), + threshold_mask_absolute=dict( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + ), + threshold_mask_none=dict( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + ), + threshold_mask_relative=dict( + field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + ), + use_implicit_threshold=dict(field='masking.im', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + user_covariates=dict(field='des.mreg.mcov', ), + ) + inputs = MultipleRegressionDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MultipleRegressionDesign_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = MultipleRegressionDesign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_NewSegment.py b/nipype/interfaces/spm/tests/test_auto_NewSegment.py new file mode 100644 index 0000000000..3f03685e11 --- /dev/null +++ 
b/nipype/interfaces/spm/tests/test_auto_NewSegment.py @@ -0,0 +1,49 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import NewSegment + + +def test_NewSegment_inputs(): + input_map = dict( + affine_regularization=dict(field='warp.affreg', ), + channel_files=dict( + copyfile=False, + field='channel', + mandatory=True, + ), + channel_info=dict(field='channel', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + sampling_distance=dict(field='warp.samp', ), + tissues=dict(field='tissue', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + warping_regularization=dict(field='warp.reg', ), + write_deformation_fields=dict(field='warp.write', ), + ) + inputs = NewSegment.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_NewSegment_outputs(): + output_map = dict( + bias_corrected_images=dict(), + bias_field_images=dict(), + dartel_input_images=dict(), + forward_deformation_field=dict(), + inverse_deformation_field=dict(), + modulated_class_images=dict(), + native_class_images=dict(), + normalized_class_images=dict(), + transformation_mat=dict(), + ) + outputs = NewSegment.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize.py b/nipype/interfaces/spm/tests/test_auto_Normalize.py new file mode 100644 index 0000000000..fde0bf7fff --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Normalize.py @@ -0,0 +1,78 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Normalize + + +def test_Normalize_inputs(): + input_map = dict( + DCT_period_cutoff=dict(field='eoptions.cutoff', ), + affine_regularization_type=dict(field='eoptions.regtype', ), + apply_to_files=dict( + copyfile=True, + field='subj.resample', + ), + jobtype=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + nonlinear_iterations=dict(field='eoptions.nits', ), + nonlinear_regularization=dict(field='eoptions.reg', ), + out_prefix=dict( + field='roptions.prefix', + usedefault=True, + ), + parameter_file=dict( + copyfile=False, + field='subj.matname', + mandatory=True, + xor=['source', 'template'], + ), + paths=dict(), + source=dict( + copyfile=True, + field='subj.source', + mandatory=True, + xor=['parameter_file'], + ), + source_image_smoothing=dict(field='eoptions.smosrc', ), + source_weight=dict( + copyfile=False, + field='subj.wtsrc', + ), + template=dict( + copyfile=False, + field='eoptions.template', + mandatory=True, + xor=['parameter_file'], + ), + template_image_smoothing=dict(field='eoptions.smoref', ), + template_weight=dict( + copyfile=False, + field='eoptions.weight', + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + write_bounding_box=dict(field='roptions.bb', ), + write_interp=dict(field='roptions.interp', ), + write_preserve=dict(field='roptions.preserve', ), + write_voxel_sizes=dict(field='roptions.vox', ), + write_wrap=dict(field='roptions.wrap', ), + ) + inputs = Normalize.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Normalize_outputs(): 
+ output_map = dict( + normalization_parameters=dict(), + normalized_files=dict(), + normalized_source=dict(), + ) + outputs = Normalize.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Normalize12.py b/nipype/interfaces/spm/tests/test_auto_Normalize12.py new file mode 100644 index 0000000000..bf8da2dba1 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Normalize12.py @@ -0,0 +1,67 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Normalize12 + + +def test_Normalize12_inputs(): + input_map = dict( + affine_regularization_type=dict(field='eoptions.affreg', ), + apply_to_files=dict( + copyfile=True, + field='subj.resample', + ), + bias_fwhm=dict(field='eoptions.biasfwhm', ), + bias_regularization=dict(field='eoptions.biasreg', ), + deformation_file=dict( + copyfile=False, + field='subj.def', + mandatory=True, + xor=['image_to_align', 'tpm'], + ), + image_to_align=dict( + copyfile=True, + field='subj.vol', + mandatory=True, + xor=['deformation_file'], + ), + jobtype=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_prefix=dict( + field='woptions.prefix', + usedefault=True, + ), + paths=dict(), + sampling_distance=dict(field='eoptions.samp', ), + smoothness=dict(field='eoptions.fwhm', ), + tpm=dict( + copyfile=False, + field='eoptions.tpm', + xor=['deformation_file'], + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + warping_regularization=dict(field='eoptions.reg', ), + write_bounding_box=dict(field='woptions.bb', ), + write_interp=dict(field='woptions.interp', ), + write_voxel_sizes=dict(field='woptions.vox', ), + ) + inputs = Normalize12.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Normalize12_outputs(): + output_map = dict( + deformation_field=dict(), + normalized_files=dict(), + normalized_image=dict(), + ) + outputs = Normalize12.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py new file mode 100644 index 0000000000..dbb02a6275 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_OneSampleTTestDesign.py @@ -0,0 +1,62 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import OneSampleTTestDesign + + +def test_OneSampleTTestDesign_inputs(): + input_map = dict( + covariates=dict(field='cov', ), + explicit_mask_file=dict(field='masking.em', ), + global_calc_mean=dict( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + ), + global_calc_omit=dict( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + ), + global_calc_values=dict( + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + ), + global_normalization=dict(field='globalm.glonorm', ), + in_files=dict( + field='des.t1.scans', + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + paths=dict(), + 
spm_mat_dir=dict(field='dir', ), + threshold_mask_absolute=dict( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + ), + threshold_mask_none=dict( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + ), + threshold_mask_relative=dict( + field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + ), + use_implicit_threshold=dict(field='masking.im', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = OneSampleTTestDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_OneSampleTTestDesign_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = OneSampleTTestDesign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py new file mode 100644 index 0000000000..3e1662268a --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_PairedTTestDesign.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import PairedTTestDesign + + +def test_PairedTTestDesign_inputs(): + input_map = dict( + ancova=dict(field='des.pt.ancova', ), + covariates=dict(field='cov', ), + explicit_mask_file=dict(field='masking.em', ), + global_calc_mean=dict( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + ), + global_calc_omit=dict( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + ), + global_calc_values=dict( + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + ), + global_normalization=dict(field='globalm.glonorm', ), + grand_mean_scaling=dict(field='des.pt.gmsca', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + paired_files=dict( + field='des.pt.pair', + mandatory=True, + ), + paths=dict(), + spm_mat_dir=dict(field='dir', ), + threshold_mask_absolute=dict( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + ), + threshold_mask_none=dict( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + ), + threshold_mask_relative=dict( + field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + ), + use_implicit_threshold=dict(field='masking.im', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = PairedTTestDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PairedTTestDesign_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = PairedTTestDesign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Realign.py b/nipype/interfaces/spm/tests/test_auto_Realign.py new file mode 100644 index 0000000000..ac2f5bbd92 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Realign.py @@ -0,0 
+1,59 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Realign + + +def test_Realign_inputs(): + input_map = dict( + fwhm=dict(field='eoptions.fwhm', ), + in_files=dict( + copyfile=True, + field='data', + mandatory=True, + ), + interp=dict(field='eoptions.interp', ), + jobtype=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_prefix=dict( + field='roptions.prefix', + usedefault=True, + ), + paths=dict(), + quality=dict(field='eoptions.quality', ), + register_to_mean=dict(field='eoptions.rtm', ), + separation=dict(field='eoptions.sep', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + weight_img=dict(field='eoptions.weight', ), + wrap=dict(field='eoptions.wrap', ), + write_interp=dict(field='roptions.interp', ), + write_mask=dict(field='roptions.mask', ), + write_which=dict( + field='roptions.which', + maxlen=2, + minlen=2, + usedefault=True, + ), + write_wrap=dict(field='roptions.wrap', ), + ) + inputs = Realign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Realign_outputs(): + output_map = dict( + mean_image=dict(), + modified_in_files=dict(), + realigned_files=dict(), + realignment_parameters=dict(), + ) + outputs = Realign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Reslice.py b/nipype/interfaces/spm/tests/test_auto_Reslice.py new file mode 100644 index 0000000000..81299fc748 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Reslice.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import Reslice + + +def test_Reslice_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + interp=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_file=dict(), + paths=dict(), + space_defining=dict(mandatory=True, ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = Reslice.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Reslice_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Reslice.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py new file mode 100644 index 0000000000..3e39ade181 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ResliceToReference.py @@ -0,0 +1,36 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..utils import ResliceToReference + + +def test_ResliceToReference_inputs(): + input_map = dict( + bounding_box=dict(field='comp{2}.idbbvox.bb', ), + in_files=dict( + field='fnames', + mandatory=True, + ), + interpolation=dict(field='interp', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + target=dict(field='comp{1}.id.space', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + 
usedefault=True, + ), + voxel_sizes=dict(field='comp{2}.idbbvox.vox', ), + ) + inputs = ResliceToReference.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ResliceToReference_outputs(): + output_map = dict(out_files=dict(), ) + outputs = ResliceToReference.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_SPMCommand.py b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py new file mode 100644 index 0000000000..7c1ba5cbc2 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_SPMCommand.py @@ -0,0 +1,21 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import SPMCommand + + +def test_SPMCommand_inputs(): + input_map = dict( + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = SPMCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Segment.py b/nipype/interfaces/spm/tests/test_auto_Segment.py new file mode 100644 index 0000000000..f6df46e1de --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Segment.py @@ -0,0 +1,64 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Segment + + +def test_Segment_inputs(): + input_map = dict( + affine_regularization=dict(field='opts.regtype', ), + bias_fwhm=dict(field='opts.biasfwhm', ), + bias_regularization=dict(field='opts.biasreg', ), + clean_masks=dict(field='output.cleanup', ), + csf_output_type=dict(field='output.CSF', ), + data=dict( + copyfile=False, + field='data', + mandatory=True, + ), + gaussians_per_class=dict(field='opts.ngaus', ), + gm_output_type=dict(field='output.GM', ), + mask_image=dict(field='opts.msk', ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + sampling_distance=dict(field='opts.samp', ), + save_bias_corrected=dict(field='output.biascor', ), + tissue_prob_maps=dict(field='opts.tpm', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + warp_frequency_cutoff=dict(field='opts.warpco', ), + warping_regularization=dict(field='opts.warpreg', ), + wm_output_type=dict(field='output.WM', ), + ) + inputs = Segment.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Segment_outputs(): + output_map = dict( + bias_corrected_image=dict(), + inverse_transformation_mat=dict(), + modulated_csf_image=dict(), + modulated_gm_image=dict(), + modulated_input_image=dict( + deprecated='0.10', + new_name='bias_corrected_image', + ), + modulated_wm_image=dict(), + native_csf_image=dict(), + native_gm_image=dict(), + native_wm_image=dict(), + normalized_csf_image=dict(), + normalized_gm_image=dict(), + normalized_wm_image=dict(), + transformation_mat=dict(), + ) + outputs = Segment.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git 
a/nipype/interfaces/spm/tests/test_auto_SliceTiming.py b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py new file mode 100644 index 0000000000..85fb1bc4e4 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_SliceTiming.py @@ -0,0 +1,57 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import SliceTiming + + +def test_SliceTiming_inputs(): + input_map = dict( + in_files=dict( + copyfile=False, + field='scans', + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + num_slices=dict( + field='nslices', + mandatory=True, + ), + out_prefix=dict( + field='prefix', + usedefault=True, + ), + paths=dict(), + ref_slice=dict( + field='refslice', + mandatory=True, + ), + slice_order=dict( + field='so', + mandatory=True, + ), + time_acquisition=dict( + field='ta', + mandatory=True, + ), + time_repetition=dict( + field='tr', + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = SliceTiming.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SliceTiming_outputs(): + output_map = dict(timecorrected_files=dict(), ) + outputs = SliceTiming.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Smooth.py b/nipype/interfaces/spm/tests/test_auto_Smooth.py new file mode 100644 index 0000000000..eea60ed619 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Smooth.py @@ -0,0 +1,40 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import Smooth + + +def test_Smooth_inputs(): + input_map = dict( + data_type=dict(field='dtype', ), + fwhm=dict(field='fwhm', ), + implicit_masking=dict(field='im', ), + in_files=dict( + copyfile=False, + field='data', + mandatory=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + out_prefix=dict( + field='prefix', + usedefault=True, + ), + paths=dict(), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = Smooth.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Smooth_outputs(): + output_map = dict(smoothed_files=dict(), ) + outputs = Smooth.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_Threshold.py b/nipype/interfaces/spm/tests/test_auto_Threshold.py new file mode 100644 index 0000000000..078adb3a1b --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_Threshold.py @@ -0,0 +1,51 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import Threshold + + +def test_Threshold_inputs(): + input_map = dict( + contrast_index=dict(mandatory=True, ), + extent_fdr_p_threshold=dict(usedefault=True, ), + extent_threshold=dict(usedefault=True, ), + force_activation=dict(usedefault=True, ), + height_threshold=dict(usedefault=True, ), + height_threshold_type=dict(usedefault=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + 
spm_mat_file=dict( + copyfile=True, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + mandatory=True, + ), + use_fwe_correction=dict(usedefault=True, ), + use_mcr=dict(), + use_topo_fdr=dict(usedefault=True, ), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = Threshold.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Threshold_outputs(): + output_map = dict( + activation_forced=dict(), + cluster_forming_thr=dict(), + n_clusters=dict(), + pre_topo_fdr_map=dict(), + pre_topo_n_clusters=dict(), + thresholded_map=dict(), + ) + outputs = Threshold.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py new file mode 100644 index 0000000000..ccdc441e04 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_ThresholdStatistics.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import ThresholdStatistics + + +def test_ThresholdStatistics_inputs(): + input_map = dict( + contrast_index=dict(mandatory=True, ), + extent_threshold=dict(usedefault=True, ), + height_threshold=dict(mandatory=True, ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + paths=dict(), + spm_mat_file=dict( + copyfile=True, + mandatory=True, + ), + stat_image=dict( + copyfile=False, + mandatory=True, + ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = ThresholdStatistics.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_ThresholdStatistics_outputs(): + output_map = dict( + clusterwise_P_FDR=dict(), + clusterwise_P_RF=dict(), + voxelwise_P_Bonf=dict(), + voxelwise_P_FDR=dict(), + voxelwise_P_RF=dict(), + voxelwise_P_uncor=dict(), + ) + outputs = ThresholdStatistics.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py new file mode 100644 index 0000000000..4dfbc12570 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_TwoSampleTTestDesign.py @@ -0,0 +1,68 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..model import TwoSampleTTestDesign + + +def test_TwoSampleTTestDesign_inputs(): + input_map = dict( + covariates=dict(field='cov', ), + dependent=dict(field='des.t2.dept', ), + explicit_mask_file=dict(field='masking.em', ), + global_calc_mean=dict( + field='globalc.g_mean', + xor=['global_calc_omit', 'global_calc_values'], + ), + global_calc_omit=dict( + field='globalc.g_omit', + xor=['global_calc_mean', 'global_calc_values'], + ), + global_calc_values=dict( + field='globalc.g_user.global_uval', + xor=['global_calc_mean', 'global_calc_omit'], + ), + global_normalization=dict(field='globalm.glonorm', ), + group1_files=dict( + field='des.t2.scans1', + mandatory=True, + ), + group2_files=dict( + field='des.t2.scans2', + mandatory=True, + ), + 
matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + no_grand_mean_scaling=dict(field='globalm.gmsca.gmsca_no', ), + paths=dict(), + spm_mat_dir=dict(field='dir', ), + threshold_mask_absolute=dict( + field='masking.tm.tma.athresh', + xor=['threshold_mask_none', 'threshold_mask_relative'], + ), + threshold_mask_none=dict( + field='masking.tm.tm_none', + xor=['threshold_mask_absolute', 'threshold_mask_relative'], + ), + threshold_mask_relative=dict( + field='masking.tm.tmr.rthresh', + xor=['threshold_mask_absolute', 'threshold_mask_none'], + ), + unequal_variance=dict(field='des.t2.variance', ), + use_implicit_threshold=dict(field='masking.im', ), + use_mcr=dict(), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + ) + inputs = TwoSampleTTestDesign.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_TwoSampleTTestDesign_outputs(): + output_map = dict(spm_mat_file=dict(), ) + outputs = TwoSampleTTestDesign.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_auto_VBMSegment.py b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py new file mode 100644 index 0000000000..6aaac2b489 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_auto_VBMSegment.py @@ -0,0 +1,164 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..preprocess import VBMSegment + + +def test_VBMSegment_inputs(): + input_map = dict( + bias_corrected_affine=dict( + field='estwrite.output.bias.affine', + usedefault=True, + ), + bias_corrected_native=dict( + field='estwrite.output.bias.native', + usedefault=True, + ), + bias_corrected_normalized=dict( + field='estwrite.output.bias.warped', + usedefault=True, + ), + bias_fwhm=dict( + field='estwrite.opts.biasfwhm', + usedefault=True, + ), + bias_regularization=dict( + field='estwrite.opts.biasreg', + usedefault=True, + ), + cleanup_partitions=dict( + field='estwrite.extopts.cleanup', + usedefault=True, + ), + csf_dartel=dict( + field='estwrite.output.CSF.dartel', + usedefault=True, + ), + csf_modulated_normalized=dict( + field='estwrite.output.CSF.modulated', + usedefault=True, + ), + csf_native=dict( + field='estwrite.output.CSF.native', + usedefault=True, + ), + csf_normalized=dict( + field='estwrite.output.CSF.warped', + usedefault=True, + ), + dartel_template=dict( + field='estwrite.extopts.dartelwarp.normhigh.darteltpm', ), + deformation_field=dict( + field='estwrite.output.warps', + usedefault=True, + ), + display_results=dict( + field='estwrite.extopts.print', + usedefault=True, + ), + gaussians_per_class=dict(usedefault=True, ), + gm_dartel=dict( + field='estwrite.output.GM.dartel', + usedefault=True, + ), + gm_modulated_normalized=dict( + field='estwrite.output.GM.modulated', + usedefault=True, + ), + gm_native=dict( + field='estwrite.output.GM.native', + usedefault=True, + ), + gm_normalized=dict( + field='estwrite.output.GM.warped', + usedefault=True, + ), + in_files=dict( + copyfile=False, + field='estwrite.data', + mandatory=True, + ), + jacobian_determinant=dict( + field='estwrite.jacobian.warped', + usedefault=True, + ), + matlab_cmd=dict(), + mfile=dict(usedefault=True, ), + mrf_weighting=dict( + field='estwrite.extopts.mrf', + usedefault=True, + ), + paths=dict(), + pve_label_dartel=dict( + 
field='estwrite.output.label.dartel', + usedefault=True, + ), + pve_label_native=dict( + field='estwrite.output.label.native', + usedefault=True, + ), + pve_label_normalized=dict( + field='estwrite.output.label.warped', + usedefault=True, + ), + sampling_distance=dict( + field='estwrite.opts.samp', + usedefault=True, + ), + spatial_normalization=dict(usedefault=True, ), + tissues=dict(field='estwrite.tpm', ), + use_mcr=dict(), + use_sanlm_denoising_filter=dict( + field='estwrite.extopts.sanlm', + usedefault=True, + ), + use_v8struct=dict( + min_ver='8', + usedefault=True, + ), + warping_regularization=dict( + field='estwrite.opts.warpreg', + usedefault=True, + ), + wm_dartel=dict( + field='estwrite.output.WM.dartel', + usedefault=True, + ), + wm_modulated_normalized=dict( + field='estwrite.output.WM.modulated', + usedefault=True, + ), + wm_native=dict( + field='estwrite.output.WM.native', + usedefault=True, + ), + wm_normalized=dict( + field='estwrite.output.WM.warped', + usedefault=True, + ), + ) + inputs = VBMSegment.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VBMSegment_outputs(): + output_map = dict( + bias_corrected_images=dict(), + dartel_input_images=dict(), + forward_deformation_field=dict(), + inverse_deformation_field=dict(), + jacobian_determinant_images=dict(), + modulated_class_images=dict(), + native_class_images=dict(), + normalized_bias_corrected_images=dict(), + normalized_class_images=dict(), + pve_label_native_images=dict(), + pve_label_normalized_images=dict(), + pve_label_registered_images=dict(), + transformation_mat=dict(), + ) + outputs = VBMSegment.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/spm/tests/test_base.py b/nipype/interfaces/spm/tests/test_base.py new file mode 100644 index 0000000000..a8a23e8def --- /dev/null +++ b/nipype/interfaces/spm/tests/test_base.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import unicode_literals +from builtins import str, bytes + +import os +import numpy as np + +import pytest +from nipype.testing.fixtures import create_files_in_directory + +import nipype.interfaces.spm.base as spm +from nipype.interfaces.spm import no_spm +import nipype.interfaces.matlab as mlab +from nipype.interfaces.spm.base import SPMCommandInputSpec +from nipype.interfaces.base import traits + +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) + + +def test_scan_for_fnames(create_files_in_directory): + filelist, outdir = create_files_in_directory + names = spm.scans_for_fnames(filelist, keep4d=True) + assert names[0] == filelist[0] + assert names[1] == filelist[1] + + +save_time = False +if not save_time: + + @pytest.mark.skipif(no_spm(), reason="spm is not installed") + def test_spm_path(): + spm_path = spm.Info.path() + if spm_path is not None: + assert isinstance(spm_path, (str, bytes)) + assert 'spm' in spm_path.lower() + + +def test_use_mfile(): + class TestClass(spm.SPMCommand): + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + assert dc.inputs.mfile + + +def test_find_mlab_cmd_defaults(): + saved_env = dict(os.environ) + + class TestClass(spm.SPMCommand): + pass + + # test 
without FORCE_SPMMCR, SPMMCRCMD set + for varname in ['FORCE_SPMMCR', 'SPMMCRCMD']: + try: + del os.environ[varname] + except KeyError: + pass + dc = TestClass() + assert dc._use_mcr is None + assert dc._matlab_cmd is None + # test with only FORCE_SPMMCR set + os.environ['FORCE_SPMMCR'] = '1' + dc = TestClass() + assert dc._use_mcr + assert dc._matlab_cmd is None + # test with both, FORCE_SPMMCR and SPMMCRCMD set + os.environ['SPMMCRCMD'] = 'spmcmd' + dc = TestClass() + assert dc._use_mcr + assert dc._matlab_cmd == 'spmcmd' + # restore environment + os.environ.clear() + os.environ.update(saved_env) + + +@pytest.mark.skipif(no_spm(), reason="spm is not installed") +def test_cmd_update(): + class TestClass(spm.SPMCommand): + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + dc.inputs.matlab_cmd = 'foo' + assert dc.mlab._cmd == 'foo' + + +def test_cmd_update2(): + class TestClass(spm.SPMCommand): + _jobtype = 'jobtype' + _jobname = 'jobname' + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + assert dc.jobtype == 'jobtype' + assert dc.jobname == 'jobname' + + +def test_reformat_dict_for_savemat(): + class TestClass(spm.SPMCommand): + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + out = dc._reformat_dict_for_savemat({'a': {'b': {'c': []}}}) + assert out == [{'a': [{'b': [{'c': []}]}]}] + + +def test_generate_job(create_files_in_directory): + class TestClass(spm.SPMCommand): + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + out = dc._generate_job() + assert out == '' + # struct array + contents = {'contents': [1, 2, 3, 4]} + out = dc._generate_job(contents=contents) + assert out == ('.contents(1) = 1;\n.contents(2) = 2;' + '\n.contents(3) = 3;\n.contents(4) = 4;\n') + # cell array of strings + filelist, outdir = create_files_in_directory + names = spm.scans_for_fnames(filelist, keep4d=True) + contents = {'files': names} + out = dc._generate_job(prefix='test', contents=contents) + assert out == "test.files = {...\n'a.nii';...\n'b.nii';...\n};\n" + # string assignment + contents = 'foo' + out = dc._generate_job(prefix='test', contents=contents) + assert out == "test = 'foo';\n" + # cell array of vectors + contents = {'onsets': np.array((1, ), dtype=object)} + contents['onsets'][0] = [1, 2, 3, 4] + out = dc._generate_job(prefix='test', contents=contents) + assert out == 'test.onsets = {...\n[1, 2, 3, 4];...\n};\n' + + +def test_bool(): + class TestClassInputSpec(SPMCommandInputSpec): + test_in = include_intercept = traits.Bool(field='testfield') + + class TestClass(spm.SPMCommand): + input_spec = TestClassInputSpec + _jobtype = 'jobtype' + _jobname = 'jobname' + + dc = TestClass() # dc = derived_class + dc.inputs.test_in = True + out = dc._make_matlab_command(dc._parse_inputs()) + assert out.find('jobs{1}.spm.jobtype.jobname.testfield = 1;') > 0, 1 + dc.inputs.use_v8struct = False + out = dc._make_matlab_command(dc._parse_inputs()) + assert out.find('jobs{1}.jobtype{1}.jobname{1}.testfield = 1;') > 0, 1 + + +def test_make_matlab_command(create_files_in_directory): + class TestClass(spm.SPMCommand): + _jobtype = 'jobtype' + _jobname = 'jobname' + input_spec = spm.SPMCommandInputSpec + + dc = TestClass() # dc = derived_class + filelist, outdir = create_files_in_directory + contents = {'contents': [1, 2, 3, 4]} + script = dc._make_matlab_command([contents]) + assert 'jobs{1}.spm.jobtype.jobname.contents(3) = 3;' in script + dc.inputs.use_v8struct = False + script = 
dc._make_matlab_command([contents]) + assert 'jobs{1}.jobtype{1}.jobname{1}.contents(3) = 3;' in script diff --git a/nipype/interfaces/spm/tests/test_model.py b/nipype/interfaces/spm/tests/test_model.py new file mode 100644 index 0000000000..a9cb957944 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_model.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import nipype.interfaces.spm.model as spm +import nipype.interfaces.matlab as mlab + +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) + + +def test_level1design(): + assert spm.Level1Design._jobtype == 'stats' + assert spm.Level1Design._jobname == 'fmri_spec' + + +def test_estimatemodel(): + assert spm.EstimateModel._jobtype == 'stats' + assert spm.EstimateModel._jobname == 'fmri_est' + + +def test_estimatecontrast(): + assert spm.EstimateContrast._jobtype == 'stats' + assert spm.EstimateContrast._jobname == 'con' + + +def test_threshold(): + assert spm.Threshold._jobtype == 'basetype' + assert spm.Threshold._jobname == 'basename' + + +def test_factorialdesign(): + assert spm.FactorialDesign._jobtype == 'stats' + assert spm.FactorialDesign._jobname == 'factorial_design' + + +def test_onesamplettestdesign(): + assert spm.OneSampleTTestDesign._jobtype == 'stats' + assert spm.OneSampleTTestDesign._jobname == 'factorial_design' + + +def test_twosamplettestdesign(): + assert spm.TwoSampleTTestDesign._jobtype == 'stats' + assert spm.TwoSampleTTestDesign._jobname == 'factorial_design' diff --git a/nipype/interfaces/spm/tests/test_preprocess.py b/nipype/interfaces/spm/tests/test_preprocess.py new file mode 100644 index 0000000000..2b70b7bb54 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_preprocess.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import pytest +from nipype.testing.fixtures import create_files_in_directory + +import nipype.interfaces.spm as spm +from nipype.interfaces.spm import no_spm +import nipype.interfaces.matlab as mlab + +mlab.MatlabCommand.set_default_matlab_cmd(os.getenv('MATLABCMD', 'matlab')) + + +def test_slicetiming(): + assert spm.SliceTiming._jobtype == 'temporal' + assert spm.SliceTiming._jobname == 'st' + + +def test_slicetiming_list_outputs(create_files_in_directory): + filelist, outdir = create_files_in_directory + st = spm.SliceTiming(in_files=filelist[0]) + assert st._list_outputs()['timecorrected_files'][0][0] == 'a' + + +def test_realign(): + assert spm.Realign._jobtype == 'spatial' + assert spm.Realign._jobname == 'realign' + assert spm.Realign().inputs.jobtype == 'estwrite' + + +def test_realign_list_outputs(create_files_in_directory): + filelist, outdir = create_files_in_directory + rlgn = spm.Realign(in_files=filelist[0]) + assert rlgn._list_outputs()['realignment_parameters'][0].startswith('rp_') + assert rlgn._list_outputs()['realigned_files'][0].startswith('r') + assert rlgn._list_outputs()['mean_image'].startswith('mean') + + +def test_coregister(): + assert spm.Coregister._jobtype == 'spatial' + assert spm.Coregister._jobname == 'coreg' + assert spm.Coregister().inputs.jobtype == 'estwrite' + + +def test_coregister_list_outputs(create_files_in_directory): + filelist, outdir = create_files_in_directory + coreg = spm.Coregister(source=filelist[0]) + assert 
coreg._list_outputs()['coregistered_source'][0].startswith('r') + coreg = spm.Coregister(source=filelist[0], apply_to_files=filelist[1]) + assert coreg._list_outputs()['coregistered_files'][0].startswith('r') + + +def test_normalize(): + assert spm.Normalize._jobtype == 'spatial' + assert spm.Normalize._jobname == 'normalise' + assert spm.Normalize().inputs.jobtype == 'estwrite' + + +def test_normalize_list_outputs(create_files_in_directory): + filelist, outdir = create_files_in_directory + norm = spm.Normalize(source=filelist[0]) + assert norm._list_outputs()['normalized_source'][0].startswith('w') + norm = spm.Normalize(source=filelist[0], apply_to_files=filelist[1]) + assert norm._list_outputs()['normalized_files'][0].startswith('w') + + +def test_normalize12(): + assert spm.Normalize12._jobtype == 'spatial' + assert spm.Normalize12._jobname == 'normalise' + assert spm.Normalize12().inputs.jobtype == 'estwrite' + + +def test_normalize12_list_outputs(create_files_in_directory): + filelist, outdir = create_files_in_directory + norm12 = spm.Normalize12(image_to_align=filelist[0]) + assert norm12._list_outputs()['normalized_image'][0].startswith('w') + norm12 = spm.Normalize12( + image_to_align=filelist[0], apply_to_files=filelist[1]) + assert norm12._list_outputs()['normalized_files'][0].startswith('w') + + +@pytest.mark.skipif(no_spm(), reason="spm is not installed") +def test_segment(): + if spm.Info.name() == "SPM12": + assert spm.Segment()._jobtype == 'tools' + assert spm.Segment()._jobname == 'oldseg' + else: + assert spm.Segment()._jobtype == 'spatial' + assert spm.Segment()._jobname == 'preproc' + + +@pytest.mark.skipif(no_spm(), reason="spm is not installed") +def test_newsegment(): + if spm.Info.name() == "SPM12": + assert spm.NewSegment()._jobtype == 'spatial' + assert spm.NewSegment()._jobname == 'preproc' + else: + assert spm.NewSegment()._jobtype == 'tools' + assert spm.NewSegment()._jobname == 'preproc8' + + +def test_smooth(): + assert spm.Smooth._jobtype == 'spatial' + assert spm.Smooth._jobname == 'smooth' + + +def test_dartel(): + assert spm.DARTEL._jobtype == 'tools' + assert spm.DARTEL._jobname == 'dartel' + + +def test_dartelnorm2mni(): + assert spm.DARTELNorm2MNI._jobtype == 'tools' + assert spm.DARTELNorm2MNI._jobname == 'dartel' diff --git a/nipype/interfaces/spm/tests/test_utils.py b/nipype/interfaces/spm/tests/test_utils.py new file mode 100644 index 0000000000..a574fb90a7 --- /dev/null +++ b/nipype/interfaces/spm/tests/test_utils.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os +import pytest +from nipype.testing import example_data +import nipype.interfaces.spm.utils as spmu +from nipype.interfaces.base import isdefined +from nipype.utils.filemanip import split_filename, fname_presuffix +from nipype.interfaces.base import TraitError + + +def test_coreg(): + moving = example_data(infile='functional.nii') + target = example_data(infile='T1.nii') + mat = example_data(infile='trans.mat') + coreg = spmu.CalcCoregAffine(matlab_cmd='mymatlab') + coreg.inputs.target = target + assert coreg.inputs.matlab_cmd == 'mymatlab' + coreg.inputs.moving = moving + assert not isdefined(coreg.inputs.mat) + pth, mov, _ = split_filename(moving) + _, tgt, _ = split_filename(target) + mat = os.path.join(pth, '%s_to_%s.mat' % (mov, tgt)) + invmat = fname_presuffix(mat, prefix='inverse_') + scrpt = coreg._make_matlab_command(None) + assert coreg.inputs.mat == mat + 
    assert coreg.inputs.invmat == invmat
+
+
+def test_apply_transform():
+    moving = example_data(infile='functional.nii')
+    mat = example_data(infile='trans.mat')
+    applymat = spmu.ApplyTransform(matlab_cmd='mymatlab')
+    assert applymat.inputs.matlab_cmd == 'mymatlab'
+    applymat.inputs.in_file = moving
+    applymat.inputs.mat = mat
+    script = applymat._make_matlab_command(None)
+    expected = '[p n e v] = spm_fileparts(V.fname);'
+    assert expected in script
+    expected = 'V.mat = transform.M * V.mat;'
+    assert expected in script
+
+
+def test_reslice():
+    moving = example_data(infile='functional.nii')
+    space_defining = example_data(infile='T1.nii')
+    reslice = spmu.Reslice(matlab_cmd='mymatlab_version')
+    assert reslice.inputs.matlab_cmd == 'mymatlab_version'
+    reslice.inputs.in_file = moving
+    reslice.inputs.space_defining = space_defining
+    assert reslice.inputs.interp == 0
+    with pytest.raises(TraitError):
+        reslice.inputs.trait_set(interp='nearest')
+    with pytest.raises(TraitError):
+        reslice.inputs.trait_set(interp=10)
+    reslice.inputs.interp = 1
+    script = reslice._make_matlab_command(None)
+    outfile = fname_presuffix(moving, prefix='r')
+    assert reslice.inputs.out_file == outfile
+    expected = '\nflags.mean=0;\nflags.which=1;\nflags.mask=0;'
+    assert expected in script.replace(' ', '')
+    expected_interp = 'flags.interp = 1;\n'
+    assert expected_interp in script
+    assert 'spm_reslice(invols, flags);' in script
+
+
+def test_dicom_import():
+    dicom = example_data(infile='dicomdir/123456-1-1.dcm')
+    di = spmu.DicomImport(matlab_cmd='mymatlab')
+    assert di.inputs.matlab_cmd == 'mymatlab'
+    assert di.inputs.output_dir_struct == 'flat'
+    assert di.inputs.output_dir == './converted_dicom'
+    assert di.inputs.format == 'nii'
+    assert not di.inputs.icedims
+    with pytest.raises(TraitError):
+        di.inputs.trait_set(output_dir_struct='wrong')
+    with pytest.raises(TraitError):
+        di.inputs.trait_set(format='FAT')
+    with pytest.raises(TraitError):
+        di.inputs.trait_set(in_files=['does_sfd_not_32fn_exist.dcm'])
+    di.inputs.in_files = [dicom]
+    assert di.inputs.in_files == [dicom]
diff --git a/nipype/interfaces/spm/utils.py b/nipype/interfaces/spm/utils.py
new file mode 100644
index 0000000000..275f0781a9
--- /dev/null
+++ b/nipype/interfaces/spm/utils.py
@@ -0,0 +1,513 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+import numpy as np
+
+from ...utils.filemanip import (split_filename, fname_presuffix,
+                                ensure_list, simplify_list)
+from ..base import (TraitedSpec, isdefined, File, traits, OutputMultiPath,
+                    InputMultiPath)
+from .base import (SPMCommandInputSpec, SPMCommand, scans_for_fnames,
+                   scans_for_fname)
+
+
+class Analyze2niiInputSpec(SPMCommandInputSpec):
+    analyze_file = File(exists=True, mandatory=True)
+
+
+class Analyze2niiOutputSpec(TraitedSpec):
+    nifti_file = File(exists=True)
+
+
+class Analyze2nii(SPMCommand):
+    """Convert an Analyze-format image to NIfTI using SPM."""
+
+    input_spec = Analyze2niiInputSpec
+    output_spec = Analyze2niiOutputSpec
+
+    def _make_matlab_command(self, _):
+        script = "V = spm_vol('%s');\n" % self.inputs.analyze_file
+        _, name, _ = split_filename(self.inputs.analyze_file)
+        self.output_name = os.path.join(os.getcwd(), name + ".nii")
+        script += "[Y, XYZ] = spm_read_vols(V);\n"
+        script += "V.fname = '%s';\n" % self.output_name
+        script += "spm_write_vol(V, Y);\n"
+
+        return script
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['nifti_file'] = self.output_name
+        return outputs
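+
+
+# Example (a sketch, not a doctest that is run; the file name is
+# illustrative): converting an Analyze hdr/img pair to NIfTI:
+#
+# >>> import nipype.interfaces.spm.utils as spmu
+# >>> a2n = spmu.Analyze2nii()
+# >>> a2n.inputs.analyze_file = 'struct.hdr'
+# >>> a2n.run()  # doctest: +SKIP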
+
+
+class CalcCoregAffineInputSpec(SPMCommandInputSpec):
+    target = File(
+        exists=True,
+        mandatory=True,
+        desc='target for generating affine transform')
+    moving = File(
+        exists=True,
+        mandatory=True,
+        copyfile=False,
+        desc=('volume to register with target; the computed transform '
+              'maps moving to target'))
+    mat = File(desc='Filename used to store affine matrix')
+    invmat = File(desc='Filename used to store inverse affine matrix')
+
+
+class CalcCoregAffineOutputSpec(TraitedSpec):
+    mat = File(exists=True, desc='Matlab file holding transform')
+    invmat = File(desc='Matlab file holding inverse transform')
+
+
+class CalcCoregAffine(SPMCommand):
+    """Uses SPM (spm_coreg) to calculate the transform mapping moving to
+    target. Saves the transform in mat (a MATLAB binary file) and also
+    saves the inverse transform.
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.spm.utils as spmu
+    >>> coreg = spmu.CalcCoregAffine(matlab_cmd='matlab-spm8')
+    >>> coreg.inputs.target = 'structural.nii'
+    >>> coreg.inputs.moving = 'functional.nii'
+    >>> coreg.inputs.mat = 'func_to_struct.mat'
+    >>> coreg.run() # doctest: +SKIP
+
+    .. note::
+
+     * the output file mat is saved as a matlab binary file
+     * calculating the transforms does NOT change either input image;
+       it does not **move** the moving image, it only calculates the
+       transform that can be used to move it
+    """
+
+    input_spec = CalcCoregAffineInputSpec
+    output_spec = CalcCoregAffineOutputSpec
+
+    def _make_inv_file(self):
+        """makes filename to hold inverse transform if not specified"""
+        invmat = fname_presuffix(self.inputs.mat, prefix='inverse_')
+        return invmat
+
+    def _make_mat_file(self):
+        """makes a default name for the mat file if none was specified"""
+        pth, mv, _ = split_filename(self.inputs.moving)
+        _, tgt, _ = split_filename(self.inputs.target)
+        mat = os.path.join(pth, '%s_to_%s.mat' % (mv, tgt))
+        return mat
+
+    def _make_matlab_command(self, _):
+        """checks for SPM, generates script"""
+        if not isdefined(self.inputs.mat):
+            self.inputs.mat = self._make_mat_file()
+        if not isdefined(self.inputs.invmat):
+            self.inputs.invmat = self._make_inv_file()
+        script = """
+        target = '%s';
+        moving = '%s';
+        targetv = spm_vol(target);
+        movingv = spm_vol(moving);
+        x = spm_coreg(targetv, movingv);
+        M = spm_matrix(x);
+        save('%s', 'M');
+        M = inv(M);
+        save('%s', 'M');
+        """ % (self.inputs.target, self.inputs.moving, self.inputs.mat,
+               self.inputs.invmat)
+        return script
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['mat'] = os.path.abspath(self.inputs.mat)
+        outputs['invmat'] = os.path.abspath(self.inputs.invmat)
+        return outputs
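+
+
+# CalcCoregAffine only computes the transform; resampling the moving image
+# is a separate step. A sketch of chaining it with ApplyTransform (defined
+# below); file names are illustrative:
+#
+# >>> import nipype.interfaces.spm.utils as spmu
+# >>> coreg = spmu.CalcCoregAffine(target='structural.nii',
+# ...                              moving='functional.nii')
+# >>> res = coreg.run()  # doctest: +SKIP
+# >>> applymat = spmu.ApplyTransform(in_file='functional.nii',
+# ...                                mat=res.outputs.mat)
+# >>> applymat.run()  # doctest: +SKIP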
+
+
+class ApplyTransformInputSpec(SPMCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        copyfile=True,
+        desc='file to apply transform to (only updates header)')
+    mat = File(
+        exists=True, mandatory=True, desc='file holding transform to apply')
+    out_file = File(desc="output file name for transformed data", genfile=True)
+
+
+class ApplyTransformOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='Transformed image file')
+
+
+class ApplyTransform(SPMCommand):
+    """Uses SPM to apply a transform stored in a .mat file to a given file
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.spm.utils as spmu
+    >>> applymat = spmu.ApplyTransform()
+    >>> applymat.inputs.in_file = 'functional.nii'
+    >>> applymat.inputs.mat = 'func_to_struct.mat'
+    >>> applymat.run() # doctest: +SKIP
+
+    """
+    input_spec = ApplyTransformInputSpec
+    output_spec = ApplyTransformOutputSpec
+
+    def _make_matlab_command(self, _):
+        """checks for SPM, generates script"""
+        outputs = self._list_outputs()
+        self.inputs.out_file = outputs['out_file']
+        script = """
+        infile = '%s';
+        outfile = '%s';
+        transform = load('%s');
+
+        V = spm_vol(infile);
+        X = spm_read_vols(V);
+        [p n e v] = spm_fileparts(V.fname);
+        V.mat = transform.M * V.mat;
+        V.fname = fullfile(outfile);
+        spm_write_vol(V,X);
+
+        """ % (self.inputs.in_file, self.inputs.out_file, self.inputs.mat)
+        # img_space = spm_get_space(infile);
+        # spm_get_space(infile, transform.M * img_space);
+        return script
+
+    def _list_outputs(self):
+        outputs = self.output_spec().get()
+        if not isdefined(self.inputs.out_file):
+            outputs['out_file'] = os.path.abspath(self._gen_outfilename())
+        else:
+            outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
+
+    def _gen_outfilename(self):
+        _, name, _ = split_filename(self.inputs.in_file)
+        return name + '_trans.nii'
+
+
+class ResliceInputSpec(SPMCommandInputSpec):
+    in_file = File(
+        exists=True,
+        mandatory=True,
+        desc='file to reslice into the space of space_defining')
+    space_defining = File(
+        exists=True,
+        mandatory=True,
+        desc='Volume defining space to slice in_file into')
+
+    interp = traits.Range(
+        low=0,
+        high=7,
+        usedefault=True,
+        desc='degree of b-spline used for interpolation; '
+             '0 is nearest neighbor (default)')
+
+    out_file = File(desc='Optional file to save resliced volume')
+
+
+class ResliceOutputSpec(TraitedSpec):
+    out_file = File(exists=True, desc='resliced volume')
+
+
+class Reslice(SPMCommand):
+    """Uses spm_reslice to resample in_file into the space of space_defining"""
+
+    input_spec = ResliceInputSpec
+    output_spec = ResliceOutputSpec
+
+    def _make_matlab_command(self, _):
+        """generates script"""
+        if not isdefined(self.inputs.out_file):
+            self.inputs.out_file = fname_presuffix(
+                self.inputs.in_file, prefix='r')
+        script = """
+        flags.mean = 0;
+        flags.which = 1;
+        flags.mask = 0;
+        flags.interp = %d;
+        infiles = strvcat(\'%s\', \'%s\');
+        invols = spm_vol(infiles);
+        spm_reslice(invols, flags);
+        """ % (self.inputs.interp, self.inputs.space_defining,
+               self.inputs.in_file)
+        return script
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_file'] = os.path.abspath(self.inputs.out_file)
+        return outputs
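+
+
+# A sketch of Reslice usage (file names illustrative). interp follows
+# SPM's convention: 0 is nearest neighbour, 1 trilinear, and 2-7 the
+# degree of the b-spline:
+#
+# >>> import nipype.interfaces.spm.utils as spmu
+# >>> reslice = spmu.Reslice()
+# >>> reslice.inputs.in_file = 'functional.nii'
+# >>> reslice.inputs.space_defining = 'structural.nii'
+# >>> reslice.inputs.interp = 4
+# >>> reslice.run()  # doctest: +SKIP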
+
+
+class ApplyInverseDeformationInput(SPMCommandInputSpec):
+    in_files = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        field='fnames',
+        desc='Files on which deformation is applied')
+    target = File(
+        exists=True,
+        field='comp{1}.inv.space',
+        desc='File defining target space')
+    deformation = File(
+        exists=True,
+        field='comp{1}.inv.comp{1}.sn2def.matname',
+        desc='SN SPM deformation file',
+        xor=['deformation_field'])
+    deformation_field = File(
+        exists=True,
+        field='comp{1}.inv.comp{1}.def',
+        desc='SPM deformation field file',
+        xor=['deformation'])
+    interpolation = traits.Range(
+        low=0,
+        high=7,
+        field='interp',
+        desc='degree of b-spline used for interpolation')
+
+    bounding_box = traits.List(
+        traits.Float(),
+        field='comp{1}.inv.comp{1}.sn2def.bb',
+        minlen=6,
+        maxlen=6,
+        desc='bounding box, 6-element list (optional)')
+    voxel_sizes = traits.List(
+        traits.Float(),
+        field='comp{1}.inv.comp{1}.sn2def.vox',
+        minlen=3,
+        maxlen=3,
+        desc='voxel sizes, 3-element list (optional)')
+
+
+class ApplyInverseDeformationOutput(TraitedSpec):
+    out_files = OutputMultiPath(File(exists=True), desc='Transformed files')
+
+
+class ApplyInverseDeformation(SPMCommand):
+    """Uses SPM to apply an inverse deformation stored in a .mat file or a
+    deformation field to a given file
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.spm.utils as spmu
+    >>> inv = spmu.ApplyInverseDeformation()
+    >>> inv.inputs.in_files = 'functional.nii'
+    >>> inv.inputs.deformation = 'struct_to_func.mat'
+    >>> inv.inputs.target = 'structural.nii'
+    >>> inv.run() # doctest: +SKIP
+    """
+
+    input_spec = ApplyInverseDeformationInput
+    output_spec = ApplyInverseDeformationOutput
+
+    _jobtype = 'util'
+    _jobname = 'defs'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+        if opt == 'in_files':
+            return scans_for_fnames(ensure_list(val))
+        if opt == 'target':
+            return scans_for_fname(ensure_list(val))
+        if opt == 'deformation':
+            return np.array([simplify_list(val)], dtype=object)
+        if opt == 'deformation_field':
+            return np.array([simplify_list(val)], dtype=object)
+        return val
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_files'] = []
+        for filename in self.inputs.in_files:
+            _, fname = os.path.split(filename)
+            outputs['out_files'].append(os.path.realpath('w%s' % fname))
+        return outputs
+
+
+class ResliceToReferenceInput(SPMCommandInputSpec):
+    in_files = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        field='fnames',
+        desc='Files on which deformation is applied')
+    target = File(
+        exists=True,
+        field='comp{1}.id.space',
+        desc='File defining target space')
+    interpolation = traits.Range(
+        low=0,
+        high=7,
+        field='interp',
+        desc='degree of b-spline used for interpolation')
+
+    bounding_box = traits.List(
+        traits.Float(),
+        field='comp{2}.idbbvox.bb',
+        minlen=6,
+        maxlen=6,
+        desc='bounding box, 6-element list (optional)')
+    voxel_sizes = traits.List(
+        traits.Float(),
+        field='comp{2}.idbbvox.vox',
+        minlen=3,
+        maxlen=3,
+        desc='voxel sizes, 3-element list (optional)')
+
+
+class ResliceToReferenceOutput(TraitedSpec):
+    out_files = OutputMultiPath(File(exists=True), desc='Transformed files')
+
+
+class ResliceToReference(SPMCommand):
+    """Uses SPM to reslice a volume to a target image space, or to a provided
+    voxel size and bounding box.
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.spm.utils as spmu
+    >>> r2ref = spmu.ResliceToReference()
+    >>> r2ref.inputs.in_files = 'functional.nii'
+    >>> r2ref.inputs.target = 'structural.nii'
+    >>> r2ref.run() # doctest: +SKIP
+    """
+
+    input_spec = ResliceToReferenceInput
+    output_spec = ResliceToReferenceOutput
+
+    _jobtype = 'util'
+    _jobname = 'defs'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+        if opt == 'in_files':
+            return scans_for_fnames(ensure_list(val))
+        if opt == 'target':
+            return scans_for_fname(ensure_list(val))
+        return val
+
+    def _list_outputs(self):
+        outputs = self._outputs().get()
+        outputs['out_files'] = []
+        for filename in self.inputs.in_files:
+            _, fname = os.path.split(filename)
+            outputs['out_files'].append(os.path.realpath('w%s' % fname))
+        return outputs
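+
+
+# A sketch of reslicing to an explicit grid with ResliceToReference,
+# assuming SPM's defs job accepts the bounding-box/voxel-size option used
+# by this interface when no target image is given; values are illustrative
+# (bounding box is [xmin ymin zmin xmax ymax zmax] in mm):
+#
+# >>> import nipype.interfaces.spm.utils as spmu
+# >>> r2ref = spmu.ResliceToReference()
+# >>> r2ref.inputs.in_files = 'functional.nii'
+# >>> r2ref.inputs.bounding_box = [-78., -112., -70., 78., 76., 85.]
+# >>> r2ref.inputs.voxel_sizes = [2., 2., 2.]
+# >>> r2ref.run()  # doctest: +SKIP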
+
+
+class DicomImportInputSpec(SPMCommandInputSpec):
+    in_files = InputMultiPath(
+        File(exists=True),
+        mandatory=True,
+        field='data',
+        desc='dicom files to be converted')
+    output_dir_struct = traits.Enum(
+        'flat',
+        'series',
+        'patname',
+        'patid_date',
+        'patid',
+        'date_time',
+        field='root',
+        usedefault=True,
+        desc='directory structure for the output.')
+    output_dir = traits.Str(
+        './converted_dicom',
+        field='outdir',
+        usedefault=True,
+        desc='output directory.')
+    format = traits.Enum(
+        'nii',
+        'img',
+        field='convopts.format',
+        usedefault=True,
+        desc='output format.')
+    icedims = traits.Bool(
+        False,
+        field='convopts.icedims',
+        usedefault=True,
+        desc=('If image sorting fails, one can try using '
+              'the additional SIEMENS ICEDims information '
+              'to create unique filenames. Use this only if '
+              'there would be multiple volumes with exactly '
+              'the same file names.'))
+
+
+class DicomImportOutputSpec(TraitedSpec):
+    out_files = OutputMultiPath(File(exists=True), desc='converted files')
+
+
+class DicomImport(SPMCommand):
+    """Uses SPM to convert DICOM files to nii or img+hdr.
+
+    Examples
+    --------
+
+    >>> import nipype.interfaces.spm.utils as spmu
+    >>> di = spmu.DicomImport()
+    >>> di.inputs.in_files = ['functional_1.dcm', 'functional_2.dcm']
+    >>> di.run() # doctest: +SKIP
+    """
+
+    input_spec = DicomImportInputSpec
+    output_spec = DicomImportOutputSpec
+
+    _jobtype = 'util'
+    _jobname = 'dicom'
+
+    def _format_arg(self, opt, spec, val):
+        """Convert input to appropriate format for spm
+        """
+        if opt == 'in_files':
+            return np.array(val, dtype=object)
+        if opt == 'output_dir':
+            return np.array([val], dtype=object)
+        if opt == 'icedims':
+            if val:
+                return 1
+            return 0
+        return super(DicomImport, self)._format_arg(opt, spec, val)
+
+    def _run_interface(self, runtime):
+        od = os.path.abspath(self.inputs.output_dir)
+        if not os.path.isdir(od):
+            os.mkdir(od)
+        return super(DicomImport, self)._run_interface(runtime)
+
+    def _list_outputs(self):
+        from glob import glob
+        outputs = self._outputs().get()
+        od = os.path.abspath(self.inputs.output_dir)
+
+        ext = self.inputs.format
+        if self.inputs.output_dir_struct == "flat":
+            outputs['out_files'] = glob(os.path.join(od, '*.%s' % ext))
+        elif self.inputs.output_dir_struct == 'series':
+            outputs['out_files'] = glob(
+                os.path.join(od, os.path.join('*', '*.%s' % ext)))
+        elif (self.inputs.output_dir_struct in [
+                'patid', 'date_time', 'patname'
+        ]):
+            outputs['out_files'] = glob(
+                os.path.join(od, os.path.join('*', '*', '*.%s' % ext)))
+        elif self.inputs.output_dir_struct == 'patid_date':
+            outputs['out_files'] = glob(
+                os.path.join(od, os.path.join('*', '*', '*', '*.%s' % ext)))
+        return outputs
diff --git a/nipype/interfaces/tests/__init__.py b/nipype/interfaces/tests/__init__.py
new file mode 100644
index 0000000000..40a96afc6f
--- /dev/null
+++ b/nipype/interfaces/tests/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py
new file mode 100644
index 0000000000..d77e9d7509
--- /dev/null
+++ b/nipype/interfaces/tests/test_auto_BIDSDataGrabber.py
@@ -0,0 +1,24 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..io import BIDSDataGrabber
+
+
+def test_BIDSDataGrabber_inputs():
+    input_map = dict(
+        base_dir=dict(mandatory=True, ),
+        output_query=dict(),
+        raise_on_empty=dict(usedefault=True, ),
+        return_type=dict(usedefault=True, ),
+    )
+    inputs = BIDSDataGrabber.input_spec()
+
+    for key, metadata in list(input_map.items()):
+        for metakey, value in list(metadata.items()):
+            assert getattr(inputs.traits()[key], metakey) == value
+
+
+def test_BIDSDataGrabber_outputs():
+    output_map = dict()
+    outputs =
BIDSDataGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Bru2.py b/nipype/interfaces/tests/test_auto_Bru2.py new file mode 100644 index 0000000000..d386b3f095 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Bru2.py @@ -0,0 +1,37 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..bru2nii import Bru2 + + +def test_Bru2_inputs(): + input_map = dict( + actual_size=dict(argstr='-a', ), + append_protocol_name=dict(argstr='-p', ), + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + force_conversion=dict(argstr='-f', ), + input_dir=dict( + argstr='%s', + mandatory=True, + position=-1, + ), + output_filename=dict( + argstr='-o %s', + genfile=True, + ), + ) + inputs = Bru2.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Bru2_outputs(): + output_map = dict(nii_file=dict(), ) + outputs = Bru2.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_C3d.py b/nipype/interfaces/tests/test_auto_C3d.py new file mode 100644 index 0000000000..9e74e82e2a --- /dev/null +++ b/nipype/interfaces/tests/test_auto_C3d.py @@ -0,0 +1,52 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..c3 import C3d + + +def test_C3d_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + interp=dict(argstr='-interpolation %s', ), + is_4d=dict(usedefault=True, ), + multicomp_split=dict( + argstr='-mcr', + position=0, + usedefault=True, + ), + out_file=dict( + argstr='-o %s', + position=-1, + xor=['out_files'], + ), + out_files=dict( + argstr='-oo %s', + position=-1, + xor=['out_file'], + ), + pix_type=dict(argstr='-type %s', ), + resample=dict(argstr='-resample %s', ), + scale=dict(argstr='-scale %s', ), + shift=dict(argstr='-shift %s', ), + smooth=dict(argstr='-smooth %s', ), + ) + inputs = C3d.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_C3d_outputs(): + output_map = dict(out_files=dict(), ) + outputs = C3d.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_C3dAffineTool.py b/nipype/interfaces/tests/test_auto_C3dAffineTool.py new file mode 100644 index 0000000000..510ea2f02a --- /dev/null +++ b/nipype/interfaces/tests/test_auto_C3dAffineTool.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..c3 import C3dAffineTool + + +def test_C3dAffineTool_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + fsl2ras=dict( + argstr='-fsl2ras', + position=4, + ), + itk_transform=dict( + argstr='-oitk %s', + hash_files=False, + position=5, + ), + reference_file=dict( + argstr='-ref %s', + 
position=1, + ), + source_file=dict( + argstr='-src %s', + position=2, + ), + transform_file=dict( + argstr='%s', + position=3, + ), + ) + inputs = C3dAffineTool.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_C3dAffineTool_outputs(): + output_map = dict(itk_transform=dict(), ) + outputs = C3dAffineTool.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_CopyMeta.py b/nipype/interfaces/tests/test_auto_CopyMeta.py new file mode 100644 index 0000000000..012edfa886 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_CopyMeta.py @@ -0,0 +1,24 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import CopyMeta + + +def test_CopyMeta_inputs(): + input_map = dict( + dest_file=dict(mandatory=True, ), + exclude_classes=dict(), + include_classes=dict(), + src_file=dict(mandatory=True, ), + ) + inputs = CopyMeta.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CopyMeta_outputs(): + output_map = dict(dest_file=dict(), ) + outputs = CopyMeta.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_DataFinder.py b/nipype/interfaces/tests/test_auto_DataFinder.py new file mode 100644 index 0000000000..9a1b7418df --- /dev/null +++ b/nipype/interfaces/tests/test_auto_DataFinder.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import DataFinder + + +def test_DataFinder_inputs(): + input_map = dict( + ignore_regexes=dict(), + match_regex=dict(usedefault=True, ), + max_depth=dict(), + min_depth=dict(), + root_paths=dict(mandatory=True, ), + unpack_single=dict(usedefault=True, ), + ) + inputs = DataFinder.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DataFinder_outputs(): + output_map = dict() + outputs = DataFinder.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_DataGrabber.py b/nipype/interfaces/tests/test_auto_DataGrabber.py new file mode 100644 index 0000000000..d39db0b527 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_DataGrabber.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import DataGrabber + + +def test_DataGrabber_inputs(): + input_map = dict( + base_directory=dict(), + drop_blank_outputs=dict(usedefault=True, ), + raise_on_empty=dict(usedefault=True, ), + sort_filelist=dict(mandatory=True, ), + template=dict(mandatory=True, ), + template_args=dict(), + ) + inputs = DataGrabber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DataGrabber_outputs(): + output_map = dict() + 
outputs = DataGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_DataSink.py b/nipype/interfaces/tests/test_auto_DataSink.py new file mode 100644 index 0000000000..da26854451 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_DataSink.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import DataSink + + +def test_DataSink_inputs(): + input_map = dict( + _outputs=dict(usedefault=True, ), + base_directory=dict(), + bucket=dict(), + container=dict(), + creds_path=dict(), + encrypt_bucket_keys=dict(), + local_copy=dict(), + parameterization=dict(usedefault=True, ), + regexp_substitutions=dict(), + remove_dest_dir=dict(usedefault=True, ), + strip_dir=dict(), + substitutions=dict(), + ) + inputs = DataSink.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DataSink_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DataSink.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Dcm2nii.py b/nipype/interfaces/tests/test_auto_Dcm2nii.py new file mode 100644 index 0000000000..0caa45a1f1 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Dcm2nii.py @@ -0,0 +1,101 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcm2nii import Dcm2nii + + +def test_Dcm2nii_inputs(): + input_map = dict( + anonymize=dict( + argstr='-a', + usedefault=True, + ), + args=dict(argstr='%s', ), + collapse_folders=dict( + argstr='-c', + usedefault=True, + ), + config_file=dict( + argstr='-b %s', + genfile=True, + ), + convert_all_pars=dict( + argstr='-v', + usedefault=True, + ), + date_in_filename=dict( + argstr='-d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + events_in_filename=dict( + argstr='-e', + usedefault=True, + ), + gzip_output=dict( + argstr='-g', + usedefault=True, + ), + id_in_filename=dict( + argstr='-i', + usedefault=True, + ), + nii_output=dict( + argstr='-n', + usedefault=True, + ), + output_dir=dict( + argstr='-o %s', + genfile=True, + ), + protocol_in_filename=dict( + argstr='-p', + usedefault=True, + ), + reorient=dict(argstr='-r', ), + reorient_and_crop=dict( + argstr='-x', + usedefault=True, + ), + source_dir=dict( + argstr='%s', + mandatory=True, + position=-1, + xor=['source_names'], + ), + source_in_filename=dict( + argstr='-f', + usedefault=True, + ), + source_names=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + xor=['source_dir'], + ), + spm_analyze=dict( + argstr='-s', + xor=['nii_output'], + ), + ) + inputs = Dcm2nii.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dcm2nii_outputs(): + output_map = dict( + bvals=dict(), + bvecs=dict(), + converted_files=dict(), + reoriented_and_cropped_files=dict(), + reoriented_files=dict(), + ) + outputs = Dcm2nii.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], 
metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Dcm2niix.py b/nipype/interfaces/tests/test_auto_Dcm2niix.py new file mode 100644 index 0000000000..5917f48583 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Dcm2niix.py @@ -0,0 +1,85 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcm2nii import Dcm2niix + + +def test_Dcm2niix_inputs(): + input_map = dict( + anon_bids=dict( + argstr='-ba', + requires=['bids_format'], + ), + args=dict(argstr='%s', ), + bids_format=dict( + argstr='-b', + usedefault=True, + ), + comment=dict(argstr='-c %s', ), + compress=dict( + argstr='-z %s', + usedefault=True, + ), + compression=dict(argstr='-%d', ), + crop=dict( + argstr='-x', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + has_private=dict( + argstr='-t', + usedefault=True, + ), + ignore_deriv=dict(argstr='-i', ), + merge_imgs=dict( + argstr='-m', + usedefault=True, + ), + out_filename=dict(argstr='-f %s', ), + output_dir=dict( + argstr='-o %s', + usedefault=True, + ), + philips_float=dict(argstr='-p', ), + series_numbers=dict(argstr='-n %s...', ), + single_file=dict( + argstr='-s', + usedefault=True, + ), + source_dir=dict( + argstr='%s', + mandatory=True, + position=-1, + xor=['source_names'], + ), + source_names=dict( + argstr='%s', + copyfile=False, + mandatory=True, + position=-1, + xor=['source_dir'], + ), + verbose=dict( + argstr='-v', + usedefault=True, + ), + ) + inputs = Dcm2niix.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Dcm2niix_outputs(): + output_map = dict( + bids=dict(), + bvals=dict(), + bvecs=dict(), + converted_files=dict(), + ) + outputs = Dcm2niix.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_DcmStack.py b/nipype/interfaces/tests/test_auto_DcmStack.py new file mode 100644 index 0000000000..53a5259af5 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_DcmStack.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import DcmStack + + +def test_DcmStack_inputs(): + input_map = dict( + dicom_files=dict(mandatory=True, ), + embed_meta=dict(), + exclude_regexes=dict(), + force_read=dict(usedefault=True, ), + include_regexes=dict(), + out_ext=dict(usedefault=True, ), + out_format=dict(), + out_path=dict(), + ) + inputs = DcmStack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_DcmStack_outputs(): + output_map = dict(out_file=dict(), ) + outputs = DcmStack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_FreeSurferSource.py b/nipype/interfaces/tests/test_auto_FreeSurferSource.py new file mode 100644 index 0000000000..15ea9c66cd --- /dev/null +++ b/nipype/interfaces/tests/test_auto_FreeSurferSource.py @@ -0,0 +1,106 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import FreeSurferSource + + +def test_FreeSurferSource_inputs(): + input_map = dict( 
+ hemi=dict(usedefault=True, ), + subject_id=dict(mandatory=True, ), + subjects_dir=dict(mandatory=True, ), + ) + inputs = FreeSurferSource.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_FreeSurferSource_outputs(): + output_map = dict( + BA_stats=dict( + altkey='BA', + loc='stats', + ), + T1=dict(loc='mri', ), + annot=dict( + altkey='*annot', + loc='label', + ), + aparc_a2009s_stats=dict( + altkey='aparc.a2009s', + loc='stats', + ), + aparc_aseg=dict( + altkey='aparc*aseg', + loc='mri', + ), + aparc_stats=dict( + altkey='aparc', + loc='stats', + ), + area_pial=dict( + altkey='area.pial', + loc='surf', + ), + aseg=dict(loc='mri', ), + aseg_stats=dict( + altkey='aseg', + loc='stats', + ), + avg_curv=dict(loc='surf', ), + brain=dict(loc='mri', ), + brainmask=dict(loc='mri', ), + curv=dict(loc='surf', ), + curv_pial=dict( + altkey='curv.pial', + loc='surf', + ), + curv_stats=dict( + altkey='curv', + loc='stats', + ), + entorhinal_exvivo_stats=dict( + altkey='entorhinal_exvivo', + loc='stats', + ), + filled=dict(loc='mri', ), + graymid=dict( + altkey=['graymid', 'midthickness'], + loc='surf', + ), + inflated=dict(loc='surf', ), + jacobian_white=dict(loc='surf', ), + label=dict( + altkey='*label', + loc='label', + ), + norm=dict(loc='mri', ), + nu=dict(loc='mri', ), + orig=dict(loc='mri', ), + pial=dict(loc='surf', ), + rawavg=dict(loc='mri', ), + ribbon=dict( + altkey='*ribbon', + loc='mri', + ), + smoothwm=dict(loc='surf', ), + sphere=dict(loc='surf', ), + sphere_reg=dict( + altkey='sphere.reg', + loc='surf', + ), + sulc=dict(loc='surf', ), + thickness=dict(loc='surf', ), + volume=dict(loc='surf', ), + white=dict(loc='surf', ), + wm=dict(loc='mri', ), + wmparc=dict(loc='mri', ), + wmparc_stats=dict( + altkey='wmparc', + loc='stats', + ), + ) + outputs = FreeSurferSource.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_GroupAndStack.py b/nipype/interfaces/tests/test_auto_GroupAndStack.py new file mode 100644 index 0000000000..a8f30e32f9 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_GroupAndStack.py @@ -0,0 +1,28 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import GroupAndStack + + +def test_GroupAndStack_inputs(): + input_map = dict( + dicom_files=dict(mandatory=True, ), + embed_meta=dict(), + exclude_regexes=dict(), + force_read=dict(usedefault=True, ), + include_regexes=dict(), + out_ext=dict(usedefault=True, ), + out_format=dict(), + out_path=dict(), + ) + inputs = GroupAndStack.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_GroupAndStack_outputs(): + output_map = dict(out_list=dict(), ) + outputs = GroupAndStack.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_IOBase.py b/nipype/interfaces/tests/test_auto_IOBase.py new file mode 100644 index 0000000000..c2c2f96431 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_IOBase.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import 
unicode_literals +from ..io import IOBase + + +def test_IOBase_inputs(): + input_map = dict() + inputs = IOBase.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_JSONFileGrabber.py b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py new file mode 100644 index 0000000000..03a65cf6c2 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_JSONFileGrabber.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import JSONFileGrabber + + +def test_JSONFileGrabber_inputs(): + input_map = dict( + defaults=dict(), + in_file=dict(), + ) + inputs = JSONFileGrabber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JSONFileGrabber_outputs(): + output_map = dict() + outputs = JSONFileGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_JSONFileSink.py b/nipype/interfaces/tests/test_auto_JSONFileSink.py new file mode 100644 index 0000000000..002997912b --- /dev/null +++ b/nipype/interfaces/tests/test_auto_JSONFileSink.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import JSONFileSink + + +def test_JSONFileSink_inputs(): + input_map = dict( + _outputs=dict(usedefault=True, ), + in_dict=dict(usedefault=True, ), + out_file=dict(), + ) + inputs = JSONFileSink.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_JSONFileSink_outputs(): + output_map = dict(out_file=dict(), ) + outputs = JSONFileSink.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_LookupMeta.py b/nipype/interfaces/tests/test_auto_LookupMeta.py new file mode 100644 index 0000000000..29100aaef7 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_LookupMeta.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import LookupMeta + + +def test_LookupMeta_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + meta_keys=dict(mandatory=True, ), + ) + inputs = LookupMeta.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_LookupMeta_outputs(): + output_map = dict() + outputs = LookupMeta.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_MatlabCommand.py b/nipype/interfaces/tests/test_auto_MatlabCommand.py new file mode 100644 index 0000000000..c1b971d25d --- /dev/null +++ b/nipype/interfaces/tests/test_auto_MatlabCommand.py @@ -0,0 +1,47 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..matlab import MatlabCommand + + 
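+# As with the other auto-generated spec tests, every piece of trait metadata
+# (argstr, mandatory, xor, usedefault, ...) declared on the interface is
+# compared against the map recorded by tools/checkspecs.py, so accidental
+# changes to the MatlabCommand input spec fail loudly.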
+def test_MatlabCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + logfile=dict(argstr='-logfile %s', ), + mfile=dict(usedefault=True, ), + nodesktop=dict( + argstr='-nodesktop', + nohash=True, + usedefault=True, + ), + nosplash=dict( + argstr='-nosplash', + nohash=True, + usedefault=True, + ), + paths=dict(), + postscript=dict(usedefault=True, ), + prescript=dict(usedefault=True, ), + script=dict( + argstr='-r "%s;exit"', + mandatory=True, + position=-1, + ), + script_file=dict(usedefault=True, ), + single_comp_thread=dict( + argstr='-singleCompThread', + nohash=True, + ), + uses_mcr=dict( + nohash=True, + xor=['nodesktop', 'nosplash', 'single_comp_thread'], + ), + ) + inputs = MatlabCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_MergeNifti.py b/nipype/interfaces/tests/test_auto_MergeNifti.py new file mode 100644 index 0000000000..9e0a017c60 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_MergeNifti.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import MergeNifti + + +def test_MergeNifti_inputs(): + input_map = dict( + in_files=dict(mandatory=True, ), + merge_dim=dict(), + out_ext=dict(usedefault=True, ), + out_format=dict(), + out_path=dict(), + sort_order=dict(), + ) + inputs = MergeNifti.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MergeNifti_outputs(): + output_map = dict(out_file=dict(), ) + outputs = MergeNifti.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_MeshFix.py b/nipype/interfaces/tests/test_auto_MeshFix.py new file mode 100644 index 0000000000..3cc1541d6d --- /dev/null +++ b/nipype/interfaces/tests/test_auto_MeshFix.py @@ -0,0 +1,98 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..meshfix import MeshFix + + +def test_MeshFix_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + cut_inner=dict(argstr='--cut-inner %d', ), + cut_outer=dict(argstr='--cut-outer %d', ), + decouple_inin=dict(argstr='--decouple-inin %d', ), + decouple_outin=dict(argstr='--decouple-outin %d', ), + decouple_outout=dict(argstr='--decouple-outout %d', ), + dilation=dict(argstr='--dilate %d', ), + dont_clean=dict(argstr='--no-clean', ), + environ=dict( + nohash=True, + usedefault=True, + ), + epsilon_angle=dict(argstr='-a %f', ), + finetuning_distance=dict( + argstr='%f', + requires=['finetuning_substeps'], + ), + finetuning_inwards=dict( + argstr='--fineTuneIn ', + requires=['finetuning_distance', 'finetuning_substeps'], + ), + finetuning_outwards=dict( + argstr='--fineTuneIn ', + requires=['finetuning_distance', 'finetuning_substeps'], + xor=['finetuning_inwards'], + ), + finetuning_substeps=dict( + argstr='%d', + requires=['finetuning_distance'], + ), + in_file1=dict( + argstr='%s', + mandatory=True, + position=1, + ), + in_file2=dict( + argstr='%s', + position=2, + ), + join_closest_components=dict( + argstr='-jc', + xor=['join_closest_components'], + ), + join_overlapping_largest_components=dict( + 
argstr='-j', + xor=['join_closest_components'], + ), + laplacian_smoothing_steps=dict(argstr='--smooth %d', ), + number_of_biggest_shells=dict(argstr='--shells %d', ), + out_filename=dict( + argstr='-o %s', + genfile=True, + ), + output_type=dict(usedefault=True, ), + quiet_mode=dict(argstr='-q', ), + remove_handles=dict(argstr='--remove-handles', ), + save_as_freesurfer_mesh=dict( + argstr='--fsmesh', + xor=['save_as_vrml', 'save_as_stl'], + ), + save_as_stl=dict( + argstr='--stl', + xor=['save_as_vmrl', 'save_as_freesurfer_mesh'], + ), + save_as_vmrl=dict( + argstr='--wrl', + xor=['save_as_stl', 'save_as_freesurfer_mesh'], + ), + set_intersections_to_one=dict(argstr='--intersect', ), + uniform_remeshing_steps=dict( + argstr='-u %d', + requires=['uniform_remeshing_vertices'], + ), + uniform_remeshing_vertices=dict( + argstr='--vertices %d', + requires=['uniform_remeshing_steps'], + ), + x_shift=dict(argstr='--smooth %d', ), + ) + inputs = MeshFix.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MeshFix_outputs(): + output_map = dict(mesh_file=dict(), ) + outputs = MeshFix.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_MySQLSink.py b/nipype/interfaces/tests/test_auto_MySQLSink.py new file mode 100644 index 0000000000..048699659a --- /dev/null +++ b/nipype/interfaces/tests/test_auto_MySQLSink.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import MySQLSink + + +def test_MySQLSink_inputs(): + input_map = dict( + config=dict( + mandatory=True, + xor=['host'], + ), + database_name=dict(mandatory=True, ), + host=dict( + mandatory=True, + requires=['username', 'password'], + usedefault=True, + xor=['config'], + ), + password=dict(), + table_name=dict(mandatory=True, ), + username=dict(), + ) + inputs = MySQLSink.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py new file mode 100644 index 0000000000..88bc12dfa2 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_NiftiGeneratorBase.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import NiftiGeneratorBase + + +def test_NiftiGeneratorBase_inputs(): + input_map = dict() + inputs = NiftiGeneratorBase.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py b/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py new file mode 100644 index 0000000000..38e4cfd698 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_NilearnBaseInterface.py @@ -0,0 +1,12 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..nilearn import NilearnBaseInterface + + +def test_NilearnBaseInterface_inputs(): + input_map = dict() + inputs = NilearnBaseInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in 
list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_PETPVC.py b/nipype/interfaces/tests/test_auto_PETPVC.py new file mode 100644 index 0000000000..c5283435d5 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_PETPVC.py @@ -0,0 +1,74 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..petpvc import PETPVC + + +def test_PETPVC_inputs(): + input_map = dict( + alpha=dict( + argstr='-a %.4f', + usedefault=True, + ), + args=dict(argstr='%s', ), + debug=dict( + argstr='-d', + usedefault=True, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + fwhm_x=dict( + argstr='-x %.4f', + mandatory=True, + ), + fwhm_y=dict( + argstr='-y %.4f', + mandatory=True, + ), + fwhm_z=dict( + argstr='-z %.4f', + mandatory=True, + ), + in_file=dict( + argstr='-i %s', + mandatory=True, + ), + mask_file=dict( + argstr='-m %s', + mandatory=True, + ), + n_deconv=dict( + argstr='-k %d', + usedefault=True, + ), + n_iter=dict( + argstr='-n %d', + usedefault=True, + ), + out_file=dict( + argstr='-o %s', + genfile=True, + hash_files=False, + ), + pvc=dict( + argstr='-p %s', + mandatory=True, + ), + stop_crit=dict( + argstr='-a %.4f', + usedefault=True, + ), + ) + inputs = PETPVC.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_PETPVC_outputs(): + output_map = dict(out_file=dict(), ) + outputs = PETPVC.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Quickshear.py b/nipype/interfaces/tests/test_auto_Quickshear.py new file mode 100644 index 0000000000..7f39a6bc96 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Quickshear.py @@ -0,0 +1,46 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..quickshear import Quickshear + + +def test_Quickshear_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + buff=dict( + argstr='%d', + position=4, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='%s', + mandatory=True, + position=1, + ), + mask_file=dict( + argstr='%s', + mandatory=True, + position=2, + ), + out_file=dict( + argstr='%s', + keep_extension=True, + name_source='in_file', + name_template='%s_defaced', + position=3, + ), + ) + inputs = Quickshear.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Quickshear_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Quickshear.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Reorient.py b/nipype/interfaces/tests/test_auto_Reorient.py new file mode 100644 index 0000000000..2e45a1ca7f --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Reorient.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..image import Reorient + + +def test_Reorient_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + orientation=dict(usedefault=True, ), + ) + inputs = Reorient.input_spec() + + 
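+    # Reorient's spec is minimal: only the input image is mandatory; the
+    # target orientation code falls back to its default via usedefault.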
for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Reorient_outputs(): + output_map = dict( + out_file=dict(), + transform=dict(), + ) + outputs = Reorient.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_Rescale.py b/nipype/interfaces/tests/test_auto_Rescale.py new file mode 100644 index 0000000000..e180c82988 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_Rescale.py @@ -0,0 +1,24 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..image import Rescale + + +def test_Rescale_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + invert=dict(), + percentile=dict(usedefault=True, ), + ref_file=dict(mandatory=True, ), + ) + inputs = Rescale.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Rescale_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Rescale.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_S3DataGrabber.py b/nipype/interfaces/tests/test_auto_S3DataGrabber.py new file mode 100644 index 0000000000..7c69413eb0 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_S3DataGrabber.py @@ -0,0 +1,29 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import S3DataGrabber + + +def test_S3DataGrabber_inputs(): + input_map = dict( + anon=dict(usedefault=True, ), + bucket=dict(mandatory=True, ), + bucket_path=dict(usedefault=True, ), + local_directory=dict(), + raise_on_empty=dict(usedefault=True, ), + region=dict(usedefault=True, ), + sort_filelist=dict(mandatory=True, ), + template=dict(mandatory=True, ), + template_args=dict(), + ) + inputs = S3DataGrabber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_S3DataGrabber_outputs(): + output_map = dict() + outputs = S3DataGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py b/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py new file mode 100644 index 0000000000..7777a8443e --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SEMLikeCommandLine.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import SEMLikeCommandLine + + +def test_SEMLikeCommandLine_inputs(): + input_map = dict(args=dict(argstr='%s', + ), + environ=dict(nohash=True, + usedefault=True, + ), + ) + inputs = SEMLikeCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value + diff --git a/nipype/interfaces/tests/test_auto_SQLiteSink.py b/nipype/interfaces/tests/test_auto_SQLiteSink.py new file mode 100644 index 0000000000..ea03663c4c --- /dev/null +++ 
b/nipype/interfaces/tests/test_auto_SQLiteSink.py @@ -0,0 +1,15 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import SQLiteSink + + +def test_SQLiteSink_inputs(): + input_map = dict( + database_file=dict(mandatory=True, ), + table_name=dict(mandatory=True, ), + ) + inputs = SQLiteSink.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SSHDataGrabber.py b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py new file mode 100644 index 0000000000..cc7aa22e38 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SSHDataGrabber.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import SSHDataGrabber + + +def test_SSHDataGrabber_inputs(): + input_map = dict( + base_directory=dict(mandatory=True, ), + download_files=dict(usedefault=True, ), + drop_blank_outputs=dict(usedefault=True, ), + hostname=dict(mandatory=True, ), + password=dict(), + raise_on_empty=dict(usedefault=True, ), + sort_filelist=dict(mandatory=True, ), + ssh_log_to_file=dict(usedefault=True, ), + template=dict(mandatory=True, ), + template_args=dict(), + template_expression=dict(usedefault=True, ), + username=dict(), + ) + inputs = SSHDataGrabber.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SSHDataGrabber_outputs(): + output_map = dict() + outputs = SSHDataGrabber.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SelectFiles.py b/nipype/interfaces/tests/test_auto_SelectFiles.py new file mode 100644 index 0000000000..bf438fb826 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SelectFiles.py @@ -0,0 +1,24 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import SelectFiles + + +def test_SelectFiles_inputs(): + input_map = dict( + base_directory=dict(), + force_lists=dict(usedefault=True, ), + raise_on_empty=dict(usedefault=True, ), + sort_filelist=dict(usedefault=True, ), + ) + inputs = SelectFiles.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SelectFiles_outputs(): + output_map = dict() + outputs = SelectFiles.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SignalExtraction.py b/nipype/interfaces/tests/test_auto_SignalExtraction.py new file mode 100644 index 0000000000..bc76f5261a --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SignalExtraction.py @@ -0,0 +1,27 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..nilearn import SignalExtraction + + +def test_SignalExtraction_inputs(): + input_map = dict( + class_labels=dict(mandatory=True, ), + detrend=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + incl_shared_variance=dict(usedefault=True, ), + include_global=dict(usedefault=True, ), + 
label_files=dict(mandatory=True, ), + out_file=dict(usedefault=True, ), + ) + inputs = SignalExtraction.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SignalExtraction_outputs(): + output_map = dict(out_file=dict(), ) + outputs = SignalExtraction.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SlicerCommandLine.py b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py new file mode 100644 index 0000000000..057628e879 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SlicerCommandLine.py @@ -0,0 +1,26 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dynamic_slicer import SlicerCommandLine + + +def test_SlicerCommandLine_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + module=dict(), + ) + inputs = SlicerCommandLine.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SlicerCommandLine_outputs(): + output_map = dict() + outputs = SlicerCommandLine.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_SplitNifti.py b/nipype/interfaces/tests/test_auto_SplitNifti.py new file mode 100644 index 0000000000..e1f6539fab --- /dev/null +++ b/nipype/interfaces/tests/test_auto_SplitNifti.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..dcmstack import SplitNifti + + +def test_SplitNifti_inputs(): + input_map = dict( + in_file=dict(mandatory=True, ), + out_ext=dict(usedefault=True, ), + out_format=dict(), + out_path=dict(), + split_dim=dict(), + ) + inputs = SplitNifti.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_SplitNifti_outputs(): + output_map = dict(out_list=dict(), ) + outputs = SplitNifti.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_XNATSink.py b/nipype/interfaces/tests/test_auto_XNATSink.py new file mode 100644 index 0000000000..b4db5ec8d3 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_XNATSink.py @@ -0,0 +1,32 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import XNATSink + + +def test_XNATSink_inputs(): + input_map = dict( + _outputs=dict(usedefault=True, ), + assessor_id=dict(xor=['reconstruction_id'], ), + cache_dir=dict(), + config=dict( + mandatory=True, + xor=['server'], + ), + experiment_id=dict(mandatory=True, ), + project_id=dict(mandatory=True, ), + pwd=dict(), + reconstruction_id=dict(xor=['assessor_id'], ), + server=dict( + mandatory=True, + requires=['user', 'pwd'], + xor=['config'], + ), + share=dict(usedefault=True, ), + subject_id=dict(mandatory=True, ), + user=dict(), + ) + inputs = XNATSink.input_spec() + + for key, metadata in 
list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_auto_XNATSource.py b/nipype/interfaces/tests/test_auto_XNATSource.py new file mode 100644 index 0000000000..8faa79af81 --- /dev/null +++ b/nipype/interfaces/tests/test_auto_XNATSource.py @@ -0,0 +1,34 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..io import XNATSource + + +def test_XNATSource_inputs(): + input_map = dict( + cache_dir=dict(), + config=dict( + mandatory=True, + xor=['server'], + ), + pwd=dict(), + query_template=dict(mandatory=True, ), + query_template_args=dict(usedefault=True, ), + server=dict( + mandatory=True, + requires=['user', 'pwd'], + xor=['config'], + ), + user=dict(), + ) + inputs = XNATSource.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_XNATSource_outputs(): + output_map = dict() + outputs = XNATSource.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/tests/test_extra_dcm2nii.py b/nipype/interfaces/tests/test_extra_dcm2nii.py new file mode 100644 index 0000000000..dd68454ad0 --- /dev/null +++ b/nipype/interfaces/tests/test_extra_dcm2nii.py @@ -0,0 +1,57 @@ +import os +import pytest +import shutil + +from nipype.interfaces.dcm2nii import Dcm2niix +no_dcm2niix = not bool(Dcm2niix().version) +no_datalad = False +try: + from datalad import api # to pull and grab data + from datalad.support.exceptions import IncompleteResultsError +except ImportError: + no_datalad = True + +DICOM_DIR = 'http://datasets-tests.datalad.org/dicoms/dcm2niix-tests' + + +def fetch_data(tmpdir, dicoms): + """Fetches some test DICOMs using datalad""" + data = os.path.join(tmpdir, 'data') + api.install(path=data, source=DICOM_DIR) + data = os.path.join(data, dicoms) + api.get(path=data) + return data + +@pytest.mark.skipif(no_datalad, reason="Datalad required") +@pytest.mark.skipif(no_dcm2niix, reason="Dcm2niix required") +def test_dcm2niix_dwi(tmpdir): + tmpdir.chdir() + try: + datadir = fetch_data(tmpdir.strpath, 'Siemens_Sag_DTI_20160825_145811') + except IncompleteResultsError as exc: + pytest.skip("Failed to fetch test data: %s" % str(exc)) + + def assert_dwi(eg, bids): + "Shared assertions for both conversion runs" + assert eg.outputs.converted_files + assert eg.outputs.bvals + assert eg.outputs.bvecs + outputs = [y for x, y in eg.outputs.get().items()] + if bids: + # ensure all outputs are of equal lengths + assert len(set(map(len, outputs))) == 1 + else: + # BIDS sidecars are only produced when bids_format is enabled + assert not eg.outputs.bids + + dcm = Dcm2niix() + dcm.inputs.source_dir = datadir + dcm.inputs.out_filename = '%u%z' + eg1 = dcm.run() + assert_dwi(eg1, True) + + # now run specifying output directory and removing BIDS option + outdir = tmpdir.mkdir('conversion').strpath + dcm.inputs.output_dir = outdir + dcm.inputs.bids_format = False + eg2 = dcm.run() + assert_dwi(eg2, False) diff --git a/nipype/interfaces/tests/test_image.py b/nipype/interfaces/tests/test_image.py new file mode 100644 index 0000000000..bb4adf1d01 --- /dev/null +++ b/nipype/interfaces/tests/test_image.py @@ -0,0 +1,64 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import numpy as np +import nibabel as
nb +import pytest + +from nibabel.orientations import axcodes2ornt, ornt_transform + +from ..image import _as_reoriented_backport, _orientations +from ... import LooseVersion + +nibabel22 = LooseVersion(nb.__version__) >= LooseVersion('2.2.0') + + +@pytest.mark.skipif(not nibabel22, + reason="Old nibabel - can't directly compare") +def test_reorientation_backport(): + pixdims = ((1, 1, 1), (2, 2, 3)) + data = np.random.normal(size=(17, 18, 19, 2)) + + for pixdim in pixdims: + # Generate a randomly rotated affine + angles = np.random.uniform(-np.pi, np.pi, 3) * [1, 0.5, 1] + rot = nb.eulerangles.euler2mat(*angles) + scale = np.diag(pixdim) + translation = np.array((17, 18, 19)) / 2 + affine = nb.affines.from_matvec(rot.dot(scale), translation) + + # Create image + img = nb.Nifti1Image(data, affine) + dim_info = {'freq': 0, 'phase': 1, 'slice': 2} + img.header.set_dim_info(**dim_info) + + # Find a random, non-identity transform + targ_ornt = orig_ornt = nb.io_orientation(affine) + while np.array_equal(targ_ornt, orig_ornt): + new_code = np.random.choice(_orientations) + targ_ornt = axcodes2ornt(new_code) + + identity = ornt_transform(orig_ornt, orig_ornt) + transform = ornt_transform(orig_ornt, targ_ornt) + + # Identity transform returns exact image + assert img.as_reoriented(identity) is img + assert _as_reoriented_backport(img, identity) is img + + reoriented_a = img.as_reoriented(transform) + reoriented_b = _as_reoriented_backport(img, transform) + + flips_only = img.shape == reoriented_a.shape + + # Reorientation changes affine and data array + assert not np.allclose(img.affine, reoriented_a.affine) + assert not (flips_only and + np.allclose(img.get_data(), reoriented_a.get_data())) + # Dimension info changes iff axes are reordered + assert flips_only == np.array_equal(img.header.get_dim_info(), + reoriented_a.header.get_dim_info()) + + # Both approaches produce equivalent images + assert np.allclose(reoriented_a.affine, reoriented_b.affine) + assert np.array_equal(reoriented_a.get_data(), reoriented_b.get_data()) + assert np.array_equal(reoriented_a.header.get_dim_info(), + reoriented_b.header.get_dim_info()) diff --git a/nipype/interfaces/tests/test_io.py b/nipype/interfaces/tests/test_io.py new file mode 100644 index 0000000000..abff491f36 --- /dev/null +++ b/nipype/interfaces/tests/test_io.py @@ -0,0 +1,689 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from builtins import str, zip, range, open +from future import standard_library +import os +import copy +import simplejson +import glob +import shutil +import os.path as op +import sys +from subprocess import Popen +import hashlib +from collections import namedtuple + +import pytest +import nipype +import nipype.interfaces.io as nio +from nipype.interfaces.base.traits_extension import isdefined +from nipype.interfaces.base import Undefined, TraitError +from nipype.utils.filemanip import dist_is_editable + +# Check for boto +noboto = False +try: + import boto + from boto.s3.connection import S3Connection, OrdinaryCallingFormat +except ImportError: + noboto = True + +# Check for boto3 +noboto3 = False +try: + import boto3 + from botocore.utils import fix_s3_host +except ImportError: + noboto3 = True + +# Check for paramiko +try: + import paramiko + no_paramiko = False + + # Check for localhost SSH Server + # FIXME: Tests requiring this are never run on CI + try: + proxy = None + 
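+        # Probe for an SSH server on localhost; if the connection fails,
+        # no_local_ssh stays True and the SSHDataGrabber test below is
+        # skipped instead of erroring out.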
client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect('127.0.0.1', username=os.getenv('USER'), sock=proxy, + timeout=10) + + no_local_ssh = False + + except (paramiko.SSHException, + paramiko.ssh_exception.NoValidConnectionsError, + OSError): + no_local_ssh = True + +except ImportError: + no_paramiko = True + no_local_ssh = True + +# Check for fakes3 +standard_library.install_aliases() +from subprocess import check_call, CalledProcessError +try: + ret_code = check_call(['which', 'fakes3'], stdout=open(os.devnull, 'wb')) + fakes3 = (ret_code == 0) +except CalledProcessError: + fakes3 = False + +# check for bids +have_pybids = True +try: + import bids + from bids import grabbids as gb + filepath = os.path.realpath(os.path.dirname(bids.__file__)) + datadir = os.path.realpath(os.path.join(filepath, 'tests/data/')) +except ImportError: + have_pybids = False + + +def test_datagrabber(): + dg = nio.DataGrabber() + assert dg.inputs.template == Undefined + assert dg.inputs.base_directory == Undefined + assert dg.inputs.template_args == {'outfiles': []} + + +@pytest.mark.skipif(noboto, reason="boto library is not available") +def test_s3datagrabber(): + dg = nio.S3DataGrabber() + assert dg.inputs.template == Undefined + assert dg.inputs.local_directory == Undefined + assert dg.inputs.template_args == {'outfiles': []} + + +templates1 = { + "model": "interfaces/{package}/model.py", + "preprocess": "interfaces/{package}/pre*.py" +} +templates2 = {"converter": "interfaces/dcm{to!s}nii.py"} +templates3 = {"model": "interfaces/{package.name}/model.py"} + + +@pytest.mark.parametrize("SF_args, inputs_att, expected", [ + ({ + "templates": templates1 + }, { + "package": "fsl" + }, { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": + op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py"), + "preprocess": + op.join( + op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py") + }, + "node_output": ["model", "preprocess"] + }), + ({ + "templates": templates1, + "force_lists": True + }, { + "package": "spm" + }, { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": + [op.join(op.dirname(nipype.__file__), "interfaces/spm/model.py")], + "preprocess": [ + op.join( + op.dirname(nipype.__file__), + "interfaces/spm/preprocess.py") + ] + }, + "node_output": ["model", "preprocess"] + }), + ({ + "templates": templates1 + }, { + "package": "fsl", + "force_lists": ["model"] + }, { + "infields": ["package"], + "outfields": ["model", "preprocess"], + "run_output": { + "model": + [op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py")], + "preprocess": + op.join( + op.dirname(nipype.__file__), "interfaces/fsl/preprocess.py") + }, + "node_output": ["model", "preprocess"] + }), + ({ + "templates": templates2 + }, { + "to": 2 + }, { + "infields": ["to"], + "outfields": ["converter"], + "run_output": { + "converter": + op.join(op.dirname(nipype.__file__), "interfaces/dcm2nii.py") + }, + "node_output": ["converter"] + }), + ({ + "templates": templates3 + }, { + "package": namedtuple("package", ["name"])("fsl") + }, { + "infields": ["package"], + "outfields": ["model"], + "run_output": { + "model": + op.join(op.dirname(nipype.__file__), "interfaces/fsl/model.py") + }, + "node_output": ["model"] + }), +]) +def test_selectfiles(tmpdir, SF_args, inputs_att, expected): + tmpdir.chdir() + base_dir = op.dirname(nipype.__file__) + dg = 
nio.SelectFiles(base_directory=base_dir, **SF_args) + for key, val in inputs_att.items(): + setattr(dg.inputs, key, val) + + assert dg._infields == expected["infields"] + assert sorted(dg._outfields) == expected["outfields"] + assert sorted(dg._outputs().get()) == expected["node_output"] + + res = dg.run() + for key, val in expected["run_output"].items(): + assert getattr(res.outputs, key) == val + + +def test_selectfiles_valueerror(): + """Test ValueError when force_lists has field that isn't in template.""" + base_dir = op.dirname(nipype.__file__) + templates = { + "model": "interfaces/{package}/model.py", + "preprocess": "interfaces/{package}/pre*.py" + } + force_lists = ["model", "preprocess", "registration"] + sf = nio.SelectFiles( + templates, base_directory=base_dir, force_lists=force_lists) + with pytest.raises(ValueError): + sf.run() + + +@pytest.mark.skipif(noboto, reason="boto library is not available") +def test_s3datagrabber_communication(tmpdir): + dg = nio.S3DataGrabber( + infields=['subj_id', 'run_num'], outfields=['func', 'struct']) + dg.inputs.anon = True + dg.inputs.bucket = 'openfmri' + dg.inputs.bucket_path = 'ds001/' + dg.inputs.local_directory = tmpdir.strpath + dg.inputs.sort_filelist = True + dg.inputs.template = '*' + dg.inputs.field_template = dict( + func='%s/BOLD/task001_%s/bold.nii.gz', + struct='%s/anatomy/highres001_brain.nii.gz') + dg.inputs.subj_id = ['sub001', 'sub002'] + dg.inputs.run_num = ['run001', 'run003'] + dg.inputs.template_args = dict( + func=[['subj_id', 'run_num']], struct=[['subj_id']]) + res = dg.run() + func_outfiles = res.outputs.func + struct_outfiles = res.outputs.struct + + # check for all files + assert os.path.join( + dg.inputs.local_directory, + '/sub001/BOLD/task001_run001/bold.nii.gz') in func_outfiles[0] + assert os.path.exists(func_outfiles[0]) + assert os.path.join( + dg.inputs.local_directory, + '/sub001/anatomy/highres001_brain.nii.gz') in struct_outfiles[0] + assert os.path.exists(struct_outfiles[0]) + assert os.path.join( + dg.inputs.local_directory, + '/sub002/BOLD/task001_run003/bold.nii.gz') in func_outfiles[1] + assert os.path.exists(func_outfiles[1]) + assert os.path.join( + dg.inputs.local_directory, + '/sub002/anatomy/highres001_brain.nii.gz') in struct_outfiles[1] + assert os.path.exists(struct_outfiles[1]) + + +def test_datagrabber_order(tmpdir): + for file_name in [ + 'sub002_L1_R1.q', 'sub002_L1_R2.q', 'sub002_L2_R1.q', + 'sub002_L2_R2.qd', 'sub002_L3_R10.q', 'sub002_L3_R2.q' + ]: + tmpdir.join(file_name).open('a').close() + + dg = nio.DataGrabber(infields=['sid']) + dg.inputs.base_directory = tmpdir.strpath + dg.inputs.template = '%s_L%d_R*.q*' + dg.inputs.template_args = { + 'outfiles': [['sid', 1], ['sid', 2], ['sid', 3]] + } + dg.inputs.sid = 'sub002' + dg.inputs.sort_filelist = True + res = dg.run() + outfiles = res.outputs.outfiles + + assert 'sub002_L1_R1' in outfiles[0][0] + assert 'sub002_L1_R2' in outfiles[0][1] + assert 'sub002_L2_R1' in outfiles[1][0] + assert 'sub002_L2_R2' in outfiles[1][1] + assert 'sub002_L3_R2' in outfiles[2][0] + assert 'sub002_L3_R10' in outfiles[2][1] + + +def test_datasink(): + ds = nio.DataSink() + assert ds.inputs.parameterization + assert ds.inputs.base_directory == Undefined + assert ds.inputs.strip_dir == Undefined + assert ds.inputs._outputs == {} + + ds = nio.DataSink(base_directory='foo') + assert ds.inputs.base_directory == 'foo' + + ds = nio.DataSink(infields=['test']) + assert 'test' in ds.inputs.copyable_trait_names() + + +# Make dummy input file 
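+# (module-scoped fixture: the same small text file is reused by the S3 and
+# local-copy DataSink tests that follow)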
+@pytest.fixture(scope="module") +def dummy_input(request, tmpdir_factory): + ''' + Function to create a dummy file + ''' + # Init variables + + input_path = tmpdir_factory.mktemp('input_data').join( + 'datasink_test_s3.txt') + + # Create input file + input_path.write_binary(b'ABCD1234') + + # Return path + return str(input_path) + + +# Test datasink writes to s3 properly +@pytest.mark.skipif( + noboto3 or not fakes3, reason="boto3 or fakes3 library is not available") +def test_datasink_to_s3(dummy_input, tmpdir): + ''' + This function tests to see if the S3 functionality of a DataSink + works properly + ''' + # Init variables + ds = nio.DataSink() + bucket_name = 'test' + container = 'outputs' + attr_folder = 'text_file' + output_dir = 's3://' + bucket_name + # Local temporary filepaths for testing + fakes3_dir = tmpdir.strpath + input_path = dummy_input + + # Start up fake-S3 server + proc = Popen( + ['fakes3', '-r', fakes3_dir, '-p', '4567'], + stdout=open(os.devnull, 'wb')) + + # Init boto3 s3 resource to talk with fakes3 + resource = boto3.resource( + aws_access_key_id='mykey', + aws_secret_access_key='mysecret', + service_name='s3', + endpoint_url='http://127.0.0.1:4567', + use_ssl=False) + resource.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host) + + # Create bucket + bucket = resource.create_bucket(Bucket=bucket_name) + + # Prep datasink + ds.inputs.base_directory = output_dir + ds.inputs.container = container + ds.inputs.bucket = bucket + setattr(ds.inputs, attr_folder, input_path) + + # Run datasink + ds.run() + + # Get MD5sums and compare + key = '/'.join([container, attr_folder, os.path.basename(input_path)]) + obj = bucket.Object(key=key) + dst_md5 = obj.e_tag.replace('"', '') + src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() + + # Kill fakes3 + proc.kill() + + # Make sure md5sums match + assert src_md5 == dst_md5 + + +# Test AWS creds read from env vars +@pytest.mark.skipif( + noboto3 or not fakes3, reason="boto3 or fakes3 library is not available") +def test_aws_keys_from_env(): + ''' + Function to ensure the DataSink can successfully read in AWS + credentials from the environment variables + ''' + + # Init variables + ds = nio.DataSink() + aws_access_key_id = 'ABCDACCESS' + aws_secret_access_key = 'DEFGSECRET' + + # Set env vars + os.environ['AWS_ACCESS_KEY_ID'] = aws_access_key_id + os.environ['AWS_SECRET_ACCESS_KEY'] = aws_secret_access_key + + # Call function to return creds + access_key_test, secret_key_test = ds._return_aws_keys() + + # Assert match + assert aws_access_key_id == access_key_test + assert aws_secret_access_key == secret_key_test + + +# Test the local copy attribute +def test_datasink_localcopy(dummy_input, tmpdir): + ''' + Function to validate DataSink will make local copy via local_copy + attribute + ''' + + # Init variables + local_dir = tmpdir.strpath + container = 'outputs' + attr_folder = 'text_file' + + # Make dummy input file and datasink + input_path = dummy_input + + ds = nio.DataSink() + + # Set up datasink + ds.inputs.container = container + ds.inputs.local_copy = local_dir + + setattr(ds.inputs, attr_folder, input_path) + + # Expected local copy path + local_copy = os.path.join(local_dir, container, attr_folder, + os.path.basename(input_path)) + + # Run the datasink + ds.run() + + # Check md5sums of both + src_md5 = hashlib.md5(open(input_path, 'rb').read()).hexdigest() + dst_md5 = hashlib.md5(open(local_copy, 'rb').read()).hexdigest() + + # Perform test + assert src_md5 == dst_md5 + + +def 
test_datasink_substitutions(tmpdir): + indir = tmpdir.mkdir('-Tmp-nipype_ds_subs_in') + outdir = tmpdir.mkdir('-Tmp-nipype_ds_subs_out') + files = [] + for n in ['ababab.n', 'xabababyz.n']: + f = str(indir.join(n)) + files.append(f) + open(f, 'w') + ds = nio.DataSink( + parameterization=False, + base_directory=str(outdir), + substitutions=[('ababab', 'ABABAB')], + # end anchoring ($) is used to ensure the substitutions operate on + # the filename rather than matching temporary directory names. + # Real-world patterns should be more comprehensible, since real + # paths are more sensible than these synthetic ones. + regexp_substitutions=[(r'xABABAB(\w*)\.n$', r'a-\1-b.n'), + ('(.*%s)[-a]([^%s]*)$' % ((os.path.sep, ) * 2), + r'\1!\2')]) + setattr(ds.inputs, '@outdir', files) + ds.run() + assert sorted([os.path.basename(x) for + x in glob.glob(os.path.join(str(outdir), '*'))]) \ + == ['!-yz-b.n', 'ABABAB.n'] # the regexp substitution ran second and both patterns matched + + +@pytest.fixture() +def _temp_analyze_files(tmpdir): + """Generate temporary analyze file pair.""" + img_dir = tmpdir.mkdir("img") + orig_img = img_dir.join("orig.img") + orig_hdr = img_dir.join("orig.hdr") + orig_img.open('w') + orig_hdr.open('w') + return orig_img.strpath, orig_hdr.strpath + + +def test_datasink_copydir_1(_temp_analyze_files, tmpdir): + orig_img, orig_hdr = _temp_analyze_files + outdir = tmpdir + pth, fname = os.path.split(orig_img) + ds = nio.DataSink( + base_directory=outdir.mkdir("basedir").strpath, parameterization=False) + setattr(ds.inputs, '@outdir', pth) + ds.run() + sep = os.path.sep + assert tmpdir.join('basedir', pth.split(sep)[-1], fname).check() + + +def test_datasink_copydir_2(_temp_analyze_files, tmpdir): + orig_img, orig_hdr = _temp_analyze_files + pth, fname = os.path.split(orig_img) + ds = nio.DataSink( + base_directory=tmpdir.mkdir("basedir").strpath, parameterization=False) + ds.inputs.remove_dest_dir = True + setattr(ds.inputs, 'outdir', pth) + ds.run() + sep = os.path.sep + assert not tmpdir.join('basedir', pth.split(sep)[-1], fname).check() + assert tmpdir.join('basedir', 'outdir', pth.split(sep)[-1], fname).check() + + +def test_datafinder_depth(tmpdir): + outdir = tmpdir.strpath + os.makedirs(os.path.join(outdir, '0', '1', '2', '3')) + + df = nio.DataFinder() + df.inputs.root_paths = os.path.join(outdir, '0') + for min_depth in range(4): + for max_depth in range(min_depth, 4): + df.inputs.min_depth = min_depth + df.inputs.max_depth = max_depth + result = df.run() + expected = [ + '{}'.format(x) for x in range(min_depth, max_depth + 1) + ] + for path, exp_fname in zip(result.outputs.out_paths, expected): + _, fname = os.path.split(path) + assert fname == exp_fname + + +def test_datafinder_unpack(tmpdir): + outdir = tmpdir.strpath + single_res = os.path.join(outdir, "findme.txt") + open(single_res, 'a').close() + open(os.path.join(outdir, "dontfindme"), 'a').close() + + df = nio.DataFinder() + df.inputs.root_paths = outdir + df.inputs.match_regex = r'.+/(?P<basename>.+)\.txt' + df.inputs.unpack_single = True + result = df.run() + print(result.outputs.out_paths) + assert result.outputs.out_paths == single_res + + +def test_freesurfersource(): + fss = nio.FreeSurferSource() + assert fss.inputs.hemi == 'both' + assert fss.inputs.subject_id == Undefined + assert fss.inputs.subjects_dir == Undefined + + +def test_freesurfersource_incorrectdir(): + fss = nio.FreeSurferSource() + with pytest.raises(TraitError): + fss.inputs.subjects_dir = 'path/to/no/existing/directory' + + +def test_jsonsink_input(): + + ds =
nio.JSONFileSink() + assert ds.inputs._outputs == {} + + ds = nio.JSONFileSink(in_dict={'foo': 'var'}) + assert ds.inputs.in_dict == {'foo': 'var'} + + ds = nio.JSONFileSink(infields=['test']) + assert 'test' in ds.inputs.copyable_trait_names() + + +@pytest.mark.parametrize("inputs_attributes", [{ + 'new_entry': 'someValue' +}, { + 'new_entry': 'someValue', + 'test': 'testInfields' +}]) +def test_jsonsink(tmpdir, inputs_attributes): + tmpdir.chdir() + js = nio.JSONFileSink(infields=['test'], in_dict={'foo': 'var'}) + setattr(js.inputs, 'contrasts.alt', 'someNestedValue') + expected_data = {"contrasts": {"alt": "someNestedValue"}, "foo": "var"} + for key, val in inputs_attributes.items(): + setattr(js.inputs, key, val) + expected_data[key] = val + + res = js.run() + with open(res.outputs.out_file, 'r') as f: + data = simplejson.load(f) + + assert data == expected_data + + +# There are three reasons these tests will be skipped: +@pytest.mark.skipif(not have_pybids, + reason="Pybids is not installed") +@pytest.mark.skipif(sys.version_info < (3, 0), + reason="Pybids no longer supports Python 2") +@pytest.mark.skipif(not dist_is_editable('pybids'), + reason="Pybids is not installed in editable mode") +def test_bids_grabber(tmpdir): + tmpdir.chdir() + bg = nio.BIDSDataGrabber() + bg.inputs.base_dir = os.path.join(datadir, 'ds005') + bg.inputs.subject = '01' + results = bg.run() + assert 'sub-01_T1w.nii.gz' in map(os.path.basename, results.outputs.anat) + assert 'sub-01_task-mixedgamblestask_run-01_bold.nii.gz' in \ + map(os.path.basename, results.outputs.func) + + +@pytest.mark.skipif(not have_pybids, + reason="Pybids is not installed") +@pytest.mark.skipif(sys.version_info < (3, 0), + reason="Pybids no longer supports Python 2") +@pytest.mark.skipif(not dist_is_editable('pybids'), + reason="Pybids is not installed in editable mode") +def test_bids_fields(tmpdir): + tmpdir.chdir() + bg = nio.BIDSDataGrabber(infields = ['subject'], outfields = ['dwi']) + bg.inputs.base_dir = os.path.join(datadir, 'ds005') + bg.inputs.subject = '01' + bg.inputs.output_query['dwi'] = dict(modality='dwi') + results = bg.run() + assert 'sub-01_dwi.nii.gz' in map(os.path.basename, results.outputs.dwi) + + +@pytest.mark.skipif(not have_pybids, + reason="Pybids is not installed") +@pytest.mark.skipif(sys.version_info < (3, 0), + reason="Pybids no longer supports Python 2") +@pytest.mark.skipif(not dist_is_editable('pybids'), + reason="Pybids is not installed in editable mode") +def test_bids_infields_outfields(tmpdir): + tmpdir.chdir() + infields = ['infield1', 'infield2'] + outfields = ['outfield1', 'outfield2'] + bg = nio.BIDSDataGrabber(infields=infields) + for outfield in outfields: + bg.inputs.output_query[outfield] = {'key': 'value'} + + for infield in infields: + assert(infield in bg.inputs.traits()) + assert(not(isdefined(bg.inputs.get()[infield]))) + + for outfield in outfields: + assert(outfield in bg._outputs().traits()) + + # now try without defining outfields, we should get anat and func for free + bg = nio.BIDSDataGrabber() + for outfield in ['anat', 'func']: + assert outfield in bg._outputs().traits() + + +@pytest.mark.skipif(no_paramiko, reason="paramiko library is not available") +@pytest.mark.skipif(no_local_ssh, reason="SSH Server is not running") +def test_SSHDataGrabber(tmpdir): + """Test SSHDataGrabber by connecting to localhost and collecting some data. 
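+    The grabber is pointed at 127.0.0.1 through a mocked _get_ssh_client,
+    so no real remote host or ~/.ssh/config entry is required.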
+ """ + old_cwd = tmpdir.chdir() + + source_dir = tmpdir.mkdir('source') + source_hdr = source_dir.join('somedata.hdr') + source_dat = source_dir.join('somedata.img') + source_hdr.ensure() # create + source_dat.ensure() # create + + # ssh client that connects to localhost, current user, regardless of + # ~/.ssh/config + def _mock_get_ssh_client(self): + proxy = None + client = paramiko.SSHClient() + client.load_system_host_keys() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + client.connect('127.0.0.1', username=os.getenv('USER'), sock=proxy, + timeout=10) + return client + MockSSHDataGrabber = copy.copy(nio.SSHDataGrabber) + MockSSHDataGrabber._get_ssh_client = _mock_get_ssh_client + + # grabber to get files from source_dir matching test.hdr + ssh_grabber = MockSSHDataGrabber(infields=['test'], + outfields=['test_file']) + ssh_grabber.inputs.base_directory = str(source_dir) + ssh_grabber.inputs.hostname = '127.0.0.1' + ssh_grabber.inputs.field_template = dict(test_file='%s.hdr') + ssh_grabber.inputs.template = '' + ssh_grabber.inputs.template_args = dict(test_file=[['test']]) + ssh_grabber.inputs.test = 'somedata' + ssh_grabber.inputs.sort_filelist = True + + runtime = ssh_grabber.run() + + # did we successfully get the header? + assert runtime.outputs.test_file == str(tmpdir.join(source_hdr.basename)) + # did we successfully get the data? + assert (tmpdir.join(source_hdr.basename) # header file + .new(ext='.img') # data file + .check(file=True, exists=True)) # exists? + + old_cwd.chdir() diff --git a/nipype/interfaces/tests/test_matlab.py b/nipype/interfaces/tests/test_matlab.py new file mode 100644 index 0000000000..2576a379e7 --- /dev/null +++ b/nipype/interfaces/tests/test_matlab.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import pytest +import nipype.interfaces.matlab as mlab + +matlab_cmd = mlab.get_matlab_command() +no_matlab = matlab_cmd is None +if not no_matlab: + mlab.MatlabCommand.set_default_matlab_cmd(matlab_cmd) + + +def clean_workspace_and_get_default_script_file(): + # Make sure things are clean. + default_script_file = mlab.MatlabInputSpec().script_file + if os.path.exists(default_script_file): + os.remove( + default_script_file + ) # raise Exception('Default script file needed for tests; please remove %s!' % default_script_file) + return default_script_file + + +@pytest.mark.skipif(no_matlab, reason="matlab is not available") +def test_cmdline(): + default_script_file = clean_workspace_and_get_default_script_file() + + mi = mlab.MatlabCommand( + script='whos', script_file='testscript', mfile=False) + + assert mi.cmdline == \ + matlab_cmd + (' -nodesktop -nosplash -singleCompThread -r "fprintf(1,' + '\'Executing code at %s:\\n\',datestr(now));ver,try,' + 'whos,catch ME,fprintf(2,\'MATLAB code threw an ' + 'exception:\\n\');fprintf(2,\'%s\\n\',ME.message);if ' + 'length(ME.stack) ~= 0, fprintf(2,\'File:%s\\nName:%s\\n' + 'Line:%d\\n\',ME.stack.file,ME.stack.name,' + 'ME.stack.line);, end;end;;exit"') + + assert mi.inputs.script == 'whos' + assert mi.inputs.script_file == 'testscript' + assert not os.path.exists( + mi.inputs.script_file), 'scriptfile should not exist' + assert not os.path.exists( + default_script_file), 'default scriptfile should not exist.' 
+ + +@pytest.mark.skipif(no_matlab, reason="matlab is not available") +def test_mlab_inputspec(): + default_script_file = clean_workspace_and_get_default_script_file() + spec = mlab.MatlabInputSpec() + for k in [ + 'paths', 'script', 'nosplash', 'mfile', 'logfile', 'script_file', + 'nodesktop' + ]: + assert k in spec.copyable_trait_names() + assert spec.nodesktop + assert spec.nosplash + assert spec.mfile + assert spec.script_file == default_script_file + + +@pytest.mark.skipif(no_matlab, reason="matlab is not available") +def test_mlab_init(): + default_script_file = clean_workspace_and_get_default_script_file() + + assert mlab.MatlabCommand._cmd == 'matlab' + assert mlab.MatlabCommand.input_spec == mlab.MatlabInputSpec + + assert mlab.MatlabCommand().cmd == matlab_cmd + mc = mlab.MatlabCommand(matlab_cmd='foo_m') + assert mc.cmd == 'foo_m' + + +@pytest.mark.skipif(no_matlab, reason="matlab is not available") +def test_run_interface(tmpdir): + default_script_file = clean_workspace_and_get_default_script_file() + + mc = mlab.MatlabCommand(matlab_cmd='foo_m') + assert not os.path.exists( + default_script_file), 'scriptfile should not exist 1.' + with pytest.raises(ValueError): + mc.run() # script is mandatory + assert not os.path.exists( + default_script_file), 'scriptfile should not exist 2.' + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + mc.inputs.script = 'a=1;' + assert not os.path.exists( + default_script_file), 'scriptfile should not exist 3.' + with pytest.raises(IOError): + mc.run() # foo_m is not an executable + assert os.path.exists(default_script_file), 'scriptfile should exist 3.' + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + cwd = tmpdir.chdir() + + # bypasses ubuntu dash issue + mc = mlab.MatlabCommand(script='foo;', paths=[tmpdir.strpath], mfile=True) + assert not os.path.exists( + default_script_file), 'scriptfile should not exist 4.' + with pytest.raises(RuntimeError): + mc.run() + assert os.path.exists(default_script_file), 'scriptfile should exist 4.' + if os.path.exists(default_script_file): # cleanup + os.remove(default_script_file) + + # bypasses ubuntu dash issue + res = mlab.MatlabCommand( + script='a=1;', paths=[tmpdir.strpath], mfile=True).run() + assert res.runtime.returncode == 0 + assert os.path.exists(default_script_file), 'scriptfile should exist 5.' + cwd.chdir() + + +@pytest.mark.skipif(no_matlab, reason="matlab is not available") +def test_set_matlabcmd(): + default_script_file = clean_workspace_and_get_default_script_file() + + mi = mlab.MatlabCommand() + mi.set_default_matlab_cmd('foo') + assert not os.path.exists( + default_script_file), 'scriptfile should not exist.' + assert mi._default_matlab_cmd == 'foo' + mi.set_default_matlab_cmd(matlab_cmd) diff --git a/nipype/interfaces/tests/test_nilearn.py b/nipype/interfaces/tests/test_nilearn.py new file mode 100644 index 0000000000..79432bc180 --- /dev/null +++ b/nipype/interfaces/tests/test_nilearn.py @@ -0,0 +1,200 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +import os + +import numpy as np + +from ...testing import utils + +from .. 
import nilearn as iface +from ...pipeline import engine as pe + +import pytest +import numpy.testing as npt + +no_nilearn = True +try: + __import__('nilearn') + no_nilearn = False +except ImportError: + pass + + +@pytest.mark.skipif(no_nilearn, reason="the nilearn library is not available") +class TestSignalExtraction(): + + filenames = { + 'in_file': 'fmri.nii', + 'label_files': 'labels.nii', + '4d_label_file': '4dlabels.nii', + 'out_file': 'signals.tsv' + } + labels = ['CSF', 'GrayMatter', 'WhiteMatter'] + global_labels = ['GlobalSignal'] + labels + + @pytest.fixture(autouse=True, scope='class') + def setup_class(self, tmpdir_factory): + tempdir = tmpdir_factory.mktemp("test") + self.orig_dir = tempdir.chdir() + utils.save_toy_nii(self.fake_fmri_data, self.filenames['in_file']) + utils.save_toy_nii(self.fake_label_data, self.filenames['label_files']) + + def test_signal_extract_no_shared(self): + # run + iface.SignalExtraction( + in_file=self.filenames['in_file'], + label_files=self.filenames['label_files'], + class_labels=self.labels, + incl_shared_variance=False).run() + # assert + self.assert_expected_output(self.labels, self.base_wanted) + + def test_signal_extr_bad_label_list(self): + # run + with pytest.raises(ValueError): + iface.SignalExtraction( + in_file=self.filenames['in_file'], + label_files=self.filenames['label_files'], + class_labels=['bad'], + incl_shared_variance=False).run() + + def test_signal_extr_equiv_4d_no_shared(self): + self._test_4d_label( + self.base_wanted, + self.fake_equiv_4d_label_data, + incl_shared_variance=False) + + def test_signal_extr_4d_no_shared(self): + # set up & run & assert + self._test_4d_label( + self.fourd_wanted, + self.fake_4d_label_data, + incl_shared_variance=False) + + def test_signal_extr_global_no_shared(self): + # set up + wanted_global = [[-4. / 6], [-1. / 6], [3. / 6], [-1. / 6], [-7. / 6]] + for i, vals in enumerate(self.base_wanted): + wanted_global[i].extend(vals) + + # run + iface.SignalExtraction( + in_file=self.filenames['in_file'], + label_files=self.filenames['label_files'], + class_labels=self.labels, + include_global=True, + incl_shared_variance=False).run() + + # assert + self.assert_expected_output(self.global_labels, wanted_global) + + def test_signal_extr_4d_global_no_shared(self): + # set up + wanted_global = [[3. / 8], [-3. / 8], [1. / 8], [-7. / 8], [-9. / 8]] + for i, vals in enumerate(self.fourd_wanted): + wanted_global[i].extend(vals) + + # run & assert + self._test_4d_label( + wanted_global, + self.fake_4d_label_data, + include_global=True, + incl_shared_variance=False) + + def test_signal_extr_shared(self): + # set up + wanted = [] + for vol in range(self.fake_fmri_data.shape[3]): + volume = self.fake_fmri_data[:, :, :, vol].flatten() + wanted_row = [] + for reg in range(self.fake_4d_label_data.shape[3]): + region = self.fake_4d_label_data[:, :, :, reg].flatten() + wanted_row.append( + (volume * region).sum() / (region * region).sum()) + + wanted.append(wanted_row) + # run & assert + self._test_4d_label(wanted, self.fake_4d_label_data) + + def test_signal_extr_traits_valid(self): + ''' Test a node using the SignalExtraction interface. 
+ Unlike interface.run(), node.run() checks the traits + ''' + # run + node = pe.Node( + iface.SignalExtraction( + in_file=os.path.abspath(self.filenames['in_file']), + label_files=os.path.abspath(self.filenames['label_files']), + class_labels=self.labels, + incl_shared_variance=False), + name='SignalExtraction') + node.run() + + # assert + # just checking that it passes trait validations + + def _test_4d_label(self, + wanted, + fake_labels, + include_global=False, + incl_shared_variance=True): + # set up + utils.save_toy_nii(fake_labels, self.filenames['4d_label_file']) + + # run + iface.SignalExtraction( + in_file=self.filenames['in_file'], + label_files=self.filenames['4d_label_file'], + class_labels=self.labels, + incl_shared_variance=incl_shared_variance, + include_global=include_global).run() + + wanted_labels = self.global_labels if include_global else self.labels + + # assert + self.assert_expected_output(wanted_labels, wanted) + + def assert_expected_output(self, labels, wanted): + with open(self.filenames['out_file'], 'r') as output: + got = [line.split() for line in output] + labels_got = got.pop(0) # remove header + assert labels_got == labels + assert len(got) == self.fake_fmri_data.shape[ + 3], 'num rows and num volumes' + # convert from string to float + got = [[float(num) for num in row] for row in got] + for i, time in enumerate(got): + assert len(labels) == len(time) + for j, segment in enumerate(time): + npt.assert_almost_equal(segment, wanted[i][j], decimal=1) + + +# dj: self doesnt have orig_dir at this point, not sure how to change it. +# should work without it +# def teardown_class(self): +# self.orig_dir.chdir() + + fake_fmri_data = np.array([[[[2, -1, 4, -2, 3], [4, -2, -5, -1, 0]], + [[-2, 0, 1, 4, 4], [-5, 3, -3, 1, -5]]], + [[[2, -2, -1, -2, -5], [3, 0, 3, -5, -2]], + [[-4, -2, -2, 1, -2], [3, 1, 4, -3, -2]]]]) + + fake_label_data = np.array([[[1, 0], [3, 1]], [[2, 0], [1, 3]]]) + + fake_equiv_4d_label_data = np.array( + [[[[1., 0., 0.], [0., 0., 0.]], [[0., 0., 1.], [1., 0., 0.]]], + [[[0., 1., 0.], [0., 0., 0.]], [[1., 0., 0.], [0., 0., 1.]]]]) + + base_wanted = [[-2.33333, 2, .5], [0, -2, .5], [-.3333333, -1, 2.5], + [0, -2, .5], [-1.3333333, -5, 1]] + + fake_4d_label_data = np.array([[[[0.2, 0.3, 0.5], [0.1, 0.1, 0.8]], + [[0.1, 0.3, 0.6], [0.3, 0.4, 0.3]]], + [[[0.2, 0.2, 0.6], [0., 0.3, 0.7]], + [[0.3, 0.3, 0.4], [0.3, 0.4, 0.3]]]]) + + fourd_wanted = [[-5.0652173913, -5.44565217391, 5.50543478261], [ + -7.02173913043, 11.1847826087, -4.33152173913 + ], [-19.0869565217, 21.2391304348, + -4.57608695652], [5.19565217391, -3.66304347826, -1.51630434783], + [-12.0, 3., 0.5]] diff --git a/nipype/interfaces/utility/__init__.py b/nipype/interfaces/utility/__init__.py new file mode 100644 index 0000000000..084acb569c --- /dev/null +++ b/nipype/interfaces/utility/__init__.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Package contains interfaces for using existing functionality in other packages + +Requires Packages to be installed +""" + +from .base import (IdentityInterface, Rename, Select, Split, Merge, + AssertEqual) +from .csv import CSVReader +from .wrappers import Function diff --git a/nipype/interfaces/utility/base.py b/nipype/interfaces/utility/base.py new file mode 100644 index 0000000000..f2da6cf2a6 --- /dev/null +++ b/nipype/interfaces/utility/base.py @@ -0,0 +1,431 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; 
py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" + # changing to temporary directories + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range + +from future import standard_library +standard_library.install_aliases() + +import os +import re +import numpy as np +import nibabel as nb + +from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File, Undefined, + isdefined, OutputMultiPath, InputMultiPath, BaseInterface, + BaseInterfaceInputSpec, Str) +from ..io import IOBase, add_traits +from ...utils.filemanip import ensure_list, copyfile, split_filename + + +class IdentityInterface(IOBase): + """Basic interface class generates identity mappings + + Examples + -------- + + >>> from nipype.interfaces.utility import IdentityInterface + >>> ii = IdentityInterface(fields=['a', 'b'], mandatory_inputs=False) + >>> ii.inputs.a + <undefined> + + >>> ii.inputs.a = 'foo' + >>> out = ii._outputs() + >>> out.a + <undefined> + + >>> out = ii.run() + >>> out.outputs.a + 'foo' + + >>> ii2 = IdentityInterface(fields=['a', 'b'], mandatory_inputs=True) + >>> ii2.inputs.a = 'foo' + >>> out = ii2.run() # doctest: +SKIP + ValueError: IdentityInterface requires a value for input 'b' because it was listed in 'fields' Interface IdentityInterface failed to run. + """ + input_spec = DynamicTraitedSpec + output_spec = DynamicTraitedSpec + + def __init__(self, fields=None, mandatory_inputs=True, **inputs): + super(IdentityInterface, self).__init__(**inputs) + if fields is None or not fields: + raise ValueError( + 'Identity Interface fields must be a non-empty list') + # Each input must be in the fields. + for in_field in inputs: + if in_field not in fields: + raise ValueError( + 'Identity Interface input is not in the fields: %s' % + in_field) + self._fields = fields + self._mandatory_inputs = mandatory_inputs + add_traits(self.inputs, fields) + # Adding any traits wipes out all input values set in superclass initialization, + # even if the trait is not in the add_traits argument. The work-around is to reset + # the values after adding the traits. + self.inputs.trait_set(**inputs) + + def _add_output_traits(self, base): + return add_traits(base, self._fields) + + def _list_outputs(self): + # manual mandatory inputs check + if self._fields and self._mandatory_inputs: + for key in self._fields: + value = getattr(self.inputs, key) + if not isdefined(value): + msg = "%s requires a value for input '%s' because it was listed in 'fields'. \ You can turn off mandatory inputs checking by passing mandatory_inputs = False to the constructor."
% \ + (self.__class__.__name__, key) + raise ValueError(msg) + + outputs = self._outputs().get() + for key in self._fields: + val = getattr(self.inputs, key) + if isdefined(val): + outputs[key] = val + return outputs + + +class MergeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + axis = traits.Enum( + 'vstack', + 'hstack', + usedefault=True, + desc= + 'direction in which to merge, hstack requires same number of elements in each input' + ) + no_flatten = traits.Bool( + False, + usedefault=True, + desc='append to outlist instead of extending in vstack mode') + ravel_inputs = traits.Bool( + False, usedefault=True, desc='ravel inputs when no_flatten is False') + + +class MergeOutputSpec(TraitedSpec): + out = traits.List(desc='Merged output') + + +def _ravel(in_val): + if not isinstance(in_val, list): + return in_val + flat_list = [] + for val in in_val: + raveled_val = _ravel(val) + if isinstance(raveled_val, list): + flat_list.extend(raveled_val) + else: + flat_list.append(raveled_val) + return flat_list + + +class Merge(IOBase): + """Basic interface class to merge inputs into a single list + + ``Merge(1)`` will merge a list of lists + + Examples + -------- + + >>> from nipype.interfaces.utility import Merge + >>> mi = Merge(3) + >>> mi.inputs.in1 = 1 + >>> mi.inputs.in2 = [2, 5] + >>> mi.inputs.in3 = 3 + >>> out = mi.run() + >>> out.outputs.out + [1, 2, 5, 3] + + >>> merge = Merge(1) + >>> merge.inputs.in1 = [1, [2, 5], 3] + >>> out = merge.run() + >>> out.outputs.out + [1, [2, 5], 3] + + >>> merge = Merge(1) + >>> merge.inputs.in1 = [1, [2, 5], 3] + >>> merge.inputs.ravel_inputs = True + >>> out = merge.run() + >>> out.outputs.out + [1, 2, 5, 3] + + >>> merge = Merge(1) + >>> merge.inputs.in1 = [1, [2, 5], 3] + >>> merge.inputs.no_flatten = True + >>> out = merge.run() + >>> out.outputs.out + [[1, [2, 5], 3]] + """ + input_spec = MergeInputSpec + output_spec = MergeOutputSpec + + def __init__(self, numinputs=0, **inputs): + super(Merge, self).__init__(**inputs) + self._numinputs = numinputs + if numinputs >= 1: + input_names = ['in%d' % (i + 1) for i in range(numinputs)] + else: + input_names = [] + add_traits(self.inputs, input_names) + + def _list_outputs(self): + outputs = self._outputs().get() + out = [] + + if self._numinputs < 1: + return outputs + else: + getval = lambda idx: getattr(self.inputs, 'in%d' % (idx + 1)) + values = [ + getval(idx) for idx in range(self._numinputs) + if isdefined(getval(idx)) + ] + + if self.inputs.axis == 'vstack': + for value in values: + if isinstance(value, list) and not self.inputs.no_flatten: + out.extend( + _ravel(value) if self.inputs.ravel_inputs else value) + else: + out.append(value) + else: + lists = [ensure_list(val) for val in values] + out = [[val[i] for val in lists] for i in range(len(lists[0]))] + outputs['out'] = out + return outputs + + +class RenameInputSpec(DynamicTraitedSpec): + + in_file = File(exists=True, mandatory=True, desc="file to rename") + keep_ext = traits.Bool( + desc=("Keep in_file extension, replace " + "non-extension component of name")) + format_string = Str( + mandatory=True, desc="Python formatting string for output template") + parse_string = Str(desc="Python regexp parse string to define " + "replacement inputs") + use_fullpath = traits.Bool( + False, usedefault=True, desc="Use full path as input to regex parser") + + +class RenameOutputSpec(TraitedSpec): + + out_file = traits.File( + exists=True, desc="softlink to original file with new name") + + +class Rename(IOBase): + """Change the name of a file 
based on a mapped format string. + + To use additional inputs that will be defined at run-time, the class + constructor must be called with the format template, and the fields + identified will become inputs to the interface. + + Additionally, you may set the parse_string input, which will be run + over the input filename with a regular expressions search, and will + fill in additional input fields from matched groups. Fields set with + inputs have precedence over fields filled in with the regexp match. + + Examples + -------- + + >>> from nipype.interfaces.utility import Rename + >>> rename1 = Rename() + >>> rename1.inputs.in_file = os.path.join(datadir, "zstat1.nii.gz") # datadir is a directory with exemplary files, defined in conftest.py + >>> rename1.inputs.format_string = "Faces-Scenes.nii.gz" + >>> res = rename1.run() # doctest: +SKIP + >>> res.outputs.out_file # doctest: +SKIP + 'Faces-Scenes.nii.gz' # doctest: +SKIP + + >>> rename2 = Rename(format_string="%(subject_id)s_func_run%(run)02d") + >>> rename2.inputs.in_file = os.path.join(datadir, "functional.nii") + >>> rename2.inputs.keep_ext = True + >>> rename2.inputs.subject_id = "subj_201" + >>> rename2.inputs.run = 2 + >>> res = rename2.run() # doctest: +SKIP + >>> res.outputs.out_file # doctest: +SKIP + 'subj_201_func_run02.nii' # doctest: +SKIP + + >>> rename3 = Rename(format_string="%(subject_id)s_%(seq)s_run%(run)02d.nii") + >>> rename3.inputs.in_file = os.path.join(datadir, "func_epi_1_1.nii") + >>> rename3.inputs.parse_string = "func_(?P<seq>\w*)_.*" + >>> rename3.inputs.subject_id = "subj_201" + >>> rename3.inputs.run = 2 + >>> res = rename3.run() # doctest: +SKIP + >>> res.outputs.out_file # doctest: +SKIP + 'subj_201_epi_run02.nii' # doctest: +SKIP + + """ + input_spec = RenameInputSpec + output_spec = RenameOutputSpec + + def __init__(self, format_string=None, **inputs): + super(Rename, self).__init__(**inputs) + if format_string is not None: + self.inputs.format_string = format_string + self.fmt_fields = re.findall(r"%\((.+?)\)", format_string) + add_traits(self.inputs, self.fmt_fields) + else: + self.fmt_fields = [] + + def _rename(self): + fmt_dict = dict() + if isdefined(self.inputs.parse_string): + if isdefined( + self.inputs.use_fullpath) and self.inputs.use_fullpath: + m = re.search(self.inputs.parse_string, self.inputs.in_file) + else: + m = re.search(self.inputs.parse_string, + os.path.split(self.inputs.in_file)[1]) + if m: + fmt_dict.update(m.groupdict()) + for field in self.fmt_fields: + val = getattr(self.inputs, field) + if isdefined(val): + fmt_dict[field] = getattr(self.inputs, field) + if self.inputs.keep_ext: + fmt_string = "".join([ + self.inputs.format_string, + split_filename(self.inputs.in_file)[2] + ]) + else: + fmt_string = self.inputs.format_string + return fmt_string % fmt_dict + + def _run_interface(self, runtime): + runtime.returncode = 0 + _ = copyfile(self.inputs.in_file, + os.path.join(os.getcwd(), self._rename())) + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs["out_file"] = os.path.join(os.getcwd(), self._rename()) + return outputs + + +class SplitInputSpec(BaseInterfaceInputSpec): + inlist = traits.List( + traits.Any, mandatory=True, desc='list of values to split') + splits = traits.List( + traits.Int, + mandatory=True, + desc='Number of outputs in each split - should add to number of inputs' + ) + squeeze = traits.Bool( + False, + usedefault=True, + desc='unfold one-element splits removing the list') + + +class Split(IOBase): + """Basic interface
class to split lists into multiple outputs + + Examples + -------- + + >>> from nipype.interfaces.utility import Split + >>> sp = Split() + >>> _ = sp.inputs.trait_set(inlist=[1, 2, 3], splits=[2, 1]) + >>> out = sp.run() + >>> out.outputs.out1 + [1, 2] + + """ + + input_spec = SplitInputSpec + output_spec = DynamicTraitedSpec + + def _add_output_traits(self, base): + undefined_traits = {} + for i in range(len(self.inputs.splits)): + key = 'out%d' % (i + 1) + base.add_trait(key, traits.Any) + undefined_traits[key] = Undefined + base.trait_set(trait_change_notify=False, **undefined_traits) + return base + + def _list_outputs(self): + outputs = self._outputs().get() + if isdefined(self.inputs.splits): + if sum(self.inputs.splits) != len(self.inputs.inlist): + raise RuntimeError('sum of splits != num of list elements') + splits = [0] + splits.extend(self.inputs.splits) + splits = np.cumsum(splits) + for i in range(len(splits) - 1): + val = np.array( + self.inputs.inlist)[splits[i]:splits[i + 1]].tolist() + if self.inputs.squeeze and len(val) == 1: + val = val[0] + outputs['out%d' % (i + 1)] = val + return outputs + + +class SelectInputSpec(BaseInterfaceInputSpec): + inlist = InputMultiPath( + traits.Any, mandatory=True, desc='list of values to choose from') + index = InputMultiPath( + traits.Int, mandatory=True, desc='0-based indices of values to choose') + + +class SelectOutputSpec(TraitedSpec): + out = OutputMultiPath(traits.Any, desc='list of selected values') + + +class Select(IOBase): + """Basic interface class to select specific elements from a list + + Examples + -------- + + >>> from nipype.interfaces.utility import Select + >>> sl = Select() + >>> _ = sl.inputs.trait_set(inlist=[1, 2, 3, 4, 5], index=[3]) + >>> out = sl.run() + >>> out.outputs.out + 4 + + >>> _ = sl.inputs.trait_set(inlist=[1, 2, 3, 4, 5], index=[3, 4]) + >>> out = sl.run() + >>> out.outputs.out + [4, 5] + + """ + + input_spec = SelectInputSpec + output_spec = SelectOutputSpec + + def _list_outputs(self): + outputs = self._outputs().get() + out = np.array(self.inputs.inlist)[np.array( + self.inputs.index)].tolist() + outputs['out'] = out + return outputs + + +class AssertEqualInputSpec(BaseInterfaceInputSpec): + volume1 = File(exists=True, mandatory=True) + volume2 = File(exists=True, mandatory=True) + + +class AssertEqual(BaseInterface): + input_spec = AssertEqualInputSpec + + def _run_interface(self, runtime): + + data1 = nb.load(self.inputs.volume1).get_data() + data2 = nb.load(self.inputs.volume2).get_data() + + if not np.all(data1 == data2): + raise RuntimeError('Input images are not exactly equal') + return runtime diff --git a/nipype/interfaces/utility/csv.py b/nipype/interfaces/utility/csv.py new file mode 100644 index 0000000000..16c377e3b5 --- /dev/null +++ b/nipype/interfaces/utility/csv.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""CSV Handling utilities +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import zip, range, str, open + +from future import standard_library +standard_library.install_aliases() + +from ..base import (traits, TraitedSpec, DynamicTraitedSpec, File, + BaseInterface) +from ..io import add_traits + + +class CSVReaderInputSpec(DynamicTraitedSpec, TraitedSpec): + in_file = File( + exists=True, + mandatory=True, + desc='Input comma-separated value (CSV) file') + header = traits.Bool( + False, + usedefault=True,
+ desc='True if the first line is a column header') + + +class CSVReader(BaseInterface): + """ + Examples + -------- + + >>> reader = CSVReader() # doctest: +SKIP + >>> reader.inputs.in_file = 'noHeader.csv' # doctest: +SKIP + >>> out = reader.run() # doctest: +SKIP + >>> out.outputs.column_0 == ['foo', 'bar', 'baz'] # doctest: +SKIP + True + >>> out.outputs.column_1 == ['hello', 'world', 'goodbye'] # doctest: +SKIP + True + >>> out.outputs.column_2 == ['300.1', '5', '0.3'] # doctest: +SKIP + True + + >>> reader = CSVReader() # doctest: +SKIP + >>> reader.inputs.in_file = 'header.csv' # doctest: +SKIP + >>> reader.inputs.header = True # doctest: +SKIP + >>> out = reader.run() # doctest: +SKIP + >>> out.outputs.files == ['foo', 'bar', 'baz'] # doctest: +SKIP + True + >>> out.outputs.labels == ['hello', 'world', 'goodbye'] # doctest: +SKIP + True + >>> out.outputs.erosion == ['300.1', '5', '0.3'] # doctest: +SKIP + True + + """ + input_spec = CSVReaderInputSpec + output_spec = DynamicTraitedSpec + _always_run = True + + def _append_entry(self, outputs, entry): + for key, value in zip(self._outfields, entry): + outputs[key].append(value) + return outputs + + def _parse_line(self, line): + line = line.replace('\n', '') + entry = [x.strip() for x in line.split(',')] + return entry + + def _get_outfields(self): + with open(self.inputs.in_file, 'r') as fid: + entry = self._parse_line(fid.readline()) + if self.inputs.header: + self._outfields = tuple(entry) + else: + self._outfields = tuple( + ['column_' + str(x) for x in range(len(entry))]) + return self._outfields + + def _run_interface(self, runtime): + self._get_outfields() + return runtime + + def _outputs(self): + return self._add_output_traits(super(CSVReader, self)._outputs()) + + def _add_output_traits(self, base): + return add_traits(base, self._get_outfields()) + + def _list_outputs(self): + outputs = self.output_spec().get() + isHeader = True + for key in self._outfields: + outputs[key] = [] # initialize outfields + with open(self.inputs.in_file, 'r') as fid: + for line in fid.readlines(): + if self.inputs.header and isHeader: # skip header line + isHeader = False + continue + entry = self._parse_line(line) + outputs = self._append_entry(outputs, entry) + return outputs diff --git a/nipype/interfaces/utility/tests/__init__.py b/nipype/interfaces/utility/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/utility/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/interfaces/utility/tests/test_auto_AssertEqual.py b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py new file mode 100644 index 0000000000..284e0f4d62 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_AssertEqual.py @@ -0,0 +1,15 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import AssertEqual + + +def test_AssertEqual_inputs(): + input_map = dict( + volume1=dict(mandatory=True, ), + volume2=dict(mandatory=True, ), + ) + inputs = AssertEqual.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_CSVReader.py b/nipype/interfaces/utility/tests/test_auto_CSVReader.py new file mode 100644 index 0000000000..8a51ca4170 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_CSVReader.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO 
NOT EDIT +from __future__ import unicode_literals +from ..csv import CSVReader + + +def test_CSVReader_inputs(): + input_map = dict( + header=dict(usedefault=True, ), + in_file=dict(mandatory=True, ), + ) + inputs = CSVReader.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_CSVReader_outputs(): + output_map = dict() + outputs = CSVReader.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_Function.py b/nipype/interfaces/utility/tests/test_auto_Function.py new file mode 100644 index 0000000000..5c2505fe16 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_Function.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..wrappers import Function + + +def test_Function_inputs(): + input_map = dict(function_str=dict(mandatory=True, ), ) + inputs = Function.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Function_outputs(): + output_map = dict() + outputs = Function.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py new file mode 100644 index 0000000000..97523d0b86 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_IdentityInterface.py @@ -0,0 +1,19 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import IdentityInterface + + +def test_IdentityInterface_inputs(): + input_map = dict() + inputs = IdentityInterface.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_IdentityInterface_outputs(): + output_map = dict() + outputs = IdentityInterface.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_Merge.py b/nipype/interfaces/utility/tests/test_auto_Merge.py new file mode 100644 index 0000000000..71e7d2db0a --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_Merge.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import Merge + + +def test_Merge_inputs(): + input_map = dict( + axis=dict(usedefault=True, ), + no_flatten=dict(usedefault=True, ), + ravel_inputs=dict(usedefault=True, ), + ) + inputs = Merge.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Merge_outputs(): + output_map = dict(out=dict(), ) + outputs = Merge.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_Rename.py 
b/nipype/interfaces/utility/tests/test_auto_Rename.py new file mode 100644 index 0000000000..1e6e1cab34 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_Rename.py @@ -0,0 +1,25 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import Rename + + +def test_Rename_inputs(): + input_map = dict( + format_string=dict(mandatory=True, ), + in_file=dict(mandatory=True, ), + keep_ext=dict(), + parse_string=dict(), + use_fullpath=dict(usedefault=True, ), + ) + inputs = Rename.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Rename_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Rename.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_Select.py b/nipype/interfaces/utility/tests/test_auto_Select.py new file mode 100644 index 0000000000..e241f7ed76 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_Select.py @@ -0,0 +1,22 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import Select + + +def test_Select_inputs(): + input_map = dict( + index=dict(mandatory=True, ), + inlist=dict(mandatory=True, ), + ) + inputs = Select.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Select_outputs(): + output_map = dict(out=dict(), ) + outputs = Select.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_auto_Split.py b/nipype/interfaces/utility/tests/test_auto_Split.py new file mode 100644 index 0000000000..8acbceef99 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_auto_Split.py @@ -0,0 +1,23 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import Split + + +def test_Split_inputs(): + input_map = dict( + inlist=dict(mandatory=True, ), + splits=dict(mandatory=True, ), + squeeze=dict(usedefault=True, ), + ) + inputs = Split.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Split_outputs(): + output_map = dict() + outputs = Split.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/utility/tests/test_base.py b/nipype/interfaces/utility/tests/test_base.py new file mode 100644 index 0000000000..159454a7fc --- /dev/null +++ b/nipype/interfaces/utility/tests/test_base.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +import os +import pytest + +from nipype.interfaces import utility +from nipype.interfaces.base import isdefined +import nipype.pipeline.engine as pe + + +def test_rename(tmpdir): + tmpdir.chdir() + + # Test very simple rename + _ = open("file.txt", 
"w").close() + rn = utility.Rename(in_file="file.txt", format_string="test_file1.txt") + res = rn.run() + outfile = tmpdir.join("test_file1.txt").strpath + assert res.outputs.out_file == outfile + assert os.path.exists(outfile) + + # Now a string-formatting version + rn = utility.Rename( + in_file="file.txt", + format_string="%(field1)s_file%(field2)d", + keep_ext=True) + # Test .input field creation + assert hasattr(rn.inputs, "field1") + assert hasattr(rn.inputs, "field2") + + # Set the inputs + rn.inputs.field1 = "test" + rn.inputs.field2 = 2 + res = rn.run() + outfile = tmpdir.join("test_file2.txt").strpath + assert res.outputs.out_file == outfile + assert os.path.exists(outfile) + + +@pytest.mark.parametrize("args, expected", [({}, ([0], [1, 2, 3])), + ({ + "squeeze": True + }, (0, [1, 2, 3]))]) +def test_split(tmpdir, args, expected): + tmpdir.chdir() + + node = pe.Node( + utility.Split(inlist=list(range(4)), splits=[1, 3], **args), + name='split_squeeze') + res = node.run() + assert res.outputs.out1 == expected[0] + assert res.outputs.out2 == expected[1] + + +@pytest.mark.parametrize("args, kwargs, in_lists, expected", [ + ([3], {}, [0, [1, 2], [3, 4, 5]], [0, 1, 2, 3, 4, 5]), + ([0], {}, None, None), + ([], {}, [], []), + ([], {}, [0, [1, 2], [3, 4, 5]], [0, [1, 2], [3, 4, 5]]), + ([3], { + 'axis': 'hstack' + }, [[0], [1, 2], [3, 4, 5]], [[0, 1, 3]]), + ([3], { + 'axis': 'hstack' + }, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), + ([3], { + 'axis': 'hstack' + }, [[0, 1], [2, 3], [4, 5]], [[0, 2, 4], [1, 3, 5]]), +]) +def test_merge(tmpdir, args, kwargs, in_lists, expected): + tmpdir.chdir() + + node = pe.Node(utility.Merge(*args, **kwargs), name='merge') + + numinputs = args[0] if args else 0 + if numinputs >= 1: + for i in range(1, numinputs + 1): + setattr(node.inputs, 'in{:d}'.format(i), in_lists[i - 1]) + + res = node.run() + if numinputs < 1: + assert not isdefined(res.outputs.out) + else: + assert res.outputs.out == expected diff --git a/nipype/interfaces/utility/tests/test_csv.py b/nipype/interfaces/utility/tests/test_csv.py new file mode 100644 index 0000000000..a5c678153e --- /dev/null +++ b/nipype/interfaces/utility/tests/test_csv.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals + +from nipype.interfaces import utility + + +def test_csvReader(tmpdir): + header = "files,labels,erosion\n" + lines = ["foo,hello,300.1\n", "bar,world,5\n", "baz,goodbye,0.3\n"] + for x in range(2): + name = tmpdir.join("testfile.csv").strpath + with open(name, 'w') as fid: + reader = utility.CSVReader() + if x % 2 == 0: + fid.write(header) + reader.inputs.header = True + fid.writelines(lines) + fid.flush() + reader.inputs.in_file = name + out = reader.run() + if x % 2 == 0: + assert out.outputs.files == ['foo', 'bar', 'baz'] + assert out.outputs.labels == ['hello', 'world', 'goodbye'] + assert out.outputs.erosion == ['300.1', '5', '0.3'] + else: + assert out.outputs.column_0 == ['foo', 'bar', 'baz'] + assert out.outputs.column_1 == ['hello', 'world', 'goodbye'] + assert out.outputs.column_2 == ['300.1', '5', '0.3'] diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py new file mode 100644 index 0000000000..392ae094b0 --- /dev/null +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; 
py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +import os +import pytest + +from nipype.interfaces import utility +import nipype.pipeline.engine as pe + +concat_sort = """\ +def concat_sort(in_arrays): + import numpy as np + all_vals = np.concatenate([arr.flatten() for arr in in_arrays]) + return np.sort(all_vals) +""" + + +def test_function(tmpdir): + tmpdir.chdir() + + def gen_random_array(size): + import numpy as np + return np.random.rand(size, size) + + f1 = pe.MapNode( + utility.Function( + input_names=['size'], + output_names=['random_array'], + function=gen_random_array), + name='random_array', + iterfield=['size']) + f1.inputs.size = [2, 3, 5] + + wf = pe.Workflow(name="test_workflow") + + def increment_array(in_array): + return in_array + 1 + + f2 = pe.MapNode( + utility.Function(function=increment_array), + name='increment_array', + iterfield=['in_array']) + + wf.connect(f1, 'random_array', f2, 'in_array') + + f3 = pe.Node(utility.Function(function=concat_sort), name="concat_sort") + + wf.connect(f2, 'out', f3, 'in_arrays') + wf.run() + + +# Note: np is deliberately not imported at module scope; test_should_fail below +# checks that running this function without the numpy import raises a NameError, +# while test_function_with_imports supplies it through the `imports` argument. +def make_random_array(size): + return np.random.randn(size, size) + + +def should_fail(tmp): + tmp.chdir() + + node = pe.Node( + utility.Function( + input_names=["size"], + output_names=["random_array"], + function=make_random_array), + name="should_fail") + node.inputs.size = 10 + node.run() + + +def test_should_fail(tmpdir): + with pytest.raises(NameError): + should_fail(tmpdir) + + +def test_function_with_imports(tmpdir): + tmpdir.chdir() + + node = pe.Node( + utility.Function( + input_names=["size"], + output_names=["random_array"], + function=make_random_array, + imports=["import numpy as np"]), + name="should_not_fail") + print(node.inputs.function_str) + node.inputs.size = 10 + node.run() + + +def test_aux_connect_function(tmpdir): + """ This tests execution of nodes with multiple inputs and an auxiliary + function inside the Workflow connect function.
+ """ + tmpdir.chdir() + + wf = pe.Workflow(name="test_workflow") + + def _gen_tuple(size): + return [ + 1, + ] * size + + def _sum_and_sub_mul(a, b, c): + return (a + b) * c, (a - b) * c + + def _inc(x): + return x + 1 + + params = pe.Node( + utility.IdentityInterface(fields=['size', 'num']), name='params') + params.inputs.num = 42 + params.inputs.size = 1 + + gen_tuple = pe.Node( + utility.Function( + input_names=['size'], output_names=['tuple'], function=_gen_tuple), + name='gen_tuple') + + ssm = pe.Node( + utility.Function( + input_names=['a', 'b', 'c'], + output_names=['sum', 'sub'], + function=_sum_and_sub_mul), + name='sum_and_sub_mul') + + split = pe.Node(utility.Split(splits=[1, 1], squeeze=True), name='split') + + wf.connect([ + (params, gen_tuple, [(("size", _inc), "size")]), + (params, ssm, [(("num", _inc), "c")]), + (gen_tuple, split, [("tuple", "inlist")]), + (split, ssm, [ + (("out1", _inc), "a"), + ("out2", "b"), + ]), + ]) + + wf.run() diff --git a/nipype/interfaces/utility/wrappers.py b/nipype/interfaces/utility/wrappers.py new file mode 100644 index 0000000000..80a6f89738 --- /dev/null +++ b/nipype/interfaces/utility/wrappers.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +# changing to temporary directories + >>> tmp = getfixture('tmpdir') + >>> old = tmp.chdir() +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from future import standard_library +standard_library.install_aliases() + +from builtins import str, bytes + +from ... import logging +from ..base import (traits, DynamicTraitedSpec, Undefined, isdefined, + BaseInterfaceInputSpec) +from ..io import IOBase, add_traits +from ...utils.filemanip import ensure_list +from ...utils.functions import getsource, create_function_from_source + +iflogger = logging.getLogger('nipype.interface') + + +class FunctionInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): + function_str = traits.Str(mandatory=True, desc='code for function') + + +class Function(IOBase): + """Runs arbitrary function as an interface + + Examples + -------- + + >>> func = 'def func(arg1, arg2=5): return arg1 + arg2' + >>> fi = Function(input_names=['arg1', 'arg2'], output_names=['out']) + >>> fi.inputs.function_str = func + >>> res = fi.run(arg1=1) + >>> res.outputs.out + 6 + + """ + + input_spec = FunctionInputSpec + output_spec = DynamicTraitedSpec + + def __init__(self, + input_names=None, + output_names='out', + function=None, + imports=None, + **inputs): + """ + + Parameters + ---------- + + input_names: single str or list or None + names corresponding to function inputs + if ``None``, derive input names from function argument names + output_names: single str or list + names corresponding to function outputs (default: 'out'). + if list of length > 1, has to match the number of outputs + function : callable + callable python object. 
must be able to execute in an + isolated namespace (possibly in concert with the ``imports`` + parameter) + imports : list of strings + list of import statements that allow the function to execute + in an otherwise empty namespace + """ + + super(Function, self).__init__(**inputs) + if function: + if hasattr(function, '__call__'): + try: + self.inputs.function_str = getsource(function) + except IOError: + raise Exception('Interface Function does not accept ' + 'function objects defined interactively ' + 'in a python session') + else: + if input_names is None: + fninfo = function.__code__ + elif isinstance(function, (str, bytes)): + self.inputs.function_str = function + if input_names is None: + fninfo = create_function_from_source(function, + imports).__code__ + else: + raise Exception('Unknown type of function') + if input_names is None: + input_names = fninfo.co_varnames[:fninfo.co_argcount] + self.inputs.on_trait_change(self._set_function_string, 'function_str') + self._input_names = ensure_list(input_names) + self._output_names = ensure_list(output_names) + add_traits(self.inputs, [name for name in self._input_names]) + self.imports = imports + self._out = {} + for name in self._output_names: + self._out[name] = None + + def _set_function_string(self, obj, name, old, new): + if name == 'function_str': + if hasattr(new, '__call__'): + function_source = getsource(new) + fninfo = new.__code__ + elif isinstance(new, (str, bytes)): + function_source = new + fninfo = create_function_from_source(new, + self.imports).__code__ + self.inputs.trait_set( + trait_change_notify=False, **{ + '%s' % name: function_source + }) + # Update input traits + input_names = fninfo.co_varnames[:fninfo.co_argcount] + new_names = set(input_names) - set(self._input_names) + add_traits(self.inputs, list(new_names)) + self._input_names.extend(new_names) + + def _add_output_traits(self, base): + undefined_traits = {} + for key in self._output_names: + base.add_trait(key, traits.Any) + undefined_traits[key] = Undefined + base.trait_set(trait_change_notify=False, **undefined_traits) + return base + + def _run_interface(self, runtime): + # Create function handle + function_handle = create_function_from_source(self.inputs.function_str, + self.imports) + # Get function args + args = {} + for name in self._input_names: + value = getattr(self.inputs, name) + if isdefined(value): + args[name] = value + + out = function_handle(**args) + if len(self._output_names) == 1: + self._out[self._output_names[0]] = out + else: + if isinstance(out, tuple) and \ + (len(out) != len(self._output_names)): + raise RuntimeError('Mismatch in number of expected outputs') + + else: + for idx, name in enumerate(self._output_names): + self._out[name] = out[idx] + + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + for key in self._output_names: + outputs[key] = self._out[key] + return outputs diff --git a/nipype/interfaces/vista/__init__.py b/nipype/interfaces/vista/__init__.py new file mode 100644 index 0000000000..d0372042aa --- /dev/null +++ b/nipype/interfaces/vista/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from .vista import (Vnifti2Image, VtoMat) diff --git a/nipype/interfaces/vista/tests/__init__.py b/nipype/interfaces/vista/tests/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/interfaces/vista/tests/__init__.py @@ -0,0 +1 @@ +# -*- coding: 
utf-8 -*- diff --git a/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py new file mode 100644 index 0000000000..785e87e8b1 --- /dev/null +++ b/nipype/interfaces/vista/tests/test_auto_Vnifti2Image.py @@ -0,0 +1,42 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..vista import Vnifti2Image + + +def test_Vnifti2Image_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + attributes=dict( + argstr='-attr %s', + position=2, + ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='-out %s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s.v', + position=-1, + ), + ) + inputs = Vnifti2Image.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_Vnifti2Image_outputs(): + output_map = dict(out_file=dict(), ) + outputs = Vnifti2Image.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/vista/tests/test_auto_VtoMat.py b/nipype/interfaces/vista/tests/test_auto_VtoMat.py new file mode 100644 index 0000000000..ee16266402 --- /dev/null +++ b/nipype/interfaces/vista/tests/test_auto_VtoMat.py @@ -0,0 +1,38 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..vista import VtoMat + + +def test_VtoMat_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + in_file=dict( + argstr='-in %s', + mandatory=True, + position=1, + ), + out_file=dict( + argstr='-out %s', + hash_files=False, + keep_extension=False, + name_source=['in_file'], + name_template='%s.mat', + position=-1, + ), + ) + inputs = VtoMat.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_VtoMat_outputs(): + output_map = dict(out_file=dict(), ) + outputs = VtoMat.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/vista/vista.py b/nipype/interfaces/vista/vista.py new file mode 100644 index 0000000000..5000036d02 --- /dev/null +++ b/nipype/interfaces/vista/vista.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from ..base import CommandLineInputSpec, CommandLine, TraitedSpec, File + + +class Vnifti2ImageInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='-in %s', + mandatory=True, + position=1, + desc='in file') + attributes = File( + exists=True, argstr='-attr %s', position=2, desc='attribute file') + out_file = File( + name_template="%s.v", + keep_extension=False, + argstr='-out %s', + hash_files=False, + position=-1, + desc='output data file', + name_source=["in_file"]) + + +class Vnifti2ImageOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Output vista file') + + +class Vnifti2Image(CommandLine): + 
""" + Convert a nifti file into a vista file. + + Example + ------- + + >>> vimage = Vnifti2Image() + >>> vimage.inputs.in_file = 'image.nii' + >>> vimage.cmdline + 'vnifti2image -in image.nii -out image.v' + >>> vimage.run() # doctest: +SKIP + """ + + _cmd = 'vnifti2image' + input_spec = Vnifti2ImageInputSpec + output_spec = Vnifti2ImageOutputSpec + + +class VtoMatInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + argstr='-in %s', + mandatory=True, + position=1, + desc='in file') + out_file = File( + name_template="%s.mat", + keep_extension=False, + argstr='-out %s', + hash_files=False, + position=-1, + desc='output mat file', + name_source=["in_file"]) + + +class VtoMatOutputSpec(TraitedSpec): + out_file = File(exists=True, desc='Output mat file') + + +class VtoMat(CommandLine): + """ + Convert a nifti file into a vista file. + + Example + ------- + + >>> vimage = VtoMat() + >>> vimage.inputs.in_file = 'image.v' + >>> vimage.cmdline + 'vtomat -in image.v -out image.mat' + >>> vimage.run() # doctest: +SKIP + """ + + _cmd = 'vtomat' + input_spec = VtoMatInputSpec + output_spec = VtoMatOutputSpec diff --git a/nipype/interfaces/vtkbase.py b/nipype/interfaces/vtkbase.py new file mode 100644 index 0000000000..1ec66ea614 --- /dev/null +++ b/nipype/interfaces/vtkbase.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +vtkbase provides some helpers to use VTK through the tvtk package (mayavi) + +Code using tvtk should import it through this module +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +from .. import logging + +iflogger = logging.getLogger('nipype.interface') + +# Check that VTK can be imported and get version +_vtk_version = None +try: + import vtk + _vtk_version = (vtk.vtkVersion.GetVTKMajorVersion(), + vtk.vtkVersion.GetVTKMinorVersion()) +except ImportError: + iflogger.warning('VTK was not found') + +# Ensure that tvtk is loaded with the appropriate ETS_TOOLKIT env var +old_ets = os.getenv('ETS_TOOLKIT') +os.environ['ETS_TOOLKIT'] = 'null' +_have_tvtk = False +try: + from tvtk.api import tvtk + _have_tvtk = True +except ImportError: + iflogger.warning('tvtk wasn\'t found') + tvtk = None +finally: + if old_ets is not None: + os.environ['ETS_TOOLKIT'] = old_ets + else: + del os.environ['ETS_TOOLKIT'] + + +def vtk_version(): + """ Get VTK version """ + global _vtk_version + return _vtk_version + + +def no_vtk(): + """ Checks if VTK is installed and the python wrapper is functional """ + global _vtk_version + return _vtk_version is None + + +def no_tvtk(): + """ Checks if tvtk was found """ + global _have_tvtk + return not _have_tvtk + + +def vtk_old(): + """ Checks if VTK uses the old-style pipeline (VTK<6.0) """ + global _vtk_version + if _vtk_version is None: + raise RuntimeException('VTK is not correctly installed.') + return _vtk_version[0] < 6 + + +def configure_input_data(obj, data): + """ + Configure the input data for vtk pipeline object obj. 
Copied from latest version of mayavi + """ + if vtk_old(): + obj.input = data + else: + obj.set_input_data(data) + + +def vtk_output(obj): + """ Return the output of the vtk pipeline object obj.""" + if vtk_old(): + return obj.output + return obj.get_output() diff --git a/nipype/interfaces/workbench/__init__.py b/nipype/interfaces/workbench/__init__.py new file mode 100644 index 0000000000..5ced0d2fb3 --- /dev/null +++ b/nipype/interfaces/workbench/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from .metric import MetricResample diff --git a/nipype/interfaces/workbench/base.py b/nipype/interfaces/workbench/base.py new file mode 100644 index 0000000000..4adc9dc69b --- /dev/null +++ b/nipype/interfaces/workbench/base.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +The workbench module provides classes for interfacing with `connectome workbench +<http://www.humanconnectome.org/software/connectome-workbench.html>`_ tools. + +Connectome Workbench is an open source, freely available visualization and + discovery tool used to map neuroimaging data, especially data generated by the + Human Connectome Project. +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +import re + +from ... import logging +from ...utils.filemanip import split_filename +from ..base import CommandLine, PackageInfo + +iflogger = logging.getLogger('nipype.interface') + + +class Info(PackageInfo): + """ + Handle `wb_command` version information. + """ + + version_cmd = 'wb_command -version' + + @staticmethod + def parse_version(raw_info): + m = re.search(r'\nVersion (\S+)', raw_info) + return m.groups()[0] if m else None + + +class WBCommand(CommandLine): + """Base support for workbench commands.""" + + @property + def version(self): + return Info.version() + + def _gen_filename(self, name, outdir=None, suffix='', ext=None): + """Generate a filename based on the given parameters. + The filename will take the form: <basename><suffix><ext>. + Parameters + ---------- + name : str + Filename to base the new filename on. + suffix : str + Suffix to add to the `basename`. (default is '') + ext : str + Extension to use for the new filename. + Returns + ------- + fname : str + New filename based on given parameters. + """ + if not name: + raise ValueError("Cannot generate filename - filename not set") + + _, fname, fext = split_filename(name) + if ext is None: + ext = fext + if outdir is None: + outdir = '.' + return os.path.join(outdir, fname + suffix + ext) diff --git a/nipype/interfaces/workbench/metric.py b/nipype/interfaces/workbench/metric.py new file mode 100644 index 0000000000..e5bbb60739 --- /dev/null +++ b/nipype/interfaces/workbench/metric.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""This module provides interfaces for workbench surface commands""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os + +from ..base import (TraitedSpec, File, traits, CommandLineInputSpec) +from .base import WBCommand +from ...
import logging + +iflogger = logging.getLogger('nipype.interface') + + +class MetricResampleInputSpec(CommandLineInputSpec): + in_file = File( + exists=True, + mandatory=True, + argstr="%s", + position=0, + desc="The metric file to resample") + current_sphere = File( + exists=True, + mandatory=True, + argstr="%s", + position=1, + desc="A sphere surface with the mesh that the metric is currently on") + new_sphere = File( + exists=True, + mandatory=True, + argstr="%s", + position=2, + desc="A sphere surface that is in register with and" + " has the desired output mesh") + method = traits.Enum( + "ADAP_BARY_AREA", + "BARYCENTRIC", + argstr="%s", + mandatory=True, + position=3, + desc="The method name - ADAP_BARY_AREA method is recommended for" + " ordinary metric data, because it should use all data while" + " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used," + " exactly one of area_surfs or area_metrics must be specified") + out_file = File( + name_source=["new_sphere"], + name_template="%s.out", + keep_extension=True, + argstr="%s", + position=4, + desc="The output metric") + area_surfs = traits.Bool( + position=5, + argstr="-area-surfs", + xor=["area_metrics"], + desc="Specify surfaces to do vertex area correction based on") + area_metrics = traits.Bool( + position=5, + argstr="-area-metrics", + xor=["area_surfs"], + desc="Specify vertex area metrics to do area correction based on") + current_area = File( + exists=True, + position=6, + argstr="%s", + desc="A relevant anatomical surface with mesh OR" + " a metric file with vertex areas for mesh") + new_area = File( + exists=True, + position=7, + argstr="%s", + desc="A relevant anatomical surface with mesh OR" + " a metric file with vertex areas for mesh") + roi_metric = File( + exists=True, + position=8, + argstr="-current-roi %s", + desc="Input roi on the current mesh used to exclude non-data vertices") + valid_roi_out = traits.Bool( + position=9, + argstr="-valid-roi-out", + desc="Output the ROI of vertices that got data from valid source vertices") + largest = traits.Bool( + position=10, + argstr="-largest", + desc="Use only the value of the vertex with the largest weight") + + +class MetricResampleOutputSpec(TraitedSpec): + out_file = File(exists=True, desc="the output metric") + roi_file = File(desc="ROI of vertices that got data from valid source vertices") + + +class MetricResample(WBCommand): + """ + Resample a metric file to a different mesh + + Resamples a metric file, given two spherical surfaces that are in + register. If ``ADAP_BARY_AREA`` is used, exactly one of -area-surfs or + ``-area-metrics`` must be specified. + + The ``ADAP_BARY_AREA`` method is recommended for ordinary metric data, + because it should use all data while downsampling, unlike ``BARYCENTRIC``. + The recommended areas option for most data is individual midthicknesses + for individual data, and averaged vertex area metrics from individual + midthicknesses for group average data. + + The ``-current-roi`` option only masks the input, the output may be slightly + dilated in comparison, consider using ``-metric-mask`` on the output when + using ``-current-roi``. + + The ``-largest option`` results in nearest vertex behavior when used with + ``BARYCENTRIC``. When resampling a binary metric, consider thresholding at + 0.5 after resampling rather than using ``-largest``. 
+
+    >>> from nipype.interfaces.workbench import MetricResample
+    >>> metres = MetricResample()
+    >>> metres.inputs.in_file = 'sub-01_task-rest_bold_space-fsaverage5.L.func.gii'
+    >>> metres.inputs.method = 'ADAP_BARY_AREA'
+    >>> metres.inputs.current_sphere = 'fsaverage5_std_sphere.L.10k_fsavg_L.surf.gii'
+    >>> metres.inputs.new_sphere = 'fs_LR-deformed_to-fsaverage.L.sphere.32k_fs_LR.surf.gii'
+    >>> metres.inputs.area_metrics = True
+    >>> metres.inputs.current_area = 'fsaverage5.L.midthickness_va_avg.10k_fsavg_L.shape.gii'
+    >>> metres.inputs.new_area = 'fs_LR.L.midthickness_va_avg.32k_fs_LR.shape.gii'
+    >>> metres.cmdline
+    'wb_command -metric-resample sub-01_task-rest_bold_space-fsaverage5.L.func.gii \
+    fsaverage5_std_sphere.L.10k_fsavg_L.surf.gii \
+    fs_LR-deformed_to-fsaverage.L.sphere.32k_fs_LR.surf.gii \
+    ADAP_BARY_AREA fs_LR-deformed_to-fsaverage.L.sphere.32k_fs_LR.surf.out \
+    -area-metrics fsaverage5.L.midthickness_va_avg.10k_fsavg_L.shape.gii \
+    fs_LR.L.midthickness_va_avg.32k_fs_LR.shape.gii'
+    """
+    input_spec = MetricResampleInputSpec
+    output_spec = MetricResampleOutputSpec
+    _cmd = 'wb_command -metric-resample'
+
+    def _format_arg(self, opt, spec, val):
+        if opt in ['current_area', 'new_area']:
+            if not self.inputs.area_surfs and not self.inputs.area_metrics:
+                raise ValueError("{} was set but neither area_surfs nor"
+                                 " area_metrics were set".format(opt))
+        if opt == "method":
+            if (val == "ADAP_BARY_AREA" and
+                    not self.inputs.area_surfs and
+                    not self.inputs.area_metrics):
+                raise ValueError("Exactly one of area_surfs or area_metrics"
+                                 " must be specified")
+        if opt == "valid_roi_out" and val:
+            # generate a filename and add it to argstr
+            roi_out = self._gen_filename(self.inputs.in_file, suffix='_roi')
+            iflogger.info("Setting roi output file as %s", roi_out)
+            spec.argstr += " " + roi_out
+        return super(MetricResample, self)._format_arg(opt, spec, val)
+
+    def _list_outputs(self):
+        outputs = super(MetricResample, self)._list_outputs()
+        if self.inputs.valid_roi_out:
+            roi_file = self._gen_filename(self.inputs.in_file, suffix='_roi')
+            outputs['roi_file'] = os.path.abspath(roi_file)
+        return outputs
diff --git a/nipype/interfaces/workbench/tests/__init__.py b/nipype/interfaces/workbench/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/interfaces/workbench/tests/test_auto_MetricResample.py b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py
new file mode 100644
index 0000000000..46a66aa728
--- /dev/null
+++ b/nipype/interfaces/workbench/tests/test_auto_MetricResample.py
@@ -0,0 +1,85 @@
+# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
+from __future__ import unicode_literals
+from ..metric import MetricResample
+
+
+def test_MetricResample_inputs():
+    input_map = dict(
+        area_metrics=dict(
+            argstr='-area-metrics',
+            position=5,
+            xor=['area_surfs'],
+        ),
+        area_surfs=dict(
+            argstr='-area-surfs',
+            position=5,
+            xor=['area_metrics'],
+        ),
+        args=dict(argstr='%s', ),
+        current_area=dict(
+            argstr='%s',
+            position=6,
+        ),
+        current_sphere=dict(
+            argstr='%s',
+            mandatory=True,
+            position=1,
+        ),
+        environ=dict(
+            nohash=True,
+            usedefault=True,
+        ),
+        in_file=dict(
+            argstr='%s',
+            mandatory=True,
+            position=0,
+        ),
+        largest=dict(
+            argstr='-largest',
+            position=10,
+        ),
+        method=dict(
+            argstr='%s',
+            mandatory=True,
+            position=3,
+        ),
+        new_area=dict(
+            argstr='%s',
+            position=7,
+        ),
+        new_sphere=dict(
+            argstr='%s',
+            mandatory=True,
+            position=2,
+        ),
+        out_file=dict(
+            argstr='%s',
+            keep_extension=True,
name_source=['new_sphere'], + name_template='%s.out', + position=4, + ), + roi_metric=dict( + argstr='-current-roi %s', + position=8, + ), + valid_roi_out=dict( + argstr='-valid-roi-out', + position=9, + ), + ) + inputs = MetricResample.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value +def test_MetricResample_outputs(): + output_map = dict( + out_file=dict(), + roi_file=dict(), + ) + outputs = MetricResample.output_spec() + + for key, metadata in list(output_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(outputs.traits()[key], metakey) == value diff --git a/nipype/interfaces/workbench/tests/test_auto_WBCommand.py b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py new file mode 100644 index 0000000000..b496a270dd --- /dev/null +++ b/nipype/interfaces/workbench/tests/test_auto_WBCommand.py @@ -0,0 +1,18 @@ +# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT +from __future__ import unicode_literals +from ..base import WBCommand + + +def test_WBCommand_inputs(): + input_map = dict( + args=dict(argstr='%s', ), + environ=dict( + nohash=True, + usedefault=True, + ), + ) + inputs = WBCommand.input_spec() + + for key, metadata in list(input_map.items()): + for metakey, value in list(metadata.items()): + assert getattr(inputs.traits()[key], metakey) == value diff --git a/nipype/pipeline/__init__.py b/nipype/pipeline/__init__.py new file mode 100644 index 0000000000..badfda5ba0 --- /dev/null +++ b/nipype/pipeline/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Package contains modules for generating pipelines using interfaces + +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +__docformat__ = 'restructuredtext' +from .engine import Node, MapNode, JoinNode, Workflow diff --git a/nipype/pipeline/engine/__init__.py b/nipype/pipeline/engine/__init__.py new file mode 100644 index 0000000000..e950086307 --- /dev/null +++ b/nipype/pipeline/engine/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Package contains modules for generating pipelines using interfaces + +""" + +from __future__ import absolute_import +__docformat__ = 'restructuredtext' +from .workflows import Workflow +from .nodes import Node, MapNode, JoinNode +from .utils import generate_expanded_graph diff --git a/nipype/pipeline/engine/base.py b/nipype/pipeline/engine/base.py new file mode 100644 index 0000000000..9d0bc3c699 --- /dev/null +++ b/nipype/pipeline/engine/base.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Defines functionality for pipelined execution of interfaces + +The `EngineBase` class implements the more general view of a task. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import object + +from copy import deepcopy +import re +import numpy as np + +from ... 
import config
+from ...interfaces.base import DynamicTraitedSpec
+from ...utils.filemanip import loadpkl, savepkl
+
+
+class EngineBase(object):
+    """Defines common attributes and functions for workflows and nodes."""
+
+    def __init__(self, name=None, base_dir=None):
+        """ Initialize base parameters of a workflow or node
+
+        Parameters
+        ----------
+        name : string (mandatory)
+            Name of this node. Name must be alphanumeric and not contain any
+            special characters (e.g., '.', '@').
+        base_dir : string
+            base output directory (will be hashed before creation)
+            default=None, which results in the use of mkdtemp
+
+        """
+        self._hierarchy = None
+        self._name = None
+
+        self.base_dir = base_dir
+        self.config = deepcopy(config._sections)
+        self.name = name
+
+    @property
+    def name(self):
+        return self._name
+
+    @name.setter
+    def name(self, name):
+        if not name or not re.match(r'^[\w-]+$', name):
+            raise ValueError('[Workflow|Node] name "%s" is not valid.' % name)
+        self._name = name
+
+    @property
+    def fullname(self):
+        if self._hierarchy:
+            return '%s.%s' % (self._hierarchy, self.name)
+        return self.name
+
+    @property
+    def inputs(self):
+        raise NotImplementedError
+
+    @property
+    def outputs(self):
+        raise NotImplementedError
+
+    def clone(self, name):
+        """Clone an EngineBase object
+
+        Parameters
+        ----------
+
+        name : string (mandatory)
+            A clone of node or workflow must have a new name
+        """
+        if name == self.name:
+            raise ValueError('Cloning requires a new name, "%s" is '
+                             'in use.' % name)
+        clone = deepcopy(self)
+        clone.name = name
+        if hasattr(clone, '_id'):
+            clone._id = name
+        return clone
+
+    def _check_outputs(self, parameter):
+        return hasattr(self.outputs, parameter)
+
+    def _check_inputs(self, parameter):
+        if isinstance(self.inputs, DynamicTraitedSpec):
+            return True
+        return hasattr(self.inputs, parameter)
+
+    def __str__(self):
+        return self.fullname
+
+    def save(self, filename=None):
+        if filename is None:
+            filename = 'temp.pklz'
+        savepkl(filename, self)
+
+    def load(self, filename):
+        return loadpkl(filename)
diff --git a/nipype/pipeline/engine/nodes.py b/nipype/pipeline/engine/nodes.py
new file mode 100644
index 0000000000..5ac9e72fae
--- /dev/null
+++ b/nipype/pipeline/engine/nodes.py
@@ -0,0 +1,1272 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Defines functionality for pipelined execution of interfaces
+
+The `Node` class provides core functionality for batch processing.
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import range, str, bytes, open
+
+from collections import OrderedDict
+
+import os
+import os.path as op
+import shutil
+import socket
+from copy import deepcopy
+from glob import glob
+from logging import INFO
+
+from tempfile import mkdtemp
+from future import standard_library
+
+from ...
import config, logging
+from ...utils.misc import flatten, unflatten, str2bool, dict_diff
+from ...utils.filemanip import (md5, FileNotFoundError, ensure_list,
+                                simplify_list, copyfiles, fnames_presuffix,
+                                loadpkl, split_filename, load_json, makedirs,
+                                emptydirs, savepkl, to_str, indirectory)
+
+from ...interfaces.base import (traits, InputMultiPath, CommandLine, Undefined,
+                                DynamicTraitedSpec, Bunch, InterfaceResult,
+                                Interface, isdefined)
+from .utils import (
+    _parameterization_dir, save_hashfile as _save_hashfile, load_resultfile as
+    _load_resultfile, save_resultfile as _save_resultfile, nodelist_runner as
+    _node_runner, strip_temp as _strip_temp, write_report,
+    clean_working_directory, merge_dict, evaluate_connect_function)
+from .base import EngineBase
+
+standard_library.install_aliases()
+
+logger = logging.getLogger('nipype.workflow')
+
+
+class Node(EngineBase):
+    """
+    Wraps interface objects for use in pipeline
+
+    A Node creates a sandbox-like directory for executing the underlying
+    interface. It will copy or link inputs into this directory to ensure that
+    input data are not overwritten. A hash of the input state is used to
+    determine if the Node inputs have changed and whether the node needs to be
+    re-executed.
+
+    Examples
+    --------
+
+    >>> from nipype import Node
+    >>> from nipype.interfaces import spm
+    >>> realign = Node(spm.Realign(), 'realign')
+    >>> realign.inputs.in_files = 'functional.nii'
+    >>> realign.inputs.register_to_mean = True
+    >>> realign.run() # doctest: +SKIP
+
+    """
+
+    def __init__(self,
+                 interface,
+                 name,
+                 iterables=None,
+                 itersource=None,
+                 synchronize=False,
+                 overwrite=None,
+                 needed_outputs=None,
+                 run_without_submitting=False,
+                 n_procs=None,
+                 mem_gb=0.20,
+                 **kwargs):
+        """
+        Parameters
+        ----------
+
+        interface : interface object
+            node specific interface (fsl.Bet(), spm.Coregister())
+
+        name : alphanumeric string
+            node specific name
+
+        iterables : generator
+            Input field and list to iterate using the pipeline engine,
+            for example to iterate over different frac values in fsl.Bet().
+            For a single field the input can be a tuple; otherwise, use a
+            list of tuples ::
+
+                node.iterables = ('frac',[0.5,0.6,0.7])
+                node.iterables = [('fwhm',[2,4]),('fieldx',[0.5,0.6,0.7])]
+
+            If this node has an itersource, then the iterables value
+            is a dictionary which maps an iterable source field value
+            to the target iterables field values, e.g.: ::
+
+                inputspec.iterables = ('images', ['img1.nii', 'img2.nii'])
+                node.itersource = ('inputspec', ['frac'])
+                node.iterables = ('frac', {'img1.nii': [0.5, 0.6],
+                                           'img2.nii': [0.6, 0.7]})
+
+            If this node's synchronize flag is set, then an alternate
+            form of the iterables is a [fields, values] list, where
+            fields is the list of iterated fields and values is the
+            list of value tuples for the given fields, e.g.: ::
+
+                node.synchronize = True
+                node.iterables = [('frac', 'threshold'),
+                                  [(0.5, True),
+                                   (0.6, False)]]
+
+        itersource: tuple
+            The (name, fields) iterables source which specifies the name
+            of the predecessor iterable node and the input fields to use
+            from that source node. The output field values comprise the
+            key to the iterables parameter value mapping dictionary.
+
+        synchronize: boolean
+            Flag indicating whether iterables are synchronized.
+            If the iterables are synchronized, then this iterable
+            node is expanded once per iteration over all of the
+            iterables values.
+            Otherwise, this iterable node is expanded once per
+            each permutation of the iterables values.
+ + overwrite : Boolean + Whether to overwrite contents of output directory if it already + exists. If directory exists and hash matches it + assumes that process has been executed + + needed_outputs : list of output_names + Force the node to keep only specific outputs. By default all + outputs are kept. Setting this attribute will delete any output + files and directories from the node's working directory that are + not part of the `needed_outputs`. + + run_without_submitting : boolean + Run the node without submitting to a job engine or to a + multiprocessing pool + + """ + # Make sure an interface is set, and that it is an Interface + if interface is None: + raise IOError('Interface must be provided') + if not isinstance(interface, Interface): + raise IOError('interface must be an instance of an Interface') + + super(Node, self).__init__(name, kwargs.get('base_dir')) + + self._interface = interface + self._hierarchy = None + self._got_inputs = False + self._originputs = None + self._output_dir = None + self._id = self.name # for compatibility with node expansion using iterables + + self.iterables = iterables + self.synchronize = synchronize + self.itersource = itersource + self.overwrite = overwrite + self.parameterization = [] + self.input_source = {} + self.plugin_args = {} + + self.run_without_submitting = run_without_submitting + self._mem_gb = mem_gb + self._n_procs = n_procs + + # Downstream n_procs + if hasattr(self._interface.inputs, + 'num_threads') and self._n_procs is not None: + self._interface.inputs.num_threads = self._n_procs + + # Initialize needed_outputs and hashes + self._hashvalue = None + self._hashed_inputs = None + self._needed_outputs = [] + self.needed_outputs = needed_outputs + self.config = None + + @property + def interface(self): + """Return the underlying interface object""" + return self._interface + + @property + def result(self): + """Get result from result file (do not hold it in memory)""" + return _load_resultfile(self.output_dir(), self.name)[0] + + @property + def inputs(self): + """Return the inputs of the underlying interface""" + return self._interface.inputs + + @property + def outputs(self): + """Return the output fields of the underlying interface""" + return self._interface._outputs() + + @property + def needed_outputs(self): + return self._needed_outputs + + @needed_outputs.setter + def needed_outputs(self, new_outputs): + """Needed outputs changes the hash, refresh if changed""" + new_outputs = sorted(list(set(new_outputs or []))) + if new_outputs != self._needed_outputs: + # Reset hash + self._hashvalue = None + self._hashed_inputs = None + self._needed_outputs = new_outputs + + @property + def mem_gb(self): + """Get estimated memory (GB)""" + if hasattr(self._interface, 'estimated_memory_gb'): + self._mem_gb = self._interface.estimated_memory_gb + logger.warning( + 'Setting "estimated_memory_gb" on Interfaces has been ' + 'deprecated as of nipype 1.0, please use Node.mem_gb.') + + return self._mem_gb + + @property + def n_procs(self): + """Get the estimated number of processes/threads""" + if self._n_procs is not None: + return self._n_procs + if hasattr(self._interface.inputs, 'num_threads') and isdefined( + self._interface.inputs.num_threads): + return self._interface.inputs.num_threads + return 1 + + @n_procs.setter + def n_procs(self, value): + """Set an estimated number of processes/threads""" + self._n_procs = value + + # Overwrite interface's dynamic input of num_threads + if hasattr(self._interface.inputs, 'num_threads'): + 
self._interface.inputs.num_threads = self._n_procs + + @property + def itername(self): + """Name for expanded iterable""" + itername = self._id + if self._hierarchy: + itername = '%s.%s' % (self._hierarchy, self._id) + return itername + + def output_dir(self): + """Return the location of the output directory for the node""" + # Output dir is cached + if self._output_dir: + return self._output_dir + + # Calculate & cache otherwise + if self.base_dir is None: + self.base_dir = mkdtemp() + outputdir = self.base_dir + if self._hierarchy: + outputdir = op.join(outputdir, *self._hierarchy.split('.')) + if self.parameterization: + params_str = ['{}'.format(p) for p in self.parameterization] + if not str2bool(self.config['execution']['parameterize_dirs']): + params_str = [_parameterization_dir(p) for p in params_str] + outputdir = op.join(outputdir, *params_str) + + self._output_dir = op.realpath(op.join(outputdir, self.name)) + return self._output_dir + + def set_input(self, parameter, val): + """Set interface input value""" + logger.debug('[Node] %s - setting input %s = %s', self.name, parameter, + to_str(val)) + setattr(self.inputs, parameter, deepcopy(val)) + + def get_output(self, parameter): + """Retrieve a particular output of the node""" + return getattr(self.result.outputs, parameter, None) + + def help(self): + """Print interface help""" + self._interface.help() + + def is_cached(self, rm_outdated=False): + """ + Check if the interface has been run previously, and whether + cached results are up-to-date. + """ + outdir = self.output_dir() + + # Update hash + hashed_inputs, hashvalue = self._get_hashval() + + # The output folder does not exist: not cached + if not op.exists(outdir): + logger.debug('[Node] Directory not found "%s".', outdir) + return False, False + + hashfile = op.join(outdir, '_0x%s.json' % hashvalue) + cached = op.exists(hashfile) + + # Check if updated + globhashes = glob(op.join(outdir, '_0x*.json')) + unfinished = [ + path for path in globhashes + if path.endswith('_unfinished.json') + ] + hashfiles = list(set(globhashes) - set(unfinished)) + logger.debug('[Node] Hashes: %s, %s, %s, %s', + hashed_inputs, hashvalue, hashfile, hashfiles) + + # No previous hashfiles found, we're all set. 
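+        # More precisely, three states are possible at this point: exactly
+        # one hashfile matching the current inputs (cached and up to date),
+        # only non-matching hashfiles (cached but stale), or no hashfile at
+        # all (not cached). The branches below handle each case in turn.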
+ if cached and len(hashfiles) == 1: + assert(hashfile == hashfiles[0]) + logger.debug('[Node] Up-to-date cache found for "%s".', self.fullname) + return True, True # Cached and updated + + if len(hashfiles) > 1: + if cached: + hashfiles.remove(hashfile) # Do not clean up the node, if cached + logger.warning('[Node] Found %d previous hashfiles indicating that the working ' + 'directory of node "%s" is stale, deleting old hashfiles.', + len(hashfiles), self.fullname) + for rmfile in hashfiles: + os.remove(rmfile) + + hashfiles = [hashfile] if cached else [] + + if not hashfiles: + logger.debug('[Node] No hashfiles found in "%s".', outdir) + assert(not cached) + return False, False + + # At this point only one hashfile is in the folder + # and we directly check whether it is updated + updated = hashfile == hashfiles[0] + if not updated: # Report differences depending on log verbosity + cached = True + logger.info('[Node] Outdated cache found for "%s".', self.fullname) + # If logging is more verbose than INFO (20), print diff between hashes + loglevel = logger.getEffectiveLevel() + if loglevel < INFO: # Lazy logging: only < INFO + exp_hash_file_base = split_filename(hashfiles[0])[1] + exp_hash = exp_hash_file_base[len('_0x'):] + logger.log(loglevel, "[Node] Old/new hashes = %s/%s", + exp_hash, hashvalue) + try: + prev_inputs = load_json(hashfiles[0]) + except Exception: + pass + else: + logger.log(loglevel, + dict_diff(prev_inputs, hashed_inputs, 10)) + + if rm_outdated: + os.remove(hashfiles[0]) + + assert(cached) # At this point, node is cached (may not be up-to-date) + return cached, updated + + def hash_exists(self, updatehash=False): + """ + Decorate the new `is_cached` method with hash updating + to maintain backwards compatibility. + """ + + # Get a dictionary with hashed filenames and a hashvalue + # of the dictionary itself. + cached, updated = self.is_cached(rm_outdated=True) + + outdir = self.output_dir() + hashfile = op.join(outdir, '_0x%s.json' % self._hashvalue) + + if updated: + return True, self._hashvalue, hashfile, self._hashed_inputs + + # Update only possible if it exists + if cached and updatehash: + logger.debug("[Node] Updating hash: %s", self._hashvalue) + _save_hashfile(hashfile, self._hashed_inputs) + + return cached, self._hashvalue, hashfile, self._hashed_inputs + + def run(self, updatehash=False): + """Execute the node in its directory. + + Parameters + ---------- + + updatehash: boolean + When the hash stored in the output directory as a result of a previous run + does not match that calculated for this execution, updatehash=True only + updates the hash without re-running. 
+ """ + + if self.config is None: + self.config = {} + self.config = merge_dict(deepcopy(config._sections), self.config) + + outdir = self.output_dir() + force_run = self.overwrite or (self.overwrite is None and + self._interface.always_run) + + # Check hash, check whether run should be enforced + logger.info('[Node] Setting-up "%s" in "%s".', self.fullname, outdir) + cached, updated = self.is_cached() + + # If the node is cached, check on pklz files and finish + if not force_run and (updated or (not updated and updatehash)): + logger.debug("Only updating node hashes or skipping execution") + inputs_file = op.join(outdir, '_inputs.pklz') + if not op.exists(inputs_file): + logger.debug('Creating inputs file %s', inputs_file) + savepkl(inputs_file, self.inputs.get_traitsfree()) + + node_file = op.join(outdir, '_node.pklz') + if not op.exists(node_file): + logger.debug('Creating node file %s', node_file) + savepkl(node_file, self) + + result = self._run_interface(execute=False, + updatehash=updatehash and not updated) + logger.info('[Node] "%s" found cached%s.', self.fullname, + ' (and hash updated)' * (updatehash and not updated)) + return result + + if cached and updated and not isinstance(self, MapNode): + logger.debug('[Node] Rerunning cached, up-to-date node "%s"', self.fullname) + if not force_run and str2bool( + self.config['execution']['stop_on_first_rerun']): + raise Exception( + 'Cannot rerun when "stop_on_first_rerun" is set to True') + + # Remove any hashfile that exists at this point (re)running. + if cached: + for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')): + os.remove(outdatedhash) + + + # Hashfile while running + hashfile_unfinished = op.join( + outdir, '_0x%s_unfinished.json' % self._hashvalue) + + # Delete directory contents if this is not a MapNode or can't resume + can_resume = not (self._interface.can_resume and op.isfile(hashfile_unfinished)) + if can_resume and not isinstance(self, MapNode): + emptydirs(outdir, noexist_ok=True) + else: + logger.debug('[%sNode] Resume - hashfile=%s', + 'Map' * int(isinstance(self, MapNode)), + hashfile_unfinished) + + if isinstance(self, MapNode): + # remove old json files + for filename in glob(op.join(outdir, '_0x*.json')): + os.remove(filename) + + # Make sure outdir is created + makedirs(outdir, exist_ok=True) + + # Store runtime-hashfile, pre-execution report, the node and the inputs set. 
+
+        _save_hashfile(hashfile_unfinished, self._hashed_inputs)
+        write_report(
+            self, report_type='preexec', is_mapnode=isinstance(self, MapNode))
+        savepkl(op.join(outdir, '_node.pklz'), self)
+        savepkl(op.join(outdir, '_inputs.pklz'), self.inputs.get_traitsfree())
+
+        try:
+            result = self._run_interface(execute=True)
+        except Exception:
+            logger.warning('[Node] Error on "%s" (%s)', self.fullname, outdir)
+            # Clean up after error
+            os.remove(hashfile_unfinished)
+            raise
+
+        # Finalize the hashfile after success
+        shutil.move(hashfile_unfinished,
+                    hashfile_unfinished.replace('_unfinished', ''))
+        write_report(
+            self, report_type='postexec', is_mapnode=isinstance(self, MapNode))
+        logger.info('[Node] Finished "%s".', self.fullname)
+        return result
+
+    def _get_hashval(self):
+        """Return a hash of the input state"""
+        self._get_inputs()
+        if self._hashvalue is None and self._hashed_inputs is None:
+            self._hashed_inputs, self._hashvalue = self.inputs.get_hashval(
+                hash_method=self.config['execution']['hash_method'])
+            rm_extra = self.config['execution']['remove_unnecessary_outputs']
+            if str2bool(rm_extra) and self.needed_outputs:
+                hashobject = md5()
+                hashobject.update(self._hashvalue.encode())
+                hashobject.update(str(self.needed_outputs).encode())
+                self._hashvalue = hashobject.hexdigest()
+                self._hashed_inputs.append(('needed_outputs', self.needed_outputs))
+        return self._hashed_inputs, self._hashvalue
+
+    def _get_inputs(self):
+        """Retrieve inputs from pointers to results file
+
+        This mechanism can be easily extended/replaced to retrieve data from
+        other data sources (e.g., XNAT, HTTP, etc.)
+        """
+        if self._got_inputs:
+            return
+
+        logger.debug('Setting node inputs')
+        for key, info in list(self.input_source.items()):
+            logger.debug('input: %s', key)
+            results_file = info[0]
+            logger.debug('results file: %s', results_file)
+            results = loadpkl(results_file)
+            output_value = Undefined
+            if isinstance(info[1], tuple):
+                output_name = info[1][0]
+                value = getattr(results.outputs, output_name)
+                if isdefined(value):
+                    output_value = evaluate_connect_function(
+                        info[1][1], info[1][2], value)
+            else:
+                output_name = info[1]
+                try:
+                    output_value = results.outputs.trait_get()[output_name]
+                except AttributeError:
+                    output_value = results.outputs.dictcopy()[output_name]
+            logger.debug('output: %s', output_name)
+            try:
+                self.set_input(key, deepcopy(output_value))
+            except traits.TraitError as e:
+                msg = [
+                    'Error setting node input:',
+                    'Node: %s' % self.name,
+                    'input: %s' % key,
+                    'results_file: %s' % results_file,
+                    'value: %s' % str(output_value)
+                ]
+                e.args = (e.args[0] + "\n" + '\n'.join(msg), )
+                raise
+
+        # Successfully set inputs
+        self._got_inputs = True
+
+    def _update_hash(self):
+        for outdatedhash in glob(op.join(self.output_dir(), '_0x*.json')):
+            os.remove(outdatedhash)
+        hashfile = op.join(self.output_dir(), '_0x%s.json' % self._hashvalue)
+        _save_hashfile(hashfile, self._hashed_inputs)
+
+    def _run_interface(self, execute=True, updatehash=False):
+        if updatehash:
+            self._update_hash()
+            return self._load_results()
+        return self._run_command(execute)
+
+    def _load_results(self):
+        cwd = self.output_dir()
+        result, aggregate, attribute_error = _load_resultfile(cwd, self.name)
+        # try aggregating first
+        if aggregate:
+            logger.debug('aggregating results')
+            if attribute_error:
+                old_inputs = loadpkl(op.join(cwd, '_inputs.pklz'))
+                self.inputs.trait_set(**old_inputs)
+            if not isinstance(self, MapNode):
+                self._copyfiles_to_wd(linksonly=True)
+                aggouts = self._interface.aggregate_outputs(
+                    needed_outputs=self.needed_outputs)
+                runtime = Bunch(
cwd=cwd,
+                    returncode=0,
+                    environ=dict(os.environ),
+                    hostname=socket.gethostname())
+                result = InterfaceResult(
+                    interface=self._interface.__class__,
+                    runtime=runtime,
+                    inputs=self._interface.inputs.get_traitsfree(),
+                    outputs=aggouts)
+                _save_resultfile(result, cwd, self.name)
+            else:
+                logger.debug('aggregating mapnode results')
+                result = self._run_interface()
+        return result
+
+    def _run_command(self, execute, copyfiles=True):
+        if not execute:
+            try:
+                result = self._load_results()
+            except (FileNotFoundError, AttributeError):
+                # if aggregation does not work, rerun the node
+                logger.info("[Node] Some of the outputs were not found: "
+                            "rerunning node.")
+                copyfiles = False  # OE: this was like this before,
+                execute = True  # I'll keep them for safety
+            else:
+                logger.info('[Node] Cached "%s" - collecting precomputed outputs',
+                            self.fullname)
+                return result
+
+        outdir = self.output_dir()
+        # Run command: either execute is true or load_results failed.
+        result = InterfaceResult(
+            interface=self._interface.__class__,
+            runtime=Bunch(
+                cwd=outdir,
+                returncode=1,
+                environ=dict(os.environ),
+                hostname=socket.gethostname()
+            ),
+            inputs=self._interface.inputs.get_traitsfree())
+
+        if copyfiles:
+            self._originputs = deepcopy(self._interface.inputs)
+            self._copyfiles_to_wd(execute=execute)
+
+        message = '[Node] Running "{}" ("{}.{}")'.format(
+            self.name, self._interface.__module__,
+            self._interface.__class__.__name__)
+        if issubclass(self._interface.__class__, CommandLine):
+            try:
+                with indirectory(outdir):
+                    cmd = self._interface.cmdline
+            except Exception as msg:
+                result.runtime.stderr = '{}\n\n{}'.format(
+                    getattr(result.runtime, 'stderr', ''), msg)
+                _save_resultfile(result, outdir, self.name)
+                raise
+            cmdfile = op.join(outdir, 'command.txt')
+            with open(cmdfile, 'wt') as fd:
+                print(cmd + "\n", file=fd)
+            message += ', a CommandLine Interface with command:\n{}'.format(cmd)
+        logger.info(message)
+        try:
+            result = self._interface.run(cwd=outdir)
+        except Exception as msg:
+            result.runtime.stderr = '{}\n\n{}'.format(
+                getattr(result.runtime, 'stderr', ''), msg)
+            _save_resultfile(result, outdir, self.name)
+            raise
+
+        dirs2keep = None
+        if isinstance(self, MapNode):
+            dirs2keep = [op.join(outdir, 'mapflow')]
+
+        result.outputs = clean_working_directory(
+            result.outputs,
+            outdir,
+            self._interface.inputs,
+            self.needed_outputs,
+            self.config,
+            dirs2keep=dirs2keep)
+        _save_resultfile(result, outdir, self.name)
+
+        return result
+
+    def _copyfiles_to_wd(self, execute=True, linksonly=False):
+        """copy files over and change the inputs"""
+        if not hasattr(self._interface, '_get_filecopy_info'):
+            # Nothing to be done
+            return
+
+        logger.debug('copying files to wd [execute=%s, linksonly=%s]', execute,
+                     linksonly)
+
+        outdir = self.output_dir()
+        if execute and linksonly:
+            olddir = outdir
+            outdir = op.join(outdir, '_tempinput')
+            makedirs(outdir, exist_ok=True)
+
+        for info in self._interface._get_filecopy_info():
+            files = self.inputs.trait_get().get(info['key'])
+            if not isdefined(files) or not files:
+                continue
+
+            infiles = ensure_list(files)
+            if execute:
+                if linksonly:
+                    if not info['copy']:
+                        newfiles = copyfiles(
+                            infiles, [outdir],
+                            copy=info['copy'],
+                            create_new=True)
+                    else:
+                        newfiles = fnames_presuffix(infiles, newpath=outdir)
+                    newfiles = _strip_temp(newfiles,
+                                           op.abspath(olddir).split(
+                                               op.sep)[-1])
+                else:
+                    newfiles = copyfiles(
+                        infiles, [outdir], copy=info['copy'], create_new=True)
+            else:
+                newfiles = fnames_presuffix(infiles, newpath=outdir)
+            if
not isinstance(files, list): + newfiles = simplify_list(newfiles) + setattr(self.inputs, info['key'], newfiles) + if execute and linksonly: + emptydirs(outdir, noexist_ok=True) + + def update(self, **opts): + """Update inputs""" + self.inputs.update(**opts) + + +class JoinNode(Node): + """Wraps interface objects that join inputs into a list. + + Examples + -------- + + >>> import nipype.pipeline.engine as pe + >>> from nipype import Node, JoinNode, Workflow + >>> from nipype.interfaces.utility import IdentityInterface + >>> from nipype.interfaces import (ants, dcm2nii, fsl) + >>> wf = Workflow(name='preprocess') + >>> inputspec = Node(IdentityInterface(fields=['image']), + ... name='inputspec') + >>> inputspec.iterables = [('image', + ... ['img1.nii', 'img2.nii', 'img3.nii'])] + >>> img2flt = Node(fsl.ImageMaths(out_data_type='float'), + ... name='img2flt') + >>> wf.connect(inputspec, 'image', img2flt, 'in_file') + >>> average = JoinNode(ants.AverageImages(), joinsource='inputspec', + ... joinfield='images', name='average') + >>> wf.connect(img2flt, 'out_file', average, 'images') + >>> realign = Node(fsl.FLIRT(), name='realign') + >>> wf.connect(img2flt, 'out_file', realign, 'in_file') + >>> wf.connect(average, 'output_average_image', realign, 'reference') + >>> strip = Node(fsl.BET(), name='strip') + >>> wf.connect(realign, 'out_file', strip, 'in_file') + + """ + + def __init__(self, + interface, + name, + joinsource, + joinfield=None, + unique=False, + **kwargs): + """ + + Parameters + ---------- + interface : interface object + node specific interface (fsl.Bet(), spm.Coregister()) + name : alphanumeric string + node specific name + joinsource : node name + name of the join predecessor iterable node + joinfield : string or list of strings + name(s) of list input fields that will be aggregated. + The default is all of the join node input fields. + unique : flag indicating whether to ignore duplicate input values + + See Node docstring for additional keyword arguments. + """ + super(JoinNode, self).__init__(interface, name, **kwargs) + + self._joinsource = None # The member should be defined + self.joinsource = joinsource # Let the setter do the job + """the join predecessor iterable node""" + + if not joinfield: + # default is the interface fields + joinfield = self._interface.inputs.copyable_trait_names() + elif isinstance(joinfield, (str, bytes)): + joinfield = [joinfield] + self.joinfield = joinfield + """the fields to join""" + + self._inputs = self._override_join_traits(self._interface.inputs, + self.joinfield) + """the override inputs""" + + self._unique = unique + """flag indicating whether to ignore duplicate input values""" + + self._next_slot_index = 0 + """the joinfield index assigned to an iterated input""" + + @property + def joinsource(self): + return self._joinsource + + @joinsource.setter + def joinsource(self, value): + """Set the joinsource property. If the given value is a Node, + then the joinsource is set to the node name. + """ + if isinstance(value, Node): + value = value.name + self._joinsource = value + + @property + def inputs(self): + """The JoinNode inputs include the join field overrides.""" + return self._inputs + + def _add_join_item_fields(self): + """Add new join item fields assigned to the next iterated + input + + This method is intended solely for workflow graph expansion. 
+
+        Examples
+        --------
+
+        >>> from nipype.interfaces.utility import IdentityInterface
+        >>> import nipype.pipeline.engine as pe
+        >>> from nipype import Node, JoinNode, Workflow
+        >>> inputspec = Node(IdentityInterface(fields=['image']),
+        ...                  name='inputspec')
+        >>> join = JoinNode(IdentityInterface(fields=['images', 'mask']),
+        ...                 joinsource='inputspec', joinfield='images', name='join')
+        >>> join._add_join_item_fields()
+        {'images': 'imagesJ1'}
+
+        Return the {base field: slot field} dictionary
+        """
+        # create the new join item fields
+        idx = self._next_slot_index
+        newfields = dict([(field, self._add_join_item_field(field, idx))
+                          for field in self.joinfield])
+        # increment the join slot index
+        logger.debug("Added the %s join item fields %s.", self, newfields)
+        self._next_slot_index += 1
+        return newfields
+
+    def _add_join_item_field(self, field, index):
+        """Add new join item fields qualified by the given index
+
+        Return the new field name
+        """
+        # the new field name
+        name = "%sJ%d" % (field, index + 1)
+        # make a copy of the join trait
+        trait = self._inputs.trait(field, False, True)
+        # add the join item trait to the override traits
+        self._inputs.add_trait(name, trait)
+
+        return name
+
+    def _override_join_traits(self, basetraits, fields):
+        """Convert the given join fields to accept an input that
+        is a list item rather than a list. Non-join fields
+        delegate to the interface traits.
+
+        Return the override DynamicTraitedSpec
+        """
+        dyntraits = DynamicTraitedSpec()
+        if fields is None:
+            fields = basetraits.copyable_trait_names()
+        else:
+            # validate the fields
+            for field in fields:
+                if not basetraits.trait(field):
+                    raise ValueError("The JoinNode %s does not have a field"
+                                     " named %s" % (self.name, field))
+        for name, trait in list(basetraits.items()):
+            # if a join field has a single inner trait, then the item
+            # trait is that inner trait. Otherwise, the item trait is
+            # a new Any trait.
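+            # e.g. (a hypothetical illustration): a join field declared as
+            # InputMultiPath(File) has the single inner trait File, so its
+            # join slots imagesJ1, imagesJ2, ... each accept one filename
+            # rather than a list of filenames.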
+ if name in fields and len(trait.inner_traits) == 1: + item_trait = trait.inner_traits[0] + dyntraits.add_trait(name, item_trait) + setattr(dyntraits, name, Undefined) + logger.debug( + "Converted the join node %s field %s trait type from %s to %s", + self, name, trait.trait_type.info(), item_trait.info()) + else: + dyntraits.add_trait(name, traits.Any) + setattr(dyntraits, name, Undefined) + return dyntraits + + def _run_command(self, execute, copyfiles=True): + """Collates the join inputs prior to delegating to the superclass.""" + self._collate_join_field_inputs() + return super(JoinNode, self)._run_command(execute, copyfiles) + + def _collate_join_field_inputs(self): + """ + Collects each override join item field into the interface join + field input.""" + for field in self.inputs.copyable_trait_names(): + if field in self.joinfield: + # collate the join field + val = self._collate_input_value(field) + try: + setattr(self._interface.inputs, field, val) + except Exception as e: + raise ValueError(">>JN %s %s %s %s %s: %s" % + (self, field, val, + self.inputs.copyable_trait_names(), + self.joinfield, e)) + elif hasattr(self._interface.inputs, field): + # copy the non-join field + val = getattr(self._inputs, field) + if isdefined(val): + setattr(self._interface.inputs, field, val) + logger.debug("Collated %d inputs into the %s node join fields", + self._next_slot_index, self) + + def _collate_input_value(self, field): + """ + Collects the join item field values into a list or set value for + the given field, as follows: + + - If the field trait is a Set, then the values are collected into + a set. + + - Otherwise, the values are collected into a list which preserves + the iterables order. If the ``unique`` flag is set, then duplicate + values are removed but the iterables order is preserved. + """ + val = [ + self._slot_value(field, idx) + for idx in range(self._next_slot_index) + ] + basetrait = self._interface.inputs.trait(field) + if isinstance(basetrait.trait_type, traits.Set): + return set(val) + + if self._unique: + return list(OrderedDict.fromkeys(val)) + + return val + + def _slot_value(self, field, index): + slot_field = "%sJ%d" % (field, index + 1) + try: + return getattr(self._inputs, slot_field) + except AttributeError as e: + raise AttributeError( + "The join node %s does not have a slot field %s" + " to hold the %s value at index %d: %s" % (self, slot_field, + field, index, e)) + + +class MapNode(Node): + """Wraps interface objects that need to be iterated on a list of inputs. + + Examples + -------- + + >>> from nipype import MapNode + >>> from nipype.interfaces import fsl + >>> realign = MapNode(fsl.MCFLIRT(), 'in_file', 'realign') + >>> realign.inputs.in_file = ['functional.nii', + ... 'functional2.nii', + ... 'functional3.nii'] + >>> realign.run() # doctest: +SKIP + + """ + + def __init__(self, + interface, + iterfield, + name, + serial=False, + nested=False, + **kwargs): + """ + + Parameters + ---------- + interface : interface object + node specific interface (fsl.Bet(), spm.Coregister()) + iterfield : string or list of strings + name(s) of input fields that will receive a list of whatever kind + of input they take. the node will be run separately for each + value in these lists. for more than one input, the values are + paired (i.e. it does not compute a combinatorial product). 
name : alphanumeric string
+            node specific name
+        serial : boolean
+            flag to enforce executing the jobs of the mapnode in a serial
+            manner rather than parallel
+        nested : boolean
+            support for nested lists. If set, the input list will be flattened
+            before running and the nested list structure of the outputs will
+            be restored.
+
+        See Node docstring for additional keyword arguments.
+        """
+
+        super(MapNode, self).__init__(interface, name, **kwargs)
+        if isinstance(iterfield, (str, bytes)):
+            iterfield = [iterfield]
+        self.iterfield = iterfield
+        self.nested = nested
+        self._inputs = self._create_dynamic_traits(
+            self._interface.inputs, fields=self.iterfield)
+        self._inputs.on_trait_change(self._set_mapnode_input)
+        self._got_inputs = False
+        self._serial = serial
+
+    def _create_dynamic_traits(self, basetraits, fields=None, nitems=None):
+        """Convert specific fields of a trait to accept multiple inputs
+        """
+        output = DynamicTraitedSpec()
+        if fields is None:
+            fields = basetraits.copyable_trait_names()
+        for name, spec in list(basetraits.items()):
+            if name in fields and ((nitems is None) or (nitems > 1)):
+                logger.debug('adding multipath trait: %s', name)
+                if self.nested:
+                    output.add_trait(name, InputMultiPath(traits.Any()))
+                else:
+                    output.add_trait(name, InputMultiPath(spec.trait_type))
+            else:
+                output.add_trait(name, traits.Trait(spec))
+            setattr(output, name, Undefined)
+            value = getattr(basetraits, name)
+            if isdefined(value):
+                setattr(output, name, value)
+            value = getattr(output, name)
+        return output
+
+    def set_input(self, parameter, val):
+        """
+        Set interface input value or nodewrapper attribute.
+        Priority goes to interface.
+        """
+        logger.debug('setting nodelevel(%s) input %s = %s', to_str(self),
+                     parameter, to_str(val))
+        self._set_mapnode_input(parameter, deepcopy(val))
+
+    def _set_mapnode_input(self, name, newvalue):
+        logger.debug('setting mapnode(%s) input: %s -> %s', to_str(self), name,
+                     to_str(newvalue))
+        if name in self.iterfield:
+            setattr(self._inputs, name, newvalue)
+        else:
+            setattr(self._interface.inputs, name, newvalue)
+
+    def _get_hashval(self):
+        """Compute hash including iterfield lists."""
+        self._get_inputs()
+
+        if self._hashvalue is not None and self._hashed_inputs is not None:
+            return self._hashed_inputs, self._hashvalue
+
+        self._check_iterfield()
+        hashinputs = deepcopy(self._interface.inputs)
+        for name in self.iterfield:
+            hashinputs.remove_trait(name)
+            hashinputs.add_trait(
+                name,
+                InputMultiPath(
+                    self._interface.inputs.traits()[name].trait_type))
+            logger.debug('setting hashinput %s-> %s', name,
+                         getattr(self._inputs, name))
+            if self.nested:
+                setattr(hashinputs, name, flatten(getattr(self._inputs, name)))
+            else:
+                setattr(hashinputs, name, getattr(self._inputs, name))
+        hashed_inputs, hashvalue = hashinputs.get_hashval(
+            hash_method=self.config['execution']['hash_method'])
+        rm_extra = self.config['execution']['remove_unnecessary_outputs']
+        if str2bool(rm_extra) and self.needed_outputs:
+            hashobject = md5()
+            hashobject.update(hashvalue.encode())
+            sorted_outputs = sorted(self.needed_outputs)
+            hashobject.update(str(sorted_outputs).encode())
+            hashvalue = hashobject.hexdigest()
+            hashed_inputs.append(('needed_outputs', sorted_outputs))
+        self._hashed_inputs, self._hashvalue = hashed_inputs, hashvalue
+        return self._hashed_inputs, self._hashvalue
+
+    @property
+    def inputs(self):
+        return self._inputs
+
+    @property
+    def outputs(self):
+        if self._interface._outputs():
+            return Bunch(self._interface._outputs().trait_get())
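+    # A minimal sketch of the expansion performed by _make_nodes below,
+    # with hypothetical values: a MapNode named 'smooth' with
+    # iterfield=['in_file'] and inputs.in_file = ['a.nii', 'b.nii'] yields
+    # two regular Nodes, '_smooth0' and '_smooth1', each receiving a single
+    # in_file and running in its own directory under <output_dir>/mapflow/.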
+ + def _make_nodes(self, cwd=None): + if cwd is None: + cwd = self.output_dir() + if self.nested: + nitems = len( + flatten( + ensure_list(getattr(self.inputs, self.iterfield[0])))) + else: + nitems = len( + ensure_list(getattr(self.inputs, self.iterfield[0]))) + for i in range(nitems): + nodename = '_%s%d' % (self.name, i) + node = Node( + deepcopy(self._interface), + n_procs=self._n_procs, + mem_gb=self._mem_gb, + overwrite=self.overwrite, + needed_outputs=self.needed_outputs, + run_without_submitting=self.run_without_submitting, + base_dir=op.join(cwd, 'mapflow'), + name=nodename) + node.plugin_args = self.plugin_args + node.interface.inputs.trait_set( + **deepcopy(self._interface.inputs.trait_get())) + node.interface.resource_monitor = self._interface.resource_monitor + for field in self.iterfield: + if self.nested: + fieldvals = flatten( + ensure_list(getattr(self.inputs, field))) + else: + fieldvals = ensure_list(getattr(self.inputs, field)) + logger.debug('setting input %d %s %s', i, field, fieldvals[i]) + setattr(node.inputs, field, fieldvals[i]) + node.config = self.config + yield i, node + + def _collate_results(self, nodes): + finalresult = InterfaceResult( + interface=[], + runtime=[], + provenance=[], + inputs=[], + outputs=self.outputs) + returncode = [] + for i, nresult, err in nodes: + finalresult.runtime.insert(i, None) + returncode.insert(i, err) + + if nresult: + if hasattr(nresult, 'runtime'): + finalresult.interface.insert(i, nresult.interface) + finalresult.inputs.insert(i, nresult.inputs) + finalresult.runtime[i] = nresult.runtime + if hasattr(nresult, 'provenance'): + finalresult.provenance.insert(i, nresult.provenance) + + if self.outputs: + for key, _ in list(self.outputs.items()): + rm_extra = ( + self.config['execution']['remove_unnecessary_outputs']) + if str2bool(rm_extra) and self.needed_outputs: + if key not in self.needed_outputs: + continue + values = getattr(finalresult.outputs, key) + if not isdefined(values): + values = [] + if nresult and nresult.outputs: + values.insert(i, nresult.outputs.trait_get()[key]) + else: + values.insert(i, None) + defined_vals = [isdefined(val) for val in values] + if any(defined_vals) and finalresult.outputs: + setattr(finalresult.outputs, key, values) + + if self.nested: + for key, _ in list(self.outputs.items()): + values = getattr(finalresult.outputs, key) + if isdefined(values): + values = unflatten(values, + ensure_list( + getattr(self.inputs, + self.iterfield[0]))) + setattr(finalresult.outputs, key, values) + + if returncode and any([code is not None for code in returncode]): + msg = [] + for i, code in enumerate(returncode): + if code is not None: + msg += ['Subnode %d failed' % i] + msg += ['Error: %s' % str(code)] + raise Exception('Subnodes of node: %s failed:\n%s' % + (self.name, '\n'.join(msg))) + + return finalresult + + def get_subnodes(self): + """Generate subnodes of a mapnode and write pre-execution report""" + self._get_inputs() + self._check_iterfield() + write_report(self, report_type='preexec', is_mapnode=True) + return [node for _, node in self._make_nodes()] + + def num_subnodes(self): + """Get the number of subnodes to iterate in this MapNode""" + self._get_inputs() + self._check_iterfield() + if self._serial: + return 1 + if self.nested: + return len( + ensure_list( + flatten(getattr(self.inputs, self.iterfield[0])))) + return len(ensure_list(getattr(self.inputs, self.iterfield[0]))) + + def _get_inputs(self): + old_inputs = self._inputs.trait_get() + self._inputs = self._create_dynamic_traits( + 
self._interface.inputs, fields=self.iterfield) + self._inputs.trait_set(**old_inputs) + super(MapNode, self)._get_inputs() + + def _check_iterfield(self): + """Checks iterfield + + * iterfield must be in inputs + * number of elements must match across iterfield + """ + for iterfield in self.iterfield: + if not isdefined(getattr(self.inputs, iterfield)): + raise ValueError(("Input %s was not set but it is listed " + "in iterfields.") % iterfield) + if len(self.iterfield) > 1: + first_len = len( + ensure_list(getattr(self.inputs, self.iterfield[0]))) + for iterfield in self.iterfield[1:]: + if first_len != len( + ensure_list(getattr(self.inputs, iterfield))): + raise ValueError( + ("All iterfields of a MapNode have to " + "have the same length. %s") % str(self.inputs)) + + def _run_interface(self, execute=True, updatehash=False): + """Run the mapnode interface + + This is primarily intended for serial execution of mapnode. A parallel + execution requires creation of new nodes that can be spawned + """ + self._check_iterfield() + cwd = self.output_dir() + if not execute: + return self._load_results() + + # Set up mapnode folder names + if self.nested: + nitems = len( + ensure_list( + flatten(getattr(self.inputs, self.iterfield[0])))) + else: + nitems = len( + ensure_list(getattr(self.inputs, self.iterfield[0]))) + nnametpl = '_%s{}' % self.name + nodenames = [nnametpl.format(i) for i in range(nitems)] + + # Run mapnode + result = self._collate_results( + _node_runner( + self._make_nodes(cwd), + updatehash=updatehash, + stop_first=str2bool( + self.config['execution']['stop_on_first_crash']))) + # And store results + _save_resultfile(result, cwd, self.name) + # remove any node directories no longer required + dirs2remove = [] + for path in glob(op.join(cwd, 'mapflow', '*')): + if op.isdir(path): + if path.split(op.sep)[-1] not in nodenames: + dirs2remove.append(path) + for path in dirs2remove: + logger.debug('[MapNode] Removing folder "%s".', path) + shutil.rmtree(path) + + return result diff --git a/nipype/pipeline/engine/report_template.html b/nipype/pipeline/engine/report_template.html new file mode 100644 index 0000000000..3fb66b4a02 --- /dev/null +++ b/nipype/pipeline/engine/report_template.html @@ -0,0 +1,264 @@ + + + + + + + + +
+[HTML/JavaScript body lost in extraction; recoverable fragments: the headings "Flare imports" and "hierarchical edge bundling" and a "tension:" slider control]
diff --git a/nipype/pipeline/engine/report_template2.html b/nipype/pipeline/engine/report_template2.html
new file mode 100644
index 0000000000..23aef9f44e
--- /dev/null
+++ b/nipype/pipeline/engine/report_template2.html
@@ -0,0 +1,120 @@
+[HTML/JavaScript body lost in extraction; recoverable fragments: the page title "Sankey Diagram" and the heading "Nipype workflow: Sankey Diagram"]
+ + + + diff --git a/nipype/pipeline/engine/tests/__init__.py b/nipype/pipeline/engine/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/pipeline/engine/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/pipeline/engine/tests/test_base.py b/nipype/pipeline/engine/tests/test_base.py new file mode 100644 index 0000000000..fd87aa6878 --- /dev/null +++ b/nipype/pipeline/engine/tests/test_base.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals + +import pytest +from ..base import EngineBase +from ....interfaces import base as nib +from ....interfaces import utility as niu +from ... import engine as pe + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + input_file = nib.traits.File(desc='Random File') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class EngineTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +@pytest.mark.parametrize( + 'name', ['valid1', 'valid_node', 'valid-node', 'ValidNode0']) +def test_create(name): + base = EngineBase(name=name) + assert base.name == name + + +@pytest.mark.parametrize( + 'name', ['invalid*1', 'invalid.1', 'invalid@', 'in/valid', None]) +def test_create_invalid(name): + with pytest.raises(ValueError): + EngineBase(name=name) + + +def test_hierarchy(): + base = EngineBase(name='nodename') + base._hierarchy = 'some.history.behind' + + assert base.name == 'nodename' + assert base.fullname == 'some.history.behind.nodename' + + +def test_clone(): + base = EngineBase(name='nodename') + base2 = base.clone('newnodename') + + assert (base.base_dir == base2.base_dir and + base.config == base2.config and + base2.name == 'newnodename') + + with pytest.raises(ValueError): + base.clone('nodename') + +def test_clone_node_iterables(tmpdir): + tmpdir.chdir() + + def addstr(string): + return ('%s + 2' % string) + + subject_list = ['sub-001', 'sub-002'] + inputnode = pe.Node(niu.IdentityInterface(fields=['subject']), + name='inputnode') + inputnode.iterables = [('subject', subject_list)] + + node_1 = pe.Node(niu.Function(input_names='string', + output_names='string', + function=addstr), name='node_1') + node_2 = node_1.clone('node_2') + + workflow = pe.Workflow(name='iter_clone_wf') + workflow.connect([(inputnode, node_1, [('subject', 'string')]), + (node_1, node_2, [('string', 'string')])]) + workflow.run() diff --git a/nipype/pipeline/engine/tests/test_engine.py b/nipype/pipeline/engine/tests/test_engine.py new file mode 100644 index 0000000000..151849241c --- /dev/null +++ b/nipype/pipeline/engine/tests/test_engine.py @@ -0,0 +1,519 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine module +""" + +from __future__ import print_function +from __future__ import unicode_literals +from builtins import open +from copy import 
deepcopy +from glob import glob +import os + + +import pytest +from ... import engine as pe +from .test_base import EngineTestInterface + + +# Test graph expansion. The following set tests the building blocks +# of the graph expansion routine. +# XXX - SG I'll create a graphical version of these tests and actually +# ensure that all connections are tested later +@pytest.mark.parametrize( + "iterables, expected", + [ + ({ + "1": None + }, (1, 0)), # test1 + ({ + "1": dict(input1=lambda: [1, 2], input2=lambda: [1, 2]) + }, (4, 0)) # test2 + ]) +def test_1mod(iterables, expected): + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + setattr(mod1, "iterables", iterables["1"]) + pipe.add_nodes([mod1]) + pipe._flatgraph = pipe._create_flat_graph() + pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) + assert len(pipe._execgraph.nodes()) == expected[0] + assert len(pipe._execgraph.edges()) == expected[1] + + +@pytest.mark.parametrize( + "iterables, expected", + [ + ({ + "1": {}, + "2": dict(input1=lambda: [1, 2]) + }, (3, 2)), # test3 + ({ + "1": dict(input1=lambda: [1, 2]), + "2": {} + }, (4, 2)), # test4 + ({ + "1": dict(input1=lambda: [1, 2]), + "2": dict(input1=lambda: [1, 2]) + }, (6, 4)) # test5 + ]) +def test_2mods(iterables, expected): + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + for nr in ["1", "2"]: + setattr(eval("mod" + nr), "iterables", iterables[nr]) + pipe.connect([(mod1, mod2, [('output1', 'input2')])]) + pipe._flatgraph = pipe._create_flat_graph() + pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) + assert len(pipe._execgraph.nodes()) == expected[0] + assert len(pipe._execgraph.edges()) == expected[1] + + +@pytest.mark.parametrize( + "iterables, expected, connect", + [ + ({ + "1": {}, + "2": dict(input1=lambda: [1, 2]), + "3": {} + }, (5, 4), ("1-2", "2-3")), # test6 + ({ + "1": dict(input1=lambda: [1, 2]), + "2": {}, + "3": {} + }, (5, 4), ("1-3", "2-3")), # test7 + ({ + "1": dict(input1=lambda: [1, 2]), + "2": dict(input1=lambda: [1, 2]), + "3": {} + }, (8, 8), ("1-3", "2-3")), # test8 + ]) +def test_3mods(iterables, expected, connect): + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + mod3 = pe.Node(interface=EngineTestInterface(), name='mod3') + for nr in ["1", "2", "3"]: + setattr(eval("mod" + nr), "iterables", iterables[nr]) + if connect == ("1-2", "2-3"): + pipe.connect([(mod1, mod2, [('output1', 'input2')]), + (mod2, mod3, [('output1', 'input2')])]) + elif connect == ("1-3", "2-3"): + pipe.connect([(mod1, mod3, [('output1', 'input1')]), + (mod2, mod3, [('output1', 'input2')])]) + else: + raise Exception( + "connect pattern is not implemented yet within the test function") + pipe._flatgraph = pipe._create_flat_graph() + pipe._execgraph = pe.generate_expanded_graph(deepcopy(pipe._flatgraph)) + assert len(pipe._execgraph.nodes()) == expected[0] + assert len(pipe._execgraph.edges()) == expected[1] + + edgenum = sorted([(len(pipe._execgraph.in_edges(node)) + + len(pipe._execgraph.out_edges(node))) + for node in pipe._execgraph.nodes()]) + assert edgenum[0] > 0 + + +def test_expansion(): + pipe1 = pe.Workflow(name='pipe1') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + 
pipe1.connect([(mod1, mod2, [('output1', 'input2')])]) + pipe2 = pe.Workflow(name='pipe2') + mod3 = pe.Node(interface=EngineTestInterface(), name='mod3') + mod4 = pe.Node(interface=EngineTestInterface(), name='mod4') + pipe2.connect([(mod3, mod4, [('output1', 'input2')])]) + pipe3 = pe.Workflow(name="pipe3") + pipe3.connect([(pipe1, pipe2, [('mod2.output1', 'mod4.input1')])]) + pipe4 = pe.Workflow(name="pipe4") + mod5 = pe.Node(interface=EngineTestInterface(), name='mod5') + pipe4.add_nodes([mod5]) + pipe5 = pe.Workflow(name="pipe5") + pipe5.add_nodes([pipe4]) + pipe6 = pe.Workflow(name="pipe6") + pipe6.connect([(pipe5, pipe3, [('pipe4.mod5.output1', + 'pipe2.mod3.input1')])]) + + pipe6._flatgraph = pipe6._create_flat_graph() + + +def test_iterable_expansion(): + wf1 = pe.Workflow(name='test') + node1 = pe.Node(EngineTestInterface(), name='node1') + node2 = pe.Node(EngineTestInterface(), name='node2') + node1.iterables = ('input1', [1, 2]) + wf1.connect(node1, 'output1', node2, 'input2') + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3._flatgraph = wf3._create_flat_graph() + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 12 + + +def test_synchronize_expansion(): + wf1 = pe.Workflow(name='test') + node1 = pe.Node(EngineTestInterface(), name='node1') + node1.iterables = [('input1', [1, 2]), ('input2', [3, 4, 5])] + node1.synchronize = True + node2 = pe.Node(EngineTestInterface(), name='node2') + wf1.connect(node1, 'output1', node2, 'input2') + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3._flatgraph = wf3._create_flat_graph() + # Each expanded graph clone has: + # 3 node1 expansion nodes and + # 1 node2 replicate per node1 replicate + # => 2 * 3 = 6 nodes per expanded subgraph + # => 18 nodes in the group + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 18 + + +def test_synchronize_tuples_expansion(): + wf1 = pe.Workflow(name='test') + + node1 = pe.Node(EngineTestInterface(), name='node1') + node2 = pe.Node(EngineTestInterface(), name='node2') + node1.iterables = [('input1', 'input2'), [(1, 3), (2, 4), (None, 5)]] + + node1.synchronize = True + + wf1.connect(node1, 'output1', node2, 'input2') + + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + + wf3._flatgraph = wf3._create_flat_graph() + # Identical to test_synchronize_expansion + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 18 + + +def test_itersource_expansion(): + + wf1 = pe.Workflow(name='test') + node1 = pe.Node(EngineTestInterface(), name='node1') + node1.iterables = ('input1', [1, 2]) + + node2 = pe.Node(EngineTestInterface(), name='node2') + wf1.connect(node1, 'output1', node2, 'input1') + + node3 = pe.Node(EngineTestInterface(), name='node3') + node3.itersource = ('node1', 'input1') + node3.iterables = [('input1', {1: [3, 4], 2: [5, 6, 7]})] + + wf1.connect(node2, 'output1', node3, 'input1') + node4 = pe.Node(EngineTestInterface(), name='node4') + + wf1.connect(node3, 'output1', node4, 'input1') + + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + + wf3._flatgraph = wf3._create_flat_graph() + + # each expanded graph clone has: + # 2 node1 expansion nodes, + # 1 node2 per node1 replicate, + # 2 node3 replicates for the node1 input1 value 1, + # 3 node3 replicates for the node1 input1 value 2 and + # 1 node4 successor per node3 replicate + # => 2 
+ 2 + (2 + 3) + 5 = 14 nodes per expanded graph clone + # => 3 * 14 = 42 nodes in the group + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 42 + + +def test_itersource_synchronize1_expansion(): + wf1 = pe.Workflow(name='test') + node1 = pe.Node(EngineTestInterface(), name='node1') + node1.iterables = [('input1', [1, 2]), ('input2', [3, 4])] + node1.synchronize = True + node2 = pe.Node(EngineTestInterface(), name='node2') + wf1.connect(node1, 'output1', node2, 'input1') + node3 = pe.Node(EngineTestInterface(), name='node3') + node3.itersource = ('node1', ['input1', 'input2']) + node3.iterables = [('input1', { + (1, 3): [5, 6] + }), ('input2', { + (1, 3): [7, 8], + (2, 4): [9] + })] + wf1.connect(node2, 'output1', node3, 'input1') + node4 = pe.Node(EngineTestInterface(), name='node4') + wf1.connect(node3, 'output1', node4, 'input1') + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3._flatgraph = wf3._create_flat_graph() + + # each expanded graph clone has: + # 2 node1 expansion nodes, + # 1 node2 per node1 replicate, + # 2 node3 replicates for the node1 input1 value 1, + # 3 node3 replicates for the node1 input1 value 2 and + # 1 node4 successor per node3 replicate + # => 2 + 2 + (2 + 3) + 5 = 14 nodes per expanded graph clone + # => 3 * 14 = 42 nodes in the group + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 42 + + +def test_itersource_synchronize2_expansion(): + wf1 = pe.Workflow(name='test') + + node1 = pe.Node(EngineTestInterface(), name='node1') + node1.iterables = [('input1', [1, 2]), ('input2', [3, 4])] + node1.synchronize = True + node2 = pe.Node(EngineTestInterface(), name='node2') + wf1.connect(node1, 'output1', node2, 'input1') + node3 = pe.Node(EngineTestInterface(), name='node3') + node3.itersource = ('node1', ['input1', 'input2']) + node3.synchronize = True + node3.iterables = [('input1', 'input2'), { + (1, 3): [(5, 7), (6, 8)], + (2, 4): [(None, 9)] + }] + wf1.connect(node2, 'output1', node3, 'input1') + node4 = pe.Node(EngineTestInterface(), name='node4') + wf1.connect(node3, 'output1', node4, 'input1') + wf3 = pe.Workflow(name='group') + for i in [0, 1, 2]: + wf3.add_nodes([wf1.clone(name='test%d' % i)]) + wf3._flatgraph = wf3._create_flat_graph() + + # each expanded graph clone has: + # 2 node1 expansion nodes, + # 1 node2 per node1 replicate, + # 2 node3 replicates for the node1 input1 value 1, + # 1 node3 replicates for the node1 input1 value 2 and + # 1 node4 successor per node3 replicate + # => 2 + 2 + (2 + 1) + 3 = 10 nodes per expanded graph clone + # => 3 * 10 = 30 nodes in the group + assert len(pe.generate_expanded_graph(wf3._flatgraph).nodes()) == 30 + + + +def test_old_config(tmpdir): + tmpdir.chdir() + wd = os.getcwd() + from nipype.interfaces.utility import Function + + def func1(): + return 1 + + def func2(a): + return a + 1 + + n1 = pe.Node( + Function(input_names=[], output_names=['a'], function=func1), + name='n1') + n2 = pe.Node( + Function(input_names=['a'], output_names=['b'], function=func2), + name='n2') + w1 = pe.Workflow(name='test') + modify = lambda x: x + 1 + n1.inputs.a = 1 + w1.connect(n1, ('a', modify), n2, 'a') + w1.base_dir = wd + + w1.config['execution']['crashdump_dir'] = wd + # generate outputs + + w1.run(plugin='Linear') + + +def test_mapnode_json(tmpdir): + """Tests that mapnodes don't generate excess jsons + """ + tmpdir.chdir() + wd = os.getcwd() + from nipype import MapNode, Function, Workflow + + def func1(in1): + return in1 + 1 + + 
n1 = MapNode( + Function(input_names=['in1'], output_names=['out'], function=func1), + iterfield=['in1'], + name='n1') + n1.inputs.in1 = [1] + w1 = Workflow(name='test') + w1.base_dir = wd + w1.config['execution']['crashdump_dir'] = wd + w1.add_nodes([n1]) + w1.run() + n1.inputs.in1 = [2] + w1.run() + # should rerun + n1.inputs.in1 = [1] + eg = w1.run() + + node = list(eg.nodes())[0] + outjson = glob(os.path.join(node.output_dir(), '_0x*.json')) + assert len(outjson) == 1 + + # check that multiple json's don't trigger rerun + with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp: + fp.write('dummy file') + w1.config['execution'].update(**{'stop_on_first_rerun': True}) + + w1.run() + + +def test_parameterize_dirs_false(tmpdir): + from ....interfaces.utility import IdentityInterface + from ....testing import example_data + + input_file = example_data('fsl_motion_outliers_fd.txt') + + n1 = pe.Node(EngineTestInterface(), name='Node1') + n1.iterables = ('input_file', (input_file, input_file)) + n1.interface.inputs.input1 = 1 + + n2 = pe.Node(IdentityInterface(fields='in1'), name='Node2') + + wf = pe.Workflow(name='Test') + wf.base_dir = tmpdir.strpath + wf.config['execution']['parameterize_dirs'] = False + wf.connect([(n1, n2, [('output1', 'in1')])]) + + wf.run() + + +def test_serial_input(tmpdir): + tmpdir.chdir() + wd = os.getcwd() + from nipype import MapNode, Function, Workflow + + def func1(in1): + return in1 + + n1 = MapNode( + Function(input_names=['in1'], output_names=['out'], function=func1), + iterfield=['in1'], + name='n1') + n1.inputs.in1 = [1, 2, 3] + + w1 = Workflow(name='test') + w1.base_dir = wd + w1.add_nodes([n1]) + # set local check + w1.config['execution'] = { + 'stop_on_first_crash': 'true', + 'local_hash_check': 'true', + 'crashdump_dir': wd, + 'poll_sleep_duration': 2 + } + + # test output of num_subnodes method when serial is default (False) + assert n1.num_subnodes() == len(n1.inputs.in1) + + # test running the workflow on default conditions + w1.run(plugin='MultiProc') + + # test output of num_subnodes method when serial is True + n1._serial = True + assert n1.num_subnodes() == 1 + + # test running the workflow on serial conditions + w1.run(plugin='MultiProc') + + +def test_write_graph_runs(tmpdir): + tmpdir.chdir() + + for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): + for simple in (True, False): + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod2 = pe.Node(interface=EngineTestInterface(), name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + try: + pipe.write_graph( + graph2use=graph, simple_form=simple, format='dot') + except Exception: + assert False, \ + 'Failed to plot {} {} graph'.format( + 'simple' if simple else 'detailed', graph) + + assert os.path.exists('graph.dot') or os.path.exists( + 'graph_detailed.dot') + try: + os.remove('graph.dot') + except OSError: + pass + try: + os.remove('graph_detailed.dot') + except OSError: + pass + + +def test_deep_nested_write_graph_runs(tmpdir): + tmpdir.chdir() + + for graph in ('orig', 'flat', 'exec', 'hierarchical', 'colored'): + for simple in (True, False): + pipe = pe.Workflow(name='pipe') + parent = pipe + for depth in range(10): + sub = pe.Workflow(name='pipe_nest_{}'.format(depth)) + parent.add_nodes([sub]) + parent = sub + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + parent.add_nodes([mod1]) + try: + pipe.write_graph( + graph2use=graph, simple_form=simple, format='dot') + except Exception as e: + 
assert False, \ + 'Failed to plot {} {} deep graph: {!s}'.format( + 'simple' if simple else 'detailed', graph, e) + + assert os.path.exists('graph.dot') or os.path.exists( + 'graph_detailed.dot') + try: + os.remove('graph.dot') + except OSError: + pass + try: + os.remove('graph_detailed.dot') + except OSError: + pass + + +def test_io_subclass(): + """Ensure any io subclass allows dynamic traits""" + from nipype.interfaces.io import IOBase + from nipype.interfaces.base import DynamicTraitedSpec + + class TestKV(IOBase): + _always_run = True + output_spec = DynamicTraitedSpec + + def _list_outputs(self): + outputs = {} + outputs['test'] = 1 + outputs['foo'] = 'bar' + return outputs + + wf = pe.Workflow('testkv') + + def testx2(test): + return test * 2 + + kvnode = pe.Node(TestKV(), name='testkv') + from nipype.interfaces.utility import Function + func = pe.Node( + Function( + input_names=['test'], output_names=['test2'], function=testx2), + name='func') + exception_not_raised = True + try: + wf.connect(kvnode, 'test', func, 'test') + except Exception as e: + if 'Module testkv has no output called test' in str(e): + exception_not_raised = False + assert exception_not_raised diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py new file mode 100644 index 0000000000..77fc0f2fdf --- /dev/null +++ b/nipype/pipeline/engine/tests/test_join.py @@ -0,0 +1,661 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for join expansion +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +from ... import engine as pe +from ....interfaces import base as nib +from ....interfaces.utility import IdentityInterface, Function, Merge +from ....interfaces.base import traits, File + + +class PickFirstSpec(nib.TraitedSpec): + in_files = traits.List( + File(exists=True), argstr="%s", position=2, mandatory=True) + + +class PickFirstOutSpec(nib.TraitedSpec): + output1 = File(exists=True) + + +class PickFirst(nib.BaseInterface): + input_spec = PickFirstSpec + output_spec = PickFirstOutSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = self.inputs.in_files[0] + return outputs + + +class IncrementInputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(mandatory=True, desc='input') + inc = nib.traits.Int(usedefault=True, default_value=1, desc='increment') + + +class IncrementOutputSpec(nib.TraitedSpec): + output1 = nib.traits.Int(desc='output') + + +class IncrementInterface(nib.BaseInterface): + input_spec = IncrementInputSpec + output_spec = IncrementOutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = self.inputs.input1 + self.inputs.inc + return outputs + + +_sums = [] + +_sum_operands = [] + + +class SumInputSpec(nib.TraitedSpec): + input1 = nib.traits.List(nib.traits.Int, mandatory=True, desc='input') + + +class SumOutputSpec(nib.TraitedSpec): + output1 = nib.traits.Int(desc='output') + operands = nib.traits.List(nib.traits.Int, desc='operands') + + +class SumInterface(nib.BaseInterface): + input_spec = SumInputSpec + output_spec = SumOutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + global _sums + global 
_sum_operands + outputs = self._outputs().get() + outputs['operands'] = self.inputs.input1 + _sum_operands.append(outputs['operands']) + outputs['output1'] = sum(self.inputs.input1) + _sums.append(outputs['output1']) + return outputs + + +_set_len = None +"""The Set interface execution result.""" + + +class SetInputSpec(nib.TraitedSpec): + input1 = nib.traits.Set(nib.traits.Int, mandatory=True, desc='input') + + +class SetOutputSpec(nib.TraitedSpec): + output1 = nib.traits.Int(desc='output') + + +class SetInterface(nib.BaseInterface): + input_spec = SetInputSpec + output_spec = SetOutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + global _set_len + outputs = self._outputs().get() + _set_len = outputs['output1'] = len(self.inputs.input1) + return outputs + + +_products = [] +"""The Products interface execution results.""" + + +class ProductInputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(mandatory=True, desc='input1') + input2 = nib.traits.Int(mandatory=True, desc='input2') + + +class ProductOutputSpec(nib.TraitedSpec): + output1 = nib.traits.Int(mandatory=True, desc='output') + + +class ProductInterface(nib.BaseInterface): + input_spec = ProductInputSpec + output_spec = ProductOutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + global _products + outputs = self._outputs().get() + outputs['output1'] = self.inputs.input1 * self.inputs.input2 + _products.append(outputs['output1']) + return outputs + + +def test_join_expansion(tmpdir): + tmpdir.chdir() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # another pre-join node in the iterated path + pre_join2 = pe.Node(IncrementInterface(), name='pre_join2') + wf.connect(pre_join1, 'output1', pre_join2, 'input1') + # the join node + join = pe.JoinNode( + SumInterface(), + joinsource='inputspec', + joinfield='input1', + name='join') + wf.connect(pre_join2, 'output1', join, 'input1') + # an uniterated post-join node + post_join1 = pe.Node(IncrementInterface(), name='post_join1') + wf.connect(join, 'output1', post_join1, 'input1') + # a post-join node in the iterated path + post_join2 = pe.Node(ProductInterface(), name='post_join2') + wf.connect(join, 'output1', post_join2, 'input1') + wf.connect(pre_join1, 'output1', post_join2, 'input2') + + result = wf.run() + + # the two expanded pre-join predecessor nodes feed into one join node + joins = [node for node in result.nodes() if node.name == 'join'] + assert len(joins) == 1, "The number of join result nodes is incorrect." + # the expanded graph contains 2 * 2 = 4 iteration pre-join nodes, 1 join + # node, 1 non-iterated post-join node and 2 * 1 iteration post-join nodes. + # Nipype factors away the IdentityInterface. + assert len( + result.nodes()) == 8, "The number of expanded nodes is incorrect." + # the join Sum result is (1 + 1 + 1) + (2 + 1 + 1) + assert len(_sums) == 1, "The number of join outputs is incorrect" + assert _sums[ + 0] == 7, "The join Sum output value is incorrect: %s." % _sums[0] + # the join input preserves the iterables input order + assert _sum_operands[0] == [3, 4], \ + "The join Sum input is incorrect: %s." 
% _sum_operands[0] + # there are two iterations of the post-join node in the iterable path + assert len(_products) == 2,\ + "The number of iterated post-join outputs is incorrect" + + +def test_node_joinsource(tmpdir): + """Test setting the joinsource to a Node.""" + tmpdir.chdir() + + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2])] + # the join node + join = pe.JoinNode( + SetInterface(), joinsource=inputspec, joinfield='input1', name='join') + + # the joinsource is the inputspec name + assert join.joinsource == inputspec.name, \ + "The joinsource is not set to the node name." + + +def test_set_join_node(tmpdir): + """Test collecting join inputs to a set.""" + tmpdir.chdir() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2, 1, 3, 2])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # the set join node + join = pe.JoinNode( + SetInterface(), + joinsource='inputspec', + joinfield='input1', + name='join') + wf.connect(pre_join1, 'output1', join, 'input1') + + wf.run() + + # the join length is the number of unique inputs + assert _set_len == 3, \ + "The join Set output value is incorrect: %s." % _set_len + + +def test_unique_join_node(tmpdir): + """Test join with the ``unique`` flag set to True.""" + global _sum_operands + _sum_operands = [] + tmpdir.chdir() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [3, 1, 2, 1, 3])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # the set join node + join = pe.JoinNode( + SumInterface(), + joinsource='inputspec', + joinfield='input1', + unique=True, + name='join') + wf.connect(pre_join1, 'output1', join, 'input1') + + wf.run() + + assert _sum_operands[0] == [4, 2, 3], \ + "The unique join output value is incorrect: %s." % _sum_operands[0] + + +def test_multiple_join_nodes(tmpdir): + """Test two join nodes, one downstream of the other.""" + global _products + _products = [] + tmpdir.chdir() + + # Make the workflow. 
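+ # join2 below is downstream of join1: join1 collects the iterated + # increments into one vector, post_join1 reduces that vector to a + # scalar, and join2 re-joins the same iterated path with that scalar + # attached alongside.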
+ wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2, 3])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # the first join node + join1 = pe.JoinNode( + IdentityInterface(fields=['vector']), + joinsource='inputspec', + joinfield='vector', + name='join1') + wf.connect(pre_join1, 'output1', join1, 'vector') + # an uniterated post-join node + post_join1 = pe.Node(SumInterface(), name='post_join1') + wf.connect(join1, 'vector', post_join1, 'input1') + # the downstream join node connected to both an upstream join + # path output and a separate input in the iterated path + join2 = pe.JoinNode( + IdentityInterface(fields=['vector', 'scalar']), + joinsource='inputspec', + joinfield='vector', + name='join2') + wf.connect(pre_join1, 'output1', join2, 'vector') + wf.connect(post_join1, 'output1', join2, 'scalar') + # a second post-join node + post_join2 = pe.Node(SumInterface(), name='post_join2') + wf.connect(join2, 'vector', post_join2, 'input1') + # a third post-join node + post_join3 = pe.Node(ProductInterface(), name='post_join3') + wf.connect(post_join2, 'output1', post_join3, 'input1') + wf.connect(join2, 'scalar', post_join3, 'input2') + + result = wf.run() + + # The expanded graph contains one pre_join1 replicate per inputspec + # replicate and one of each remaining node = 3 + 5 = 8 nodes. + # The replicated inputspec nodes are factored out of the expansion. + assert len(result.nodes()) == 8, \ + "The number of expanded nodes is incorrect." + # The outputs are: + # pre_join1: [2, 3, 4] + # post_join1: 9 + # join2: [2, 3, 4] and 9 + # post_join2: 9 + # post_join3: 9 * 9 = 81 + assert _products == [81], "The post-join product is incorrect" + + +def test_identity_join_node(tmpdir): + """Test an IdentityInterface join.""" + global _sum_operands + _sum_operands = [] + tmpdir.chdir() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2, 3])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # the IdentityInterface join node + join = pe.JoinNode( + IdentityInterface(fields=['vector']), + joinsource='inputspec', + joinfield='vector', + name='join') + wf.connect(pre_join1, 'output1', join, 'vector') + # an uniterated post-join node + post_join1 = pe.Node(SumInterface(), name='post_join1') + wf.connect(join, 'vector', post_join1, 'input1') + + result = wf.run() + + # the expanded graph contains 1 * 3 iteration pre-join nodes, 1 join + # node and 1 post-join node. Nipype factors away the iterable input + # IdentityInterface but keeps the join IdentityInterface. + assert len(result.nodes()) == 5, \ + "The number of expanded nodes is incorrect." + assert _sum_operands[0] == [2, 3, 4], \ + "The join Sum input is incorrect: %s." % _sum_operands[0] + + +def test_multifield_join_node(tmpdir): + """Test join on several fields.""" + global _products + _products = [] + tmpdir.chdir() + + # Make the workflow. 
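+ # No joinfield is specified for the join node below; the JoinNode is + # then expected to treat every field of the join interface ('vector1' + # and 'vector2') as a join field.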
+ wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') + inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] + # two pre-join nodes in a parallel iterated path + inc1 = pe.Node(IncrementInterface(), name='inc1') + wf.connect(inputspec, 'm', inc1, 'input1') + inc2 = pe.Node(IncrementInterface(), name='inc2') + wf.connect(inputspec, 'n', inc2, 'input1') + # the join node + join = pe.JoinNode( + IdentityInterface(fields=['vector1', 'vector2']), + joinsource='inputspec', + name='join') + wf.connect(inc1, 'output1', join, 'vector1') + wf.connect(inc2, 'output1', join, 'vector2') + # a post-join node + prod = pe.MapNode( + ProductInterface(), name='prod', iterfield=['input1', 'input2']) + wf.connect(join, 'vector1', prod, 'input1') + wf.connect(join, 'vector2', prod, 'input2') + + result = wf.run() + + # the iterables are expanded as the cartesian product of the iterables values. + # thus, the expanded graph contains 2 * (2 * 2) iteration pre-join nodes, 1 join + # node and 1 post-join node. + assert len(result.nodes()) == 10, \ + "The number of expanded nodes is incorrect." + # the product inputs are [2, 4], [2, 5], [3, 4], [3, 5] + assert set(_products) == set([8, 10, 12, 15]), \ + "The post-join products are incorrect: %s." % _products + + +def test_synchronize_join_node(tmpdir): + """Test join on an input node which has the ``synchronize`` flag set to True.""" + global _products + _products = [] + tmpdir.chdir() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['m', 'n']), name='inputspec') + inputspec.iterables = [('m', [1, 2]), ('n', [3, 4])] + inputspec.synchronize = True + # two pre-join nodes in a parallel iterated path + inc1 = pe.Node(IncrementInterface(), name='inc1') + wf.connect(inputspec, 'm', inc1, 'input1') + inc2 = pe.Node(IncrementInterface(), name='inc2') + wf.connect(inputspec, 'n', inc2, 'input1') + # the join node + join = pe.JoinNode( + IdentityInterface(fields=['vector1', 'vector2']), + joinsource='inputspec', + name='join') + wf.connect(inc1, 'output1', join, 'vector1') + wf.connect(inc2, 'output1', join, 'vector2') + # a post-join node + prod = pe.MapNode( + ProductInterface(), name='prod', iterfield=['input1', 'input2']) + wf.connect(join, 'vector1', prod, 'input1') + wf.connect(join, 'vector2', prod, 'input2') + + result = wf.run() + + # the synchronized iterables are paired off rather than crossed, giving + # 2 expansions. thus, the expanded graph contains 2 * 2 iteration + # pre-join nodes, 1 join node and 1 post-join node. + assert len(result.nodes()) == 6, \ + "The number of expanded nodes is incorrect." + # the product inputs are [2, 3] and [4, 5] + assert _products == [8, 15], \ + "The post-join products are incorrect: %s." % _products + + +def test_itersource_join_source_node(tmpdir): + """Test join on an input node which has an ``itersource``.""" + tmpdir.chdir() + + # Make the workflow. 
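+ # The join below uses the itersource node pre_join2 as its joinsource, + # so one join replicate is expected per inputspec value, each gathering + # only the pre_join3 outputs of its own iteration branch.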
+ wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2])] + # an intermediate node in the first iteration path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # an iterable pre-join node with an itersource + pre_join2 = pe.Node(ProductInterface(), name='pre_join2') + pre_join2.itersource = ('inputspec', 'n') + pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) + wf.connect(pre_join1, 'output1', pre_join2, 'input2') + # an intermediate node in the second iteration path + pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') + wf.connect(pre_join2, 'output1', pre_join3, 'input1') + # the join node + join = pe.JoinNode( + IdentityInterface(fields=['vector']), + joinsource='pre_join2', + joinfield='vector', + name='join') + wf.connect(pre_join3, 'output1', join, 'vector') + # a join successor node + post_join1 = pe.Node(SumInterface(), name='post_join1') + wf.connect(join, 'vector', post_join1, 'input1') + + result = wf.run() + + # the expanded graph contains + # 1 pre_join1 replicate for each inputspec iteration, + # 2 pre_join2 replicates for each inputspec iteration, + # 1 pre_join3 for each pre_join2 iteration, + # 1 join replicate for each inputspec iteration and + # 1 post_join1 replicate for each join replicate = + # 2 + (2 * 2) + 4 + 2 + 2 = 14 expansion graph nodes. + # Nipype factors away the iterable input + # IdentityInterface but keeps the join IdentityInterface. + assert len(result.nodes()) == 14, \ + "The number of expanded nodes is incorrect." + # The first join inputs are: + # 1 + (3 * 2) and 1 + (4 * 2) + # The second join inputs are: + # 1 + (5 * 3) and 1 + (6 * 3) + # the post-join nodes execution order is indeterminate; + # therefore, compare the lists item-wise. + assert [16, 19] in _sum_operands, \ + "The join Sum input is incorrect: %s." % _sum_operands + assert [7, 9] in _sum_operands, \ + "The join Sum input is incorrect: %s." % _sum_operands + + +def test_itersource_two_join_nodes(tmpdir): + """Test join with a midstream ``itersource`` and an upstream + iterable.""" + tmpdir.chdir() + + # Make the workflow. 
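+ # This repeats the test_itersource_join_source_node topology and adds a + # second join over 'inputspec' that gathers the per-branch sums from + # post_join1 into a single summary vector.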
+ wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [1, 2])] + # an intermediate node in the first iteration path + pre_join1 = pe.Node(IncrementInterface(), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'input1') + # an iterable pre-join node with an itersource + pre_join2 = pe.Node(ProductInterface(), name='pre_join2') + pre_join2.itersource = ('inputspec', 'n') + pre_join2.iterables = ('input1', {1: [3, 4], 2: [5, 6]}) + wf.connect(pre_join1, 'output1', pre_join2, 'input2') + # an intermediate node in the second iteration path + pre_join3 = pe.Node(IncrementInterface(), name='pre_join3') + wf.connect(pre_join2, 'output1', pre_join3, 'input1') + # the first join node + join1 = pe.JoinNode( + IdentityInterface(fields=['vector']), + joinsource='pre_join2', + joinfield='vector', + name='join1') + wf.connect(pre_join3, 'output1', join1, 'vector') + # a join successor node + post_join1 = pe.Node(SumInterface(), name='post_join1') + wf.connect(join1, 'vector', post_join1, 'input1') + # a summary join node + join2 = pe.JoinNode( + IdentityInterface(fields=['vector']), + joinsource='inputspec', + joinfield='vector', + name='join2') + wf.connect(post_join1, 'output1', join2, 'vector') + + result = wf.run() + + # the expanded graph contains the 14 test_itersource_join_source_node + # nodes plus the summary join node. + assert len(result.nodes()) == 15, \ + "The number of expanded nodes is incorrect." + + +def test_set_join_node_file_input(tmpdir): + """Test collecting join inputs to a set.""" + tmpdir.chdir() + open('test.nii', 'w+').close() + open('test2.nii', 'w+').close() + + # Make the workflow. + wf = pe.Workflow(name='test') + # the iterated input node + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', [ + tmpdir.join('test.nii').strpath, + tmpdir.join('test2.nii').strpath + ])] + # a pre-join node in the iterated path + pre_join1 = pe.Node(IdentityInterface(fields=['n']), name='pre_join1') + wf.connect(inputspec, 'n', pre_join1, 'n') + # the set join node + join = pe.JoinNode( + PickFirst(), joinsource='inputspec', joinfield='in_files', name='join') + wf.connect(pre_join1, 'n', join, 'in_files') + + wf.run() + + +def test_nested_workflow_join(tmpdir): + """Test collecting join inputs within a nested workflow""" + tmpdir.chdir() + + # Make the nested workflow + def nested_wf(i, name='smallwf'): + # iterables with list of nums + inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') + inputspec.iterables = [('n', i)] + # increment each iterable before joining + pre_join = pe.Node(IncrementInterface(), name='pre_join') + # rejoin nums into list + join = pe.JoinNode( + IdentityInterface(fields=['n']), + joinsource='inputspec', + joinfield='n', + name='join') + # define and connect nested workflow + wf = pe.Workflow(name='wf_%d' % i[0]) + wf.connect(inputspec, 'n', pre_join, 'input1') + wf.connect(pre_join, 'output1', join, 'n') + return wf + + # master wf + meta_wf = pe.Workflow(name='meta', base_dir='.') + # add each mini-workflow to master + for i in [[1, 3], [2, 4]]: + mini_wf = nested_wf(i) + meta_wf.add_nodes([mini_wf]) + + result = meta_wf.run() + + # there should be six nodes in total + assert len(result.nodes()) == 6, \ + "The number of expanded nodes is incorrect." 
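+ + +# A minimal sketch of the join pattern the tests above exercise, kept as +# a comment rather than an executed test; it reuses the Increment and Sum +# interfaces defined at the top of this file, with illustrative names: +# +# inputspec = pe.Node(IdentityInterface(fields=['n']), name='inputspec') +# inputspec.iterables = [('n', [1, 2])] +# inc = pe.Node(IncrementInterface(), name='inc') +# join = pe.JoinNode(SumInterface(), joinsource='inputspec', +# joinfield='input1', name='join') +# wf = pe.Workflow(name='sketch') +# wf.connect(inputspec, 'n', inc, 'input1') +# wf.connect(inc, 'output1', join, 'input1') +# +# Expansion replicates 'inc' once per iterable value and hands the join +# node the list of all replicate outputs, here [2, 3].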
+ + +def test_name_prefix_join(tmpdir): + tmpdir.chdir() + + def sq(x): + return x ** 2 + + wf = pe.Workflow('wf', base_dir=tmpdir.strpath) + square = pe.Node(Function(function=sq), name='square') + square.iterables = [('x', [1, 2])] + square_join = pe.JoinNode(Merge(1, ravel_inputs=True), + name='square_join', + joinsource='square', + joinfield=['in1']) + wf.connect(square, 'out', square_join, "in1") + wf.run() + + +def test_join_nestediters(tmpdir): + tmpdir.chdir() + + def exponent(x, p): + return x ** p + + wf = pe.Workflow('wf', base_dir=tmpdir.strpath) + + xs = pe.Node(IdentityInterface(['x']), + iterables=[('x', [1, 2])], + name='xs') + ps = pe.Node(IdentityInterface(['p']), + iterables=[('p', [3, 4])], + name='ps') + exp = pe.Node(Function(function=exponent), name='exp') + exp_joinx = pe.JoinNode(Merge(1, ravel_inputs=True), + name='exp_joinx', + joinsource='xs', + joinfield=['in1']) + exp_joinp = pe.JoinNode(Merge(1, ravel_inputs=True), + name='exp_joinp', + joinsource='ps', + joinfield=['in1']) + wf.connect([ + (xs, exp, [('x', 'x')]), + (ps, exp, [('p', 'p')]), + (exp, exp_joinx, [('out', 'in1')]), + (exp_joinx, exp_joinp, [('out', 'in1')])]) + + wf.run() diff --git a/nipype/pipeline/engine/tests/test_nodes.py b/nipype/pipeline/engine/tests/test_nodes.py new file mode 100644 index 0000000000..4a04b94766 --- /dev/null +++ b/nipype/pipeline/engine/tests/test_nodes.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import print_function, unicode_literals +from builtins import str +import os +from copy import deepcopy +import pytest + +from .... import config +from ....interfaces import utility as niu +from ... import engine as pe +from ..utils import merge_dict +from .test_base import EngineTestInterface +from .test_utils import UtilsTestInterface + +''' +Test for order of iterables + +import nipype.pipeline.engine as pe +import nipype.interfaces.utility as niu + +wf1 = pe.Workflow(name='wf1') +node1 = pe.Node(interface=niu.IdentityInterface(fields=['a1','b1']), name='node1') +node1.iterables = ('a1', [1,2]) +wf1.add_nodes([node1]) + +wf2 = pe.Workflow(name='wf2') +node2 = pe.Node(interface=niu.IdentityInterface(fields=['a2','b2']), name='node2') +wf2.add_nodes([node2]) +wf1.connect(node1, 'a1', wf2, 'node2.a2') + +node4 = pe.Node(interface=niu.IdentityInterface(fields=['a4','b4']), name='node4') +#node4.iterables = ('a4', [5,6]) +wf2.connect(node2, 'b2', node4, 'b4') + +wf3 = pe.Workflow(name='wf3') +node3 = pe.Node(interface=niu.IdentityInterface(fields=['a3','b3']), name='node3') +node3.iterables = ('b3', [3,4]) +wf3.add_nodes([node3]) +wf1.connect(wf3, 'node3.b3', wf2, 'node2.b2') + +wf1.base_dir = os.path.join(os.getcwd(),'testit') +wf1.run(inseries=True, createdirsonly=True) + +wf1.write_graph(graph2use='exec') +''' +''' +import nipype.pipeline.engine as pe +import nipype.interfaces.spm as spm +import os +from io import StringIO +from nipype.utils.config import config + +config.readfp(StringIO(""" +[execution] +remove_unnecessary_outputs = true +""")) + + +segment = pe.Node(interface=spm.Segment(), name="segment") +segment.inputs.data = os.path.abspath("data/T1.nii") +segment.inputs.gm_output_type = [True, True, True] +segment.inputs.wm_output_type = [True, True, True] + + +smooth_gm = pe.Node(interface=spm.Smooth(), name="smooth_gm") + +workflow = pe.Workflow(name="workflow_cleanup_test") +workflow.base_dir = 
os.path.abspath('./workflow_cleanup_test') + +workflow.connect([(segment, smooth_gm, [('native_gm_image','in_files')])]) + +workflow.run() + +#adding new node that uses one of the previously deleted outputs of segment; this should force segment to rerun +smooth_wm = pe.Node(interface=spm.Smooth(), name="smooth_wm") + +workflow.connect([(segment, smooth_wm, [('native_wm_image','in_files')])]) + +workflow.run() + +workflow.run() +''' + +# Node + + +def test_node_init(): + with pytest.raises(TypeError): + pe.Node() + with pytest.raises(IOError): + pe.Node(EngineTestInterface, name='test') + + +def test_node_get_output(): + mod1 = pe.Node(interface=EngineTestInterface(), name='mod1') + mod1.inputs.input1 = 1 + mod1.run() + assert mod1.get_output('output1') == [1, 1] + mod1._result = None + assert mod1.get_output('output1') == [1, 1] + + +def test_mapnode_iterfield_check(): + mod1 = pe.MapNode(EngineTestInterface(), iterfield=['input1'], name='mod1') + with pytest.raises(ValueError): + mod1._check_iterfield() + mod1 = pe.MapNode( + EngineTestInterface(), iterfield=['input1', 'input2'], name='mod1') + mod1.inputs.input1 = [1, 2] + mod1.inputs.input2 = 3 + with pytest.raises(ValueError): + mod1._check_iterfield() + + +@pytest.mark.parametrize("x_inp, f_exp", + [(3, [6]), ([2, 3], [4, 6]), ((2, 3), [4, 6]), + (range(3), [0, 2, 4]), ("Str", ["StrStr"]), + (["Str1", "Str2"], ["Str1Str1", "Str2Str2"])]) +def test_mapnode_iterfield_type(x_inp, f_exp): + from nipype import MapNode, Function + + def double_func(x): + return 2 * x + + double = Function(["x"], ["f_x"], double_func) + + double_node = MapNode(double, name="double", iterfield=["x"]) + double_node.inputs.x = x_inp + + res = double_node.run() + assert res.outputs.f_x == f_exp + + +def test_mapnode_nested(tmpdir): + tmpdir.chdir() + from nipype import MapNode, Function + + def func1(in1): + return in1 + 1 + + n1 = MapNode( + Function(input_names=['in1'], output_names=['out'], function=func1), + iterfield=['in1'], + nested=True, + name='n1') + n1.inputs.in1 = [[1, [2]], 3, [4, 5]] + n1.run() + assert n1.get_output('out') == [[2, [3]], 4, [5, 6]] + + n2 = MapNode( + Function(input_names=['in1'], output_names=['out'], function=func1), + iterfield=['in1'], + nested=False, + name='n1') + n2.inputs.in1 = [[1, [2]], 3, [4, 5]] + + with pytest.raises(Exception) as excinfo: + n2.run() + assert "can only concatenate list" in str(excinfo.value) + + +def test_mapnode_expansion(tmpdir): + tmpdir.chdir() + from nipype import MapNode, Function + + def func1(in1): + return in1 + 1 + + mapnode = MapNode( + Function(function=func1), + iterfield='in1', + name='mapnode', + n_procs=2, + mem_gb=2) + mapnode.inputs.in1 = [1, 2] + + for idx, node in mapnode._make_nodes(): + for attr in ('overwrite', 'run_without_submitting', 'plugin_args'): + assert getattr(node, attr) == getattr(mapnode, attr) + for attr in ('_n_procs', '_mem_gb'): + assert (getattr(node, attr) == getattr(mapnode, attr)) + + +def test_node_hash(tmpdir): + from nipype.interfaces.utility import Function + tmpdir.chdir() + + config.set_default_config() + config.set('execution', 'stop_on_first_crash', True) + config.set('execution', 'crashdump_dir', os.getcwd()) + + def func1(): + return 1 + + def func2(a): + return a + 1 + + n1 = pe.Node( + Function(input_names=[], output_names=['a'], function=func1), + name='n1') + n2 = pe.Node( + Function(input_names=['a'], output_names=['b'], function=func2), + name='n2') + w1 = pe.Workflow(name='test') + + def modify(x): + return x + 1 + n1.inputs.a = 1 + 
w1.connect(n1, ('a', modify), n2, 'a') + w1.base_dir = os.getcwd() + + # create dummy distributed plugin class + from nipype.pipeline.plugins.base import DistributedPluginBase + + # create a custom exception + class EngineTestException(Exception): + pass + + class RaiseError(DistributedPluginBase): + def _submit_job(self, node, updatehash=False): + raise EngineTestException( + 'Submit called - cached=%s, updated=%s' % node.is_cached()) + + # check if a proper exception is raised + with pytest.raises(EngineTestException) as excinfo: + w1.run(plugin=RaiseError()) + assert str(excinfo.value).startswith('Submit called') + + # generate outputs + w1.run(plugin='Linear') + # ensure plugin is being called + config.set('execution', 'local_hash_check', False) + + # rerun to ensure we have outputs + w1.run(plugin='Linear') + + # set local check + config.set('execution', 'local_hash_check', True) + w1 = pe.Workflow(name='test') + w1.connect(n1, ('a', modify), n2, 'a') + w1.base_dir = os.getcwd() + w1.run(plugin=RaiseError()) + + +def test_outputs_removal(tmpdir): + def test_function(arg1): + import os + file1 = os.path.join(os.getcwd(), 'file1.txt') + file2 = os.path.join(os.getcwd(), 'file2.txt') + with open(file1, 'wt') as fp: + fp.write('%d' % arg1) + with open(file2, 'wt') as fp: + fp.write('%d' % arg1) + return file1, file2 + + n1 = pe.Node( + niu.Function( + input_names=['arg1'], + output_names=['file1', 'file2'], + function=test_function), + base_dir=tmpdir.strpath, + name='testoutputs') + n1.inputs.arg1 = 1 + n1.config = {'execution': {'remove_unnecessary_outputs': True}} + n1.config = merge_dict(deepcopy(config._sections), n1.config) + n1.run() + assert tmpdir.join(n1.name, 'file1.txt').check() + assert tmpdir.join(n1.name, 'file2.txt').check() + n1.needed_outputs = ['file2'] + n1.run() + assert not tmpdir.join(n1.name, 'file1.txt').check() + assert tmpdir.join(n1.name, 'file2.txt').check() + + +def test_inputs_removal(tmpdir): + file1 = tmpdir.join('file1.txt') + file1.write('dummy_file') + n1 = pe.Node( + UtilsTestInterface(), base_dir=tmpdir.strpath, name='testinputs') + n1.inputs.in_file = file1.strpath + n1.config = {'execution': {'keep_inputs': True}} + n1.config = merge_dict(deepcopy(config._sections), n1.config) + n1.run() + assert tmpdir.join(n1.name, 'file1.txt').check() + n1.inputs.in_file = file1.strpath + n1.config = {'execution': {'keep_inputs': False}} + n1.config = merge_dict(deepcopy(config._sections), n1.config) + n1.overwrite = True + n1.run() + assert not tmpdir.join(n1.name, 'file1.txt').check() diff --git a/nipype/pipeline/engine/tests/test_utils.py b/nipype/pipeline/engine/tests/test_utils.py new file mode 100644 index 0000000000..42f8b2434e --- /dev/null +++ b/nipype/pipeline/engine/tests/test_utils.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine utils module +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +import os +import sys +from copy import deepcopy +import pytest + +from ... import engine as pe +from ....interfaces import base as nib +from ....interfaces import utility as niu +from .... 
import config +from ..utils import clean_working_directory, write_workflow_prov + + +class InputSpec(nib.TraitedSpec): + in_file = nib.File(exists=True, copyfile=True) + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class UtilsTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1] + return outputs + + +def test_identitynode_removal(tmpdir): + def test_function(arg1, arg2, arg3): + import numpy as np + return (np.array(arg1) + arg2 + arg3).tolist() + + wf = pe.Workflow(name="testidentity", base_dir=tmpdir.strpath) + + n1 = pe.Node( + niu.IdentityInterface(fields=['a', 'b']), + name='src', + base_dir=tmpdir.strpath) + n1.iterables = ('b', [0, 1, 2, 3]) + n1.inputs.a = [0, 1, 2, 3] + + n2 = pe.Node(niu.Select(), name='selector', base_dir=tmpdir.strpath) + wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist') + wf.connect(n1, 'b', n2, 'index') + + n3 = pe.Node( + niu.IdentityInterface(fields=['c', 'd']), + name='passer', + base_dir=tmpdir.strpath) + n3.inputs.c = [1, 2, 3, 4] + wf.connect(n2, 'out', n3, 'd') + + n4 = pe.Node(niu.Select(), name='selector2', base_dir=tmpdir.strpath) + wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist') + wf.connect(n3, 'd', n4, 'index') + + fg = wf._create_flat_graph() + wf._set_needed_outputs(fg) + eg = pe.generate_expanded_graph(deepcopy(fg)) + assert len(eg.nodes()) == 8 + + +def test_clean_working_directory(tmpdir): + class OutputSpec(nib.TraitedSpec): + files = nib.traits.List(nib.File) + others = nib.File() + + class InputSpec(nib.TraitedSpec): + infile = nib.File() + + outputs = OutputSpec() + inputs = InputSpec() + + filenames = [ + 'file.hdr', 'file.img', 'file.BRIK', 'file.HEAD', '_0x1234.json', + 'foo.txt' + ] + outfiles = [] + for filename in filenames: + outfile = tmpdir.join(filename) + outfile.write('dummy') + outfiles.append(outfile.strpath) + outputs.files = outfiles[:4:2] + outputs.others = outfiles[5] + inputs.infile = outfiles[-1] + needed_outputs = ['files'] + config.set_default_config() + assert os.path.exists(outfiles[5]) + config.set_default_config() + config.set('execution', 'remove_unnecessary_outputs', False) + out = clean_working_directory(outputs, tmpdir.strpath, inputs, + needed_outputs, deepcopy(config._sections)) + assert os.path.exists(outfiles[5]) + assert out.others == outfiles[5] + config.set('execution', 'remove_unnecessary_outputs', True) + out = clean_working_directory(outputs, tmpdir.strpath, inputs, + needed_outputs, deepcopy(config._sections)) + assert os.path.exists(outfiles[1]) + assert os.path.exists(outfiles[3]) + assert os.path.exists(outfiles[4]) + assert not os.path.exists(outfiles[5]) + assert out.others == nib.Undefined + assert len(out.files) == 2 + config.set_default_config() + + +def create_wf(name): + """Creates a workflow for the following tests""" + def fwhm(fwhm): + return fwhm + + pipe = pe.Workflow(name=name) + process = pe.Node( + niu.Function( + input_names=['fwhm'], output_names=['fwhm'], function=fwhm), + name='proc') + process.iterables = ('fwhm', [0]) + process2 = pe.Node( + niu.Function( + input_names=['fwhm'], output_names=['fwhm'], function=fwhm), + name='proc2') + process2.iterables = ('fwhm', [0]) + pipe.connect(process, 'fwhm', process2, 'fwhm') + return pipe + + +def test_multi_disconnected_iterable(tmpdir): + metawf = 
pe.Workflow(name='meta') + metawf.base_dir = tmpdir.strpath + metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)]) + eg = metawf.run(plugin='Linear') + assert len(eg.nodes()) == 60 + + +def test_provenance(tmpdir): + metawf = pe.Workflow(name='meta') + metawf.base_dir = tmpdir.strpath + metawf.add_nodes([create_wf('wf%d' % i) for i in range(1)]) + eg = metawf.run(plugin='Linear') + prov_base = tmpdir.join('workflow_provenance_test').strpath + psg = write_workflow_prov(eg, prov_base, format='all') + assert len(psg.bundles) == 2 + assert len(psg.get_records()) == 7 + + +def dummy_func(value): + return value + 1 + + +@pytest.mark.skipif( + sys.version_info < (3, 0), reason="the famous segfault #1788") +def test_mapnode_crash(tmpdir): + """Test mapnode crash when stop_on_first_crash is True""" + cwd = os.getcwd() + node = pe.MapNode( + niu.Function( + input_names=['WRONG'], + output_names=['newstring'], + function=dummy_func), + iterfield=['WRONG'], + name='myfunc') + node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] + node.config = deepcopy(config._sections) + node.config['execution']['stop_on_first_crash'] = True + node.base_dir = tmpdir.strpath + with pytest.raises(TypeError): + node.run() + os.chdir(cwd) + + +@pytest.mark.skipif( + sys.version_info < (3, 0), reason="the famous segfault #1788") +def test_mapnode_crash2(tmpdir): + """Test mapnode crash when stop_on_first_crash is False""" + cwd = os.getcwd() + node = pe.MapNode( + niu.Function( + input_names=['WRONG'], + output_names=['newstring'], + function=dummy_func), + iterfield=['WRONG'], + name='myfunc') + node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] + node.base_dir = tmpdir.strpath + + with pytest.raises(Exception): + node.run() + os.chdir(cwd) + + +@pytest.mark.skipif( + sys.version_info < (3, 0), reason="the famous segfault #1788") +def test_mapnode_crash3(tmpdir): + """Test mapnode crash when mapnode is embedded in a workflow""" + tmpdir.chdir() + node = pe.MapNode( + niu.Function( + input_names=['WRONG'], + output_names=['newstring'], + function=dummy_func), + iterfield=['WRONG'], + name='myfunc') + node.inputs.WRONG = ['string{}'.format(i) for i in range(3)] + wf = pe.Workflow('testmapnodecrash') + wf.add_nodes([node]) + wf.base_dir = tmpdir.strpath + # changing crashdump dir to cwd (to avoid problems with read-only systems) + wf.config["execution"]["crashdump_dir"] = os.getcwd() + with pytest.raises(RuntimeError): + wf.run(plugin='Linear') diff --git a/nipype/pipeline/engine/tests/test_workflows.py b/nipype/pipeline/engine/tests/test_workflows.py new file mode 100644 index 0000000000..0cc7f2142f --- /dev/null +++ b/nipype/pipeline/engine/tests/test_workflows.py @@ -0,0 +1,275 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine workflows module +""" +from glob import glob +import os +from shutil import rmtree +from itertools import product +import pytest +import networkx as nx + +from .... import config +from ....interfaces import utility as niu +from ... 
import engine as pe +from .test_base import EngineTestInterface +from .test_utils import UtilsTestInterface + + +def test_init(): + with pytest.raises(TypeError): + pe.Workflow() + pipe = pe.Workflow(name='pipe') + assert type(pipe._graph) == nx.DiGraph + + +def test_connect(): + pipe = pe.Workflow(name='pipe') + mod2 = pe.Node(EngineTestInterface(), name='mod2') + mod1 = pe.Node(EngineTestInterface(), name='mod1') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + + assert mod1 in pipe._graph.nodes() + assert mod2 in pipe._graph.nodes() + assert pipe._graph.get_edge_data(mod1, mod2) == { + 'connect': [('output1', 'input1')] + } + + +def test_add_nodes(): + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(EngineTestInterface(), name='mod1') + mod2 = pe.Node(EngineTestInterface(), name='mod2') + pipe.add_nodes([mod1, mod2]) + + assert mod1 in pipe._graph.nodes() + assert mod2 in pipe._graph.nodes() + + +def test_disconnect(): + a = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='a') + b = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='b') + flow1 = pe.Workflow(name='test') + flow1.connect(a, 'a', b, 'a') + flow1.disconnect(a, 'a', b, 'a') + assert list(flow1._graph.edges()) == [] + + +def test_workflow_add(): + n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='n1') + n2 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='n2') + n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='n1') + w1 = pe.Workflow(name='test') + w1.connect(n1, 'a', n2, 'c') + for node in [n1, n2, n3]: + with pytest.raises(IOError): + w1.add_nodes([node]) + with pytest.raises(IOError): + w1.connect([(w1, n2, [('n1.a', 'd')])]) + + +def test_doubleconnect(): + a = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='a') + b = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='b') + flow1 = pe.Workflow(name='test') + flow1.connect(a, 'a', b, 'a') + with pytest.raises(Exception) as excinfo: + flow1.connect(a, 'b', b, 'a') + assert "Trying to connect" in str(excinfo.value) + + c = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='c') + flow1 = pe.Workflow(name='test2') + with pytest.raises(Exception) as excinfo: + flow1.connect([(a, c, [('b', 'b')]), (b, c, [('a', 'b')])]) + assert "Trying to connect" in str(excinfo.value) + + +def test_duplicate_node_check(): + + wf = pe.Workflow(name="testidentity") + + original_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + selector1 = pe.Node(niu.Select(), name="selector1") + selector1.inputs.index = original_list[:-1] + selector1.inputs.inlist = original_list + selector2 = pe.Node(niu.Select(), name="selector2") + selector2.inputs.index = original_list[:-2] + selector3 = pe.Node(niu.Select(), name="selector3") + selector3.inputs.index = original_list[:-3] + selector4 = pe.Node(niu.Select(), name="selector3") + selector4.inputs.index = original_list[:-4] + + wf_connections = [ + (selector1, selector2, [("out", "inlist")]), + (selector2, selector3, [("out", "inlist")]), + (selector3, selector4, [("out", "inlist")]), + ] + + with pytest.raises(IOError) as excinfo: + wf.connect(wf_connections) + assert 'Duplicate node name "selector3" found.' 
== str(excinfo.value) + + +def _test_function(arg1): + import os + file1 = os.path.join(os.getcwd(), 'file1.txt') + file2 = os.path.join(os.getcwd(), 'file2.txt') + file3 = os.path.join(os.getcwd(), 'file3.txt') + file4 = os.path.join(os.getcwd(), 'subdir', 'file4.txt') + os.mkdir("subdir") + for filename in [file1, file2, file3, file4]: + with open(filename, 'wt') as fp: + fp.write('%d' % arg1) + return file1, file2, os.path.join(os.getcwd(), "subdir") + + +def _test_function2(in_file, arg): + import os + with open(in_file, 'rt') as fp: + in_arg = fp.read() + + file1 = os.path.join(os.getcwd(), 'file1.txt') + file2 = os.path.join(os.getcwd(), 'file2.txt') + file3 = os.path.join(os.getcwd(), 'file3.txt') + files = [file1, file2, file3] + for filename in files: + with open(filename, 'wt') as fp: + fp.write('%d' % arg + in_arg) + return file1, file2, 1 + + +def _test_function3(arg): + return arg + + +@pytest.mark.parametrize( + 'plugin, remove_unnecessary_outputs, keep_inputs', + list(product(['Linear', 'MultiProc'], [False, True], [True, False]))) +def test_outputs_removal_wf(tmpdir, plugin, remove_unnecessary_outputs, + keep_inputs): + config.set_default_config() + config.set('execution', 'remove_unnecessary_outputs', + remove_unnecessary_outputs) + config.set('execution', 'keep_inputs', keep_inputs) + + n1 = pe.Node( + niu.Function( + output_names=['out_file1', 'out_file2', 'dir'], + function=_test_function), + name='n1', + base_dir=tmpdir.strpath) + n1.inputs.arg1 = 1 + + n2 = pe.Node( + niu.Function( + output_names=['out_file1', 'out_file2', 'n'], + function=_test_function2), + name='n2', + base_dir=tmpdir.strpath) + n2.inputs.arg = 2 + + n3 = pe.Node( + niu.Function( + output_names=['n'], + function=_test_function3), + name='n3', + base_dir=tmpdir.strpath) + + wf = pe.Workflow( + name="node_rem_test" + plugin, base_dir=tmpdir.strpath) + + wf.connect(n1, "out_file1", n2, "in_file") + wf.run(plugin=plugin) + + # Necessary outputs HAVE to exist + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n1.name, 'file1.txt')) + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, 'file2.txt')) + + # Unnecessary outputs exist only iff remove_unnecessary_outputs is True + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n1.name, + 'file2.txt')) is not remove_unnecessary_outputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n1.name, "subdir", + 'file4.txt')) is not remove_unnecessary_outputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n1.name, + 'file3.txt')) is not remove_unnecessary_outputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, + 'file3.txt')) is not remove_unnecessary_outputs + + n4 = pe.Node(UtilsTestInterface(), name='n4', base_dir=tmpdir.strpath) + wf.connect(n2, "out_file1", n4, "in_file") + + def pick_first(l): + return l[0] + + wf.connect(n4, ("output1", pick_first), n3, "arg") + rmtree(os.path.join(wf.base_dir, wf.name)) + wf.run(plugin=plugin) + + # Test necessary outputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, 'file1.txt')) + + # Test unnecessary outputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n2.name, + 'file2.txt')) is not remove_unnecessary_outputs + + # Test keep_inputs + assert os.path.exists( + os.path.join(wf.base_dir, wf.name, n4.name, + 'file1.txt')) is 
keep_inputs + + +def _test_function4(): + raise FileNotFoundError('Generic error') + + +def test_config_setting(tmpdir): + tmpdir.chdir() + wf = pe.Workflow('config') + wf.base_dir = os.getcwd() + + crashdir = os.path.join(os.getcwd(), 'crashdir') + os.mkdir(crashdir) + wf.config = {"execution": {"crashdump_dir": crashdir}} + + n1 = pe.Node(niu.Function(function=_test_function4), + name='errorfunc') + wf.add_nodes([n1]) + try: + wf.run() + except RuntimeError: + pass + + fl = glob(os.path.join(crashdir, 'crash*')) + assert len(fl) == 1 + + # Now test node overwrite + crashdir2 = os.path.join(os.getcwd(), 'crashdir2') + os.mkdir(crashdir2) + crashdir3 = os.path.join(os.getcwd(), 'crashdir3') + os.mkdir(crashdir3) + wf.config = {"execution": {"crashdump_dir": crashdir3}} + n1.config = {"execution": {"crashdump_dir": crashdir2}} + + try: + wf.run() + except RuntimeError: + pass + + fl = glob(os.path.join(crashdir2, 'crash*')) + assert len(fl) == 1 + fl = glob(os.path.join(crashdir3, 'crash*')) + assert len(fl) == 0 diff --git a/nipype/pipeline/engine/utils.py b/nipype/pipeline/engine/utils.py new file mode 100644 index 0000000000..4ec36afe68 --- /dev/null +++ b/nipype/pipeline/engine/utils.py @@ -0,0 +1,1674 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Utility routines for workflow graphs""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, open, next, zip, range + +import os +import sys +import pickle +from collections import defaultdict +import re +from copy import deepcopy +from glob import glob + +from traceback import format_exception +from hashlib import sha1 +import gzip + +from functools import reduce + +import numpy as np +import networkx as nx +from future import standard_library + +from ... import logging, config, LooseVersion +from ...utils.filemanip import ( + relpath, + makedirs, + fname_presuffix, + to_str, + ensure_list, + get_related_files, + FileNotFoundError, + save_json, + savepkl, + write_rst_header, + write_rst_dict, + write_rst_list, +) +from ...utils.misc import str2bool +from ...utils.functions import create_function_from_source +from ...interfaces.base import (Bunch, CommandLine, isdefined, Undefined, + InterfaceResult, traits) +from ...interfaces.utility import IdentityInterface +from ...utils.provenance import ProvStore, pm, nipype_ns, get_id + +try: + from inspect import signature +except ImportError: + from funcsigs import signature + +standard_library.install_aliases() +logger = logging.getLogger('nipype.workflow') +PY3 = sys.version_info[0] > 2 + +try: + dfs_preorder = nx.dfs_preorder +except AttributeError: + dfs_preorder = nx.dfs_preorder_nodes + logger.debug('networkx 1.4 dev or higher detected') + + +def _parameterization_dir(param): + """ + Returns the directory name for the given parameterization string as follows: + - If the parameterization is longer than 32 characters, then + return the SHA-1 hex digest. + - Otherwise, return the parameterization unchanged. 
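+ + For example, a short parameterization such as '_input1_1' is returned + unchanged, while anything longer than 32 characters collapses to its + 40-character SHA-1 hex digest.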
+ """ + if len(param) > 32: + return sha1(param.encode()).hexdigest() + return param + + +def save_hashfile(hashfile, hashed_inputs): + """Store a hashfile""" + try: + save_json(hashfile, hashed_inputs) + except (IOError, TypeError): + err_type = sys.exc_info()[0] + if err_type is TypeError: + # XXX - SG current workaround is to just + # create the hashed file and not put anything + # in it + with open(hashfile, 'wt') as fd: + fd.writelines(str(hashed_inputs)) + + logger.debug('Unable to write a particular type to the json file') + else: + logger.critical('Unable to open the file in write mode: %s', + hashfile) + + +def nodelist_runner(nodes, updatehash=False, stop_first=False): + """ + A generator that iterates and over a list of ``nodes`` and + executes them. + + """ + for i, node in nodes: + err = None + result = None + try: + result = node.run(updatehash=updatehash) + except Exception: + if stop_first: + raise + + result = node.result + err = [] + if result.runtime and hasattr(result.runtime, 'traceback'): + err = [result.runtime.traceback] + + err += format_exception(*sys.exc_info()) + err = '\n'.join(err) + finally: + yield i, result, err + + +def write_report(node, report_type=None, is_mapnode=False): + """Write a report file for a node""" + if not str2bool(node.config['execution']['create_report']): + return + + if report_type not in ['preexec', 'postexec']: + logger.warning('[Node] Unknown report type "%s".', report_type) + return + + cwd = node.output_dir() + report_dir = os.path.join(cwd, '_report') + report_file = os.path.join(report_dir, 'report.rst') + makedirs(report_dir, exist_ok=True) + + logger.debug('[Node] Writing %s-exec report to "%s"', report_type[:-4], + report_file) + if report_type.startswith('pre'): + lines = [ + write_rst_header('Node: %s' % get_print_name(node), level=0), + write_rst_list( + ['Hierarchy : %s' % node.fullname, + 'Exec ID : %s' % node._id]), + write_rst_header('Original Inputs', level=1), + write_rst_dict(node.inputs.trait_get()), + ] + with open(report_file, 'wt') as fp: + fp.write('\n'.join(lines)) + return + + lines = [ + write_rst_header('Execution Inputs', level=1), + write_rst_dict(node.inputs.trait_get()), + ] + + result = node.result # Locally cache result + outputs = result.outputs + + if outputs is None: + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + lines.append(write_rst_header('Execution Outputs', level=1)) + + if isinstance(outputs, Bunch): + lines.append(write_rst_dict(outputs.dictcopy())) + elif outputs: + lines.append(write_rst_dict(outputs.trait_get())) + + if is_mapnode: + lines.append(write_rst_header('Subnode reports', level=1)) + nitems = len(ensure_list(getattr(node.inputs, node.iterfield[0]))) + subnode_report_files = [] + for i in range(nitems): + nodecwd = os.path.join(cwd, 'mapflow', '_%s%d' % (node.name, i), + '_report', 'report.rst') + subnode_report_files.append('subnode %d : %s' % (i, nodecwd)) + + lines.append(write_rst_list(subnode_report_files)) + + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + lines.append(write_rst_header('Runtime info', level=1)) + # Init rst dictionary of runtime stats + rst_dict = { + 'hostname': result.runtime.hostname, + 'duration': result.runtime.duration, + 'working_dir': result.runtime.cwd, + 'prev_wd': getattr(result.runtime, 'prevcwd', ''), + } + + if hasattr(result.runtime, 'cmdline'): + rst_dict['command'] = result.runtime.cmdline + + # Try and insert memory/threads usage if available + if hasattr(result.runtime, 
'mem_peak_gb'): + rst_dict['mem_peak_gb'] = result.runtime.mem_peak_gb + + if hasattr(result.runtime, 'cpu_percent'): + rst_dict['cpu_percent'] = result.runtime.cpu_percent + + lines.append(write_rst_dict(rst_dict)) + + # Collect terminal output + if hasattr(result.runtime, 'merged'): + lines += [ + write_rst_header('Terminal output', level=2), + write_rst_list(result.runtime.merged), + ] + if hasattr(result.runtime, 'stdout'): + lines += [ + write_rst_header('Terminal - standard output', level=2), + write_rst_list(result.runtime.stdout), + ] + if hasattr(result.runtime, 'stderr'): + lines += [ + write_rst_header('Terminal - standard error', level=2), + write_rst_list(result.runtime.stderr), + ] + + # Store environment + if hasattr(result.runtime, 'environ'): + lines += [ + write_rst_header('Environment', level=2), + write_rst_dict(result.runtime.environ), + ] + + with open(report_file, 'at') as fp: + fp.write('\n'.join(lines)) + return + + +def save_resultfile(result, cwd, name): + """Save a result pklz file to ``cwd``""" + resultsfile = os.path.join(cwd, 'result_%s.pklz' % name) + if result.outputs: + try: + outputs = result.outputs.trait_get() + except AttributeError: + outputs = result.outputs.dictcopy() # outputs was a bunch + result.outputs.set(**modify_paths(outputs, relative=True, basedir=cwd)) + + savepkl(resultsfile, result) + logger.debug('saved results in %s', resultsfile) + + if result.outputs: + result.outputs.set(**outputs) + + +def load_resultfile(path, name): + """ + Load InterfaceResult file from path + + Parameter + --------- + + path : base_dir of node + name : name of node + + Returns + ------- + + result : InterfaceResult structure + aggregate : boolean indicating whether node should aggregate_outputs + attribute error : boolean indicating whether there was some mismatch in + versions of traits used to store result and hence node needs to + rerun + """ + aggregate = True + resultsoutputfile = os.path.join(path, 'result_%s.pklz' % name) + result = None + attribute_error = False + if os.path.exists(resultsoutputfile): + pkl_file = gzip.open(resultsoutputfile, 'rb') + try: + result = pickle.load(pkl_file) + except UnicodeDecodeError: + # Was this pickle created with Python 2.x? + pickle.load(pkl_file, fix_imports=True, encoding='utf-8') + logger.warning('Successfully loaded pkl in compatibility mode') + except (traits.TraitError, AttributeError, ImportError, + EOFError) as err: + if isinstance(err, (AttributeError, ImportError)): + attribute_error = True + logger.debug('attribute error: %s probably using ' + 'different trait pickled file', str(err)) + else: + logger.debug( + 'some file does not exist. 
hence trait cannot be set') + else: + if result.outputs: + try: + outputs = result.outputs.trait_get() + except AttributeError: + outputs = result.outputs.dictcopy() # outputs == Bunch + try: + result.outputs.set( + **modify_paths(outputs, relative=False, basedir=path)) + except FileNotFoundError: + logger.debug('conversion to full path results in ' + 'non existent file') + aggregate = False + pkl_file.close() + logger.debug('Aggregate: %s', aggregate) + return result, aggregate, attribute_error + + +def strip_temp(files, wd): + """Remove temp from a list of file paths""" + out = [] + for f in files: + if isinstance(f, list): + out.append(strip_temp(f, wd)) + else: + out.append(f.replace(os.path.join(wd, '_tempinput'), wd)) + return out + + +def _write_inputs(node): + lines = [] + nodename = node.fullname.replace('.', '_') + for key, _ in list(node.inputs.items()): + val = getattr(node.inputs, key) + if isdefined(val): + if isinstance(val, (str, bytes)): + try: + func = create_function_from_source(val) + except RuntimeError: + lines.append("%s.inputs.%s = '%s'" % (nodename, key, val)) + else: + funcname = [ + name for name in func.__globals__ + if name != '__builtins__' + ][0] + lines.append(pickle.loads(val)) + if funcname == nodename: + lines[-1] = lines[-1].replace(' %s(' % funcname, + ' %s_1(' % funcname) + funcname = '%s_1' % funcname + lines.append( + 'from nipype.utils.functions import getsource') + lines.append("%s.inputs.%s = getsource(%s)" % + (nodename, key, funcname)) + else: + lines.append('%s.inputs.%s = %s' % (nodename, key, val)) + return lines + + +def format_node(node, format='python', include_config=False): + """Format a node in a given output syntax.""" + from .nodes import MapNode + lines = [] + name = node.fullname.replace('.', '_') + if format == 'python': + klass = node.interface + importline = 'from %s import %s' % (klass.__module__, + klass.__class__.__name__) + comment = '# Node: %s' % node.fullname + spec = signature(node.interface.__init__) + args = [p.name for p in list(spec.parameters.values())] + args = args[1:] + if args: + filled_args = [] + for arg in args: + if hasattr(node.interface, '_%s' % arg): + filled_args.append('%s=%s' % + (arg, + getattr(node.interface, '_%s' % arg))) + args = ', '.join(filled_args) + else: + args = '' + klass_name = klass.__class__.__name__ + if isinstance(node, MapNode): + nodedef = '%s = MapNode(%s(%s), iterfield=%s, name="%s")' \ + % (name, klass_name, args, node.iterfield, name) + else: + nodedef = '%s = Node(%s(%s), name="%s")' \ + % (name, klass_name, args, name) + lines = [importline, comment, nodedef] + + if include_config: + lines = [ + importline, "from future import standard_library", + "standard_library.install_aliases()", + "from collections import OrderedDict", comment, nodedef + ] + lines.append('%s.config = %s' % (name, node.config)) + + if node.iterables is not None: + lines.append('%s.iterables = %s' % (name, node.iterables)) + lines.extend(_write_inputs(node)) + + return lines + + +def modify_paths(object, relative=True, basedir=None): + """Convert paths in data structure to either full paths or relative paths + + Supports combinations of lists, dicts, tuples, strs + + Parameters + ---------- + + relative : boolean indicating whether paths should be set relative to the + current directory + basedir : default os.getcwd() + what base directory to use as default + """ + if not basedir: + basedir = os.getcwd() + if isinstance(object, dict): + out = {} + for key, val in sorted(object.items()): + if isdefined(val): 
+                out[key] = modify_paths(
+                    val, relative=relative, basedir=basedir)
+    elif isinstance(object, (list, tuple)):
+        out = []
+        for val in object:
+            if isdefined(val):
+                out.append(
+                    modify_paths(val, relative=relative, basedir=basedir))
+        if isinstance(object, tuple):
+            out = tuple(out)
+    else:
+        if isdefined(object):
+            if isinstance(object, (str, bytes)) and os.path.isfile(object):
+                if relative:
+                    if config.getboolean('execution', 'use_relative_paths'):
+                        out = relpath(object, start=basedir)
+                    else:
+                        out = object
+                else:
+                    out = os.path.abspath(os.path.join(basedir, object))
+                    if not os.path.exists(out):
+                        raise IOError('File %s not found' % out)
+            else:
+                out = object
+        else:
+            raise TypeError("Object {} is undefined".format(object))
+    return out
+
+
+def get_print_name(node, simple_form=True):
+    """Get the name of the node
+
+    For example, a node containing an instance of interfaces.fsl.BET
+    would be called nodename.BET.fsl
+
+    """
+    name = node.fullname
+    if hasattr(node, '_interface'):
+        pkglist = node.interface.__class__.__module__.split('.')
+        interface = node.interface.__class__.__name__
+        destclass = ''
+        if len(pkglist) > 2:
+            destclass = '.%s' % pkglist[2]
+        if simple_form:
+            name = node.fullname + destclass
+        else:
+            name = '.'.join([node.fullname, interface]) + destclass
+    if simple_form:
+        parts = name.split('.')
+        if len(parts) > 2:
+            return ' ('.join(parts[1:]) + ')'
+        elif len(parts) == 2:
+            return parts[1]
+    return name
+
+
+def _create_dot_graph(graph, show_connectinfo=False, simple_form=True):
+    """Create a graph that can be pickled.
+
+    Ensures that edge info is pickleable.
+    """
+    logger.debug('creating dot graph')
+    pklgraph = nx.DiGraph()
+    for edge in graph.edges():
+        data = graph.get_edge_data(*edge)
+        srcname = get_print_name(edge[0], simple_form=simple_form)
+        destname = get_print_name(edge[1], simple_form=simple_form)
+        if show_connectinfo:
+            pklgraph.add_edge(srcname, destname, l=str(data['connect']))
+        else:
+            pklgraph.add_edge(srcname, destname)
+    return pklgraph
+
+
+def _write_detailed_dot(graph, dotfilename):
+    r"""
+    Create a dot file with connection info ::
+
+        digraph structs {
+        node [shape=record];
+        struct1 [label="<f0> left|<f1> middle|<f2> right"];
+        struct2 [label="<f0> one|<f1> two"];
+        struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
+        struct1:f1 -> struct2:f0;
+        struct1:f0 -> struct2:f1;
+        struct1:f2 -> struct3:here;
+        }
+    """
+    text = ['digraph structs {', 'node [shape=record];']
+    # write nodes
+    edges = []
+    for n in nx.topological_sort(graph):
+        nodename = str(n)
+        inports = []
+        for u, v, d in graph.in_edges(nbunch=n, data=True):
+            for cd in d['connect']:
+                if isinstance(cd[0], (str, bytes)):
+                    outport = cd[0]
+                else:
+                    outport = cd[0][0]
+                inport = cd[1]
+                ipstrip = 'in%s' % _replacefunk(inport)
+                opstrip = 'out%s' % _replacefunk(outport)
+                edges.append(
+                    '%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''), opstrip,
+                                             str(v).replace('.', ''), ipstrip))
+                if inport not in inports:
+                    inports.append(inport)
+        inputstr = ['{IN'] + [
+            '|<in%s> %s' % (_replacefunk(ip), ip) for ip in sorted(inports)
+        ] + ['}']
+        outports = []
+        for u, v, d in graph.out_edges(nbunch=n, data=True):
+            for cd in d['connect']:
+                if isinstance(cd[0], (str, bytes)):
+                    outport = cd[0]
+                else:
+                    outport = cd[0][0]
+                if outport not in outports:
+                    outports.append(outport)
+        outputstr = ['{OUT'] + [
+            '|<out%s> %s' % (_replacefunk(oport), oport)
+            for oport in sorted(outports)
+        ] + ['}']
+        srcpackage = ''
+        if hasattr(n, '_interface'):
+            pkglist = n.interface.__class__.__module__.split('.')
+
if len(pkglist) > 2: + srcpackage = pkglist[2] + srchierarchy = '.'.join(nodename.split('.')[1:-1]) + nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1], + srcpackage, srchierarchy) + text += [ + '%s [label="%s|%s|%s"];' % + (nodename.replace('.', ''), ''.join(inputstr), nodenamestr, + ''.join(outputstr)) + ] + # write edges + for edge in sorted(edges): + text.append(edge) + text.append('}') + with open(dotfilename, 'wt') as filep: + filep.write('\n'.join(text)) + return text + + +def _replacefunk(x): + return x.replace('_', '').replace('.', '').replace('@', '').replace( + '-', '') + + +# Graph manipulations for iterable expansion +def _get_valid_pathstr(pathstr): + """Remove disallowed characters from path + + Removes: [][ (){}?:<>#!|"';] + Replaces: ',' -> '.' + """ + if not isinstance(pathstr, (str, bytes)): + pathstr = to_str(pathstr) + pathstr = pathstr.replace(os.sep, '..') + pathstr = re.sub(r'''[][ (){}?:<>#!|"';]''', '', pathstr) + pathstr = pathstr.replace(',', '.') + return pathstr + + +def expand_iterables(iterables, synchronize=False): + if synchronize: + return synchronize_iterables(iterables) + return list(walk(list(iterables.items()))) + + +def count_iterables(iterables, synchronize=False): + """Return the number of iterable expansion nodes. + + If synchronize is True, then the count is the maximum number + of iterables value lists. + Otherwise, the count is the product of the iterables value + list sizes. + """ + op = max if synchronize else lambda x, y: x * y + return reduce(op, [len(func()) for _, func in list(iterables.items())]) + + +def walk(children, level=0, path=None, usename=True): + """Generate all the full paths in a tree, as a dict. + + Examples + -------- + >>> from nipype.pipeline.engine.utils import walk + >>> iterables = [('a', lambda: [1, 2]), ('b', lambda: [3, 4])] + >>> [val['a'] for val in walk(iterables)] + [1, 1, 2, 2] + >>> [val['b'] for val in walk(iterables)] + [3, 4, 3, 4] + """ + # Entry point + if level == 0: + path = {} + # Exit condition + if not children: + yield path.copy() + return + # Tree recursion + head, tail = children[0], children[1:] + name, func = head + for child in func(): + # We can use the arg name or the tree level as a key + if usename: + path[name] = child + else: + path[level] = child + # Recurse into the next level + for child_paths in walk(tail, level + 1, path, usename): + yield child_paths + + +def synchronize_iterables(iterables): + """Synchronize the given iterables in item-wise order. 
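As an aside, the two counting rules implemented by ``count_iterables`` above can be checked with plain Python (list lengths chosen arbitrarily):

    from functools import reduce
    lengths = [2, 3]  # e.g. a=lambda: [1, 2], b=lambda: [3, 4, 5]
    reduce(lambda x, y: x * y, lengths)  # -> 6 expansions when independent
    reduce(max, lengths)                 # -> 3 expansions when synchronized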
+ + Return: the {field: value} dictionary list + + Examples + -------- + >>> from nipype.pipeline.engine.utils import synchronize_iterables + >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3, 4]) + >>> synced = synchronize_iterables(iterables) + >>> synced == [{'a': 1, 'b': 3}, {'a': 2, 'b': 4}] + True + >>> iterables = dict(a=lambda: [1, 2], b=lambda: [3], c=lambda: [4, 5, 6]) + >>> synced = synchronize_iterables(iterables) + >>> synced == [{'a': 1, 'b': 3, 'c': 4}, {'a': 2, 'c': 5}, {'c': 6}] + True + """ + out_list = [] + iterable_items = [(field, iter(fvals())) + for field, fvals in sorted(iterables.items())] + while True: + cur_dict = {} + for field, iter_values in iterable_items: + try: + cur_dict[field] = next(iter_values) + except StopIteration: + pass + if cur_dict: + out_list.append(cur_dict) + else: + break + + return out_list + + +def evaluate_connect_function(function_source, args, first_arg): + func = create_function_from_source(function_source) + try: + output_value = func(first_arg, *list(args)) + except NameError as e: + if e.args[0].startswith("global name") and \ + e.args[0].endswith("is not defined"): + e.args = (e.args[0], + ("Due to engine constraints all imports have to be done " + "inside each function definition")) + raise e + return output_value + + +def get_levels(G): + levels = {} + for n in nx.topological_sort(G): + levels[n] = 0 + for pred in G.predecessors(n): + levels[n] = max(levels[n], levels[pred] + 1) + return levels + + +def _merge_graphs(supergraph, + nodes, + subgraph, + nodeid, + iterables, + prefix, + synchronize=False): + """Merges two graphs that share a subset of nodes. + + If the subgraph needs to be replicated for multiple iterables, the + merge happens with every copy of the subgraph. Assumes that edges + between nodes of supergraph and subgraph contain data. + + Parameters + ---------- + supergraph : networkx graph + Parent graph from which subgraph was selected + nodes : networkx nodes + Nodes of the parent graph from which the subgraph was initially + constructed. + subgraph : networkx graph + A subgraph that contains as a subset nodes from the supergraph. + These nodes connect the subgraph to the supergraph + nodeid : string + Identifier of a node for which parameterization has been sought + iterables : dict of functions + see `pipeline.NodeWrapper` for iterable requirements + + Returns + ------- + Returns a merged graph containing copies of the subgraph with + appropriate edge connections to the supergraph. + + """ + # Retrieve edge information connecting nodes of the subgraph to other + # nodes of the supergraph. + supernodes = supergraph.nodes() + ids = [n._hierarchy + n._id for n in supernodes] + if len(np.unique(ids)) != len(ids): + # This should trap the problem of miswiring when multiple iterables are + # used at the same level. The use of the template below for naming + # updates to nodes is the general solution. + raise Exception(("Execution graph does not have a unique set of node " + "names. 
Please rerun the workflow")) + edgeinfo = {} + for n in list(subgraph.nodes()): + nidx = ids.index(n._hierarchy + n._id) + for edge in supergraph.in_edges(list(supernodes)[nidx]): + # make sure edge is not part of subgraph + if edge[0] not in subgraph.nodes(): + if n._hierarchy + n._id not in list(edgeinfo.keys()): + edgeinfo[n._hierarchy + n._id] = [] + edgeinfo[n._hierarchy + n._id].append( + (edge[0], supergraph.get_edge_data(*edge))) + supergraph.remove_nodes_from(nodes) + # Add copies of the subgraph depending on the number of iterables + iterable_params = expand_iterables(iterables, synchronize) + # If there are no iterable subgraphs, then return + if not iterable_params: + return supergraph + # Make an iterable subgraph node id template + count = len(iterable_params) + template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count))) + # Copy the iterable subgraphs + for i, params in enumerate(iterable_params): + Gc = deepcopy(subgraph) + ids = [n._hierarchy + n._id for n in Gc.nodes()] + nodeidx = ids.index(nodeid) + rootnode = list(Gc.nodes())[nodeidx] + paramstr = '' + for key, val in sorted(params.items()): + paramstr = '{}_{}_{}'.format(paramstr, _get_valid_pathstr(key), + _get_valid_pathstr(val)) + rootnode.set_input(key, val) + + logger.debug('Parameterization: paramstr=%s', paramstr) + levels = get_levels(Gc) + for n in Gc.nodes(): + # update parameterization of the node to reflect the location of + # the output directory. For example, if the iterables along a + # path of the directed graph consisted of the variables 'a' and + # 'b', then every node in the path including and after the node + # with iterable 'b' will be placed in a directory + # _a_aval/_b_bval/. + + path_length = levels[n] + # enter as negative numbers so that earlier iterables with longer + # path lengths get precedence in a sort + paramlist = [(-path_length, paramstr)] + if n.parameterization: + n.parameterization = paramlist + n.parameterization + else: + n.parameterization = paramlist + supergraph.add_nodes_from(Gc.nodes()) + supergraph.add_edges_from(Gc.edges(data=True)) + for node in Gc.nodes(): + if node._hierarchy + node._id in list(edgeinfo.keys()): + for info in edgeinfo[node._hierarchy + node._id]: + supergraph.add_edges_from([(info[0], node, info[1])]) + node._id += template % i + return supergraph + + +def _connect_nodes(graph, srcnode, destnode, connection_info): + """Add a connection between two nodes + """ + data = graph.get_edge_data(srcnode, destnode, default=None) + if not data: + data = {'connect': connection_info} + graph.add_edges_from([(srcnode, destnode, data)]) + else: + data['connect'].extend(connection_info) + + +def _remove_nonjoin_identity_nodes(graph, keep_iterables=False): + """Remove non-join identity nodes from the given graph + + Iterable nodes are retained if and only if the keep_iterables + flag is set to True. + """ + # if keep_iterables is False, then include the iterable + # and join nodes in the nodes to delete + for node in _identity_nodes(graph, not keep_iterables): + if not hasattr(node, 'joinsource'): + _remove_identity_node(graph, node) + return graph + + +def _identity_nodes(graph, include_iterables): + """Return the IdentityInterface nodes in the graph + + The nodes are in topological sort order. The iterable nodes + are included if and only if the include_iterables flag is set + to True. 
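A pure-Python sketch of the node-id suffix template that ``_merge_graphs`` above builds for the replicated subgraphs (values illustrative):

    import numpy as np
    prefix, count = 'a', 4
    template = '.%s%%0%dd' % (prefix, np.ceil(np.log10(count)))
    template      # -> '.a%01d'
    template % 2  # -> '.a2', appended to every node id in the copy with index 2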
+ """ + return [ + node for node in nx.topological_sort(graph) + if isinstance(node.interface, IdentityInterface) and ( + include_iterables or getattr(node, 'iterables') is None) + ] + + +def _remove_identity_node(graph, node): + """Remove identity nodes from an execution graph + """ + portinputs, portoutputs = _node_ports(graph, node) + for field, connections in list(portoutputs.items()): + if portinputs: + _propagate_internal_output(graph, node, field, connections, + portinputs) + else: + _propagate_root_output(graph, node, field, connections) + graph.remove_nodes_from([node]) + logger.debug("Removed the identity node %s from the graph.", node) + + +def _node_ports(graph, node): + """Return the given node's input and output ports + + The return value is the (inputs, outputs) dictionaries. + The inputs is a {destination field: (source node, source field)} + dictionary. + The outputs is a {source field: destination items} dictionary, + where each destination item is a + (destination node, destination field, source field) tuple. + """ + portinputs = {} + portoutputs = {} + for u, _, d in graph.in_edges(node, data=True): + for src, dest in d['connect']: + portinputs[dest] = (u, src) + for _, v, d in graph.out_edges(node, data=True): + for src, dest in d['connect']: + if isinstance(src, tuple): + srcport = src[0] + else: + srcport = src + if srcport not in portoutputs: + portoutputs[srcport] = [] + portoutputs[srcport].append((v, dest, src)) + return (portinputs, portoutputs) + + +def _propagate_root_output(graph, node, field, connections): + """Propagates the given graph root node output port + field connections to the out-edge destination nodes.""" + for destnode, inport, src in connections: + value = getattr(node.inputs, field) + if isinstance(src, tuple): + value = evaluate_connect_function(src[1], src[2], value) + destnode.set_input(inport, value) + + +def _propagate_internal_output(graph, node, field, connections, portinputs): + """Propagates the given graph internal node output port + field connections to the out-edge source node and in-edge + destination nodes.""" + for destnode, inport, src in connections: + if field in portinputs: + srcnode, srcport = portinputs[field] + if isinstance(srcport, tuple) and isinstance(src, tuple): + src_func = srcport[1].split("\\n")[0] + dst_func = src[1].split("\\n")[0] + raise ValueError("Does not support two inline functions " + "in series ('{}' and '{}'), found when " + "connecting {} to {}. Please use a Function " + "node.".format(src_func, dst_func, srcnode, + destnode)) + + connect = graph.get_edge_data( + srcnode, destnode, default={ + 'connect': [] + }) + if isinstance(src, tuple): + connect['connect'].append(((srcport, src[1], src[2]), inport)) + else: + connect = {'connect': [(srcport, inport)]} + old_connect = graph.get_edge_data( + srcnode, destnode, default={ + 'connect': [] + }) + old_connect['connect'] += connect['connect'] + graph.add_edges_from([(srcnode, destnode, old_connect)]) + else: + value = getattr(node.inputs, field) + if isinstance(src, tuple): + value = evaluate_connect_function(src[1], src[2], value) + destnode.set_input(inport, value) + + +def generate_expanded_graph(graph_in): + """Generates an expanded graph based on node parameterization + + Parameterization is controlled using the `iterables` field of the + pipeline elements. Thus if there are two nodes with iterables a=[1,2] + and b=[3,4] this procedure will generate a graph with sub-graphs + parameterized as (a=1,b=3), (a=1,b=4), (a=2,b=3) and (a=2,b=4). 
+ """ + logger.debug("PE: expanding iterables") + graph_in = _remove_nonjoin_identity_nodes(graph_in, keep_iterables=True) + # standardize the iterables as {(field, function)} dictionaries + for node in graph_in.nodes(): + if node.iterables: + _standardize_iterables(node) + allprefixes = list('abcdefghijklmnopqrstuvwxyz') + + # the iterable nodes + inodes = _iterable_nodes(graph_in) + logger.debug("Detected iterable nodes %s", inodes) + # while there is an iterable node, expand the iterable node's + # subgraphs + while inodes: + inode = inodes[0] + logger.debug("Expanding the iterable node %s...", inode) + + # the join successor nodes of the current iterable node + jnodes = [ + node for node in graph_in.nodes() + if hasattr(node, 'joinsource') and inode.name == node.joinsource + and nx.has_path(graph_in, inode, node) + ] + + # excise the join in-edges. save the excised edges in a + # {jnode: {source name: (destination name, edge data)}} + # dictionary + jedge_dict = {} + for jnode in jnodes: + in_edges = jedge_dict[jnode] = {} + edges2remove = [] + for src, dest, data in graph_in.in_edges(jnode, True): + in_edges[src.itername] = data + edges2remove.append((src, dest)) + + for src, dest in edges2remove: + graph_in.remove_edge(src, dest) + logger.debug("Excised the %s -> %s join node in-edge.", src, + dest) + + if inode.itersource: + # the itersource is a (node name, fields) tuple + src_name, src_fields = inode.itersource + # convert a single field to a list + if isinstance(src_fields, (str, bytes)): + src_fields = [src_fields] + # find the unique iterable source node in the graph + try: + iter_src = next((node for node in graph_in.nodes() + if node.name == src_name + and nx.has_path(graph_in, node, inode))) + except StopIteration: + raise ValueError("The node %s itersource %s was not found" + " among the iterable predecessor nodes" % + (inode, src_name)) + logger.debug("The node %s has iterable source node %s", inode, + iter_src) + # look up the iterables for this particular itersource descendant + # using the iterable source ancestor values as a key + iterables = {} + # the source node iterables values + src_values = [ + getattr(iter_src.inputs, field) for field in src_fields + ] + # if there is one source field, then the key is the the source value, + # otherwise the key is the tuple of source values + if len(src_values) == 1: + key = src_values[0] + else: + key = tuple(src_values) + # The itersource iterables is a {field: lookup} dictionary, where the + # lookup is a {source key: iteration list} dictionary. Look up the + # current iterable value using the predecessor itersource input values. 
+ iter_dict = dict([(field, lookup[key]) + for field, lookup in inode.iterables + if key in lookup]) + + # convert the iterables to the standard {field: function} format + + def make_field_func(*pair): + return pair[0], lambda: pair[1] + + iterables = dict( + [make_field_func(*pair) for pair in list(iter_dict.items())]) + else: + iterables = inode.iterables.copy() + inode.iterables = None + logger.debug('node: %s iterables: %s', inode, iterables) + + # collect the subnodes to expand + subnodes = [s for s in dfs_preorder(graph_in, inode)] + prior_prefix = [re.findall(r'\.(.)I', s._id) for s in subnodes if s._id] + prior_prefix = sorted([l for item in prior_prefix for l in item]) + if not prior_prefix: + iterable_prefix = 'a' + else: + if prior_prefix[-1] == 'z': + raise ValueError('Too many iterables in the workflow') + iterable_prefix =\ + allprefixes[allprefixes.index(prior_prefix[-1]) + 1] + logger.debug(('subnodes:', subnodes)) + + # append a suffix to the iterable node id + inode._id += '.%sI' % iterable_prefix + + # merge the iterated subgraphs + # dj: the behaviour of .copy changes in version 2 + if LooseVersion(nx.__version__) < LooseVersion('2'): + subgraph = graph_in.subgraph(subnodes) + else: + subgraph = graph_in.subgraph(subnodes).copy() + graph_in = _merge_graphs(graph_in, subnodes, subgraph, + inode._hierarchy + inode._id, iterables, + iterable_prefix, inode.synchronize) + + # reconnect the join nodes + for jnode in jnodes: + # the {node id: edge data} dictionary for edges connecting + # to the join node in the unexpanded graph + old_edge_dict = jedge_dict[jnode] + # the edge source node replicates + expansions = defaultdict(list) + for node in graph_in.nodes(): + for src_id in list(old_edge_dict.keys()): + # Drop the original JoinNodes; only concerned with + # generated Nodes + if hasattr(node, 'joinfield') and node.itername == src_id: + continue + # Patterns: + # - src_id : Non-iterable node + # - src_id.[a-z]\d+ : + # IdentityInterface w/ iterables or nested JoinNode + # - src_id.[a-z]I.[a-z]\d+ : + # Non-IdentityInterface w/ iterables + # - src_idJ\d+ : JoinNode(IdentityInterface) + if re.match(src_id + r'((\.[a-z](I\.[a-z])?|J)\d+)?$', + node.itername): + expansions[src_id].append(node) + for in_id, in_nodes in list(expansions.items()): + logger.debug("The join node %s input %s was expanded" + " to %d nodes.", jnode, in_id, len(in_nodes)) + # preserve the node iteration order by sorting on the node id + for in_nodes in list(expansions.values()): + in_nodes.sort(key=lambda node: node._id) + + # the number of join source replicates. + iter_cnt = count_iterables(iterables, inode.synchronize) + # make new join node fields to connect to each replicated + # join in-edge source node. + slot_dicts = [ + jnode._add_join_item_fields() for _ in range(iter_cnt) + ] + # for each join in-edge, connect every expanded source node + # which matches on the in-edge source name to the destination + # join node. Qualify each edge connect join field name by + # appending the next join slot index, e.g. the connect + # from two expanded nodes from field 'out_file' to join + # field 'in' are qualified as ('out_file', 'in1') and + # ('out_file', 'in2'), resp. This preserves connection port + # integrity. 
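For example (hypothetical field names), two replicates of a join in-edge source are re-pointed at the generated slot fields like this:

    connects = [('out_file', 'in')]         # the original join connection
    slots = [{'in': 'in1'}, {'in': 'in2'}]  # one slot map per replicate
    [[(src, s.get(dst, dst)) for src, dst in connects] for s in slots]
    # -> [[('out_file', 'in1')], [('out_file', 'in2')]]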
+ for old_id, in_nodes in list(expansions.items()): + # reconnect each replication of the current join in-edge + # source + for in_idx, in_node in enumerate(in_nodes): + olddata = old_edge_dict[old_id] + newdata = deepcopy(olddata) + # the (source, destination) field tuples + connects = newdata['connect'] + # the join fields connected to the source + join_fields = [ + field for _, field in connects + if field in jnode.joinfield + ] + # the {field: slot fields} maps assigned to the input + # node, e.g. {'image': 'imageJ3', 'mask': 'maskJ3'} + # for the third join source expansion replicate of a + # join node with join fields image and mask + slots = slot_dicts[in_idx] + for con_idx, connect in enumerate(connects): + src_field, dest_field = connect + # qualify a join destination field name + if dest_field in slots: + slot_field = slots[dest_field] + connects[con_idx] = (src_field, slot_field) + logger.debug( + "Qualified the %s -> %s join field %s as %s.", + in_node, jnode, dest_field, slot_field) + graph_in.add_edge(in_node, jnode, **newdata) + logger.debug("Connected the join node %s subgraph to the" + " expanded join point %s", jnode, in_node) + + # nx.write_dot(graph_in, '%s_post.dot' % node) + # the remaining iterable nodes + inodes = _iterable_nodes(graph_in) + + for node in graph_in.nodes(): + if node.parameterization: + node.parameterization = [ + param for _, param in sorted(node.parameterization) + ] + logger.debug("PE: expanding iterables ... done") + + return _remove_nonjoin_identity_nodes(graph_in) + + +def _iterable_nodes(graph_in): + """Returns the iterable nodes in the given graph and their join + dependencies. + + The nodes are ordered as follows: + + - nodes without an itersource precede nodes with an itersource + - nodes without an itersource are sorted in reverse topological order + - nodes with an itersource are sorted in topological order + + This order implies the following: + + - every iterable node without an itersource is expanded before any + node with an itersource + + - every iterable node without an itersource is expanded before any + of it's predecessor iterable nodes without an itersource + + - every node with an itersource is expanded before any of it's + successor nodes with an itersource + + Return the iterable nodes list + """ + nodes = nx.topological_sort(graph_in) + inodes = [node for node in nodes if node.iterables is not None] + inodes_no_src = [node for node in inodes if not node.itersource] + inodes_src = [node for node in inodes if node.itersource] + inodes_no_src.reverse() + return inodes_no_src + inodes_src + + +def _standardize_iterables(node): + """Converts the given iterables to a {field: function} dictionary, + if necessary, where the function returns a list.""" + if not node.iterables: + return + iterables = node.iterables + # The candidate iterable fields + fields = set(node.inputs.copyable_trait_names()) + # A synchronize iterables node without an itersource can be in + # [fields, value tuples] format rather than + # [(field, value list), (field, value list), ...] 
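A small sketch of that conversion, mirroring the plain-list branch of ``_transpose_iterables`` defined below (values arbitrary):

    fields = ['a', 'b']
    values = [(1, 3), (2, 4)]  # one tuple per synchronized iteration
    list(zip(fields, [[v for v in col if v is not None]
                      for col in zip(*values)]))
    # -> [('a', [1, 2]), ('b', [3, 4])]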
+    if node.synchronize:
+        if len(iterables) == 2:
+            first, last = iterables
+            if all((isinstance(item, (str, bytes)) and item in fields
+                    for item in first)):
+                iterables = _transpose_iterables(first, last)
+
+    # Convert a tuple to a list
+    if isinstance(iterables, tuple):
+        iterables = [iterables]
+    # Validate the standard [(field, values)] format
+    _validate_iterables(node, iterables, fields)
+    # Convert a list to a dictionary
+    if isinstance(iterables, list):
+        # Convert a values list to a function. This is a legacy
+        # Nipype requirement with unknown rationale.
+        if not node.itersource:
+
+            def make_field_func(*pair):
+                return pair[0], lambda: pair[1]
+
+            iter_items = [
+                make_field_func(*field_value1) for field_value1 in iterables
+            ]
+            iterables = dict(iter_items)
+    node.iterables = iterables
+
+
+def _validate_iterables(node, iterables, fields):
+    """
+    Raise TypeError if an iterables member is not iterable.
+
+    Raise ValueError if an iterables member is not a (field, values) pair.
+
+    Raise ValueError if an iterable field is not in the inputs.
+    """
+    # The iterables can be a {field: value list} dictionary.
+    if isinstance(iterables, dict):
+        iterables = list(iterables.items())
+    elif not isinstance(iterables, tuple) and not isinstance(iterables, list):
+        raise ValueError("The %s iterables type is not a list or a dictionary:"
+                         " %s" % (node.name, iterables.__class__))
+    for item in iterables:
+        try:
+            if len(item) != 2:
+                raise ValueError("The %s iterables is not a [(field, values)]"
+                                 " list" % node.name)
+        except TypeError as e:
+            raise TypeError("A %s iterables member is not iterable: %s" %
+                            (node.name, e))
+        field, _ = item
+        if field not in fields:
+            raise ValueError("The %s iterables field is unrecognized: %s" %
+                             (node.name, field))
+
+
+def _transpose_iterables(fields, values):
+    """
+    Converts the given fields and tuple values into a standardized
+    iterables value.
+
+    If the input values is a synchronize iterables dictionary, then
+    the result is a (field, {key: values}) list.
+
+    Otherwise, the result is a list of (field: value list) pairs.
+    """
+    if isinstance(values, dict):
+        transposed = dict([(field, defaultdict(list)) for field in fields])
+        for key, tuples in list(values.items()):
+            for kvals in tuples:
+                for idx, val in enumerate(kvals):
+                    if val is not None:
+                        transposed[fields[idx]][key].append(val)
+        return list(transposed.items())
+
+    return list(
+        zip(fields, [[v for v in list(transpose) if v is not None]
+                     for transpose in zip(*values)]))
+
+
+def export_graph(graph_in,
+                 base_dir=None,
+                 show=False,
+                 use_execgraph=False,
+                 show_connectinfo=False,
+                 dotfilename='graph.dot',
+                 format='png',
+                 simple_form=True):
+    """ Displays the graph layout of the pipeline
+
+    This function requires that pygraphviz and matplotlib are available on
+    the system.
+
+    Parameters
+    ----------
+
+    show : boolean
+        Indicate whether to generate pygraphviz output from
+        networkx. default [False]
+
+    use_execgraph : boolean
+        Indicates whether to use the specification graph or the
+        execution graph. default [False]
+
+    show_connectinfo : boolean
+        Indicates whether to show the edge data on the graph. This
+        makes the graph rather cluttered. 
default [False] + """ + graph = deepcopy(graph_in) + if use_execgraph: + graph = generate_expanded_graph(graph) + logger.debug('using execgraph') + else: + logger.debug('using input graph') + if base_dir is None: + base_dir = os.getcwd() + + makedirs(base_dir, exist_ok=True) + out_dot = fname_presuffix( + dotfilename, suffix='_detailed.dot', use_ext=False, newpath=base_dir) + _write_detailed_dot(graph, out_dot) + + # Convert .dot if format != 'dot' + outfname, res = _run_dot(out_dot, format_ext=format) + if res is not None and res.runtime.returncode: + logger.warning('dot2png: %s', res.runtime.stderr) + + pklgraph = _create_dot_graph(graph, show_connectinfo, simple_form) + simple_dot = fname_presuffix( + dotfilename, suffix='.dot', use_ext=False, newpath=base_dir) + nx.drawing.nx_pydot.write_dot(pklgraph, simple_dot) + + # Convert .dot if format != 'dot' + simplefname, res = _run_dot(simple_dot, format_ext=format) + if res is not None and res.runtime.returncode: + logger.warning('dot2png: %s', res.runtime.stderr) + + if show: + pos = nx.graphviz_layout(pklgraph, prog='dot') + nx.draw(pklgraph, pos) + if show_connectinfo: + nx.draw_networkx_edge_labels(pklgraph, pos) + + return simplefname if simple_form else outfname + + +def format_dot(dotfilename, format='png'): + """Dump a directed graph (Linux only; install via `brew` on OSX)""" + try: + formatted_dot, _ = _run_dot(dotfilename, format_ext=format) + except IOError as ioe: + if "could not be found" in str(ioe): + raise IOError("Cannot draw directed graph; executable 'dot' is unavailable") + else: + raise ioe + return formatted_dot + + +def _run_dot(dotfilename, format_ext): + if format_ext == 'dot': + return dotfilename, None + + dot_base = os.path.splitext(dotfilename)[0] + formatted_dot = '{}.{}'.format(dot_base, format_ext) + cmd = 'dot -T{} -o"{}" "{}"'.format(format_ext, formatted_dot, dotfilename) + res = CommandLine(cmd, terminal_output='allatonce', + resource_monitor=False).run() + return formatted_dot, res + + +def get_all_files(infile): + files = [infile] + if infile.endswith(".img"): + files.append(infile[:-4] + ".hdr") + files.append(infile[:-4] + ".mat") + if infile.endswith(".img.gz"): + files.append(infile[:-7] + ".hdr.gz") + return files + + +def walk_outputs(object): + """Extract every file and directory from a python structure + """ + out = [] + if isinstance(object, dict): + for _, val in sorted(object.items()): + if isdefined(val): + out.extend(walk_outputs(val)) + elif isinstance(object, (list, tuple)): + for val in object: + if isdefined(val): + out.extend(walk_outputs(val)) + else: + if isdefined(object) and isinstance(object, (str, bytes)): + if os.path.islink(object) or os.path.isfile(object): + out = [(filename, 'f') for filename in get_all_files(object)] + elif os.path.isdir(object): + out = [(object, 'd')] + return out + + +def walk_files(cwd): + for path, _, files in os.walk(cwd): + for f in files: + yield os.path.join(path, f) + + +def clean_working_directory(outputs, + cwd, + inputs, + needed_outputs, + config, + files2keep=None, + dirs2keep=None): + """Removes all files not needed for further analysis from the directory + """ + if not outputs: + return + outputs_to_keep = list(outputs.trait_get().keys()) + if needed_outputs and \ + str2bool(config['execution']['remove_unnecessary_outputs']): + outputs_to_keep = needed_outputs + # build a list of needed files + output_files = [] + outputdict = outputs.trait_get() + for output in outputs_to_keep: + output_files.extend(walk_outputs(outputdict[output])) + 
needed_files = [path for path, type in output_files if type == 'f'] + if str2bool(config['execution']['keep_inputs']): + input_files = [] + inputdict = inputs.trait_get() + input_files.extend(walk_outputs(inputdict)) + needed_files += [path for path, type in input_files if type == 'f'] + for extra in [ + '_0x*.json', 'provenance.*', 'pyscript*.m', 'pyjobs*.mat', + 'command.txt', 'result*.pklz', '_inputs.pklz', '_node.pklz' + ]: + needed_files.extend(glob(os.path.join(cwd, extra))) + if files2keep: + needed_files.extend(ensure_list(files2keep)) + needed_dirs = [path for path, type in output_files if type == 'd'] + if dirs2keep: + needed_dirs.extend(ensure_list(dirs2keep)) + for extra in ['_nipype', '_report']: + needed_dirs.extend(glob(os.path.join(cwd, extra))) + temp = [] + for filename in needed_files: + temp.extend(get_related_files(filename)) + needed_files = temp + logger.debug('Needed files: %s', ';'.join(needed_files)) + logger.debug('Needed dirs: %s', ';'.join(needed_dirs)) + files2remove = [] + if str2bool(config['execution']['remove_unnecessary_outputs']): + for f in walk_files(cwd): + if f not in needed_files: + if not needed_dirs: + files2remove.append(f) + elif not any([f.startswith(dname) for dname in needed_dirs]): + files2remove.append(f) + else: + if not str2bool(config['execution']['keep_inputs']): + input_files = [] + inputdict = inputs.trait_get() + input_files.extend(walk_outputs(inputdict)) + input_files = [path for path, type in input_files if type == 'f'] + for f in walk_files(cwd): + if f in input_files and f not in needed_files: + files2remove.append(f) + logger.debug('Removing files: %s', ';'.join(files2remove)) + for f in files2remove: + os.remove(f) + for key in outputs.copyable_trait_names(): + if key not in outputs_to_keep: + setattr(outputs, key, Undefined) + return outputs + + +def merge_dict(d1, d2, merge=lambda x, y: y): + """ + Merges two dictionaries, non-destructively, combining + values on duplicate keys as defined by the optional merge + function. The default behavior replaces the values in d1 + with corresponding values in d2. (There is no other generally + applicable merge strategy, but often you'll have homogeneous + types in your dicts, so specifying a merge technique can be + valuable.) 
+ + Examples: + + >>> d1 = {'a': 1, 'c': 3, 'b': 2} + >>> d2 = merge_dict(d1, d1) + >>> len(d2) + 3 + >>> [d2[k] for k in ['a', 'b', 'c']] + [1, 2, 3] + + >>> d3 = merge_dict(d1, d1, lambda x,y: x+y) + >>> len(d3) + 3 + >>> [d3[k] for k in ['a', 'b', 'c']] + [2, 4, 6] + + """ + if not isinstance(d1, dict): + return merge(d1, d2) + result = dict(d1) + if d2 is None: + return result + for k, v in list(d2.items()): + if k in result: + result[k] = merge_dict(result[k], v, merge=merge) + else: + result[k] = v + return result + + +def merge_bundles(g1, g2): + for rec in g2.get_records(): + g1._add_record(rec) + return g1 + + +def write_workflow_prov(graph, filename=None, format='all'): + """Write W3C PROV Model JSON file + """ + if not filename: + filename = os.path.join(os.getcwd(), 'workflow_provenance') + + ps = ProvStore() + + processes = [] + nodes = graph.nodes() + for node in nodes: + result = node.result + classname = node.interface.__class__.__name__ + _, hashval, _, _ = node.hash_exists() + attrs = { + pm.PROV["type"]: nipype_ns[classname], + pm.PROV["label"]: '_'.join((classname, node.name)), + nipype_ns['hashval']: hashval + } + process = ps.g.activity(get_id(), None, None, attrs) + if isinstance(result.runtime, list): + process.add_attributes({pm.PROV["type"]: nipype_ns["MapNode"]}) + # add info about sub processes + for idx, runtime in enumerate(result.runtime): + subresult = InterfaceResult( + result.interface[idx], runtime, outputs={}) + if result.inputs: + if idx < len(result.inputs): + subresult.inputs = result.inputs[idx] + if result.outputs: + for key, _ in list(result.outputs.items()): + values = getattr(result.outputs, key) + if isdefined(values) and idx < len(values): + subresult.outputs[key] = values[idx] + sub_doc = ProvStore().add_results(subresult) + sub_bundle = pm.ProvBundle( + sub_doc.get_records(), identifier=get_id()) + ps.g.add_bundle(sub_bundle) + bundle_entity = ps.g.entity( + sub_bundle.identifier, + other_attributes={ + 'prov:type': pm.PROV_BUNDLE + }) + ps.g.wasGeneratedBy(bundle_entity, process) + else: + process.add_attributes({pm.PROV["type"]: nipype_ns["Node"]}) + if result.provenance: + prov_doc = result.provenance + else: + prov_doc = ProvStore().add_results(result) + result_bundle = pm.ProvBundle( + prov_doc.get_records(), identifier=get_id()) + ps.g.add_bundle(result_bundle) + bundle_entity = ps.g.entity( + result_bundle.identifier, + other_attributes={ + 'prov:type': pm.PROV_BUNDLE + }) + ps.g.wasGeneratedBy(bundle_entity, process) + processes.append(process) + + # add dependencies (edges) + # Process->Process + for idx, edgeinfo in enumerate(graph.in_edges()): + ps.g.wasStartedBy( + processes[list(nodes).index(edgeinfo[1])], + starter=processes[list(nodes).index(edgeinfo[0])]) + + # write provenance + ps.write_provenance(filename, format=format) + return ps.g + + +def write_workflow_resources(graph, filename=None, append=None): + """ + Generate a JSON file with profiling traces that can be loaded + in a pandas DataFrame or processed with JavaScript like D3.js + """ + import simplejson as json + + # Overwrite filename if nipype config is set + filename = config.get('monitoring', 'summary_file', filename) + + # If filename still does not make sense, store in $PWD + if not filename: + filename = os.path.join(os.getcwd(), 'resource_monitor.json') + + if append is None: + append = str2bool(config.get('monitoring', 'summary_append', 'true')) + + big_dict = { + 'time': [], + 'name': [], + 'interface': [], + 'rss_GiB': [], + 'vms_GiB': [], + 'cpus': [], + 
'mapnode': [], + 'params': [], + } + + # If file exists, just append new profile information + # If we append different runs, then we will see different + # "bursts" of timestamps corresponding to those executions. + if append and os.path.isfile(filename): + with open(filename, 'r' if PY3 else 'rb') as rsf: + big_dict = json.load(rsf) + + for _, node in enumerate(graph.nodes()): + nodename = node.fullname + classname = node.interface.__class__.__name__ + + params = '' + if node.parameterization: + params = '_'.join(['{}'.format(p) for p in node.parameterization]) + + try: + rt_list = node.result.runtime + except Exception: + logger.warning('Could not access runtime info for node %s' + ' (%s interface)', nodename, classname) + continue + + if not isinstance(rt_list, list): + rt_list = [rt_list] + + for subidx, runtime in enumerate(rt_list): + try: + nsamples = len(runtime.prof_dict['time']) + except AttributeError: + logger.warning( + 'Could not retrieve profiling information for node "%s" ' + '(mapflow %d/%d).', nodename, subidx + 1, len(rt_list)) + continue + + for key in ['time', 'cpus', 'rss_GiB', 'vms_GiB']: + big_dict[key] += runtime.prof_dict[key] + + big_dict['interface'] += [classname] * nsamples + big_dict['name'] += [nodename] * nsamples + big_dict['mapnode'] += [subidx] * nsamples + big_dict['params'] += [params] * nsamples + + with open(filename, 'w' if PY3 else 'wb') as rsf: + json.dump(big_dict, rsf, ensure_ascii=False) + + return filename + + +def topological_sort(graph, depth_first=False): + """Returns a depth first sorted order if depth_first is True + """ + nodesort = list(nx.topological_sort(graph)) + if not depth_first: + return nodesort, None + logger.debug("Performing depth first search") + nodes = [] + groups = [] + group = 0 + G = nx.Graph() + G.add_nodes_from(graph.nodes()) + G.add_edges_from(graph.edges()) + components = nx.connected_components(G) + for desc in components: + group += 1 + indices = [] + for node in desc: + indices.append(nodesort.index(node)) + nodes.extend( + np.array(nodesort)[np.array(indices)[np.argsort(indices)]] + .tolist()) + for node in desc: + nodesort.remove(node) + groups.extend([group] * len(desc)) + return nodes, groups diff --git a/nipype/pipeline/engine/workflows.py b/nipype/pipeline/engine/workflows.py new file mode 100644 index 0000000000..d2f040786e --- /dev/null +++ b/nipype/pipeline/engine/workflows.py @@ -0,0 +1,1045 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Defines functionality for pipelined execution of interfaces + +The `Workflow` class provides core functionality for batch processing. +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, bytes, open + +import os +import os.path as op +import sys +from datetime import datetime +from copy import deepcopy +import pickle +import shutil + +import numpy as np +import networkx as nx + +from ... 
import config, logging +from ...utils.misc import str2bool +from ...utils.functions import (getsource, create_function_from_source) + +from ...interfaces.base import (traits, TraitedSpec, TraitDictObject, + TraitListObject) +from ...utils.filemanip import save_json, makedirs, to_str +from .utils import (generate_expanded_graph, export_graph, write_workflow_prov, + write_workflow_resources, format_dot, topological_sort, + get_print_name, merge_dict, format_node) + +from .base import EngineBase +from .nodes import MapNode + +# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict +from future import standard_library +standard_library.install_aliases() + +logger = logging.getLogger('nipype.workflow') + + +class Workflow(EngineBase): + """Controls the setup and execution of a pipeline of processes.""" + + def __init__(self, name, base_dir=None): + """Create a workflow object. + + Parameters + ---------- + name : alphanumeric string + unique identifier for the workflow + base_dir : string, optional + path to workflow storage + + """ + super(Workflow, self).__init__(name, base_dir) + self._graph = nx.DiGraph() + + # PUBLIC API + def clone(self, name): + """Clone a workflow + + .. note:: + + Will reset attributes used for executing workflow. See + _init_runtime_fields. + + Parameters + ---------- + + name: alphanumeric name + unique name for the workflow + + """ + clone = super(Workflow, self).clone(name) + clone._reset_hierarchy() + return clone + + # Graph creation functions + def connect(self, *args, **kwargs): + """Connect nodes in the pipeline. + + This routine also checks if inputs and outputs are actually provided by + the nodes that are being connected. + + Creates edges in the directed graph using the nodes and edges specified + in the `connection_list`. Uses the NetworkX method + DiGraph.add_edges_from. + + Parameters + ---------- + + args : list or a set of four positional arguments + + Four positional arguments of the form:: + + connect(source, sourceoutput, dest, destinput) + + source : nodewrapper node + sourceoutput : string (must be in source.outputs) + dest : nodewrapper node + destinput : string (must be in dest.inputs) + + A list of 3-tuples of the following form:: + + [(source, target, + [('sourceoutput/attribute', 'targetinput'), + ...]), + ...] + + Or:: + + [(source, target, [(('sourceoutput1', func, arg2, ...), + 'targetinput'), ...]), + ...] 
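An illustration of the tuple form above (node and field names hypothetical; the import sits inside the function body, a constraint explained just below):

    def rename(fname):
        import os.path as op
        return op.abspath(fname)

    wf.connect([(realign, datasink, [(('mean_image', rename),
                                      'container')])])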
+        sourceoutput1 will always be the first argument to func
+        and func will be evaluated and the results sent to targetinput
+
+        Currently, func needs to define all its needed imports within the
+        function, as we use the inspect module to get at the source code
+        and execute it remotely
+        """
+        if len(args) == 1:
+            connection_list = args[0]
+        elif len(args) == 4:
+            connection_list = [(args[0], args[2], [(args[1], args[3])])]
+        else:
+            raise TypeError('connect() takes either 4 arguments, or 1 list of'
+                            ' connection tuples (%d args given)' % len(args))
+
+        disconnect = False
+        if kwargs:
+            disconnect = kwargs.get('disconnect', False)
+
+        if disconnect:
+            self.disconnect(connection_list)
+            return
+
+        newnodes = []
+        for srcnode, destnode, _ in connection_list:
+            if self in [srcnode, destnode]:
+                msg = ('Workflow connect cannot contain itself as node:'
+                       ' src[%s] dest[%s] workflow[%s]') % (srcnode, destnode,
+                                                            self.name)
+
+                raise IOError(msg)
+            if (srcnode not in newnodes) and not self._has_node(srcnode):
+                newnodes.append(srcnode)
+            if (destnode not in newnodes) and not self._has_node(destnode):
+                newnodes.append(destnode)
+        if newnodes:
+            self._check_nodes(newnodes)
+            for node in newnodes:
+                if node._hierarchy is None:
+                    node._hierarchy = self.name
+        not_found = []
+        connected_ports = {}
+        for srcnode, destnode, connects in connection_list:
+            if destnode not in connected_ports:
+                connected_ports[destnode] = []
+            # check to see which ports of destnode are already
+            # connected.
+            if not disconnect and (destnode in self._graph.nodes()):
+                for edge in self._graph.in_edges(destnode):
+                    data = self._graph.get_edge_data(*edge)
+                    for sourceinfo, destname in data['connect']:
+                        if destname not in connected_ports[destnode]:
+                            connected_ports[destnode] += [destname]
+            for source, dest in connects:
+                # Currently datasource/sink/grabber.io modules
+                # determine their inputs/outputs depending on
+                # connection settings. Skip these modules in the check
+                if dest in connected_ports[destnode]:
+                    raise Exception("""\
+Trying to connect %s:%s to %s:%s but input '%s' of node '%s' is already
+connected.
+""" % (srcnode, source, destnode, dest, dest, destnode)) + if not (hasattr(destnode, '_interface') and + ('.io' in str(destnode._interface.__class__) or any([ + '.io' in str(val) + for val in destnode._interface.__class__.__bases__ + ]))): + if not destnode._check_inputs(dest): + not_found.append(['in', destnode.name, dest]) + if not (hasattr(srcnode, '_interface') and + ('.io' in str(srcnode._interface.__class__) or any([ + '.io' in str(val) + for val in srcnode._interface.__class__.__bases__ + ]))): + if isinstance(source, tuple): + # handles the case that source is specified + # with a function + sourcename = source[0] + elif isinstance(source, (str, bytes)): + sourcename = source + else: + raise Exception( + ('Unknown source specification in ' + 'connection from output of %s') % srcnode.name) + if sourcename and not srcnode._check_outputs(sourcename): + not_found.append(['out', srcnode.name, sourcename]) + connected_ports[destnode] += [dest] + infostr = [] + for info in not_found: + infostr += [ + "Module %s has no %sput called %s\n" % (info[1], info[0], + info[2]) + ] + if not_found: + raise Exception( + '\n'.join(['Some connections were not found'] + infostr)) + + # turn functions into strings + for srcnode, destnode, connects in connection_list: + for idx, (src, dest) in enumerate(connects): + if isinstance(src, + tuple) and not isinstance(src[1], (str, bytes)): + function_source = getsource(src[1]) + connects[idx] = ((src[0], function_source, src[2:]), dest) + + # add connections + for srcnode, destnode, connects in connection_list: + edge_data = self._graph.get_edge_data(srcnode, destnode, None) + if edge_data: + logger.debug('(%s, %s): Edge data exists: %s', srcnode, + destnode, to_str(edge_data)) + for data in connects: + if data not in edge_data['connect']: + edge_data['connect'].append(data) + if disconnect: + logger.debug('Removing connection: %s', to_str(data)) + edge_data['connect'].remove(data) + if edge_data['connect']: + self._graph.add_edges_from([(srcnode, destnode, + edge_data)]) + else: + # pass + logger.debug('Removing connection: %s->%s', srcnode, + destnode) + self._graph.remove_edges_from([(srcnode, destnode)]) + elif not disconnect: + logger.debug('(%s, %s): No edge data', srcnode, destnode) + self._graph.add_edges_from([(srcnode, destnode, { + 'connect': connects + })]) + edge_data = self._graph.get_edge_data(srcnode, destnode) + logger.debug('(%s, %s): new edge data: %s', srcnode, destnode, + to_str(edge_data)) + + def disconnect(self, *args): + """Disconnect nodes + See the docstring for connect for format. 
+ """ + if len(args) == 1: + connection_list = args[0] + elif len(args) == 4: + connection_list = [(args[0], args[2], [(args[1], args[3])])] + else: + raise TypeError('disconnect() takes either 4 arguments, or 1 list ' + 'of connection tuples (%d args given)' % len(args)) + + for srcnode, dstnode, conn in connection_list: + logger.debug('disconnect(): %s->%s %s', srcnode, dstnode, + to_str(conn)) + if self in [srcnode, dstnode]: + raise IOError( + 'Workflow connect cannot contain itself as node: src[%s] ' + 'dest[%s] workflow[%s]') % (srcnode, dstnode, self.name) + + # If node is not in the graph, not connected + if not self._has_node(srcnode) or not self._has_node(dstnode): + continue + + edge_data = self._graph.get_edge_data(srcnode, dstnode, { + 'connect': [] + }) + ed_conns = [(c[0], c[1]) for c in edge_data['connect']] + + remove = [] + for edge in conn: + if edge in ed_conns: + # idx = ed_conns.index(edge) + remove.append((edge[0], edge[1])) + + logger.debug('disconnect(): remove list %s', to_str(remove)) + for el in remove: + edge_data['connect'].remove(el) + logger.debug('disconnect(): removed connection %s', to_str(el)) + + if not edge_data['connect']: + self._graph.remove_edge(srcnode, dstnode) + else: + self._graph.add_edges_from([(srcnode, dstnode, edge_data)]) + + def add_nodes(self, nodes): + """ Add nodes to a workflow + + Parameters + ---------- + nodes : list + A list of EngineBase-based objects + """ + newnodes = [] + all_nodes = self._get_all_nodes() + for node in nodes: + if self._has_node(node): + raise IOError('Node %s already exists in the workflow' % node) + if isinstance(node, Workflow): + for subnode in node._get_all_nodes(): + if subnode in all_nodes: + raise IOError(('Subnode %s of node %s already exists ' + 'in the workflow') % (subnode, node)) + newnodes.append(node) + if not newnodes: + logger.debug('no new nodes to add') + return + for node in newnodes: + if not issubclass(node.__class__, EngineBase): + raise Exception('Node %s must be a subclass of EngineBase', + node) + self._check_nodes(newnodes) + for node in newnodes: + if node._hierarchy is None: + node._hierarchy = self.name + self._graph.add_nodes_from(newnodes) + + def remove_nodes(self, nodes): + """ Remove nodes from a workflow + + Parameters + ---------- + nodes : list + A list of EngineBase-based objects + """ + self._graph.remove_nodes_from(nodes) + + # Input-Output access + @property + def inputs(self): + return self._get_inputs() + + @property + def outputs(self): + return self._get_outputs() + + def get_node(self, name): + """Return an internal node by name + """ + nodenames = name.split('.') + nodename = nodenames[0] + outnode = [ + node for node in self._graph.nodes() + if str(node).endswith('.' 
+ nodename) + ] + if outnode: + outnode = outnode[0] + if nodenames[1:] and issubclass(outnode.__class__, Workflow): + outnode = outnode.get_node('.'.join(nodenames[1:])) + else: + outnode = None + return outnode + + def list_node_names(self): + """List names of all nodes in a workflow + """ + outlist = [] + for node in nx.topological_sort(self._graph): + if isinstance(node, Workflow): + outlist.extend([ + '.'.join((node.name, nodename)) + for nodename in node.list_node_names() + ]) + else: + outlist.append(node.name) + return sorted(outlist) + + def write_graph(self, + dotfilename='graph.dot', + graph2use='hierarchical', + format="png", + simple_form=True): + """Generates a graphviz dot file and a png file + + Parameters + ---------- + + graph2use: 'orig', 'hierarchical' (default), 'flat', 'exec', 'colored' + orig - creates a top level graph without expanding internal + workflow nodes; + flat - expands workflow nodes recursively; + hierarchical - expands workflow nodes recursively with a + notion on hierarchy; + colored - expands workflow nodes recursively with a + notion on hierarchy in color; + exec - expands workflows to depict iterables + + format: 'png', 'svg' + + simple_form: boolean (default: True) + Determines if the node name used in the graph should be of the form + 'nodename (package)' when True or 'nodename.Class.package' when + False. + + """ + graphtypes = ['orig', 'flat', 'hierarchical', 'exec', 'colored'] + if graph2use not in graphtypes: + raise ValueError('Unknown graph2use keyword. Must be one of: ' + + str(graphtypes)) + base_dir, dotfilename = op.split(dotfilename) + if base_dir == '': + if self.base_dir: + base_dir = self.base_dir + if self.name: + base_dir = op.join(base_dir, self.name) + else: + base_dir = os.getcwd() + base_dir = makedirs(base_dir, exist_ok=True) + if graph2use in ['hierarchical', 'colored']: + if self.name[:1].isdigit(): # these graphs break if int + raise ValueError('{} graph failed, workflow name cannot begin ' + 'with a number'.format(graph2use)) + dotfilename = op.join(base_dir, dotfilename) + self.write_hierarchical_dotfile( + dotfilename=dotfilename, + colored=graph2use == "colored", + simple_form=simple_form) + outfname = format_dot(dotfilename, format=format) + else: + graph = self._graph + if graph2use in ['flat', 'exec']: + graph = self._create_flat_graph() + if graph2use == 'exec': + graph = generate_expanded_graph(deepcopy(graph)) + outfname = export_graph( + graph, + base_dir, + dotfilename=dotfilename, + format=format, + simple_form=simple_form) + + logger.info( + 'Generated workflow graph: %s (graph2use=%s, simple_form=%s).' 
% + (outfname, graph2use, simple_form)) + return outfname + + def write_hierarchical_dotfile(self, + dotfilename=None, + colored=False, + simple_form=True): + dotlist = ['digraph %s{' % self.name] + dotlist.append( + self._get_dot( + prefix=' ', colored=colored, simple_form=simple_form)) + dotlist.append('}') + dotstr = '\n'.join(dotlist) + if dotfilename: + fp = open(dotfilename, 'wt') + fp.writelines(dotstr) + fp.close() + else: + logger.info(dotstr) + + def export(self, + filename=None, + prefix="output", + format="python", + include_config=False): + """Export object into a different format + + Parameters + ---------- + filename: string + file to save the code to; overrides prefix + prefix: string + prefix to use for output file + format: string + one of "python" + include_config: boolean + whether to include node and workflow config values + + """ + formats = ["python"] + if format not in formats: + raise ValueError('format must be one of: %s' % '|'.join(formats)) + flatgraph = self._create_flat_graph() + nodes = nx.topological_sort(flatgraph) + + all_lines = None + lines = ['# Workflow'] + importlines = [ + 'from nipype.pipeline.engine import Workflow, ' + 'Node, MapNode' + ] + functions = {} + if format == "python": + connect_template = '%s.connect(%%s, %%s, %%s, "%%s")' % self.name + connect_template2 = '%s.connect(%%s, "%%s", %%s, "%%s")' \ + % self.name + wfdef = '%s = Workflow("%s")' % (self.name, self.name) + lines.append(wfdef) + if include_config: + lines.append('%s.config = %s' % (self.name, self.config)) + for idx, node in enumerate(nodes): + nodename = node.fullname.replace('.', '_') + # write nodes + nodelines = format_node( + node, format='python', include_config=include_config) + for line in nodelines: + if line.startswith('from'): + if line not in importlines: + importlines.append(line) + else: + lines.append(line) + # write connections + for u, _, d in flatgraph.in_edges(nbunch=node, data=True): + for cd in d['connect']: + if isinstance(cd[0], tuple): + args = list(cd[0]) + if args[1] in functions: + funcname = functions[args[1]] + else: + func = create_function_from_source(args[1]) + funcname = [ + name for name in func.__globals__ + if name != '__builtins__' + ][0] + functions[args[1]] = funcname + args[1] = funcname + args = tuple([arg for arg in args if arg]) + line_args = (u.fullname.replace('.', '_'), args, + nodename, cd[1]) + line = connect_template % line_args + line = line.replace("'%s'" % funcname, funcname) + lines.append(line) + else: + line_args = (u.fullname.replace('.', '_'), cd[0], + nodename, cd[1]) + lines.append(connect_template2 % line_args) + functionlines = ['# Functions'] + for function in functions: + functionlines.append(pickle.loads(function).rstrip()) + all_lines = importlines + functionlines + lines + + if not filename: + filename = '%s%s.py' % (prefix, self.name) + with open(filename, 'wt') as fp: + fp.writelines('\n'.join(all_lines)) + return all_lines + + def run(self, plugin=None, plugin_args=None, updatehash=False): + """ Execute the workflow + + Parameters + ---------- + + plugin: plugin name or object + Plugin to use for execution. You can create your own plugins for + execution. + plugin_args : dictionary containing arguments to be sent to plugin + constructor. see individual plugin doc strings for details. 
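+
+        A minimal sketch of typical usage (the workflow name and the
+        plugin arguments below are illustrative, not prescribed)::
+
+            wf = Workflow(name='preproc')
+            # ... add and connect nodes ...
+            wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})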
+ """ + if plugin is None: + plugin = config.get('execution', 'plugin') + if not isinstance(plugin, (str, bytes)): + runner = plugin + else: + name = '.'.join(__name__.split('.')[:-2] + ['plugins']) + try: + __import__(name) + except ImportError: + msg = 'Could not import plugin module: %s' % name + logger.error(msg) + raise ImportError(msg) + else: + plugin_mod = getattr(sys.modules[name], '%sPlugin' % plugin) + runner = plugin_mod(plugin_args=plugin_args) + flatgraph = self._create_flat_graph() + self.config = merge_dict(deepcopy(config._sections), self.config) + logger.info('Workflow %s settings: %s', self.name, + to_str(sorted(self.config))) + self._set_needed_outputs(flatgraph) + execgraph = generate_expanded_graph(deepcopy(flatgraph)) + for index, node in enumerate(execgraph.nodes()): + node.config = merge_dict(deepcopy(self.config), node.config) + node.base_dir = self.base_dir + node.index = index + if isinstance(node, MapNode): + node.use_plugin = (plugin, plugin_args) + self._configure_exec_nodes(execgraph) + if str2bool(self.config['execution']['create_report']): + self._write_report_info(self.base_dir, self.name, execgraph) + runner.run(execgraph, updatehash=updatehash, config=self.config) + datestr = datetime.utcnow().strftime('%Y%m%dT%H%M%S') + if str2bool(self.config['execution']['write_provenance']): + prov_base = op.join(self.base_dir, + 'workflow_provenance_%s' % datestr) + logger.info('Provenance file prefix: %s' % prov_base) + write_workflow_prov(execgraph, prov_base, format='all') + + if config.resource_monitor: + base_dir = self.base_dir or os.getcwd() + write_workflow_resources( + execgraph, + filename=op.join(base_dir, self.name, 'resource_monitor.json')) + return execgraph + + # PRIVATE API AND FUNCTIONS + + def _write_report_info(self, workingdir, name, graph): + if workingdir is None: + workingdir = os.getcwd() + report_dir = op.join(workingdir, name) + makedirs(report_dir, exist_ok=True) + shutil.copyfile( + op.join(op.dirname(__file__), 'report_template.html'), + op.join(report_dir, 'index.html')) + shutil.copyfile( + op.join(op.dirname(__file__), '..', '..', 'external', 'd3.js'), + op.join(report_dir, 'd3.js')) + nodes, groups = topological_sort(graph, depth_first=True) + graph_file = op.join(report_dir, 'graph1.json') + json_dict = {'nodes': [], 'links': [], 'groups': [], 'maxN': 0} + for i, node in enumerate(nodes): + report_file = "%s/_report/report.rst" % \ + node.output_dir().replace(report_dir, '') + result_file = "%s/result_%s.pklz" % \ + (node.output_dir().replace(report_dir, ''), + node.name) + json_dict['nodes'].append( + dict( + name='%d_%s' % (i, node.name), + report=report_file, + result=result_file, + group=groups[i])) + maxN = 0 + for gid in np.unique(groups): + procs = [i for i, val in enumerate(groups) if val == gid] + N = len(procs) + if N > maxN: + maxN = N + json_dict['groups'].append( + dict(procs=procs, total=N, name='Group_%05d' % gid)) + json_dict['maxN'] = maxN + for u, v in graph.in_edges(): + json_dict['links'].append( + dict(source=nodes.index(u), target=nodes.index(v), value=1)) + save_json(graph_file, json_dict) + graph_file = op.join(report_dir, 'graph.json') + # Avoid RuntimeWarning: divide by zero encountered in log10 + num_nodes = len(nodes) + if num_nodes > 0: + index_name = np.ceil(np.log10(num_nodes)).astype(int) + else: + index_name = 0 + template = '%%0%dd_' % index_name + + def getname(u, i): + name_parts = u.fullname.split('.') + # return '.'.join(name_parts[:-1] + [template % i + name_parts[-1]]) + return template % i + 
name_parts[-1] + + json_dict = [] + for i, node in enumerate(nodes): + imports = [] + for u, v in graph.in_edges(nbunch=node): + imports.append(getname(u, nodes.index(u))) + json_dict.append( + dict( + name=getname(node, i), + size=1, + group=groups[i], + imports=imports)) + save_json(graph_file, json_dict) + + def _set_needed_outputs(self, graph): + """Initialize node with list of which outputs are needed.""" + rm_outputs = self.config['execution']['remove_unnecessary_outputs'] + if not str2bool(rm_outputs): + return + for node in graph.nodes(): + node.needed_outputs = [] + for edge in graph.out_edges(node): + data = graph.get_edge_data(*edge) + sourceinfo = [ + v1[0] if isinstance(v1, tuple) else v1 + for v1, v2 in data['connect'] + ] + node.needed_outputs += [ + v for v in sourceinfo if v not in node.needed_outputs + ] + if node.needed_outputs: + node.needed_outputs = sorted(node.needed_outputs) + + def _configure_exec_nodes(self, graph): + """Ensure that each node knows where to get inputs from + """ + for node in graph.nodes(): + node.input_source = {} + for edge in graph.in_edges(node): + data = graph.get_edge_data(*edge) + for sourceinfo, field in data['connect']: + node.input_source[field] = \ + (op.join(edge[0].output_dir(), + 'result_%s.pklz' % edge[0].name), + sourceinfo) + + def _check_nodes(self, nodes): + """Checks if any of the nodes are already in the graph + + """ + node_names = [node.name for node in self._graph.nodes()] + node_lineage = [node._hierarchy for node in self._graph.nodes()] + for node in nodes: + if node.name in node_names: + idx = node_names.index(node.name) + try: + this_node_lineage = node_lineage[idx] + except IndexError: + raise IOError( + 'Duplicate node name "%s" found.' % node.name) + else: + if this_node_lineage in [node._hierarchy, self.name]: + raise IOError( + 'Duplicate node name "%s" found.' 
% node.name) + else: + node_names.append(node.name) + + def _has_attr(self, parameter, subtype='in'): + """Checks if a parameter is available as an input or output + """ + if subtype == 'in': + subobject = self.inputs + else: + subobject = self.outputs + attrlist = parameter.split('.') + cur_out = subobject + for attr in attrlist: + if not hasattr(cur_out, attr): + return False + cur_out = getattr(cur_out, attr) + return True + + def _get_parameter_node(self, parameter, subtype='in'): + """Returns the underlying node corresponding to an input or + output parameter + """ + if subtype == 'in': + subobject = self.inputs + else: + subobject = self.outputs + attrlist = parameter.split('.') + cur_out = subobject + for attr in attrlist[:-1]: + cur_out = getattr(cur_out, attr) + return cur_out.traits()[attrlist[-1]].node + + def _check_outputs(self, parameter): + return self._has_attr(parameter, subtype='out') + + def _check_inputs(self, parameter): + return self._has_attr(parameter, subtype='in') + + def _get_inputs(self): + """Returns the inputs of a workflow + + This function does not return any input ports that are already + connected + """ + inputdict = TraitedSpec() + for node in self._graph.nodes(): + inputdict.add_trait(node.name, traits.Instance(TraitedSpec)) + if isinstance(node, Workflow): + setattr(inputdict, node.name, node.inputs) + else: + taken_inputs = [] + for _, _, d in self._graph.in_edges(nbunch=node, data=True): + for cd in d['connect']: + taken_inputs.append(cd[1]) + unconnectedinputs = TraitedSpec() + for key, trait in list(node.inputs.items()): + if key not in taken_inputs: + unconnectedinputs.add_trait(key, + traits.Trait( + trait, node=node)) + value = getattr(node.inputs, key) + setattr(unconnectedinputs, key, value) + setattr(inputdict, node.name, unconnectedinputs) + getattr(inputdict, node.name).on_trait_change(self._set_input) + return inputdict + + def _get_outputs(self): + """Returns all possible output ports that are not already connected + """ + outputdict = TraitedSpec() + for node in self._graph.nodes(): + outputdict.add_trait(node.name, traits.Instance(TraitedSpec)) + if isinstance(node, Workflow): + setattr(outputdict, node.name, node.outputs) + elif node.outputs: + outputs = TraitedSpec() + for key, _ in list(node.outputs.items()): + outputs.add_trait(key, traits.Any(node=node)) + setattr(outputs, key, None) + setattr(outputdict, node.name, outputs) + return outputdict + + def _set_input(self, objekt, name, newvalue): + """Trait callback function to update a node input + """ + objekt.traits()[name].node.set_input(name, newvalue) + + def _set_node_input(self, node, param, source, sourceinfo): + """Set inputs of a node given the edge connection""" + if isinstance(sourceinfo, (str, bytes)): + val = source.get_output(sourceinfo) + elif isinstance(sourceinfo, tuple): + if callable(sourceinfo[1]): + val = sourceinfo[1](source.get_output(sourceinfo[0]), + *sourceinfo[2:]) + newval = val + if isinstance(val, TraitDictObject): + newval = dict(val) + if isinstance(val, TraitListObject): + newval = val[:] + logger.debug('setting node input: %s->%s', param, to_str(newval)) + node.set_input(param, deepcopy(newval)) + + def _get_all_nodes(self): + allnodes = [] + for node in self._graph.nodes(): + if isinstance(node, Workflow): + allnodes.extend(node._get_all_nodes()) + else: + allnodes.append(node) + return allnodes + + def _has_node(self, wanted_node): + for node in self._graph.nodes(): + if wanted_node == node: + return True + if isinstance(node, Workflow): + if 
node._has_node(wanted_node): + return True + return False + + def _create_flat_graph(self): + """Make a simple DAG where no node is a workflow.""" + logger.debug('Creating flat graph for workflow: %s', self.name) + workflowcopy = deepcopy(self) + workflowcopy._generate_flatgraph() + return workflowcopy._graph + + def _reset_hierarchy(self): + """Reset the hierarchy on a graph + """ + for node in self._graph.nodes(): + if isinstance(node, Workflow): + node._reset_hierarchy() + for innernode in node._graph.nodes(): + innernode._hierarchy = '.'.join((self.name, + innernode._hierarchy)) + else: + node._hierarchy = self.name + + def _generate_flatgraph(self): + """Generate a graph containing only Nodes or MapNodes + """ + logger.debug('expanding workflow: %s', self) + nodes2remove = [] + if not nx.is_directed_acyclic_graph(self._graph): + raise Exception(('Workflow: %s is not a directed acyclic graph ' + '(DAG)') % self.name) + nodes = nx.topological_sort(self._graph) + for node in nodes: + logger.debug('processing node: %s', node) + if isinstance(node, Workflow): + nodes2remove.append(node) + # use in_edges instead of in_edges_iter to allow + # disconnections to take place properly. otherwise, the + # edge dict is modified. + # dj: added list() for networkx ver.2 + for u, _, d in list( + self._graph.in_edges(nbunch=node, data=True)): + logger.debug('in: connections-> %s', to_str(d['connect'])) + for cd in deepcopy(d['connect']): + logger.debug("in: %s", to_str(cd)) + dstnode = node._get_parameter_node(cd[1], subtype='in') + srcnode = u + srcout = cd[0] + dstin = cd[1].split('.')[-1] + logger.debug('in edges: %s %s %s %s', srcnode, srcout, + dstnode, dstin) + self.disconnect(u, cd[0], node, cd[1]) + self.connect(srcnode, srcout, dstnode, dstin) + # do not use out_edges_iter for reasons stated in in_edges + # dj: for ver 2 use list(out_edges) + for _, v, d in list( + self._graph.out_edges(nbunch=node, data=True)): + logger.debug('out: connections-> %s', to_str(d['connect'])) + for cd in deepcopy(d['connect']): + logger.debug("out: %s", to_str(cd)) + dstnode = v + if isinstance(cd[0], tuple): + parameter = cd[0][0] + else: + parameter = cd[0] + srcnode = node._get_parameter_node( + parameter, subtype='out') + if isinstance(cd[0], tuple): + srcout = list(cd[0]) + srcout[0] = parameter.split('.')[-1] + srcout = tuple(srcout) + else: + srcout = parameter.split('.')[-1] + dstin = cd[1] + logger.debug('out edges: %s %s %s %s', srcnode, srcout, + dstnode, dstin) + self.disconnect(node, cd[0], v, cd[1]) + self.connect(srcnode, srcout, dstnode, dstin) + # expand the workflow node + # logger.debug('expanding workflow: %s', node) + node._generate_flatgraph() + for innernode in node._graph.nodes(): + innernode._hierarchy = '.'.join((self.name, + innernode._hierarchy)) + self._graph.add_nodes_from(node._graph.nodes()) + self._graph.add_edges_from(node._graph.edges(data=True)) + if nodes2remove: + self._graph.remove_nodes_from(nodes2remove) + logger.debug('finished expanding workflow: %s', self) + + def _get_dot(self, + prefix=None, + hierarchy=None, + colored=False, + simple_form=True, + level=0): + """Create a dot file with connection info + """ + if prefix is None: + prefix = ' ' + if hierarchy is None: + hierarchy = [] + colorset = [ + '#FFFFC8', # Y + '#0000FF', + '#B4B4FF', + '#E6E6FF', # B + '#FF0000', + '#FFB4B4', + '#FFE6E6', # R + '#00A300', + '#B4FFB4', + '#E6FFE6', # G + '#0000FF', + '#B4B4FF' + ] # loop B + if level > len(colorset) - 2: + level = 3 # Loop back to blue + + dotlist = 
['%slabel="%s";' % (prefix, self.name)] + for node in nx.topological_sort(self._graph): + fullname = '.'.join(hierarchy + [node.fullname]) + nodename = fullname.replace('.', '_') + if not isinstance(node, Workflow): + node_class_name = get_print_name(node, simple_form=simple_form) + if not simple_form: + node_class_name = '.'.join(node_class_name.split('.')[1:]) + if hasattr(node, 'iterables') and node.iterables: + dotlist.append(('%s[label="%s", shape=box3d,' + 'style=filled, color=black, colorscheme' + '=greys7 fillcolor=2];') % + (nodename, node_class_name)) + else: + if colored: + dotlist.append( + ('%s[label="%s", style=filled,' + ' fillcolor="%s"];') % (nodename, node_class_name, + colorset[level])) + else: + dotlist.append(('%s[label="%s"];') % (nodename, + node_class_name)) + + for node in nx.topological_sort(self._graph): + if isinstance(node, Workflow): + fullname = '.'.join(hierarchy + [node.fullname]) + nodename = fullname.replace('.', '_') + dotlist.append('subgraph cluster_%s {' % nodename) + if colored: + dotlist.append(prefix + prefix + 'edge [color="%s"];' % + (colorset[level + 1])) + dotlist.append(prefix + prefix + 'style=filled;') + dotlist.append(prefix + prefix + 'fillcolor="%s";' % + (colorset[level + 2])) + dotlist.append( + node._get_dot( + prefix=prefix + prefix, + hierarchy=hierarchy + [self.name], + colored=colored, + simple_form=simple_form, + level=level + 3)) + dotlist.append('}') + else: + for subnode in self._graph.successors(node): + if node._hierarchy != subnode._hierarchy: + continue + if not isinstance(subnode, Workflow): + nodefullname = '.'.join(hierarchy + [node.fullname]) + subnodefullname = '.'.join( + hierarchy + [subnode.fullname]) + nodename = nodefullname.replace('.', '_') + subnodename = subnodefullname.replace('.', '_') + for _ in self._graph.get_edge_data(node, + subnode)['connect']: + dotlist.append('%s -> %s;' % (nodename, + subnodename)) + logger.debug('connection: %s', dotlist[-1]) + # add between workflow connections + for u, v, d in self._graph.edges(data=True): + uname = '.'.join(hierarchy + [u.fullname]) + vname = '.'.join(hierarchy + [v.fullname]) + for src, dest in d['connect']: + uname1 = uname + vname1 = vname + if isinstance(src, tuple): + srcname = src[0] + else: + srcname = src + if '.' in srcname: + uname1 += '.' + '.'.join(srcname.split('.')[:-1]) + if '.' in dest and '@' not in dest: + if not isinstance(v, Workflow): + if 'datasink' not in \ + str(v._interface.__class__).lower(): + vname1 += '.' + '.'.join(dest.split('.')[:-1]) + else: + vname1 += '.' 
+ '.'.join(dest.split('.')[:-1])
+                if uname1.split('.')[:-1] != vname1.split('.')[:-1]:
+                    dotlist.append('%s -> %s;' % (uname1.replace('.', '_'),
+                                                  vname1.replace('.', '_')))
+                    logger.debug('cross connection: %s', dotlist[-1])
+        return ('\n' + prefix).join(dotlist)
diff --git a/nipype/pipeline/plugins/__init__.py b/nipype/pipeline/plugins/__init__.py
new file mode 100644
index 0000000000..e3c797a10a
--- /dev/null
+++ b/nipype/pipeline/plugins/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from .debug import DebugPlugin
+from .linear import LinearPlugin
+from .pbs import PBSPlugin
+from .oar import OARPlugin
+from .sge import SGEPlugin
+from .condor import CondorPlugin
+from .dagman import CondorDAGManPlugin
+from .multiproc import MultiProcPlugin
+from .legacymultiproc import LegacyMultiProcPlugin
+from .ipython import IPythonPlugin
+from .somaflow import SomaFlowPlugin
+from .pbsgraph import PBSGraphPlugin
+from .sgegraph import SGEGraphPlugin
+from .lsf import LSFPlugin
+from .slurm import SLURMPlugin
+from .slurmgraph import SLURMGraphPlugin
+
+from . import semaphore_singleton
diff --git a/nipype/pipeline/plugins/base.py b/nipype/pipeline/plugins/base.py
new file mode 100644
index 0000000000..122d7e57cd
--- /dev/null
+++ b/nipype/pipeline/plugins/base.py
@@ -0,0 +1,610 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Common graph operations for execution
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import range, object, open
+
+import sys
+from copy import deepcopy
+from glob import glob
+import os
+import shutil
+from time import sleep, time
+from traceback import format_exception
+
+import numpy as np
+import scipy.sparse as ssp
+
+from ... import logging
+from ...utils.filemanip import loadpkl
+from ...utils.misc import str2bool
+from ..engine.utils import (nx, dfs_preorder, topological_sort)
+from ..engine import MapNode
+from .tools import report_crash, report_nodes_not_run, create_pyscript
+
+logger = logging.getLogger('nipype.workflow')
+
+
+class PluginBase(object):
+    """
+    Base class for plugins
+
+    """
+
+    def __init__(self, plugin_args=None):
+        if plugin_args is None:
+            plugin_args = {}
+        self.plugin_args = plugin_args
+        self._config = None
+        self._status_callback = plugin_args.get('status_callback')
+
+    def run(self, graph, config, updatehash=False):
+        """
+        The core plugin member that should be implemented by
+        all plugins.
+
+        graph: a networkx, flattened :abbr:`DAG (Directed Acyclic Graph)`
+            to be executed
+
+        config: a nipype.config object
+
+        updatehash: boolean flag for updating node hashes
+
+        """
+        raise NotImplementedError
+
+
+class DistributedPluginBase(PluginBase):
+    """
+    Execute workflow with a distribution engine
+
+    Relevant class attributes
+    -------------------------
+
+    procs: list (N) of underlying interface elements to be processed
+    proc_done: a boolean numpy array (N,) signifying whether a process has been
+        submitted for execution
+    proc_pending: a boolean numpy array (N,) signifying whether a
+        process is currently running.
+    depidx: a boolean matrix (NxN) storing the dependency structure across
+        processes. Process dependencies are derived from each column.
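+    refidx: a copy of ``depidx`` used for reference counting: once a
+        node's outputs are no longer referenced by any pending job, its
+        working directory may be removed (see ``_remove_node_dirs``)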
+ + Combinations of ``proc_done`` and ``proc_pending`` + -------------------------------------------------- + + +------------+---------------+--------------------------------+ + | proc_done | proc_pending | outcome | + +============+===============+================================+ + | True | False | Process is finished | + +------------+---------------+--------------------------------+ + | True | True | Process is currently being run | + +------------+---------------+--------------------------------+ + | False | False | Process is queued | + +------------+---------------+--------------------------------+ + | False | True | INVALID COMBINATION | + +------------+---------------+--------------------------------+ + """ + + def __init__(self, plugin_args=None): + """ + Initialize runtime attributes to none + + """ + super(DistributedPluginBase, self).__init__(plugin_args=plugin_args) + self.procs = None + self.depidx = None + self.refidx = None + self.mapnodes = None + self.mapnodesubids = None + self.proc_done = None + self.proc_pending = None + self.pending_tasks = [] + self.max_jobs = self.plugin_args.get('max_jobs', np.inf) + + def _prerun_check(self, graph): + """Stub method to validate/massage graph and nodes before running""" + + def _postrun_check(self): + """Stub method to close any open resources""" + + def run(self, graph, config, updatehash=False): + """ + Executes a pre-defined pipeline using distributed approaches + """ + logger.info("Running in parallel.") + self._config = config + poll_sleep_secs = float(config['execution']['poll_sleep_duration']) + + self._prerun_check(graph) + # Generate appropriate structures for worker-manager model + self._generate_dependency_list(graph) + self.mapnodes = [] + self.mapnodesubids = {} + # setup polling - TODO: change to threaded model + notrun = [] + + old_progress_stats = None + old_presub_stats = None + while not np.all(self.proc_done) or np.any(self.proc_pending): + loop_start = time() + # Check if a job is available (jobs with all dependencies run) + # https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + jobs_ready = np.nonzero(~self.proc_done & + (self.depidx.sum(0) == 0))[1] + + progress_stats = (len(self.proc_done), + np.sum(self.proc_done ^ self.proc_pending), + np.sum(self.proc_done & self.proc_pending), + len(jobs_ready), len(self.pending_tasks), + np.sum(~self.proc_done & ~self.proc_pending)) + display_stats = progress_stats != old_progress_stats + if display_stats: + logger.debug('Progress: %d jobs, %d/%d/%d ' + '(done/running/ready), %d/%d ' + '(pending_tasks/waiting).', *progress_stats) + old_progress_stats = progress_stats + toappend = [] + # trigger callbacks for any pending results + while self.pending_tasks: + taskid, jobid = self.pending_tasks.pop() + try: + result = self._get_result(taskid) + except Exception: + notrun.append( + self._clean_queue(jobid, graph)) + else: + if result: + if result['traceback']: + notrun.append( + self._clean_queue(jobid, graph, result=result)) + else: + self._task_finished_cb(jobid) + self._remove_node_dirs() + self._clear_task(taskid) + else: + assert self.proc_done[jobid] and \ + self.proc_pending[jobid] + toappend.insert(0, (taskid, jobid)) + + if toappend: + self.pending_tasks.extend(toappend) + + num_jobs = len(self.pending_tasks) + presub_stats = (num_jobs, + np.sum(self.proc_done & self.proc_pending)) + display_stats = display_stats or presub_stats != old_presub_stats + if display_stats: + logger.debug('Tasks currently running: %d. 
Pending: %d.', + *presub_stats) + old_presub_stats = presub_stats + if num_jobs < self.max_jobs: + self._send_procs_to_workers(updatehash=updatehash, graph=graph) + elif display_stats: + logger.debug('Not submitting (max jobs reached)') + + sleep_til = loop_start + poll_sleep_secs + sleep(max(0, sleep_til - time())) + + self._remove_node_dirs() + report_nodes_not_run(notrun) + + # close any open resources + self._postrun_check() + + def _get_result(self, taskid): + raise NotImplementedError + + def _submit_job(self, node, updatehash=False): + raise NotImplementedError + + def _report_crash(self, node, result=None): + tb = None + if result is not None: + node._result = result['result'] + tb = result['traceback'] + node._traceback = tb + return report_crash(node, traceback=tb) + + def _clear_task(self, taskid): + raise NotImplementedError + + def _clean_queue(self, jobid, graph, result=None): + logger.debug('Clearing %d from queue', jobid) + + if self._status_callback: + self._status_callback(self.procs[jobid], 'exception') + if result is None: + result = {'result': None, + 'traceback': '\n'.join(format_exception(*sys.exc_info()))} + + if str2bool(self._config['execution']['stop_on_first_crash']): + raise RuntimeError("".join(result['traceback'])) + crashfile = self._report_crash(self.procs[jobid], result=result) + if jobid in self.mapnodesubids: + # remove current jobid + self.proc_pending[jobid] = False + self.proc_done[jobid] = True + # remove parent mapnode + jobid = self.mapnodesubids[jobid] + self.proc_pending[jobid] = False + self.proc_done[jobid] = True + # remove dependencies from queue + return self._remove_node_deps(jobid, crashfile, graph) + + def _submit_mapnode(self, jobid): + if jobid in self.mapnodes: + return True + self.mapnodes.append(jobid) + mapnodesubids = self.procs[jobid].get_subnodes() + numnodes = len(mapnodesubids) + logger.debug('Adding %d jobs for mapnode %s', numnodes, + self.procs[jobid]) + for i in range(numnodes): + self.mapnodesubids[self.depidx.shape[0] + i] = jobid + self.procs.extend(mapnodesubids) + self.depidx = ssp.vstack( + (self.depidx, + ssp.lil_matrix(np.zeros( + (numnodes, self.depidx.shape[1])))), 'lil') + self.depidx = ssp.hstack( + (self.depidx, + ssp.lil_matrix(np.zeros( + (self.depidx.shape[0], numnodes)))), 'lil') + self.depidx[-numnodes:, jobid] = 1 + self.proc_done = np.concatenate((self.proc_done, + np.zeros(numnodes, dtype=bool))) + self.proc_pending = np.concatenate((self.proc_pending, + np.zeros(numnodes, dtype=bool))) + return False + + def _send_procs_to_workers(self, updatehash=False, graph=None): + """ + Sends jobs to workers + """ + + while not np.all(self.proc_done): + num_jobs = len(self.pending_tasks) + if np.isinf(self.max_jobs): + slots = None + else: + slots = max(0, self.max_jobs - num_jobs) + logger.debug('Slots available: %s', slots) + if (num_jobs >= self.max_jobs) or (slots == 0): + break + + # Check if a job is available (jobs with all dependencies run) + # https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + jobids = np.nonzero(~self.proc_done & (self.depidx.sum(0) == 0))[1] + + if len(jobids) > 0: + # send all available jobs + logger.info('Pending[%d] Submitting[%d] jobs Slots[%s]', + num_jobs, len(jobids[:slots]), slots or 'inf') + + for jobid in jobids[:slots]: + if isinstance(self.procs[jobid], MapNode): + try: + num_subnodes = self.procs[jobid].num_subnodes() + except Exception: + self._clean_queue(jobid, graph) + self.proc_pending[jobid] = False + continue + if num_subnodes > 1: + submit = 
self._submit_mapnode(jobid) + if not submit: + continue + # change job status in appropriate queues + self.proc_done[jobid] = True + self.proc_pending[jobid] = True + # Send job to task manager and add to pending tasks + logger.info('Submitting: %s ID: %d', + self.procs[jobid], jobid) + if self._status_callback: + self._status_callback(self.procs[jobid], 'start') + + if not self._local_hash_check(jobid, graph): + if self.procs[jobid].run_without_submitting: + logger.debug('Running node %s on master thread', + self.procs[jobid]) + try: + self.procs[jobid].run() + except Exception: + self._clean_queue(jobid, graph) + self._task_finished_cb(jobid) + self._remove_node_dirs() + else: + tid = self._submit_job( + deepcopy(self.procs[jobid]), + updatehash=updatehash) + if tid is None: + self.proc_done[jobid] = False + self.proc_pending[jobid] = False + else: + self.pending_tasks.insert(0, (tid, jobid)) + logger.info('Finished submitting: %s ID: %d', + self.procs[jobid], jobid) + else: + break + + def _local_hash_check(self, jobid, graph): + if not str2bool( + self.procs[jobid].config['execution']['local_hash_check']): + return False + + try: + cached, updated = self.procs[jobid].is_cached() + except Exception: + logger.warning( + 'Error while checking node hash, forcing re-run. ' + 'Although this error may not prevent the workflow from running, ' + 'it could indicate a major problem. Please report a new issue ' + 'at https://github.com/nipy/nipype/issues adding the following ' + 'information:\n\n\tNode: %s\n\tInterface: %s.%s\n\tTraceback:\n%s', + self.procs[jobid], + self.procs[jobid].interface.__module__, + self.procs[jobid].interface.__class__.__name__, + '\n'.join(format_exception(*sys.exc_info())) + ) + return False + + logger.debug('Checking hash "%s" locally: cached=%s, updated=%s.', + self.procs[jobid], cached, updated) + overwrite = self.procs[jobid].overwrite + always_run = self.procs[jobid].interface.always_run + + if cached and updated and (overwrite is False or + overwrite is None and not always_run): + logger.debug('Skipping cached node %s with ID %s.', + self.procs[jobid], jobid) + try: + self._task_finished_cb(jobid, cached=True) + self._remove_node_dirs() + except Exception: + logger.debug('Error skipping cached node %s (%s).\n\n%s', + self.procs[jobid], jobid, + '\n'.join(format_exception(*sys.exc_info()))) + self._clean_queue(jobid, graph) + self.proc_pending[jobid] = False + return True + return False + + def _task_finished_cb(self, jobid, cached=False): + """ Extract outputs and assign to inputs of dependent tasks + + This is called when a job is completed. + """ + logger.info('[Job %d] %s (%s).', jobid, 'Cached' + if cached else 'Completed', self.procs[jobid]) + if self._status_callback: + self._status_callback(self.procs[jobid], 'end') + # Update job and worker queues + self.proc_pending[jobid] = False + # update the job dependency structure + rowview = self.depidx.getrowview(jobid) + rowview[rowview.nonzero()] = 0 + if jobid not in self.mapnodesubids: + self.refidx[self.refidx[:, jobid].nonzero()[0], jobid] = 0 + + def _generate_dependency_list(self, graph): + """ Generates a dependency list for a list of graphs. 
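+
+        For example, for a chain ``a -> b -> c`` the resulting ``depidx``
+        is the graph's adjacency matrix (rows are sources, columns are
+        destinations)::
+
+                a  b  c
+            a   0  1  0
+            b   0  0  1
+            c   0  0  0
+
+        A job is ready to run once its column sums to zero, i.e. all of
+        its dependencies have finished.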
+ """ + self.procs, _ = topological_sort(graph) + try: + self.depidx = nx.to_scipy_sparse_matrix( + graph, nodelist=self.procs, format='lil') + except: + self.depidx = nx.to_scipy_sparse_matrix(graph, nodelist=self.procs) + self.refidx = deepcopy(self.depidx) + self.refidx.astype = np.int + self.proc_done = np.zeros(len(self.procs), dtype=bool) + self.proc_pending = np.zeros(len(self.procs), dtype=bool) + + def _remove_node_deps(self, jobid, crashfile, graph): + subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])] + for node in subnodes: + idx = self.procs.index(node) + self.proc_done[idx] = True + self.proc_pending[idx] = False + return dict( + node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) + + def _remove_node_dirs(self): + """Removes directories whose outputs have already been used up + """ + if str2bool(self._config['execution']['remove_node_directories']): + indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0] + for idx in indices: + if idx in self.mapnodesubids: + continue + if self.proc_done[idx] and (not self.proc_pending[idx]): + self.refidx[idx, idx] = -1 + outdir = self.procs[idx].output_dir() + logger.info(('[node dependencies finished] ' + 'removing node: %s from directory %s') % + (self.procs[idx]._id, outdir)) + shutil.rmtree(outdir) + + +class SGELikeBatchManagerBase(DistributedPluginBase): + """Execute workflow with SGE/OGE/PBS like batch system + """ + + def __init__(self, template, plugin_args=None): + super(SGELikeBatchManagerBase, self).__init__(plugin_args=plugin_args) + self._template = template + self._qsub_args = None + if plugin_args: + if 'template' in plugin_args: + self._template = plugin_args['template'] + if os.path.isfile(self._template): + with open(self._template) as tpl_file: + self._template = tpl_file.read() + if 'qsub_args' in plugin_args: + self._qsub_args = plugin_args['qsub_args'] + self._pending = {} + + def _is_pending(self, taskid): + """Check if a task is pending in the batch system + """ + raise NotImplementedError + + def _submit_batchtask(self, scriptfile, node): + """Submit a task to the batch system + """ + raise NotImplementedError + + def _get_result(self, taskid): + if taskid not in self._pending: + raise Exception('Task %d not found' % taskid) + if self._is_pending(taskid): + return None + node_dir = self._pending[taskid] + # MIT HACK + # on the pbs system at mit the parent node directory needs to be + # accessed before internal directories become available. there + # is a disconnect when the queueing engine knows a job is + # finished to when the directories become statable. + t = time() + timeout = float(self._config['execution']['job_finished_timeout']) + timed_out = True + while (time() - t) < timeout: + try: + glob(os.path.join(node_dir, 'result_*.pklz')).pop() + timed_out = False + break + except Exception as e: + logger.debug(e) + sleep(2) + if timed_out: + result_data = { + 'hostname': 'unknown', + 'result': None, + 'traceback': None + } + results_file = None + try: + error_message = ('Job id ({0}) finished or terminated, but ' + 'results file does not exist after ({1}) ' + 'seconds. 
Batch dir contains crashdump file ' + 'if node raised an exception.\n' + 'Node working directory: ({2}) '.format( + taskid, timeout, node_dir)) + raise IOError(error_message) + except IOError as e: + result_data['traceback'] = '\n'.join(format_exception(*sys.exc_info())) + else: + results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] + result_data = loadpkl(results_file) + result_out = dict(result=None, traceback=None) + if isinstance(result_data, dict): + result_out['result'] = result_data['result'] + result_out['traceback'] = result_data['traceback'] + result_out['hostname'] = result_data['hostname'] + if results_file: + crash_file = os.path.join(node_dir, 'crashstore.pklz') + os.rename(results_file, crash_file) + else: + result_out['result'] = result_data + return result_out + + def _submit_job(self, node, updatehash=False): + """submit job and return taskid + """ + pyscript = create_pyscript(node, updatehash=updatehash) + batch_dir, name = os.path.split(pyscript) + name = '.'.join(name.split('.')[:-1]) + batchscript = '\n'.join((self._template, '%s %s' % (sys.executable, + pyscript))) + batchscriptfile = os.path.join(batch_dir, 'batchscript_%s.sh' % name) + with open(batchscriptfile, 'wt') as fp: + fp.writelines(batchscript) + return self._submit_batchtask(batchscriptfile, node) + + def _clear_task(self, taskid): + del self._pending[taskid] + + +class GraphPluginBase(PluginBase): + """Base class for plugins that distribute graphs to workflows + """ + + def __init__(self, plugin_args=None): + if plugin_args and plugin_args.get('status_callback'): + logger.warning('status_callback not supported for Graph submission' + ' plugins') + super(GraphPluginBase, self).__init__(plugin_args=plugin_args) + + def run(self, graph, config, updatehash=False): + pyfiles = [] + dependencies = {} + self._config = config + nodes = list(nx.topological_sort(graph)) + logger.debug('Creating executable python files for each node') + for idx, node in enumerate(nodes): + pyfiles.append( + create_pyscript( + node, updatehash=updatehash, store_exception=False)) + dependencies[idx] = [ + nodes.index(prevnode) + for prevnode in list(graph.predecessors(node))] + self._submit_graph(pyfiles, dependencies, nodes) + + def _get_args(self, node, keywords): + values = () + for keyword in keywords: + value = getattr(self, "_" + keyword) + if keyword == "template" and os.path.isfile(value): + with open(value) as f: + value = f.read() + if (hasattr(node, "plugin_args") + and isinstance(node.plugin_args, dict) + and keyword in node.plugin_args): + if (keyword == "template" + and os.path.isfile(node.plugin_args[keyword])): + with open(node.plugin_args[keyword]) as f: + tmp_value = f.read() + else: + tmp_value = node.plugin_args[keyword] + + if ('overwrite' in node.plugin_args + and node.plugin_args['overwrite']): + value = tmp_value + else: + value += tmp_value + values += (value, ) + return values + + def _submit_graph(self, pyfiles, dependencies, nodes): + """ + pyfiles: list of files corresponding to a topological sort + dependencies: dictionary of dependencies based on the toplogical sort + """ + raise NotImplementedError + + def _get_result(self, taskid): + if taskid not in self._pending: + raise Exception('Task %d not found' % taskid) + if self._is_pending(taskid): + return None + node_dir = self._pending[taskid] + + glob(os.path.join(node_dir, 'result_*.pklz')).pop() + + results_file = glob(os.path.join(node_dir, 'result_*.pklz'))[0] + result_data = loadpkl(results_file) + result_out = dict(result=None, 
traceback=None)
+
+        if isinstance(result_data, dict):
+            result_out['result'] = result_data['result']
+            result_out['traceback'] = result_data['traceback']
+            result_out['hostname'] = result_data['hostname']
+            if results_file:
+                crash_file = os.path.join(node_dir, 'crashstore.pklz')
+                os.rename(results_file, crash_file)
+        else:
+            result_out['result'] = result_data
+
+        return result_out
diff --git a/nipype/pipeline/plugins/condor.py b/nipype/pipeline/plugins/condor.py
new file mode 100644
index 0000000000..9f5ca632e5
--- /dev/null
+++ b/nipype/pipeline/plugins/condor.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+"""Parallel workflow execution via Condor
+"""
+from __future__ import (print_function, division, unicode_literals,
                        absolute_import)
+
+import os
+from time import sleep
+
+from ...interfaces.base import CommandLine
+from ... import logging
+from .base import SGELikeBatchManagerBase, logger
+iflogger = logging.getLogger('nipype.interface')
+
+
+class CondorPlugin(SGELikeBatchManagerBase):
+    """Execute using Condor
+
+    This plugin doesn't work with a plain stock-Condor installation, but
+    requires a 'qsub' emulation script for Condor, called 'condor_qsub'.
+    This script is shipped with the Condor package from NeuroDebian, or can be
+    downloaded from its Git repository at
+
+    http://anonscm.debian.org/gitweb/?p=pkg-exppsy/condor.git;a=blob_plain;f=debian/condor_qsub;hb=HEAD
+
+    The plugin_args input to run can be used to control the Condor execution.
+    Currently supported options are:
+
+    - template : template to use for batch job submission. This can be an
+                 SGE-style script with the (limited) set of options supported
+                 by condor_qsub
+    - qsub_args : arguments to be prepended to the job execution script in the
+                  qsub call
+    """
+
+    def __init__(self, **kwargs):
+        template = """
+#$ -V
+#$ -S /bin/sh
+"""
+        self._retry_timeout = 2
+        self._max_tries = 2
+        if 'plugin_args' in kwargs and kwargs['plugin_args']:
+            if 'retry_timeout' in kwargs['plugin_args']:
+                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
+            if 'max_tries' in kwargs['plugin_args']:
+                self._max_tries = kwargs['plugin_args']['max_tries']
+        super(CondorPlugin, self).__init__(template, **kwargs)
+
+    def _is_pending(self, taskid):
+        cmd = CommandLine(
+            'condor_q', resource_monitor=False, terminal_output='allatonce')
+        cmd.inputs.args = '%d' % taskid
+        # check condor cluster
+        oldlevel = iflogger.level
+        iflogger.setLevel(logging.getLevelName('CRITICAL'))
+        result = cmd.run(ignore_exception=True)
+        iflogger.setLevel(oldlevel)
+        if result.runtime.stdout.count('\n%d' % taskid):
+            return True
+        return False
+
+    def _submit_batchtask(self, scriptfile, node):
+        cmd = CommandLine(
+            'condor_qsub',
+            environ=dict(os.environ),
+            resource_monitor=False,
+            terminal_output='allatonce')
+        path = os.path.dirname(scriptfile)
+        qsubargs = ''
+        if self._qsub_args:
+            qsubargs = self._qsub_args
+        if 'qsub_args' in node.plugin_args:
+            if 'overwrite' in node.plugin_args and\
+               node.plugin_args['overwrite']:
+                qsubargs = node.plugin_args['qsub_args']
+            else:
+                qsubargs += (" " + node.plugin_args['qsub_args'])
+        if '-o' not in qsubargs:
+            qsubargs = '%s -o %s' % (qsubargs, path)
+        if '-e' not in qsubargs:
+            qsubargs = '%s -e %s' % (qsubargs, path)
+        if node._hierarchy:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy,
+                                node._id))
+        else:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id))
+        jobnameitems = jobname.split('.')
+        jobnameitems.reverse()
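+        # reverse the components so the most specific part (the node id)
+        # comes first in the job name reported by the batch system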
+        jobname = '.'.join(jobnameitems)
+        cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile)
+        oldlevel = iflogger.level
+        iflogger.setLevel(logging.getLevelName('CRITICAL'))
+        tries = 0
+        while True:
+            try:
+                result = cmd.run()
+            except Exception as e:
+                if tries < self._max_tries:
+                    tries += 1
+                    sleep(self._retry_timeout)  # wait retry_timeout seconds
+                else:
+                    iflogger.setLevel(oldlevel)
+                    raise RuntimeError('\n'.join(
+                        ('Could not submit condor cluster for node %s'
+                         % node._id, str(e))))
+            else:
+                break
+        iflogger.setLevel(oldlevel)
+        # retrieve condor clusterid
+        taskid = int(result.runtime.stdout.split(' ')[2])
+        self._pending[taskid] = node.output_dir()
+        logger.debug('submitted condor cluster: %d for node %s', taskid,
+                     node._id)
+        return taskid
diff --git a/nipype/pipeline/plugins/dagman.py b/nipype/pipeline/plugins/dagman.py
new file mode 100644
index 0000000000..28b766f2ea
--- /dev/null
+++ b/nipype/pipeline/plugins/dagman.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+"""Parallel workflow execution via Condor DAGMan
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import open
+
+import os
+import sys
+import uuid
+import time
+from warnings import warn
+
+from .base import GraphPluginBase, logger
+from ...interfaces.base import CommandLine
+
+
+class CondorDAGManPlugin(GraphPluginBase):
+    """Execute using Condor DAGMan
+
+    The plugin_args input to run can be used to control the DAGMan execution.
+    The value of most arguments can be a literal string or a filename, where in
+    the latter case the content of the file will be used as the argument value.
+
+    Currently supported options are:
+
+    - submit_template : submit spec template for individual jobs in a DAG (see
+          CondorDAGManPlugin.default_submit_template for the default)
+    - initial_specs : additional submit specs that are prepended to any job's
+          submit file
+    - override_specs : additional submit specs that are appended to any job's
+          submit file
+    - wrapper_cmd : path to an executable that will be started instead of a node
+          script. This is useful for wrapper scripts that execute certain
+          functionality before or after a node runs. If this option is
+          given the wrapper command is called with the respective Python
+          executable and the path to the node script as final arguments
+    - wrapper_args : optional additional arguments to a wrapper command
+    - dagman_args : arguments to be prepended to the arguments of the
+          condor_submit_dag call
+    - block : if True the plugin call will block until Condor has finished
+          processing the entire workflow (default: False)
+    """
+
+    default_submit_template = """
+universe = vanilla
+notification = Never
+executable = %(executable)s
+arguments = %(nodescript)s
+output = %(basename)s.out
+error = %(basename)s.err
+log = %(basename)s.log
+getenv = True
+"""
+
+    def _get_str_or_file(self, arg):
+        if os.path.isfile(arg):
+            with open(arg) as f:
+                content = f.read()
+        else:
+            content = arg
+        return content
+
+    # XXX feature wishlist
+    # - infer data file dependencies from jobs
+    # - infer CPU requirements from jobs
+    # - infer memory requirements from jobs
+    # - looks like right now all jobs come in here, regardless of whether they
+    #   actually have to run. would be good to be able to decide whether they
+    #   actually have to be scheduled (i.e. output already exist).
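+    # A hedged usage sketch (the plugin_args values here are illustrative
+    # only, picked from the options documented above):
+    #   workflow.run(plugin='CondorDAGMan',
+    #                plugin_args={'dagman_args': '-maxjobs 10',
+    #                             'block': True})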
+ def __init__(self, **kwargs): + for var, id_, val in \ + (('_template', 'submit_template', self.default_submit_template), + ('_initial_specs', 'template', ''), + ('_initial_specs', 'initial_specs', ''), + ('_override_specs', 'submit_specs', ''), + ('_override_specs', 'override_specs', ''), + ('_wrapper_cmd', 'wrapper_cmd', None), + ('_wrapper_args', 'wrapper_args', ''), + ('_block', 'block', False), + ('_dagman_args', 'dagman_args', '')): + if 'plugin_args' in kwargs \ + and not kwargs['plugin_args'] is None \ + and id_ in kwargs['plugin_args']: + if id_ == 'wrapper_cmd': + val = os.path.abspath(kwargs['plugin_args'][id_]) + elif id_ == 'block': + val = kwargs['plugin_args'][id_] + else: + val = self._get_str_or_file(kwargs['plugin_args'][id_]) + setattr(self, var, val) + # TODO remove after some time + if 'plugin_args' in kwargs \ + and not kwargs['plugin_args'] is None: + plugin_args = kwargs['plugin_args'] + if 'template' in plugin_args: + warn( + "the 'template' argument is deprecated, use 'initial_specs' instead" + ) + if 'submit_specs' in plugin_args: + warn( + "the 'submit_specs' argument is deprecated, use 'override_specs' instead" + ) + super(CondorDAGManPlugin, self).__init__(**kwargs) + + def _submit_graph(self, pyfiles, dependencies, nodes): + # location of all scripts, place dagman output in here too + batch_dir, _ = os.path.split(pyfiles[0]) + # DAG description filename + dagfilename = os.path.join(batch_dir, 'workflow-%s.dag' % uuid.uuid4()) + with open(dagfilename, 'wt') as dagfileptr: + # loop over all scripts, create submit files, and define them + # as jobs in the DAG + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + # XXX redundant with previous value? or could it change between + # scripts? + template, initial_specs, override_specs, wrapper_cmd, wrapper_args = \ + self._get_args(node, + ["template", "initial_specs", + "override_specs", "wrapper_cmd", + "wrapper_args"]) + # add required slots to the template + template = '%s\n%s\n%s\nqueue\n' % ('%(initial_specs)s', + template, + '%(override_specs)s') + batch_dir, name = os.path.split(pyscript) + name = '.'.join(name.split('.')[:-1]) + specs = dict( + # TODO make parameter for this, + initial_specs=initial_specs, + executable=sys.executable, + nodescript=pyscript, + basename=os.path.join(batch_dir, name), + override_specs=override_specs) + if wrapper_cmd is not None: + specs['executable'] = wrapper_cmd + specs['nodescript'] = \ + '%s %s %s' % (wrapper_args % specs, # give access to variables + sys.executable, + pyscript) + submitspec = template % specs + # write submit spec for this job + submitfile = os.path.join(batch_dir, '%s.submit' % name) + with open(submitfile, 'wt') as submitfileprt: + submitfileprt.writelines(submitspec) + submitfileprt.close() + # define job in DAG + dagfileptr.write('JOB %i %s\n' % (idx, submitfile)) + # define dependencies in DAG + for child in dependencies: + parents = dependencies[child] + if len(parents): + dagfileptr.write('PARENT %s CHILD %i\n' % + (' '.join([str(i) for i in parents]), + child)) + # hand over DAG to condor_dagman + cmd = CommandLine( + 'condor_submit_dag', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + # needs -update_submit or re-running a workflow will fail + cmd.inputs.args = '%s -update_submit %s' % (self._dagman_args, + dagfilename) + cmd.run() + logger.info('submitted all jobs to Condor DAGMan') + if self._block: + # wait for DAGMan to settle down, no time wasted it is already running + time.sleep(10) + if 
not os.path.exists('%s.condor.sub' % dagfilename): + raise EnvironmentError( + "DAGMan did not create its submit file, please check the logs" + ) + # wait for completion + logger.info('waiting for DAGMan to finish') + lockfilename = '%s.lock' % dagfilename + while os.path.exists(lockfilename): + time.sleep(5) diff --git a/nipype/pipeline/plugins/debug.py b/nipype/pipeline/plugins/debug.py new file mode 100644 index 0000000000..9921bb9cf4 --- /dev/null +++ b/nipype/pipeline/plugins/debug.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Debug plugin +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import networkx as nx +from .base import PluginBase, logger + + +class DebugPlugin(PluginBase): + """Execute workflow in series + """ + + def __init__(self, plugin_args=None): + super(DebugPlugin, self).__init__(plugin_args=plugin_args) + if plugin_args and "callable" in plugin_args and \ + hasattr(plugin_args['callable'], '__call__'): + self._callable = plugin_args['callable'] + else: + raise ValueError('plugin_args must contain a callable function') + + def run(self, graph, config, updatehash=False): + """Executes a pre-defined pipeline in a serial order. + + Parameters + ---------- + + graph : networkx digraph + defines order of execution + """ + + if not isinstance(graph, nx.DiGraph): + raise ValueError('Input must be a networkx digraph object') + logger.info("Executing debug plugin") + for node in nx.topological_sort(graph): + self._callable(node, graph) diff --git a/nipype/pipeline/plugins/ipython.py b/nipype/pipeline/plugins/ipython.py new file mode 100644 index 0000000000..aa20f935c1 --- /dev/null +++ b/nipype/pipeline/plugins/ipython.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Parallel workflow execution via IPython controller +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from future import standard_library +standard_library.install_aliases() +from future.utils import raise_from + +from pickle import dumps + +import sys +from .base import (DistributedPluginBase, logger, report_crash) + +IPython_not_loaded = False +try: + from IPython import __version__ as IPyversion + from ipyparallel.error import TimeoutError +except: + IPython_not_loaded = True + + +def execute_task(pckld_task, node_config, updatehash): + from socket import gethostname + from traceback import format_exc + from nipype import config, logging + traceback = None + result = None + import os + cwd = os.getcwd() + try: + config.update_config(node_config) + logging.update_logging(config) + from pickle import loads + task = loads(pckld_task) + result = task.run(updatehash=updatehash) + except: + traceback = format_exc() + from pickle import loads + task = loads(pckld_task) + result = task.result + os.chdir(cwd) + return result, traceback, gethostname() + + +class IPythonPlugin(DistributedPluginBase): + """Execute workflow with ipython + """ + + def __init__(self, plugin_args=None): + if IPython_not_loaded: + raise ImportError('Please install ipyparallel to use this plugin.') + super(IPythonPlugin, self).__init__(plugin_args=plugin_args) + valid_args = ('url_file', 'profile', 'cluster_id', 'context', 'debug', + 'timeout', 'config', 'username', 'sshserver', 'sshkey', + 'password', 'paramiko') + 
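        # keep only the arguments that ipyparallel.Client understands;
+        # anything else passed in plugin_args is ignored here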
+        self.client_args = {
+            arg: plugin_args[arg]
+            for arg in valid_args if arg in plugin_args
+        }
+        self.iparallel = None
+        self.taskclient = None
+        self.taskmap = {}
+        self._taskid = 0
+
+    def run(self, graph, config, updatehash=False):
+        """Executes a pre-defined pipeline in a distributed manner
+        based on IPython's ipyparallel processing interface
+        """
+        # retrieve clients again
+        try:
+            name = 'ipyparallel'
+            __import__(name)
+            self.iparallel = sys.modules[name]
+        except ImportError as e:
+            raise_from(
+                ImportError("ipyparallel not found. Parallel execution "
+                            "will be unavailable"), e)
+        try:
+            self.taskclient = self.iparallel.Client(**self.client_args)
+        except Exception as e:
+            if isinstance(e, TimeoutError):
+                raise_from(Exception("No IPython clients found."), e)
+            if isinstance(e, IOError):
+                raise_from(
+                    Exception("ipcluster/ipcontroller has not been started"),
+                    e)
+            if isinstance(e, ValueError):
+                raise_from(Exception("Ipython kernel not installed"), e)
+            else:
+                raise e
+        return super(IPythonPlugin, self).run(
+            graph, config, updatehash=updatehash)
+
+    def _get_result(self, taskid):
+        if taskid not in self.taskmap:
+            raise ValueError('Task %d not in pending list' % taskid)
+        if self.taskmap[taskid].ready():
+            result, traceback, hostname = self.taskmap[taskid].get()
+            result_out = dict(result=None, traceback=None)
+            result_out['result'] = result
+            result_out['traceback'] = traceback
+            result_out['hostname'] = hostname
+            return result_out
+        else:
+            return None
+
+    def _submit_job(self, node, updatehash=False):
+        pckld_node = dumps(node, 2)
+        result_object = self.taskclient.load_balanced_view().apply(
+            execute_task, pckld_node, node.config, updatehash)
+        self._taskid += 1
+        self.taskmap[self._taskid] = result_object
+        return self._taskid
+
+    def _report_crash(self, node, result=None):
+        if result and result['traceback']:
+            node._result = result['result']
+            node._traceback = result['traceback']
+            return report_crash(node, traceback=result['traceback'])
+        else:
+            return report_crash(node)
+
+    def _clear_task(self, taskid):
+        if IPyversion >= '0.11':
+            logger.debug("Clearing id: %d", taskid)
+            self.taskclient.purge_results(self.taskmap[taskid])
+            del self.taskmap[taskid]
diff --git a/nipype/pipeline/plugins/legacymultiproc.py b/nipype/pipeline/plugins/legacymultiproc.py
new file mode 100644
index 0000000000..d93e6e77d1
--- /dev/null
+++ b/nipype/pipeline/plugins/legacymultiproc.py
@@ -0,0 +1,382 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Parallel workflow execution via multiprocessing
+
+Support for child processes running as non-daemons based on
+http://stackoverflow.com/a/8963618/1183453
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+# Import packages
+import os
+from multiprocessing import Process, Pool, cpu_count, pool
+from traceback import format_exception
+import sys
+from logging import INFO
+import gc
+
+from copy import deepcopy
+import numpy as np
+from ... 
import logging +from ...utils.profiler import get_system_total_memory_gb +from ..engine import MapNode +from .base import DistributedPluginBase + +try: + from textwrap import indent +except ImportError: + + def indent(text, prefix): + """ A textwrap.indent replacement for Python < 3.3 """ + if not prefix: + return text + splittext = text.splitlines(True) + return prefix + prefix.join(splittext) + + +# Init logger +logger = logging.getLogger('nipype.workflow') + + +# Run node +def run_node(node, updatehash, taskid): + """Function to execute node.run(), catch and log any errors and + return the result dictionary + + Parameters + ---------- + node : nipype Node instance + the node to run + updatehash : boolean + flag for updating hash + taskid : int + an identifier for this task + + Returns + ------- + result : dictionary + dictionary containing the node runtime results and stats + """ + + # Init variables + result = dict(result=None, traceback=None, taskid=taskid) + + # Try and execute the node via node.run() + try: + result['result'] = node.run(updatehash=updatehash) + except: # noqa: E722, intendedly catch all here + result['traceback'] = format_exception(*sys.exc_info()) + result['result'] = node.result + + # Return the result dictionary + return result + + +class NonDaemonProcess(Process): + """A non-daemon process to support internal multiprocessing. + """ + + def _get_daemon(self): + return False + + def _set_daemon(self, value): + pass + + daemon = property(_get_daemon, _set_daemon) + + +class NonDaemonPool(pool.Pool): + """A process pool with non-daemon processes. + """ + Process = NonDaemonProcess + + +class LegacyMultiProcPlugin(DistributedPluginBase): + """ + Execute workflow with multiprocessing, not sending more jobs at once + than the system can support. + + The plugin_args input to run can be used to control the multiprocessing + execution and defining the maximum amount of memory and threads that + should be used. When those parameters are not specified, + the number of threads and memory of the system is used. + + System consuming nodes should be tagged:: + + memory_consuming_node.mem_gb = 8 + thread_consuming_node.n_procs = 16 + + The default number of threads and memory are set at node + creation, and are 1 and 0.25GB respectively. + + Currently supported options are: + + - non_daemon : boolean flag to execute as non-daemon processes + - n_procs: maximum number of threads to be executed in parallel + - memory_gb: maximum memory (in GB) that can be used at once. + - raise_insufficient: raise error if the requested resources for + a node over the maximum `n_procs` and/or `memory_gb` + (default is ``True``). + - scheduler: sort jobs topologically (``'tsort'``, default value) + or prioritize jobs by, first, memory consumption and, second, + number of threads (``'mem_thread'`` option). + - maxtasksperchild: number of nodes to run on each process before + refreshing the worker (default: 10). + + """ + + def __init__(self, plugin_args=None): + # Init variables and instance attributes + super(LegacyMultiProcPlugin, self).__init__(plugin_args=plugin_args) + self._taskresult = {} + self._task_obj = {} + self._taskid = 0 + + # Cache current working directory and make sure we + # change to it when workers are set up + self._cwd = os.getcwd() + + # Read in options or set defaults. 
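+        # e.g. plugin_args={'n_procs': 4, 'memory_gb': 8} -- values are
+        # illustrative; unset options fall back to the system's resources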
+
+        non_daemon = self.plugin_args.get('non_daemon', True)
+        maxtasks = self.plugin_args.get('maxtasksperchild', 10)
+        self.processors = self.plugin_args.get('n_procs', cpu_count())
+        self.memory_gb = self.plugin_args.get(
+            'memory_gb',  # Allocate 90% of system memory
+            get_system_total_memory_gb() * 0.9)
+        self.raise_insufficient = self.plugin_args.get('raise_insufficient',
+                                                       True)
+
+        # Instantiate different thread pools for non-daemon processes
+        logger.debug('[LegacyMultiProc] Starting in "%sdaemon" mode (n_procs=%d, '
+                     'mem_gb=%0.2f, cwd=%s)', 'non' * int(non_daemon),
+                     self.processors, self.memory_gb, self._cwd)
+
+        NipypePool = NonDaemonPool if non_daemon else Pool
+        try:
+            self.pool = NipypePool(
+                processes=self.processors,
+                maxtasksperchild=maxtasks,
+                initializer=os.chdir,
+                initargs=(self._cwd,)
+            )
+        except TypeError:
+            # Python < 3.2 does not have maxtasksperchild;
+            # when maxtasksperchild cannot be set, the initializer
+            # is not used either
+            self.pool = NipypePool(processes=self.processors)
+
+        self._stats = None
+
+    def _async_callback(self, args):
+        # Make sure runtime is not left at a dubious working directory
+        os.chdir(self._cwd)
+        self._taskresult[args['taskid']] = args
+
+    def _get_result(self, taskid):
+        return self._taskresult.get(taskid)
+
+    def _clear_task(self, taskid):
+        del self._task_obj[taskid]
+
+    def _submit_job(self, node, updatehash=False):
+        self._taskid += 1
+
+        # Don't allow streaming outputs
+        if getattr(node.interface, 'terminal_output', '') == 'stream':
+            node.interface.terminal_output = 'allatonce'
+
+        self._task_obj[self._taskid] = self.pool.apply_async(
+            run_node, (node, updatehash, self._taskid),
+            callback=self._async_callback)
+
+        logger.debug('[LegacyMultiProc] Submitted task %s (taskid=%d).',
+                     node.fullname, self._taskid)
+        return self._taskid
+
+    def _prerun_check(self, graph):
+        """Check if any node exceeds the available resources"""
+        tasks_mem_gb = []
+        tasks_num_th = []
+        for node in graph.nodes():
+            tasks_mem_gb.append(node.mem_gb)
+            tasks_num_th.append(node.n_procs)
+
+        if np.any(np.array(tasks_mem_gb) > self.memory_gb):
+            logger.warning(
+                'Some nodes exceed the total amount of memory available '
+                '(%0.2fGB).', self.memory_gb)
+            if self.raise_insufficient:
+                raise RuntimeError('Insufficient resources available for job')
+
+        if np.any(np.array(tasks_num_th) > self.processors):
+            logger.warning(
+                'Some nodes demand more threads than available (%d).',
+                self.processors)
+            if self.raise_insufficient:
+                raise RuntimeError('Insufficient resources available for job')
+
+    def _postrun_check(self):
+        self.pool.close()
+
+    def _check_resources(self, running_tasks):
+        """
+        Make sure there are resources available
+        """
+        free_memory_gb = self.memory_gb
+        free_processors = self.processors
+        for _, jobid in running_tasks:
+            free_memory_gb -= min(self.procs[jobid].mem_gb, free_memory_gb)
+            free_processors -= min(self.procs[jobid].n_procs, free_processors)
+
+        return free_memory_gb, free_processors
+
+    def _send_procs_to_workers(self, updatehash=False, graph=None):
+        """
+        Sends jobs to workers when system resources are available.
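+
+        Candidate jobs are those whose dependencies have all been run;
+        each candidate is submitted only while the estimated free memory
+        and free processor counts stay positive, otherwise it is deferred
+        to a later iteration.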
+ """ + + # Check to see if a job is available (jobs with all dependencies run) + # See https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + # See also https://github.com/nipy/nipype/issues/2372 + jobids = np.flatnonzero(~self.proc_done & + (self.depidx.sum(axis=0) == 0).__array__()) + + # Check available resources by summing all threads and memory used + free_memory_gb, free_processors = self._check_resources( + self.pending_tasks) + + stats = (len(self.pending_tasks), len(jobids), free_memory_gb, + self.memory_gb, free_processors, self.processors) + if self._stats != stats: + tasks_list_msg = '' + + if logger.level <= INFO: + running_tasks = [ + ' * %s' % self.procs[jobid].fullname + for _, jobid in self.pending_tasks + ] + if running_tasks: + tasks_list_msg = '\nCurrently running:\n' + tasks_list_msg += '\n'.join(running_tasks) + tasks_list_msg = indent(tasks_list_msg, ' ' * 21) + logger.info( + '[LegacyMultiProc] Running %d tasks, and %d jobs ready. Free ' + 'memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s', + len(self.pending_tasks), len(jobids), free_memory_gb, + self.memory_gb, free_processors, self.processors, + tasks_list_msg) + self._stats = stats + + if free_memory_gb < 0.01 or free_processors == 0: + logger.debug('No resources available') + return + + if len(jobids) + len(self.pending_tasks) == 0: + logger.debug('No tasks are being run, and no jobs can ' + 'be submitted to the queue. Potential deadlock') + return + + jobids = self._sort_jobs( + jobids, scheduler=self.plugin_args.get('scheduler')) + + # Run garbage collector before potentially submitting jobs + gc.collect() + + # Submit jobs + for jobid in jobids: + # First expand mapnodes + if isinstance(self.procs[jobid], MapNode): + try: + num_subnodes = self.procs[jobid].num_subnodes() + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, + graph, + result={ + 'result': None, + 'traceback': traceback + }) + self.proc_pending[jobid] = False + continue + if num_subnodes > 1: + submit = self._submit_mapnode(jobid) + if not submit: + continue + + # Check requirements of this job + next_job_gb = min(self.procs[jobid].mem_gb, self.memory_gb) + next_job_th = min(self.procs[jobid].n_procs, self.processors) + + # If node does not fit, skip at this moment + if next_job_th > free_processors or next_job_gb > free_memory_gb: + logger.debug('Cannot allocate job %d (%0.2fGB, %d threads).', + jobid, next_job_gb, next_job_th) + continue + + free_memory_gb -= next_job_gb + free_processors -= next_job_th + logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). 
Free: ' + '%0.2fGB, %d threads.', self.procs[jobid].fullname, + jobid, next_job_gb, next_job_th, free_memory_gb, + free_processors) + + # change job status in appropriate queues + self.proc_done[jobid] = True + self.proc_pending[jobid] = True + + # If cached and up-to-date just retrieve it, don't run + if self._local_hash_check(jobid, graph): + continue + + # updatehash and run_without_submitting are also run locally + if updatehash or self.procs[jobid].run_without_submitting: + logger.debug('Running node %s on master thread', + self.procs[jobid]) + try: + self.procs[jobid].run(updatehash=updatehash) + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, + graph, + result={ + 'result': None, + 'traceback': traceback + }) + + # Release resources + self._task_finished_cb(jobid) + self._remove_node_dirs() + free_memory_gb += next_job_gb + free_processors += next_job_th + # Display stats next loop + self._stats = None + + # Clean up any debris from running node in main process + gc.collect() + continue + + # Task should be submitted to workers + # Send job to task manager and add to pending tasks + if self._status_callback: + self._status_callback(self.procs[jobid], 'start') + tid = self._submit_job( + deepcopy(self.procs[jobid]), updatehash=updatehash) + if tid is None: + self.proc_done[jobid] = False + self.proc_pending[jobid] = False + else: + self.pending_tasks.insert(0, (tid, jobid)) + # Display stats next loop + self._stats = None + + def _sort_jobs(self, jobids, scheduler='tsort'): + if scheduler == 'mem_thread': + return sorted( + jobids, + key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs) + ) + return jobids diff --git a/nipype/pipeline/plugins/linear.py b/nipype/pipeline/plugins/linear.py new file mode 100644 index 0000000000..2180d614ad --- /dev/null +++ b/nipype/pipeline/plugins/linear.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Local serial workflow execution +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os + +import networkx as nx +from .base import (PluginBase, logger, report_crash, report_nodes_not_run, + str2bool) +from ..engine.utils import dfs_preorder, topological_sort + + +class LinearPlugin(PluginBase): + """Execute workflow in series + """ + + def run(self, graph, config, updatehash=False): + """Executes a pre-defined pipeline in a serial order. 
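+
+        A minimal invocation (illustrative) is
+        ``workflow.run(plugin='Linear')``.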
+
+        Parameters
+        ----------
+
+        graph : networkx digraph
+            defines order of execution
+        """
+
+        if not isinstance(graph, nx.DiGraph):
+            raise ValueError('Input must be a networkx digraph object')
+        logger.info("Running serially.")
+        old_wd = os.getcwd()
+        notrun = []
+        donotrun = []
+        nodes, _ = topological_sort(graph)
+        for node in nodes:
+            try:
+                if node in donotrun:
+                    continue
+                if self._status_callback:
+                    self._status_callback(node, 'start')
+                node.run(updatehash=updatehash)
+                if self._status_callback:
+                    self._status_callback(node, 'end')
+            except:  # noqa: E722, a node may fail for any number of reasons
+                os.chdir(old_wd)
+                if str2bool(config['execution']['stop_on_first_crash']):
+                    raise
+                crashfile = report_crash(node)
+                # remove dependencies from queue
+                subnodes = [s for s in dfs_preorder(graph, node)]
+                notrun.append(
+                    dict(node=node, dependents=subnodes, crashfile=crashfile))
+                donotrun.extend(subnodes)
+                if self._status_callback:
+                    self._status_callback(node, 'exception')
+        report_nodes_not_run(notrun)
diff --git a/nipype/pipeline/plugins/lsf.py b/nipype/pipeline/plugins/lsf.py
new file mode 100644
index 0000000000..bdaabc31e6
--- /dev/null
+++ b/nipype/pipeline/plugins/lsf.py
@@ -0,0 +1,124 @@
+# -*- coding: utf-8 -*-
+"""Parallel workflow execution via LSF
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+import re
+from time import sleep
+
+from ... import logging
+from ...interfaces.base import CommandLine
+from .base import SGELikeBatchManagerBase, logger
+iflogger = logging.getLogger('nipype.interface')
+
+
+class LSFPlugin(SGELikeBatchManagerBase):
+    """Execute using LSF Cluster Submission
+
+    The plugin_args input to run can be used to control the LSF execution.
+    Currently supported options are:
+
+    - template : template to use for batch job submission
+    - bsub_args : arguments to be prepended to the job execution script in the
+      bsub call
+
+    """
+
+    def __init__(self, **kwargs):
+        template = """
+#$ -S /bin/sh
+"""
+        self._retry_timeout = 2
+        self._max_tries = 2
+        self._bsub_args = ''
+        if 'plugin_args' in kwargs and kwargs['plugin_args']:
+            if 'retry_timeout' in kwargs['plugin_args']:
+                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
+            if 'max_tries' in kwargs['plugin_args']:
+                self._max_tries = kwargs['plugin_args']['max_tries']
+            if 'bsub_args' in kwargs['plugin_args']:
+                self._bsub_args = kwargs['plugin_args']['bsub_args']
+        super(LSFPlugin, self).__init__(template, **kwargs)
+
+    def _is_pending(self, taskid):
+        """LSF lists a status of 'PEND' when a job has been submitted but is
+        waiting to be picked up, and 'RUN' when it is actively being processed.
+        But _is_pending should return True until a job has finished and is
+        ready to be checked for completeness. 
So return True if status is
+        either 'PEND' or 'RUN'"""
+        cmd = CommandLine(
+            'bjobs', resource_monitor=False, terminal_output='allatonce')
+        cmd.inputs.args = '%d' % taskid
+        # check lsf task
+        oldlevel = iflogger.level
+        iflogger.setLevel(logging.getLevelName('CRITICAL'))
+        result = cmd.run(ignore_exception=True)
+        iflogger.setLevel(oldlevel)
+        # logger.debug(result.runtime.stdout)
+        if 'DONE' in result.runtime.stdout or 'EXIT' in result.runtime.stdout:
+            return False
+        else:
+            return True
+
+    def _submit_batchtask(self, scriptfile, node):
+        cmd = CommandLine(
+            'bsub',
+            environ=dict(os.environ),
+            resource_monitor=False,
+            terminal_output='allatonce')
+        bsubargs = ''
+        if self._bsub_args:
+            bsubargs = self._bsub_args
+        if 'bsub_args' in node.plugin_args:
+            if 'overwrite' in node.plugin_args and\
+                    node.plugin_args['overwrite']:
+                bsubargs = node.plugin_args['bsub_args']
+            else:
+                bsubargs += (" " + node.plugin_args['bsub_args'])
+        if '-o' not in bsubargs:  # -o outfile
+            bsubargs = '%s -o %s' % (bsubargs, scriptfile + ".log")
+        if '-e' not in bsubargs:
+            # -e error file
+            bsubargs = '%s -e %s' % (bsubargs, scriptfile + ".log")
+        if node._hierarchy:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy,
+                                node._id))
+        else:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id))
+        jobnameitems = jobname.split('.')
+        jobnameitems.reverse()
+        jobname = '.'.join(jobnameitems)
+        cmd.inputs.args = '%s -J %s sh %s' % (bsubargs, jobname,
                                              scriptfile)  # -J job_name_spec
+        logger.debug('bsub ' + cmd.inputs.args)
+        oldlevel = iflogger.level
+        iflogger.setLevel(logging.getLevelName('CRITICAL'))
+        tries = 0
+        while True:
+            try:
+                result = cmd.run()
+            except Exception as e:
+                if tries < self._max_tries:
+                    tries += 1
+                    sleep(
+                        self._retry_timeout)  # sleep 2 seconds and try again.
+                else:
+                    iflogger.setLevel(oldlevel)
+                    raise RuntimeError('\n'.join((('Could not submit lsf task'
+                                                   ' for node %s') % node._id,
+                                                  str(e))))
+            else:
+                break
+        iflogger.setLevel(oldlevel)
+        # retrieve lsf taskid
+        match = re.search(r'<(\d*)>', result.runtime.stdout)
+        if match:
+            taskid = int(match.groups()[0])
+        else:
+            # ScriptError was undefined in this module; raise a standard
+            # exception instead
+            raise RuntimeError("Can't parse submission job output id: %s" %
+                               result.runtime.stdout)
+        self._pending[taskid] = node.output_dir()
+        logger.debug('submitted lsf task: %d for node %s' % (taskid, node._id))
+        return taskid
diff --git a/nipype/pipeline/plugins/multiproc.py b/nipype/pipeline/plugins/multiproc.py
new file mode 100644
index 0000000000..c89a6af8e8
--- /dev/null
+++ b/nipype/pipeline/plugins/multiproc.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Parallel workflow execution via multiprocessing
+
+Support for child processes running as non-daemons based on
+http://stackoverflow.com/a/8963618/1183453
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+# Import packages
+import os
+from multiprocessing import cpu_count
+from concurrent.futures import ProcessPoolExecutor
+from traceback import format_exception
+import sys
+from logging import INFO
+import gc
+
+from copy import deepcopy
+import numpy as np
+from ... 
import logging +from ...utils.profiler import get_system_total_memory_gb +from ..engine import MapNode +from .base import DistributedPluginBase + +try: + from textwrap import indent +except ImportError: + + def indent(text, prefix): + """ A textwrap.indent replacement for Python < 3.3 """ + if not prefix: + return text + splittext = text.splitlines(True) + return prefix + prefix.join(splittext) + + +# Init logger +logger = logging.getLogger('nipype.workflow') + + +# Run node +def run_node(node, updatehash, taskid): + """Function to execute node.run(), catch and log any errors and + return the result dictionary + + Parameters + ---------- + node : nipype Node instance + the node to run + updatehash : boolean + flag for updating hash + taskid : int + an identifier for this task + + Returns + ------- + result : dictionary + dictionary containing the node runtime results and stats + """ + + # Init variables + result = dict(result=None, traceback=None, taskid=taskid) + + # Try and execute the node via node.run() + try: + result['result'] = node.run(updatehash=updatehash) + except: # noqa: E722, intendedly catch all here + result['traceback'] = format_exception(*sys.exc_info()) + result['result'] = node.result + + # Return the result dictionary + return result + + +class MultiProcPlugin(DistributedPluginBase): + """ + Execute workflow with multiprocessing, not sending more jobs at once + than the system can support. + + The plugin_args input to run can be used to control the multiprocessing + execution and defining the maximum amount of memory and threads that + should be used. When those parameters are not specified, + the number of threads and memory of the system is used. + + System consuming nodes should be tagged:: + + memory_consuming_node.mem_gb = 8 + thread_consuming_node.n_procs = 16 + + The default number of threads and memory are set at node + creation, and are 1 and 0.25GB respectively. + + Currently supported options are: + + - non_daemon : boolean flag to execute as non-daemon processes + - n_procs: maximum number of threads to be executed in parallel + - memory_gb: maximum memory (in GB) that can be used at once. + - raise_insufficient: raise error if the requested resources for + a node over the maximum `n_procs` and/or `memory_gb` + (default is ``True``). + - scheduler: sort jobs topologically (``'tsort'``, default value) + or prioritize jobs by, first, memory consumption and, second, + number of threads (``'mem_thread'`` option). + - maxtasksperchild: number of nodes to run on each process before + refreshing the worker (default: 10). + + """ + + def __init__(self, plugin_args=None): + # Init variables and instance attributes + super(MultiProcPlugin, self).__init__(plugin_args=plugin_args) + self._taskresult = {} + self._task_obj = {} + self._taskid = 0 + + # Cache current working directory and make sure we + # change to it when workers are set up + self._cwd = os.getcwd() + + # Read in options or set defaults. 
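+        # For reference, a hypothetical invocation passing these options
+        # might look as follows (values are illustrative only):
+        #
+        #     workflow.run(plugin='MultiProc',
+        #                  plugin_args={'n_procs': 4, 'memory_gb': 8,
+        #                               'raise_insufficient': True})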
+
+        self.processors = self.plugin_args.get('n_procs', cpu_count())
+        self.memory_gb = self.plugin_args.get(
+            'memory_gb',  # Allocate 90% of system memory
+            get_system_total_memory_gb() * 0.9)
+        self.raise_insufficient = self.plugin_args.get('raise_insufficient',
+                                                       True)
+
+        # Instantiate different thread pools for non-daemon processes
+        logger.debug('[MultiProc] Starting (n_procs=%d, '
+                     'mem_gb=%0.2f, cwd=%s)',
+                     self.processors, self.memory_gb, self._cwd)
+
+        self.pool = ProcessPoolExecutor(max_workers=self.processors)
+
+        self._stats = None
+
+    def _async_callback(self, args):
+        # Make sure runtime is not left at a dubious working directory
+        os.chdir(self._cwd)
+        result = args.result()
+        self._taskresult[result['taskid']] = result
+
+    def _get_result(self, taskid):
+        return self._taskresult.get(taskid)
+
+    def _clear_task(self, taskid):
+        del self._task_obj[taskid]
+
+    def _submit_job(self, node, updatehash=False):
+        self._taskid += 1
+
+        # Don't allow streaming outputs
+        if getattr(node.interface, 'terminal_output', '') == 'stream':
+            node.interface.terminal_output = 'allatonce'
+
+        result_future = self.pool.submit(run_node, node, updatehash, self._taskid)
+        result_future.add_done_callback(self._async_callback)
+        self._task_obj[self._taskid] = result_future
+
+        logger.debug('[MultiProc] Submitted task %s (taskid=%d).',
+                     node.fullname, self._taskid)
+        return self._taskid
+
+    def _prerun_check(self, graph):
+        """Check if any node exceeds the available resources"""
+        tasks_mem_gb = []
+        tasks_num_th = []
+        for node in graph.nodes():
+            tasks_mem_gb.append(node.mem_gb)
+            tasks_num_th.append(node.n_procs)
+
+        if np.any(np.array(tasks_mem_gb) > self.memory_gb):
+            logger.warning(
+                'Some nodes exceed the total amount of memory available '
+                '(%0.2fGB).', self.memory_gb)
+            if self.raise_insufficient:
+                raise RuntimeError('Insufficient resources available for job')
+
+        if np.any(np.array(tasks_num_th) > self.processors):
+            logger.warning(
+                'Some nodes demand more threads than available (%d).',
+                self.processors)
+            if self.raise_insufficient:
+                raise RuntimeError('Insufficient resources available for job')
+
+    def _postrun_check(self):
+        self.pool.shutdown()
+
+    def _check_resources(self, running_tasks):
+        """
+        Make sure there are resources available
+        """
+        free_memory_gb = self.memory_gb
+        free_processors = self.processors
+        for _, jobid in running_tasks:
+            free_memory_gb -= min(self.procs[jobid].mem_gb, free_memory_gb)
+            free_processors -= min(self.procs[jobid].n_procs, free_processors)
+
+        return free_memory_gb, free_processors
+
+    def _send_procs_to_workers(self, updatehash=False, graph=None):
+        """
+        Sends jobs to workers when system resources are available.
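+
+        Candidate jobs are those whose dependencies have all been run;
+        each candidate is submitted only while the estimated free memory
+        and free processor counts stay positive, otherwise it is deferred
+        to a later iteration.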
+ """ + + # Check to see if a job is available (jobs with all dependencies run) + # See https://github.com/nipy/nipype/pull/2200#discussion_r141605722 + # See also https://github.com/nipy/nipype/issues/2372 + jobids = np.flatnonzero(~self.proc_done & + (self.depidx.sum(axis=0) == 0).__array__()) + + # Check available resources by summing all threads and memory used + free_memory_gb, free_processors = self._check_resources( + self.pending_tasks) + + stats = (len(self.pending_tasks), len(jobids), free_memory_gb, + self.memory_gb, free_processors, self.processors) + if self._stats != stats: + tasks_list_msg = '' + + if logger.level <= INFO: + running_tasks = [ + ' * %s' % self.procs[jobid].fullname + for _, jobid in self.pending_tasks + ] + if running_tasks: + tasks_list_msg = '\nCurrently running:\n' + tasks_list_msg += '\n'.join(running_tasks) + tasks_list_msg = indent(tasks_list_msg, ' ' * 21) + logger.info( + '[MultiProc] Running %d tasks, and %d jobs ready. Free ' + 'memory (GB): %0.2f/%0.2f, Free processors: %d/%d.%s', + len(self.pending_tasks), len(jobids), free_memory_gb, + self.memory_gb, free_processors, self.processors, + tasks_list_msg) + self._stats = stats + + if free_memory_gb < 0.01 or free_processors == 0: + logger.debug('No resources available') + return + + if len(jobids) + len(self.pending_tasks) == 0: + logger.debug('No tasks are being run, and no jobs can ' + 'be submitted to the queue. Potential deadlock') + return + + jobids = self._sort_jobs( + jobids, scheduler=self.plugin_args.get('scheduler')) + + # Run garbage collector before potentially submitting jobs + gc.collect() + + # Submit jobs + for jobid in jobids: + # First expand mapnodes + if isinstance(self.procs[jobid], MapNode): + try: + num_subnodes = self.procs[jobid].num_subnodes() + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, + graph, + result={ + 'result': None, + 'traceback': traceback + }) + self.proc_pending[jobid] = False + continue + if num_subnodes > 1: + submit = self._submit_mapnode(jobid) + if not submit: + continue + + # Check requirements of this job + next_job_gb = min(self.procs[jobid].mem_gb, self.memory_gb) + next_job_th = min(self.procs[jobid].n_procs, self.processors) + + # If node does not fit, skip at this moment + if next_job_th > free_processors or next_job_gb > free_memory_gb: + logger.debug('Cannot allocate job %d (%0.2fGB, %d threads).', + jobid, next_job_gb, next_job_th) + continue + + free_memory_gb -= next_job_gb + free_processors -= next_job_th + logger.debug('Allocating %s ID=%d (%0.2fGB, %d threads). 
Free: ' + '%0.2fGB, %d threads.', self.procs[jobid].fullname, + jobid, next_job_gb, next_job_th, free_memory_gb, + free_processors) + + # change job status in appropriate queues + self.proc_done[jobid] = True + self.proc_pending[jobid] = True + + # If cached and up-to-date just retrieve it, don't run + if self._local_hash_check(jobid, graph): + continue + + # updatehash and run_without_submitting are also run locally + if updatehash or self.procs[jobid].run_without_submitting: + logger.debug('Running node %s on master thread', + self.procs[jobid]) + try: + self.procs[jobid].run(updatehash=updatehash) + except Exception: + traceback = format_exception(*sys.exc_info()) + self._clean_queue( + jobid, + graph, + result={ + 'result': None, + 'traceback': traceback + }) + + # Release resources + self._task_finished_cb(jobid) + self._remove_node_dirs() + free_memory_gb += next_job_gb + free_processors += next_job_th + # Display stats next loop + self._stats = None + + # Clean up any debris from running node in main process + gc.collect() + continue + + # Task should be submitted to workers + # Send job to task manager and add to pending tasks + if self._status_callback: + self._status_callback(self.procs[jobid], 'start') + tid = self._submit_job( + deepcopy(self.procs[jobid]), updatehash=updatehash) + if tid is None: + self.proc_done[jobid] = False + self.proc_pending[jobid] = False + else: + self.pending_tasks.insert(0, (tid, jobid)) + # Display stats next loop + self._stats = None + + def _sort_jobs(self, jobids, scheduler='tsort'): + if scheduler == 'mem_thread': + return sorted( + jobids, + key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs) + ) + return jobids diff --git a/nipype/pipeline/plugins/oar.py b/nipype/pipeline/plugins/oar.py new file mode 100644 index 0000000000..c68b42379f --- /dev/null +++ b/nipype/pipeline/plugins/oar.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +"""Parallel workflow execution via OAR http://oar.imag.fr +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import str, open +import os +import stat +from time import sleep +import subprocess +import simplejson as json + +from ... import logging +from ...interfaces.base import CommandLine +from .base import SGELikeBatchManagerBase, logger +iflogger = logging.getLogger('nipype.interface') + + +class OARPlugin(SGELikeBatchManagerBase): + """Execute using OAR + + The plugin_args input to run can be used to control the OAR execution. + Currently supported options are: + + - template : template to use for batch job submission + - oarsub_args : arguments to be prepended to the job execution + script in the oarsub call + - max_jobname_len: maximum length of the job name. Default 15. 
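+
+    For example (all values illustrative only)::
+
+        workflow.run(plugin='OAR',
+                     plugin_args={'oarsub_args': '-q default',
+                                  'max_jobname_len': 15})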
+
+    """
+
+    # Additional class variables
+    _max_jobname_len = 15
+    _oarsub_args = ''
+
+    def __init__(self, **kwargs):
+        template = """
+# oarsub -J
+"""
+        self._retry_timeout = 2
+        self._max_tries = 2
+        self._max_jobname_len = 15
+        if 'plugin_args' in kwargs and kwargs['plugin_args']:
+            if 'oarsub_args' in kwargs['plugin_args']:
+                self._oarsub_args = kwargs['plugin_args']['oarsub_args']
+            if 'retry_timeout' in kwargs['plugin_args']:
+                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
+            if 'max_tries' in kwargs['plugin_args']:
+                self._max_tries = kwargs['plugin_args']['max_tries']
+            if 'max_jobname_len' in kwargs['plugin_args']:
+                self._max_jobname_len = \
+                    kwargs['plugin_args']['max_jobname_len']
+        super(OARPlugin, self).__init__(template, **kwargs)
+
+    def _is_pending(self, taskid):
+        # subprocess.Popen requires taskid to be a string
+        proc = subprocess.Popen(
+            ['oarstat', '-J', '-s', '-j', taskid],
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE)
+        o, e = proc.communicate()
+        parsed_result = json.loads(o)[taskid].lower()
+        is_pending = (('error' not in parsed_result)
+                      and ('terminated' not in parsed_result))
+        return is_pending
+
+    def _submit_batchtask(self, scriptfile, node):
+        cmd = CommandLine(
+            'oarsub',
+            environ=dict(os.environ),
+            resource_monitor=False,
+            terminal_output='allatonce')
+        path = os.path.dirname(scriptfile)
+        oarsubargs = ''
+        if self._oarsub_args:
+            oarsubargs = self._oarsub_args
+        if 'oarsub_args' in node.plugin_args:
+            if ('overwrite' in node.plugin_args
+                    and node.plugin_args['overwrite']):
+                oarsubargs = node.plugin_args['oarsub_args']
+            else:
+                oarsubargs += (" " + node.plugin_args['oarsub_args'])
+
+        if node._hierarchy:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy,
+                                node._id))
+        else:
+            jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id))
+        jobnameitems = jobname.split('.')
+        jobnameitems.reverse()
+        jobname = '.'.join(jobnameitems)
+        jobname = jobname[0:self._max_jobname_len]
+
+        if '-O' not in oarsubargs:
+            oarsubargs = '%s -O %s' % (oarsubargs,
+                                       os.path.join(path, jobname + '.stdout'))
+        if '-E' not in oarsubargs:
+            oarsubargs = '%s -E %s' % (oarsubargs,
+                                       os.path.join(path, jobname + '.stderr'))
+        if '-J' not in oarsubargs:
+            oarsubargs = '%s -J' % (oarsubargs)
+
+        os.chmod(scriptfile, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
+        cmd.inputs.args = '%s -n %s -S %s' % (oarsubargs, jobname, scriptfile)
+
+        oldlevel = iflogger.level
+        iflogger.setLevel(logging.getLevelName('CRITICAL'))
+        tries = 0
+        while True:
+            try:
+                result = cmd.run()
+            except Exception as e:
+                if tries < self._max_tries:
+                    tries += 1
+                    # sleep 2 seconds and try again.
+                    sleep(self._retry_timeout)
+ else: + iflogger.setLevel(oldlevel) + raise RuntimeError('\n'.join((('Could not submit OAR task' + ' for node %s') % node._id, + str(e)))) + else: + break + iflogger.setLevel(oldlevel) + # retrieve OAR taskid + + o = '' + add = False + for line in result.runtime.stdout.splitlines(): + if line.strip().startswith('{'): + add = True + if add: + o += line + '\n' + if line.strip().startswith('}'): + break + taskid = json.loads(o)['job_id'] + self._pending[taskid] = node.output_dir() + logger.debug('submitted OAR task: %s for node %s' % (taskid, node._id)) + return taskid diff --git a/nipype/pipeline/plugins/pbs.py b/nipype/pipeline/plugins/pbs.py new file mode 100644 index 0000000000..0738638765 --- /dev/null +++ b/nipype/pipeline/plugins/pbs.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +"""Parallel workflow execution via PBS/Torque +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, open + +import os +from time import sleep + +from ... import logging +from ...interfaces.base import CommandLine +from .base import SGELikeBatchManagerBase, logger + +iflogger = logging.getLogger('nipype.interface') + + +class PBSPlugin(SGELikeBatchManagerBase): + """Execute using PBS/Torque + + The plugin_args input to run can be used to control the SGE execution. + Currently supported options are: + + - template : template to use for batch job submission + - qsub_args : arguments to be prepended to the job execution script in the + qsub call + - max_jobname_len: maximum length of the job name. Default 15. + + """ + + # Addtional class variables + _max_jobname_len = 15 + + def __init__(self, **kwargs): + template = """ +#PBS -V + """ + self._retry_timeout = 2 + self._max_tries = 2 + self._max_jobname_length = 15 + if 'plugin_args' in kwargs and kwargs['plugin_args']: + if 'retry_timeout' in kwargs['plugin_args']: + self._retry_timeout = kwargs['plugin_args']['retry_timeout'] + if 'max_tries' in kwargs['plugin_args']: + self._max_tries = kwargs['plugin_args']['max_tries'] + if 'max_jobname_len' in kwargs['plugin_args']: + self._max_jobname_len = kwargs['plugin_args'][ + 'max_jobname_len'] + super(PBSPlugin, self).__init__(template, **kwargs) + + def _is_pending(self, taskid): + result = CommandLine('qstat -f {}'.format(taskid), + environ=dict(os.environ), + terminal_output='file_split', + resource_monitor=False, + ignore_exception=True).run() + + stdout = result.runtime.stdout + stderr = result.runtime.stderr + errmsg = 'Unknown Job Id' + success = 'Job has finished' + if (success in stderr) or ('job_state = C' in stdout): + return False + else: + return errmsg not in stderr + + def _submit_batchtask(self, scriptfile, node): + cmd = CommandLine( + 'qsub', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + path = os.path.dirname(scriptfile) + qsubargs = '' + if self._qsub_args: + qsubargs = self._qsub_args + if 'qsub_args' in node.plugin_args: + if 'overwrite' in node.plugin_args and \ + node.plugin_args['overwrite']: + qsubargs = node.plugin_args['qsub_args'] + else: + qsubargs += (" " + node.plugin_args['qsub_args']) + if '-o' not in qsubargs: + qsubargs = '%s -o %s' % (qsubargs, path) + if '-e' not in qsubargs: + qsubargs = '%s -e %s' % (qsubargs, path) + if node._hierarchy: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, + node._id)) + else: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) + jobnameitems = jobname.split('.') + jobnameitems.reverse() + jobname = 
'.'.join(jobnameitems) + jobname = jobname[0:self._max_jobname_len] + cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) + + oldlevel = iflogger.level + iflogger.setLevel(logging.getLevelName('CRITICAL')) + tries = 0 + while True: + try: + result = cmd.run() + except Exception as e: + if tries < self._max_tries: + tries += 1 + # sleep 2 seconds and try again. + sleep(self._retry_timeout) + else: + iflogger.setLevel(oldlevel) + raise RuntimeError( + 'Could not submit pbs task for node {}\n{}'.format( + node._id, e)) + else: + break + iflogger.setLevel(oldlevel) + # retrieve pbs taskid + taskid = result.runtime.stdout.split('.')[0] + self._pending[taskid] = node.output_dir() + logger.debug('submitted pbs task: {} for node {}'.format( + taskid, node._id)) + + return taskid diff --git a/nipype/pipeline/plugins/pbsgraph.py b/nipype/pipeline/plugins/pbsgraph.py new file mode 100644 index 0000000000..68fc651f5f --- /dev/null +++ b/nipype/pipeline/plugins/pbsgraph.py @@ -0,0 +1,65 @@ +"""Parallel workflow execution via PBS/Torque +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os +import sys + +from ...interfaces.base import CommandLine +from .sgegraph import SGEGraphPlugin +from .base import logger + + +class PBSGraphPlugin(SGEGraphPlugin): + """Execute using PBS/Torque + + The plugin_args input to run can be used to control the SGE execution. + Currently supported options are: + + - template : template to use for batch job submission + - qsub_args : arguments to be prepended to the job execution script in the + qsub call + + """ + _template = """ +#PBS -V +""" + + def _submit_graph(self, pyfiles, dependencies, nodes): + batch_dir, _ = os.path.split(pyfiles[0]) + submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') + with open(submitjobsfile, 'wt') as fp: + fp.writelines('#!/usr/bin/env sh\n') + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + template, qsub_args = self._get_args(node, + ["template", "qsub_args"]) + + batch_dir, name = os.path.split(pyscript) + name = '.'.join(name.split('.')[:-1]) + batchscript = '\n'.join((template, '%s %s' % (sys.executable, + pyscript))) + batchscriptfile = os.path.join(batch_dir, + 'batchscript_%s.sh' % name) + with open(batchscriptfile, 'wt') as batchfp: + batchfp.writelines(batchscript) + batchfp.close() + deps = '' + if idx in dependencies: + values = [ + '$job%05d' % jobid for jobid in dependencies[idx] + ] + if len(values): + deps = '-W depend=afterok:%s' % ':'.join(values) + fp.writelines('job%05d=`qsub %s %s %s`\n' % + (idx, deps, qsub_args, batchscriptfile)) + cmd = CommandLine( + 'sh', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + cmd.inputs.args = '%s' % submitjobsfile + cmd.run() + logger.info('submitted all jobs to queue') diff --git a/nipype/pipeline/plugins/semaphore_singleton.py b/nipype/pipeline/plugins/semaphore_singleton.py new file mode 100644 index 0000000000..96dfe657bd --- /dev/null +++ b/nipype/pipeline/plugins/semaphore_singleton.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import threading +semaphore = threading.Semaphore(0) diff --git a/nipype/pipeline/plugins/sge.py b/nipype/pipeline/plugins/sge.py new file mode 100644 index 0000000000..a4ce28297c --- /dev/null +++ b/nipype/pipeline/plugins/sge.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +"""Parallel workflow execution via SGE +""" +from 
__future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from builtins import object
+
+import os
+import pwd
+import re
+import subprocess
+import time
+
+import xml.dom.minidom
+
+import random
+
+from ... import logging
+from ...interfaces.base import CommandLine
+from .base import SGELikeBatchManagerBase, logger
+iflogger = logging.getLogger('nipype.interface')
+DEBUGGING_PREFIX = str(int(random.uniform(100, 999)))
+
+
+def sge_debug_print(message):
+    """ Needed for debugging on big jobs. Once this is fully vetted, it can be removed.
+    """
+    logger.debug(DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message)
+    # print DEBUGGING_PREFIX + " " + "=!" * 3 + " " + message
+
+
+class QJobInfo(object):
+    """Information about a single job created by OGE/SGE or similar.
+    Each job is responsible for knowing its own refresh state.
+    :author Hans J. Johnson
+    """
+
+    def __init__(self, job_num, job_queue_state, job_time, job_queue_name,
+                 job_slots, qsub_command_line):
+        # self._jobName = None  # Ascii text name of job not unique
+        self._job_num = int(
+            job_num
+        )  # The primary unique identifier for this job, must be an integer!
+        # self._jobOwn = None  # Who owns this job
+        self._job_queue_state = str(
+            job_queue_state)  # ["running","zombie",...??]
+        # self._jobActionState = str(jobActionState)  # ['r','qw','S',...??]
+        self._job_time = job_time  # The job start time
+        self._job_info_creation_time = time.time(
+        )  # When this job was created (for comparing against initialization)
+        self._job_queue_name = job_queue_name  # Where the job is running
+        self._job_slots = int(job_slots)  # How many slots are being used
+        self._qsub_command_line = qsub_command_line
+
+    def __repr__(self):
+        return '{:<8d}{:12}{:<3d}{:20}{:8}{}'.format(
+            self._job_num, self._job_queue_state, self._job_slots,
+            time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(self._job_time)),
+            self._job_queue_name, self._qsub_command_line)
+
+    def is_initializing(self):
+        return self._job_queue_state == "initializing"
+
+    def is_zombie(self):
+        return self._job_queue_state == "zombie" or self._job_queue_state == "finished"
+
+    def is_running(self):
+        return self._job_queue_state == "running"
+
+    def is_pending(self):
+        return self._job_queue_state == "pending"
+
+    def is_job_state_pending(self):
+        """ Return True, unless job is in the "zombie" status
+        """
+        time_diff = (time.time() - self._job_info_creation_time)
+        if self.is_zombie():
+            sge_debug_print(
+                "DONE! QJobInfo.IsPending found in 'zombie' list, returning False so claiming done!\n{0}".
+                format(self))
+            is_pending_status = False  # Job explicitly found as being completed!
+        elif self.is_initializing() and (time_diff > 600):
+            # if a job has been 'initializing' for more than 10 minutes,
+            # assume it started and finished before its registration was
+            # recorded
+            sge_debug_print(
+                "FAILURE! QJobInfo.IsPending found job 'initializing' for {1} "
+                "seconds, returning False to break the loop!\n{0}".
+                format(self, time_diff))
+            is_pending_status = False  # Job initialization took too long, so report!
+ else: # self.is_running() || self.is_pending(): + is_pending_status = True # Job cache last listed as running + return is_pending_status # The job is in one of the hold states + + def update_info(self, job_queue_state, job_time, job_queue_name, + job_slots): + self._job_queue_state = job_queue_state + self._job_time = job_time + self._job_queue_name = job_queue_name + self._job_slots = int(job_slots) + + def set_state(self, new_state): + self._job_queue_state = new_state + + +class QstatSubstitute(object): + """A wrapper for Qstat to avoid overloading the + SGE/OGS server with rapid continuous qstat requests""" + + def __init__(self, + qstat_instant_executable='qstat', + qstat_cached_executable='qstat'): + """ + :param qstat_instant_executable: + :param qstat_cached_executable: + """ + self._qstat_instant_executable = qstat_instant_executable + self._qstat_cached_executable = qstat_cached_executable + self._out_of_scope_jobs = list() # Initialize first + self._task_dictionary = dict( + ) # {'taskid': QJobInfo(), .... } The dictionaryObject + self._remove_old_jobs() + + def _remove_old_jobs(self): + """ This is only called during initialization of the function for the purpose + of identifying jobs that are not part of this run of nipype. They + are jobs that existed prior to starting a new jobs, so they are irrelevant. + """ + self._run_qstat("QstatInitialization", True) + # If qstat does not exist on this system, then quietly + # fail during init + + def add_startup_job(self, taskid, qsub_command_line): + """ + :param taskid: The job id + :param qsub_command_line: When initializing, re-use the job_queue_name + :return: NONE + """ + taskid = int(taskid) # Ensure that it is an integer + self._task_dictionary[taskid] = QJobInfo(taskid, "initializing", + time.time(), "noQueue", 1, + qsub_command_line) + + @staticmethod + def _qacct_verified_complete(taskid): + """ request definitive job completion information for the current job + from the qacct report + """ + sge_debug_print("WARNING: " + "CONTACTING qacct for finished jobs, " + "{0}: {1}".format(time.time(), "Verifying Completion")) + + this_command = 'qacct' + qacct_retries = 10 + is_complete = False + while qacct_retries > 0: + qacct_retries -= 1 + try: + proc = subprocess.Popen( + [ + this_command, '-o', + pwd.getpwuid(os.getuid())[0], '-j', + str(taskid) + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + qacct_result, _ = proc.communicate() + if qacct_result.find(str(taskid)): + is_complete = True + sge_debug_print( + "NOTE: qacct for jobs\n{0}".format(qacct_result)) + break + except: + sge_debug_print("NOTE: qacct call failed") + time.sleep(5) + pass + return is_complete + + def _parse_qstat_job_list(self, xml_job_list): + current_jobs_parsed = list() + for current_job_element in xml_job_list: + # jobname = current_job_element.getElementsByTagName('JB_name')[0].childNodes[0].data + # jobown = + # current_job_element.getElementsByTagName('JB_owner')[0].childNodes[0].data + try: + job_queue_name = current_job_element.getElementsByTagName( + 'queue_name')[0].childNodes[0].data + except: + job_queue_name = "unknown" + try: + job_slots = int( + current_job_element.getElementsByTagName('slots')[0] + .childNodes[0].data) + except: + job_slots = -1 + job_queue_state = current_job_element.getAttribute('state') + job_num = int( + current_job_element.getElementsByTagName('JB_job_number')[0] + .childNodes[0].data) + try: + job_time_text = current_job_element.getElementsByTagName( + 'JAT_start_time')[0].childNodes[0].data + job_time = 
float(
+                    time.mktime(
+                        time.strptime(job_time_text, "%Y-%m-%dT%H:%M:%S")))
+            except:
+                job_time = float(0.0)
+            # Make job entry
+
+            task_id = int(job_num)
+            if task_id in self._task_dictionary:
+                self._task_dictionary[task_id].update_info(
+                    job_queue_state, job_time, job_queue_name, job_slots)
+                sge_debug_print("Updating job: {0}".format(
+                    self._task_dictionary[task_id]))
+                current_jobs_parsed.append(task_id)
+                # task_id (an int) is used for the membership check above,
+                # rather than job_num, which is not cast to int
+            else:
+                # Any Job that was not explicitly added with qsub command is
+                # out of scope
+                self._out_of_scope_jobs.append(task_id)
+
+        # To ensure that every job in the dictionary has a state reported
+        # by the SGE environment, it is necessary to explicitly check jobs
+        # that are not reported by the qstat command to determine if they
+        # were started and finished, and pushed out of the window of review
+        # before their state was recorded. The qacct command is slower, but
+        # much more robust for ensuring that a job has completed.
+        for dictionary_job in list(self._task_dictionary.keys()):
+            if dictionary_job not in current_jobs_parsed:
+                is_completed = self._qacct_verified_complete(dictionary_job)
+                if is_completed:
+                    self._task_dictionary[dictionary_job].set_state("zombie")
+                else:
+                    sge_debug_print("ERROR: Job not in current parse list, "
+                                    "and not in done list {0}: {1}".format(
+                                        dictionary_job,
+                                        self._task_dictionary[dictionary_job]))
+                pass
+            if self._task_dictionary[dictionary_job].is_initializing():
+                is_completed = self._qacct_verified_complete(dictionary_job)
+                if is_completed:
+                    self._task_dictionary[dictionary_job].set_state("zombie")
+                else:
+                    sge_debug_print(
+                        "ERROR: Job is still in initializing mode, "
+                        "and not in done list {0}: {1}".format(
+                            dictionary_job,
+                            self._task_dictionary[dictionary_job]))
+                pass
+
+    def _run_qstat(self, reason_for_qstat, force_instant=True):
+        """ Request all job information for the current user in XML format.
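+
+        Roughly the same query from a shell (with the user name filled in
+        at runtime) would be::
+
+            qstat -u <user> -xml -s psrz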
+ See documentation from java documentation: + http://arc.liv.ac.uk/SGE/javadocs/jgdi/com/sun/grid/jgdi/monitoring/filter/JobStateFilter.html + -s r gives running jobs + -s z gives recently completed jobs (**recently** is very ambiguous) + -s s suspended jobs + """ + sge_debug_print("WARNING: CONTACTING qmaster for jobs, " + "{0}: {1}".format(time.time(), reason_for_qstat)) + if force_instant: + this_command = self._qstat_instant_executable + else: + this_command = self._qstat_cached_executable + + qstat_retries = 10 + while qstat_retries > 0: + qstat_retries -= 1 + try: + proc = subprocess.Popen( + [ + this_command, '-u', + pwd.getpwuid(os.getuid())[0], '-xml', '-s', 'psrz' + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + qstat_xml_result, _ = proc.communicate() + dom = xml.dom.minidom.parseString(qstat_xml_result) + jobs = dom.getElementsByTagName('job_info') + run = jobs[0] + runjobs = run.getElementsByTagName('job_list') + self._parse_qstat_job_list(runjobs) + break + except Exception as inst: + exception_message = "QstatParsingError:\n\t{0}\n\t{1}\n".format( + type(inst), # the exception instance + inst # __str__ allows args to printed directly + ) + sge_debug_print(exception_message) + time.sleep(5) + pass + + def print_dictionary(self): + """For debugging""" + for vv in list(self._task_dictionary.values()): + sge_debug_print(str(vv)) + + def is_job_pending(self, task_id): + task_id = int(task_id) # Ensure that it is an integer + # Check if the task is in the dictionary first (before running qstat) + if task_id in self._task_dictionary: + # Trust the cache, only False if state='zombie' + job_is_pending = self._task_dictionary[ + task_id].is_job_state_pending() + # Double check pending jobs in case of change (since we don't check at the beginning) + if job_is_pending: + self._run_qstat( + "checking job pending status {0}".format(task_id), False) + job_is_pending = self._task_dictionary[ + task_id].is_job_state_pending() + else: + self._run_qstat("checking job pending status {0}".format(task_id), + True) + if task_id in self._task_dictionary: + # Trust the cache, only False if state='zombie' + job_is_pending = self._task_dictionary[ + task_id].is_job_state_pending() + else: + sge_debug_print("ERROR: Job {0} not in task list, " + "even after forced qstat!".format(task_id)) + job_is_pending = False + if not job_is_pending: + sge_debug_print( + "DONE! Returning for {0} claiming done!".format(task_id)) + if task_id in self._task_dictionary: + sge_debug_print( + "NOTE: Adding {0} to OutOfScopeJobs list!".format(task_id)) + self._out_of_scope_jobs.append(int(task_id)) + self._task_dictionary.pop(task_id) + else: + sge_debug_print("ERROR: Job {0} not in task list, " + "but attempted to be removed!".format(task_id)) + return job_is_pending + + +def qsub_sanitize_job_name(testjobname): + """ Ensure that qsub job names must begin with a letter. + + Numbers and punctuation are not allowed. + + >>> qsub_sanitize_job_name('01') + 'J01' + >>> qsub_sanitize_job_name('a01') + 'a01' + """ + if testjobname[0].isalpha(): + return testjobname + else: + return 'J' + testjobname + + +class SGEPlugin(SGELikeBatchManagerBase): + """Execute using SGE (OGE not tested) + + The plugin_args input to run can be used to control the SGE execution. 
+ Currently supported options are: + + - template : template to use for batch job submission + - qsub_args : arguments to be prepended to the job execution script in the + qsub call + + """ + + def __init__(self, **kwargs): + template = """ +#$ -V +#$ -S /bin/sh + """ + self._retry_timeout = 2 + self._max_tries = 2 + instant_qstat = 'qstat' + cached_qstat = 'qstat' + + if 'plugin_args' in kwargs and kwargs['plugin_args']: + if 'retry_timeout' in kwargs['plugin_args']: + self._retry_timeout = kwargs['plugin_args']['retry_timeout'] + if 'max_tries' in kwargs['plugin_args']: + self._max_tries = kwargs['plugin_args']['max_tries'] + if 'qstatProgramPath' in kwargs['plugin_args']: + instant_qstat = kwargs['plugin_args']['qstatProgramPath'] + if 'qstatCachedProgramPath' in kwargs['plugin_args']: + cached_qstat = kwargs['plugin_args']['qstatCachedProgramPath'] + self._refQstatSubstitute = QstatSubstitute(instant_qstat, cached_qstat) + + super(SGEPlugin, self).__init__(template, **kwargs) + + def _is_pending(self, taskid): + return self._refQstatSubstitute.is_job_pending(int(taskid)) + + def _submit_batchtask(self, scriptfile, node): + cmd = CommandLine( + 'qsub', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + path = os.path.dirname(scriptfile) + qsubargs = '' + if self._qsub_args: + qsubargs = self._qsub_args + if 'qsub_args' in node.plugin_args: + if 'overwrite' in node.plugin_args and \ + node.plugin_args['overwrite']: + qsubargs = node.plugin_args['qsub_args'] + else: + qsubargs += (" " + node.plugin_args['qsub_args']) + if '-o' not in qsubargs: + qsubargs = '%s -o %s' % (qsubargs, path) + if '-e' not in qsubargs: + qsubargs = '%s -e %s' % (qsubargs, path) + if node._hierarchy: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, + node._id)) + else: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) + jobnameitems = jobname.split('.') + jobnameitems.reverse() + jobname = '.'.join(jobnameitems) + jobname = qsub_sanitize_job_name(jobname) + cmd.inputs.args = '%s -N %s %s' % (qsubargs, jobname, scriptfile) + oldlevel = iflogger.level + iflogger.setLevel(logging.getLevelName('CRITICAL')) + tries = 0 + result = list() + while True: + try: + result = cmd.run() + except Exception as e: + if tries < self._max_tries: + tries += 1 + time.sleep( + self._retry_timeout) # sleep 2 seconds and try again. 
+ else: + iflogger.setLevel(oldlevel) + raise RuntimeError('\n'.join((('Could not submit sge task' + ' for node %s') % node._id, + str(e)))) + else: + break + iflogger.setLevel(oldlevel) + # retrieve sge taskid + lines = [line for line in result.runtime.stdout.split('\n') if line] + taskid = int( + re.match("Your job ([0-9]*) .* has been submitted", + lines[-1]).groups()[0]) + self._pending[taskid] = node.output_dir() + self._refQstatSubstitute.add_startup_job(taskid, cmd.cmdline) + logger.debug('submitted sge task: %d for node %s with %s' % + (taskid, node._id, cmd.cmdline)) + return taskid diff --git a/nipype/pipeline/plugins/sgegraph.py b/nipype/pipeline/plugins/sgegraph.py new file mode 100644 index 0000000000..fa07d6a436 --- /dev/null +++ b/nipype/pipeline/plugins/sgegraph.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +"""Parallel workflow execution via SGE +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os +import sys + +from ...interfaces.base import CommandLine +from .base import (GraphPluginBase, logger) + + +def node_completed_status(checknode): + """ + A function to determine if a node has previously completed it's work + :param checknode: The node to check the run status + :return: boolean value True indicates that the node does not need to be run. + """ + """ TODO: place this in the base.py file and refactor """ + node_state_does_not_require_overwrite = ( + checknode.overwrite is False or + (checknode.overwrite is None and not checknode._interface.always_run)) + hash_exists = False + try: + hash_exists, _, _, _ = checknode.hash_exists() + except Exception: + hash_exists = False + return (hash_exists and node_state_does_not_require_overwrite) + + +class SGEGraphPlugin(GraphPluginBase): + """Execute using SGE + + The plugin_args input to run can be used to control the SGE execution. + Currently supported options are: + + - template : template to use for batch job submission + - qsub_args : arguments to be prepended to the job execution script in the + qsub call + + """ + _template = """ +#!/bin/bash +#$ -V +#$ -S /bin/bash +""" + + def __init__(self, **kwargs): + self._qsub_args = '' + self._dont_resubmit_completed_jobs = False + if 'plugin_args' in kwargs and kwargs['plugin_args']: + plugin_args = kwargs['plugin_args'] + if 'template' in plugin_args: + self._template = plugin_args['template'] + if os.path.isfile(self._template): + self._template = open(self._template).read() + if 'qsub_args' in plugin_args: + self._qsub_args = plugin_args['qsub_args'] + if 'dont_resubmit_completed_jobs' in plugin_args: + self._dont_resubmit_completed_jobs = plugin_args[ + 'dont_resubmit_completed_jobs'] + super(SGEGraphPlugin, self).__init__(**kwargs) + + def _submit_graph(self, pyfiles, dependencies, nodes): + def make_job_name(jobnumber, nodeslist): + """ + - jobnumber: The index number of the job to create + - nodeslist: The name of the node being processed + - return: A string representing this job to be displayed by SGE + """ + job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id) + # Condition job_name to be a valid bash identifier (i.e. 
- is invalid) + job_name = job_name.replace('-', '_').replace('.', '_').replace( + ':', '_') + return job_name + + batch_dir, _ = os.path.split(pyfiles[0]) + submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') + + cache_doneness_per_node = dict() + if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + node_status_done = node_completed_status(node) + + # if the node itself claims done, then check to ensure all + # dependancies are also done + if node_status_done and idx in dependencies: + for child_idx in dependencies[idx]: + if child_idx in cache_doneness_per_node: + child_status_done = cache_doneness_per_node[ + child_idx] + else: + child_status_done = node_completed_status( + nodes[child_idx]) + node_status_done = node_status_done and child_status_done + + cache_doneness_per_node[idx] = node_status_done + + with open(submitjobsfile, 'wt') as fp: + fp.writelines('#!/usr/bin/env bash\n') + fp.writelines('# Condense format attempted\n') + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + if cache_doneness_per_node.get(idx, False): + continue + else: + template, qsub_args = self._get_args( + node, ["template", "qsub_args"]) + + batch_dir, name = os.path.split(pyscript) + name = '.'.join(name.split('.')[:-1]) + batchscript = '\n'.join( + (template, '%s %s' % (sys.executable, pyscript))) + batchscriptfile = os.path.join(batch_dir, + 'batchscript_%s.sh' % name) + + batchscriptoutfile = batchscriptfile + '.o' + batchscripterrfile = batchscriptfile + '.e' + + with open(batchscriptfile, 'wt') as batchfp: + batchfp.writelines(batchscript) + batchfp.close() + deps = '' + if idx in dependencies: + values = ' ' + for jobid in dependencies[idx]: + # Avoid dependancies of done jobs + if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]: + values += "${{{0}}},".format( + make_job_name(jobid, nodes)) + if values != ' ': # i.e. if some jobs were added to dependency list + values = values.rstrip(',') + deps = '-hold_jid%s' % values + jobname = make_job_name(idx, nodes) + # Do not use default output locations if they are set in self._qsub_args + stderrFile = '' + if self._qsub_args.count('-e ') == 0: + stderrFile = '-e {errFile}'.format( + errFile=batchscripterrfile) + stdoutFile = '' + if self._qsub_args.count('-o ') == 0: + stdoutFile = '-o {outFile}'.format( + outFile=batchscriptoutfile) + full_line = '{jobNm}=$(qsub {outFileOption} {errFileOption} {extraQSubArgs} {dependantIndex} -N {jobNm} {batchscript} | awk \'{{print $3}}\')\n'.format( + jobNm=jobname, + outFileOption=stdoutFile, + errFileOption=stderrFile, + extraQSubArgs=qsub_args, + dependantIndex=deps, + batchscript=batchscriptfile) + fp.writelines(full_line) + cmd = CommandLine( + 'bash', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + cmd.inputs.args = '%s' % submitjobsfile + cmd.run() + logger.info('submitted all jobs to queue') diff --git a/nipype/pipeline/plugins/slurm.py b/nipype/pipeline/plugins/slurm.py new file mode 100644 index 0000000000..285d2d6584 --- /dev/null +++ b/nipype/pipeline/plugins/slurm.py @@ -0,0 +1,136 @@ +''' +Created on Aug 2, 2013 + +@author: chadcumba + +Parallel workflow execution with SLURM +''' +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os +import re +from time import sleep + +from ... 
import logging +from ...interfaces.base import CommandLine +from .base import SGELikeBatchManagerBase, logger + +iflogger = logging.getLogger('nipype.interface') + + +class SLURMPlugin(SGELikeBatchManagerBase): + ''' + Execute using SLURM + + The plugin_args input to run can be used to control the SLURM execution. + Currently supported options are: + + - template : template to use for batch job submission + + - sbatch_args: arguments to pass prepend to the sbatch call + + + ''' + + def __init__(self, **kwargs): + + template = "#!/bin/bash" + + self._retry_timeout = 2 + self._max_tries = 2 + self._template = template + self._sbatch_args = None + self._jobid_re = "Submitted batch job ([0-9]*)" + + if 'plugin_args' in kwargs and kwargs['plugin_args']: + if 'retry_timeout' in kwargs['plugin_args']: + self._retry_timeout = kwargs['plugin_args']['retry_timeout'] + if 'max_tries' in kwargs['plugin_args']: + self._max_tries = kwargs['plugin_args']['max_tries'] + if 'jobid_re' in kwargs['plugin_args']: + self._jobid_re = kwargs['plugin_args']['jobid_re'] + if 'template' in kwargs['plugin_args']: + self._template = kwargs['plugin_args']['template'] + if os.path.isfile(self._template): + with open(self._template) as f: + self._template = f.read() + if 'sbatch_args' in kwargs['plugin_args']: + self._sbatch_args = kwargs['plugin_args']['sbatch_args'] + self._pending = {} + super(SLURMPlugin, self).__init__(self._template, **kwargs) + + def _is_pending(self, taskid): + # subprocess.Popen requires taskid to be a string + res = CommandLine( + 'squeue', + args=' '.join(['-j', '%s' % taskid]), + resource_monitor=False, + terminal_output='allatonce').run() + return res.runtime.stdout.find(str(taskid)) > -1 + + def _submit_batchtask(self, scriptfile, node): + """ + This is more or less the _submit_batchtask from sge.py with flipped + variable names, different command line switches, and different output + formatting/processing + """ + cmd = CommandLine( + 'sbatch', + environ=dict(os.environ), + resource_monitor=False, + terminal_output='allatonce') + path = os.path.dirname(scriptfile) + + sbatch_args = '' + if self._sbatch_args: + sbatch_args = self._sbatch_args + if 'sbatch_args' in node.plugin_args: + if 'overwrite' in node.plugin_args and\ + node.plugin_args['overwrite']: + sbatch_args = node.plugin_args['sbatch_args'] + else: + sbatch_args += (" " + node.plugin_args['sbatch_args']) + if '-o' not in sbatch_args: + sbatch_args = '%s -o %s' % (sbatch_args, + os.path.join(path, 'slurm-%j.out')) + if '-e' not in sbatch_args: + sbatch_args = '%s -e %s' % (sbatch_args, + os.path.join(path, 'slurm-%j.out')) + if node._hierarchy: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._hierarchy, + node._id)) + else: + jobname = '.'.join((dict(os.environ)['LOGNAME'], node._id)) + jobnameitems = jobname.split('.') + jobnameitems.reverse() + jobname = '.'.join(jobnameitems) + cmd.inputs.args = '%s -J %s %s' % (sbatch_args, jobname, scriptfile) + oldlevel = iflogger.level + iflogger.setLevel(logging.getLevelName('CRITICAL')) + tries = 0 + while True: + try: + result = cmd.run() + except Exception as e: + if tries < self._max_tries: + tries += 1 + # sleep 2 seconds and try again. 
+                    sleep(self._retry_timeout)
+                else:
+                    iflogger.setLevel(oldlevel)
+                    raise RuntimeError('\n'.join(
+                        (('Could not submit sbatch task'
+                          ' for node %s') % node._id, str(e))))
+            else:
+                break
+        logger.debug('Ran command ({0})'.format(cmd.cmdline))
+        iflogger.setLevel(oldlevel)
+        # retrieve taskid
+        lines = [line for line in result.runtime.stdout.split('\n') if line]
+        taskid = int(re.match(self._jobid_re, lines[-1]).groups()[0])
+        self._pending[taskid] = node.output_dir()
+        logger.debug('submitted sbatch task: %d for node %s' % (taskid,
+                                                                node._id))
+        return taskid
diff --git a/nipype/pipeline/plugins/slurmgraph.py b/nipype/pipeline/plugins/slurmgraph.py
new file mode 100644
index 0000000000..b4013163cb
--- /dev/null
+++ b/nipype/pipeline/plugins/slurmgraph.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+"""Parallel workflow execution via SLURM
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import open
+
+import os
+import sys
+
+from ...interfaces.base import CommandLine
+from .base import (GraphPluginBase, logger)
+
+
+def node_completed_status(checknode):
+    """
+    A function to determine if a node has previously completed its work
+    :param checknode: The node whose run status should be checked
+    :return: boolean value True indicates that the node does not need to be run.
+    """
+    # TODO: place this in the base.py file and refactor
+    node_state_does_not_require_overwrite = (
+        checknode.overwrite is False or
+        (checknode.overwrite is None and not checknode._interface.always_run))
+    hash_exists = False
+    try:
+        hash_exists, _, _, _ = checknode.hash_exists()
+    except Exception:
+        hash_exists = False
+    return (hash_exists and node_state_does_not_require_overwrite)
+
+
+class SLURMGraphPlugin(GraphPluginBase):
+    """Execute using SLURM
+
+    The plugin_args input to run can be used to control the SLURM execution.
+    Currently supported options are:
+
+    - template : template to use for batch job submission
+    - sbatch_args : arguments to be prepended to the job execution script in
+      the sbatch call
+
+    """
+    _template = "#!/bin/bash"
+
+    def __init__(self, **kwargs):
+        self._sbatch_args = ''
+        # default to resubmitting everything; plugin_args may override below
+        self._dont_resubmit_completed_jobs = False
+        if 'plugin_args' in kwargs and kwargs['plugin_args']:
+            if 'retry_timeout' in kwargs['plugin_args']:
+                self._retry_timeout = kwargs['plugin_args']['retry_timeout']
+            if 'max_tries' in kwargs['plugin_args']:
+                self._max_tries = kwargs['plugin_args']['max_tries']
+            if 'template' in kwargs['plugin_args']:
+                self._template = kwargs['plugin_args']['template']
+                if os.path.isfile(self._template):
+                    self._template = open(self._template).read()
+            if 'sbatch_args' in kwargs['plugin_args']:
+                self._sbatch_args = kwargs['plugin_args']['sbatch_args']
+            if 'dont_resubmit_completed_jobs' in kwargs['plugin_args']:
+                self._dont_resubmit_completed_jobs = kwargs['plugin_args'][
+                    'dont_resubmit_completed_jobs']
+        super(SLURMGraphPlugin, self).__init__(**kwargs)
+
+    def _submit_graph(self, pyfiles, dependencies, nodes):
+        def make_job_name(jobnumber, nodeslist):
+            """
+            - jobnumber: The index number of the job to create
+            - nodeslist: The list of nodes from which the job's node is taken
+            - return: A string representing this job to be displayed by SLURM
+            """
+            job_name = 'j{0}_{1}'.format(jobnumber, nodeslist[jobnumber]._id)
+            # Condition job_name to be a valid bash identifier (i.e. 
- is invalid) + job_name = job_name.replace('-', '_').replace('.', '_').replace( + ':', '_') + return job_name + + batch_dir, _ = os.path.split(pyfiles[0]) + submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh') + + cache_doneness_per_node = dict() + if self._dont_resubmit_completed_jobs: # A future parameter for controlling this behavior could be added here + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + node_status_done = node_completed_status(node) + + # if the node itself claims done, then check to ensure all + # dependancies are also done + if node_status_done and idx in dependencies: + for child_idx in dependencies[idx]: + if child_idx in cache_doneness_per_node: + child_status_done = cache_doneness_per_node[ + child_idx] + else: + child_status_done = node_completed_status( + nodes[child_idx]) + node_status_done = node_status_done and child_status_done + + cache_doneness_per_node[idx] = node_status_done + + with open(submitjobsfile, 'wt') as fp: + fp.writelines('#!/usr/bin/env bash\n') + fp.writelines('# Condense format attempted\n') + for idx, pyscript in enumerate(pyfiles): + node = nodes[idx] + if cache_doneness_per_node.get(idx, False): + continue + else: + template, sbatch_args = self._get_args( + node, ["template", "sbatch_args"]) + + batch_dir, name = os.path.split(pyscript) + name = '.'.join(name.split('.')[:-1]) + batchscript = '\n'.join( + (template, '%s %s' % (sys.executable, pyscript))) + batchscriptfile = os.path.join(batch_dir, + 'batchscript_%s.sh' % name) + + batchscriptoutfile = batchscriptfile + '.o' + batchscripterrfile = batchscriptfile + '.e' + + with open(batchscriptfile, 'wt') as batchfp: + batchfp.writelines(batchscript) + batchfp.close() + deps = '' + if idx in dependencies: + values = '' + for jobid in dependencies[idx]: + # Avoid dependancies of done jobs + if not self._dont_resubmit_completed_jobs or not cache_doneness_per_node[jobid]: + values += "${{{0}}}:".format( + make_job_name(jobid, nodes)) + if values != '': # i.e. 
if some jobs were added to dependency list
+                        values = values.rstrip(':')
+                        deps = '--dependency=afterok:%s' % values
+                jobname = make_job_name(idx, nodes)
+                # Do not use default output locations if they are set in self._sbatch_args
+                stderrFile = ''
+                if self._sbatch_args.count('-e ') == 0:
+                    stderrFile = '-e {errFile}'.format(
+                        errFile=batchscripterrfile)
+                stdoutFile = ''
+                if self._sbatch_args.count('-o ') == 0:
+                    stdoutFile = '-o {outFile}'.format(
+                        outFile=batchscriptoutfile)
+                full_line = '{jobNm}=$(sbatch {outFileOption} {errFileOption} {extraSBatchArgs} {dependantIndex} -J {jobNm} {batchscript} | awk \'/^Submitted/ {{print $4}}\')\n'.format(
+                    jobNm=jobname,
+                    outFileOption=stdoutFile,
+                    errFileOption=stderrFile,
+                    extraSBatchArgs=sbatch_args,
+                    dependantIndex=deps,
+                    batchscript=batchscriptfile)
+                fp.writelines(full_line)
+        cmd = CommandLine(
+            'bash',
+            environ=dict(os.environ),
+            resource_monitor=False,
+            terminal_output='allatonce')
+        cmd.inputs.args = '%s' % submitjobsfile
+        cmd.run()
+        logger.info('submitted all jobs to queue')
diff --git a/nipype/pipeline/plugins/somaflow.py b/nipype/pipeline/plugins/somaflow.py
new file mode 100644
index 0000000000..174b277c6f
--- /dev/null
+++ b/nipype/pipeline/plugins/somaflow.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+"""Parallel workflow execution via soma-workflow
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os
+import sys
+
+from .base import (GraphPluginBase, logger)
+
+soma_not_loaded = False
+try:
+    from soma.workflow.client import (Job, Workflow, WorkflowController,
+                                      Helper)
+except ImportError:
+    soma_not_loaded = True
+
+
+class SomaFlowPlugin(GraphPluginBase):
+    """Execute using Soma workflow
+    """
+
+    def __init__(self, plugin_args=None):
+        if soma_not_loaded:
+            raise ImportError('SomaFlow could not be imported')
+        super(SomaFlowPlugin, self).__init__(plugin_args=plugin_args)
+
+    def _submit_graph(self, pyfiles, dependencies, nodes):
+        jobs = []
+        soma_deps = []
+        for idx, fname in enumerate(pyfiles):
+            name = os.path.splitext(os.path.split(fname)[1])[0]
+            jobs.append(Job(command=[sys.executable, fname], name=name))
+        for key, values in list(dependencies.items()):
+            for val in values:
+                soma_deps.append((jobs[val], jobs[key]))
+
+        wf = Workflow(jobs, soma_deps)
+        logger.info('serializing workflow')
+        Helper.serialize('workflow', wf)
+        controller = WorkflowController()
+        logger.info('submitting workflow')
+        wf_id = controller.submit_workflow(wf)
+        Helper.wait_workflow(wf_id, controller)
diff --git a/nipype/pipeline/plugins/tests/__init__.py b/nipype/pipeline/plugins/tests/__init__.py
new file mode 100644
index 0000000000..99fb243f19
--- /dev/null
+++ b/nipype/pipeline/plugins/tests/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
diff --git a/nipype/pipeline/plugins/tests/test_base.py b/nipype/pipeline/plugins/tests/test_base.py
new file mode 100644
index 0000000000..49928cfe4b
--- /dev/null
+++ b/nipype/pipeline/plugins/tests/test_base.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Tests for the engine module
+"""
+import numpy as np
+import scipy.sparse as ssp
+
+
+def test_scipy_sparse():
+    foo = ssp.lil_matrix(np.eye(3, k=1))
+    goo = foo.getrowview(0)
+    goo[goo.nonzero()] = 0
+    assert foo[0, 1] == 0
+
+
+'''
+Can use the following code to 
test that a mapnode crash continues successfully +Need to put this into a nose-test with a timeout + +import nipype.interfaces.utility as niu +import nipype.pipeline.engine as pe + +wf = pe.Workflow(name='test') + +def func(arg1): + if arg1 == 2: + raise Exception('arg cannot be ' + str(arg1)) + return arg1 + +funkynode = pe.MapNode(niu.Function(function=func, input_names=['arg1'], + output_names=['out']), + iterfield=['arg1'], + name = 'functor') +funkynode.inputs.arg1 = [1,2] + +wf.add_nodes([funkynode]) +wf.base_dir = '/tmp' + +wf.run(plugin='MultiProc') +''' diff --git a/nipype/pipeline/plugins/tests/test_callback.py b/nipype/pipeline/plugins/tests/test_callback.py new file mode 100644 index 0000000000..29c5cbd404 --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_callback.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for workflow callbacks +""" + +from builtins import object + +import pytest +import sys +import nipype.interfaces.utility as niu +import nipype.pipeline.engine as pe + + +def func(): + return + + +def bad_func(): + raise Exception + + +class Status(object): + def __init__(self): + self.statuses = [] + + def callback(self, node, status, result=None): + self.statuses.append((node, status)) + + +def test_callback_normal(tmpdir): + tmpdir.chdir() + + so = Status() + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + f_node = pe.Node( + niu.Function(function=func, input_names=[], output_names=[]), + name='f_node') + wf.add_nodes([f_node]) + wf.config['execution'] = {'crashdump_dir': wf.base_dir} + wf.run(plugin="Linear", plugin_args={'status_callback': so.callback}) + assert len(so.statuses) == 2 + for (n, s) in so.statuses: + assert n.name == 'f_node' + assert so.statuses[0][1] == 'start' + assert so.statuses[1][1] == 'end' + + +def test_callback_exception(tmpdir): + tmpdir.chdir() + + so = Status() + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + f_node = pe.Node( + niu.Function(function=bad_func, input_names=[], output_names=[]), + name='f_node') + wf.add_nodes([f_node]) + wf.config['execution'] = {'crashdump_dir': wf.base_dir} + try: + wf.run(plugin="Linear", plugin_args={'status_callback': so.callback}) + except: + pass + assert len(so.statuses) == 2 + for (n, s) in so.statuses: + assert n.name == 'f_node' + assert so.statuses[0][1] == 'start' + assert so.statuses[1][1] == 'exception' + + +def test_callback_multiproc_normal(tmpdir): + tmpdir.chdir() + + so = Status() + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + f_node = pe.Node( + niu.Function(function=func, input_names=[], output_names=[]), + name='f_node') + wf.add_nodes([f_node]) + wf.config['execution']['crashdump_dir'] = wf.base_dir + wf.config['execution']['poll_sleep_duration'] = 2 + wf.run(plugin='MultiProc', plugin_args={'status_callback': so.callback}) + assert len(so.statuses) == 2 + for (n, s) in so.statuses: + assert n.name == 'f_node' + assert so.statuses[0][1] == 'start' + assert so.statuses[1][1] == 'end' + + +def test_callback_multiproc_exception(tmpdir): + tmpdir.chdir() + + so = Status() + wf = pe.Workflow(name='test', base_dir=tmpdir.strpath) + f_node = pe.Node( + niu.Function(function=bad_func, input_names=[], output_names=[]), + name='f_node') + wf.add_nodes([f_node]) + wf.config['execution'] = {'crashdump_dir': wf.base_dir} + + try: + wf.run( + plugin='MultiProc', plugin_args={ + 'status_callback': so.callback + }) + except: + pass + assert 
len(so.statuses) == 2 + for (n, s) in so.statuses: + assert n.name == 'f_node' + assert so.statuses[0][1] == 'start' + assert so.statuses[1][1] == 'exception' diff --git a/nipype/pipeline/plugins/tests/test_debug.py b/nipype/pipeline/plugins/tests/test_debug.py new file mode 100644 index 0000000000..bd06ecb775 --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_debug.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +import os +import nipype.interfaces.base as nib + +import pytest +import nipype.pipeline.engine as pe + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class DebugTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +def callme(node, graph): + pass + + +def test_debug(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(DebugTestInterface(), name='mod1') + mod2 = pe.MapNode(DebugTestInterface(), iterfield=['input1'], name='mod2') + + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + + run_wf = lambda: pipe.run(plugin="Debug") + with pytest.raises(ValueError): + run_wf() + + exc = None + try: + pipe.run(plugin="Debug", plugin_args={'callable': callme}) + except Exception as e: + exc = e + + assert exc is None, 'unexpected exception caught' diff --git a/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py new file mode 100644 index 0000000000..a83d426ada --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_legacymultiproc_nondaemon.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Testing module for functions and classes from multiproc.py +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, open + +# Import packages +import os +import sys +from tempfile import mkdtemp +from shutil import rmtree +import pytest + +import nipype.pipeline.engine as pe +from nipype.interfaces.utility import Function + + +def mytestFunction(insum=0): + ''' + Run a multiprocessing job and spawn child processes. + ''' + + # need to import here since this is executed as an external process + import multiprocessing + import os + import tempfile + import time + + numberOfThreads = 2 + + # list of processes + t = [None] * numberOfThreads + + # list of alive flags + a = [None] * numberOfThreads + + # list of tempFiles + f = [None] * numberOfThreads + + def dummyFunction(filename): + ''' + This function writes the value 45 to the given filename. 
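+        (45 is the sum of the integers 0 through 9, accumulated in the
+        loop below, so each temp file ends up holding the string '45'.)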
+ ''' + j = 0 + for i in range(0, 10): + j += i + + # j is now 45 (0+1+2+3+4+5+6+7+8+9) + + with open(filename, 'w') as f: + f.write(str(j)) + + for n in range(numberOfThreads): + + # mark thread as alive + a[n] = True + + # create a temp file to use as the data exchange container + tmpFile = tempfile.mkstemp('.txt', 'test_engine_')[1] + f[n] = tmpFile # keep track of the temp file + t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile, )) + # fire up the job + t[n].start() + + # block until all processes are done + allDone = False + while not allDone: + + time.sleep(1) + + for n in range(numberOfThreads): + + a[n] = t[n].is_alive() + + if not any(a): + # if no thread is alive + allDone = True + + # here, all processes are done + + # read in all temp files and sum them up + total = insum + for ff in f: + with open(ff) as fd: + total += int(fd.read()) + os.remove(ff) + + return total + + +def run_multiproc_nondaemon_with_flag(nondaemon_flag): + ''' + Start a pipe with two nodes using the resource multiproc plugin and + passing the nondaemon_flag. + ''' + + cur_dir = os.getcwd() + temp_dir = mkdtemp(prefix='test_engine_') + os.chdir(temp_dir) + + pipe = pe.Workflow(name='pipe') + + f1 = pe.Node( + interface=Function( + function=mytestFunction, + input_names=['insum'], + output_names=['sum_out']), + name='f1') + f2 = pe.Node( + interface=Function( + function=mytestFunction, + input_names=['insum'], + output_names=['sum_out']), + name='f2') + + pipe.connect([(f1, f2, [('sum_out', 'insum')])]) + pipe.base_dir = os.getcwd() + f1.inputs.insum = 0 + + pipe.config['execution']['stop_on_first_crash'] = True + + # execute the pipe using the LegacyMultiProc plugin with 2 processes and the + # non_daemon flag to enable child processes which start other + # multiprocessing jobs + execgraph = pipe.run( + plugin="LegacyMultiProc", + plugin_args={ + 'n_procs': 2, + 'non_daemon': nondaemon_flag + }) + + names = [ + '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() + ] + node = list(execgraph.nodes())[names.index('pipe.f2')] + result = node.get_output('sum_out') + os.chdir(cur_dir) + rmtree(temp_dir) + return result + + +def test_run_multiproc_nondaemon_false(): + ''' + This is the entry point for the test. Two times a pipe of several + multiprocessing jobs gets executed. First, without the nondaemon flag. + Second, with the nondaemon flag. + + Since the processes of the pipe start child processes, the execution only + succeeds when the non_daemon flag is on. 
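+    Python does not allow daemonic processes to have children, so the
+    nested multiprocessing.Process calls fail unless the pool workers
+    are spawned as non-daemons.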
+ ''' + shouldHaveFailed = False + try: + # with nondaemon_flag = False, the execution should fail + run_multiproc_nondaemon_with_flag(False) + except: + shouldHaveFailed = True + assert shouldHaveFailed + + +def test_run_multiproc_nondaemon_true(): + # with nondaemon_flag = True, the execution should succeed + result = run_multiproc_nondaemon_with_flag(True) + assert result == 180 # n_procs (2) * numberOfThreads (2) * 45 == 180 diff --git a/nipype/pipeline/plugins/tests/test_linear.py b/nipype/pipeline/plugins/tests/test_linear.py new file mode 100644 index 0000000000..6484432baa --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_linear.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +import os +import nipype.interfaces.base as nib + +import nipype.pipeline.engine as pe + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class LinearTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +def test_run_in_series(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=LinearTestInterface(), name='mod1') + mod2 = pe.MapNode( + interface=LinearTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + execgraph = pipe.run(plugin="Linear") + names = [ + '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() + ] + node = list(execgraph.nodes())[names.index('pipe.mod1')] + result = node.get_output('output1') + assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_multiproc.py b/nipype/pipeline/plugins/tests/test_multiproc.py new file mode 100644 index 0000000000..7ba9001c39 --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_multiproc.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Test the resource management of MultiProc +""" +import os +import pytest +from nipype.pipeline import engine as pe +from nipype.interfaces import base as nib + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class MultiprocTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +def test_run_multiproc(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(MultiprocTestInterface(), name='mod1') + mod2 = pe.MapNode( + MultiprocTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + pipe.config['execution']['poll_sleep_duration'] = 2 + execgraph = pipe.run(plugin="MultiProc") + names = [node.fullname for node in execgraph.nodes()] + node = 
list(execgraph.nodes())[names.index('pipe.mod1')] + result = node.get_output('output1') + assert result == [1, 1] + + +class InputSpecSingleNode(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpecSingleNode(nib.TraitedSpec): + output1 = nib.traits.Int(desc='a random int') + + +class SingleNodeTestInterface(nib.BaseInterface): + input_spec = InputSpecSingleNode + output_spec = OutputSpecSingleNode + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = self.inputs.input1 + return outputs + + +def test_no_more_memory_than_specified(tmpdir): + tmpdir.chdir() + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(SingleNodeTestInterface(), name='n1', mem_gb=1) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', mem_gb=1) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', mem_gb=1) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', mem_gb=1) + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 1 + + max_memory = 0.5 + with pytest.raises(RuntimeError): + pipe.run( + plugin='MultiProc', + plugin_args={ + 'memory_gb': max_memory, + 'n_procs': 2 + }) + + +def test_no_more_threads_than_specified(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=4) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 4 + + max_threads = 2 + with pytest.raises(RuntimeError): + pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads}) + + +def test_hold_job_until_procs_available(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + n1 = pe.Node(SingleNodeTestInterface(), name='n1', n_procs=2) + n2 = pe.Node(SingleNodeTestInterface(), name='n2', n_procs=2) + n3 = pe.Node(SingleNodeTestInterface(), name='n3', n_procs=2) + n4 = pe.Node(SingleNodeTestInterface(), name='n4', n_procs=2) + + pipe.connect(n1, 'output1', n2, 'input1') + pipe.connect(n1, 'output1', n3, 'input1') + pipe.connect(n2, 'output1', n4, 'input1') + pipe.connect(n3, 'output1', n4, 'input2') + n1.inputs.input1 = 4 + + max_threads = 2 + pipe.run(plugin='MultiProc', plugin_args={'n_procs': max_threads}) diff --git a/nipype/pipeline/plugins/tests/test_oar.py b/nipype/pipeline/plugins/tests/test_oar.py new file mode 100644 index 0000000000..fd4f0b950c --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_oar.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +import os +from shutil import rmtree +from tempfile import mkdtemp + +import nipype.interfaces.base as nib +import pytest +import nipype.pipeline.engine as pe + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class OarTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + 
return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +@pytest.mark.xfail(reason="not known") +def test_run_oar(): + cur_dir = os.getcwd() + temp_dir = mkdtemp(prefix='test_engine_', dir=os.getcwd()) + os.chdir(temp_dir) + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=OarTestInterface(), name='mod1') + mod2 = pe.MapNode( + interface=OarTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + execgraph = pipe.run(plugin="OAR") + names = [ + '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() + ] + node = list(execgraph.nodes())[names.index('pipe.mod1')] + result = node.get_output('output1') + assert result == [1, 1] + os.chdir(cur_dir) + rmtree(temp_dir) diff --git a/nipype/pipeline/plugins/tests/test_pbs.py b/nipype/pipeline/plugins/tests/test_pbs.py new file mode 100644 index 0000000000..f6aa6c88e0 --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_pbs.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +import os +from shutil import rmtree +from tempfile import mkdtemp +from time import sleep + +import nipype.interfaces.base as nib +import pytest +import nipype.pipeline.engine as pe + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class PbsTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() + outputs['output1'] = [1, self.inputs.input1] + return outputs + + +@pytest.mark.xfail(reason="not known") +def test_run_pbsgraph(): + cur_dir = os.getcwd() + temp_dir = mkdtemp(prefix='test_engine_') + os.chdir(temp_dir) + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=PbsTestInterface(), name='mod1') + mod2 = pe.MapNode( + interface=PbsTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + execgraph = pipe.run(plugin="PBSGraph") + names = [ + '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() + ] + node = list(execgraph.nodes())[names.index('pipe.mod1')] + result = node.get_output('output1') + assert result == [1, 1] + os.chdir(cur_dir) + rmtree(temp_dir) diff --git a/nipype/pipeline/plugins/tests/test_somaflow.py b/nipype/pipeline/plugins/tests/test_somaflow.py new file mode 100644 index 0000000000..68cefcdc17 --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_somaflow.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +import os +from time import sleep + +import nipype.interfaces.base as nib +import pytest +import nipype.pipeline.engine as pe + +from nipype.pipeline.plugins.somaflow import soma_not_loaded + + +class InputSpec(nib.TraitedSpec): + input1 = nib.traits.Int(desc='a random int') + input2 = nib.traits.Int(desc='a random int') + + +class OutputSpec(nib.TraitedSpec): + output1 = nib.traits.List(nib.traits.Int, desc='outputs') + + +class SomaTestInterface(nib.BaseInterface): + input_spec = InputSpec + output_spec = OutputSpec + + def _run_interface(self, runtime): + runtime.returncode = 0 + return runtime + + def _list_outputs(self): + outputs = self._outputs().get() 
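+        # echo the input back alongside a constant so the test can check
+        # that values propagated through the executed graph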
+ outputs['output1'] = [1, self.inputs.input1] + return outputs + + +@pytest.mark.skipif(soma_not_loaded, reason="soma not loaded") +def test_run_somaflow(tmpdir): + tmpdir.chdir() + + pipe = pe.Workflow(name='pipe') + mod1 = pe.Node(interface=SomaTestInterface(), name='mod1') + mod2 = pe.MapNode( + interface=SomaTestInterface(), iterfield=['input1'], name='mod2') + pipe.connect([(mod1, mod2, [('output1', 'input1')])]) + pipe.base_dir = os.getcwd() + mod1.inputs.input1 = 1 + execgraph = pipe.run(plugin="SomaFlow") + names = [ + '.'.join((node._hierarchy, node.name)) for node in execgraph.nodes() + ] + node = list(execgraph.nodes())[names.index('pipe.mod1')] + result = node.get_output('output1') + assert result == [1, 1] diff --git a/nipype/pipeline/plugins/tests/test_tools.py b/nipype/pipeline/plugins/tests/test_tools.py new file mode 100644 index 0000000000..49b2de63dc --- /dev/null +++ b/nipype/pipeline/plugins/tests/test_tools.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Tests for the engine module +""" +import numpy as np +import scipy.sparse as ssp +import re + +import mock + +from nipype.pipeline.plugins.tools import report_crash + + +def test_report_crash(): + with mock.patch('pickle.dump', mock.MagicMock()) as mock_pickle_dump: + with mock.patch('nipype.pipeline.plugins.tools.format_exception', + mock.MagicMock()): # see iss 1517 + mock_pickle_dump.return_value = True + mock_node = mock.MagicMock(name='mock_node') + mock_node._id = 'an_id' + mock_node.config = { + 'execution': { + 'crashdump_dir': '.', + 'crashfile_format': 'pklz', + } + } + + actual_crashfile = report_crash(mock_node) + + expected_crashfile = re.compile( + '.*/crash-.*-an_id-[0-9a-f\-]*.pklz') + + assert expected_crashfile.match( + actual_crashfile).group() == actual_crashfile + assert mock_pickle_dump.call_count == 1 + + +''' +Can use the following code to test that a mapnode crash continues successfully +Need to put this into a nose-test with a timeout + +import nipype.interfaces.utility as niu +import nipype.pipeline.engine as pe + +wf = pe.Workflow(name='test') + +def func(arg1): + if arg1 == 2: + raise Exception('arg cannot be ' + str(arg1)) + return arg1 + +funkynode = pe.MapNode(niu.Function(function=func, input_names=['arg1'], + output_names=['out']), + iterfield=['arg1'], + name = 'functor') +funkynode.inputs.arg1 = [1,2] + +wf.add_nodes([funkynode]) +wf.base_dir = '/tmp' + +wf.run(plugin='MultiProc') +''' diff --git a/nipype/pipeline/plugins/tools.py b/nipype/pipeline/plugins/tools.py new file mode 100644 index 0000000000..54fffd2398 --- /dev/null +++ b/nipype/pipeline/plugins/tools.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Common graph operations for execution +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open + +import os +import getpass +from socket import gethostname +import sys +import uuid +from time import strftime +from traceback import format_exception + +from ... 
import logging +from ...utils.filemanip import savepkl, crash2txt, makedirs + +logger = logging.getLogger('nipype.workflow') + + +def report_crash(node, traceback=None, hostname=None): + """Writes crash related information to a file + """ + name = node._id + host = None + if node.result and getattr(node.result, 'runtime'): + if isinstance(node.result.runtime, list): + host = node.result.runtime[0].hostname + else: + host = node.result.runtime.hostname + + # Try everything to fill in the host + host = host or hostname or gethostname() + logger.error('Node %s failed to run on host %s.', name, host) + if not traceback: + traceback = format_exception(*sys.exc_info()) + timeofcrash = strftime('%Y%m%d-%H%M%S') + try: + login_name = getpass.getuser() + except KeyError: + login_name = 'UID{:d}'.format(os.getuid()) + crashfile = 'crash-%s-%s-%s-%s' % (timeofcrash, login_name, name, + str(uuid.uuid4())) + crashdir = node.config['execution'].get('crashdump_dir', os.getcwd()) + + makedirs(crashdir, exist_ok=True) + crashfile = os.path.join(crashdir, crashfile) + + if node.config['execution']['crashfile_format'].lower() in ['text', 'txt']: + crashfile += '.txt' + else: + crashfile += '.pklz' + + logger.error('Saving crash info to %s\n%s', crashfile, ''.join(traceback)) + if crashfile.endswith('.txt'): + crash2txt(crashfile, dict(node=node, traceback=traceback)) + else: + savepkl(crashfile, dict(node=node, traceback=traceback), + versioning=True) + return crashfile + + +def report_nodes_not_run(notrun): + """List nodes that crashed with crashfile info + + Optionally displays dependent nodes that weren't executed as a result of + the crash. + """ + if notrun: + logger.info("***********************************") + for info in notrun: + logger.error("could not run node: %s" % '.'.join( + (info['node']._hierarchy, info['node']._id))) + logger.info("crashfile: %s" % info['crashfile']) + logger.debug("The following dependent nodes were not run") + for subnode in info['dependents']: + logger.debug(subnode._id) + logger.info("***********************************") + raise RuntimeError(('Workflow did not execute cleanly. 
' + 'Check log for details')) + + +def create_pyscript(node, updatehash=False, store_exception=True): + # pickle node + timestamp = strftime('%Y%m%d_%H%M%S') + if node._hierarchy: + suffix = '%s_%s_%s' % (timestamp, node._hierarchy, node._id) + batch_dir = os.path.join(node.base_dir, + node._hierarchy.split('.')[0], 'batch') + else: + suffix = '%s_%s' % (timestamp, node._id) + batch_dir = os.path.join(node.base_dir, 'batch') + if not os.path.exists(batch_dir): + os.makedirs(batch_dir) + pkl_file = os.path.join(batch_dir, 'node_%s.pklz' % suffix) + savepkl(pkl_file, dict(node=node, updatehash=updatehash)) + mpl_backend = node.config["execution"]["matplotlib_backend"] + # create python script to load and trap exception + cmdstr = """import os +import sys + +can_import_matplotlib = True #Silently allow matplotlib to be ignored +try: + import matplotlib + matplotlib.use('%s') +except ImportError: + can_import_matplotlib = False + pass + +from nipype import config, logging +from nipype.utils.filemanip import loadpkl, savepkl +from socket import gethostname +from traceback import format_exception +info = None +pklfile = '%s' +batchdir = '%s' +from nipype.utils.filemanip import loadpkl, savepkl +try: + if not sys.version_info < (2, 7): + from collections import OrderedDict + config_dict=%s + config.update_config(config_dict) + ## Only configure matplotlib if it was successfully imported, + ## matplotlib is an optional component to nipype + if can_import_matplotlib: + config.update_matplotlib() + logging.update_logging(config) + traceback=None + cwd = os.getcwd() + info = loadpkl(pklfile) + result = info['node'].run(updatehash=info['updatehash']) +except Exception as e: + etype, eval, etr = sys.exc_info() + traceback = format_exception(etype,eval,etr) + if info is None or not os.path.exists(info['node'].output_dir()): + result = None + resultsfile = os.path.join(batchdir, 'crashdump_%s.pklz') + else: + result = info['node'].result + resultsfile = os.path.join(info['node'].output_dir(), + 'result_%%s.pklz'%%info['node'].name) +""" + if store_exception: + cmdstr += """ + savepkl(resultsfile, dict(result=result, hostname=gethostname(), + traceback=traceback)) +""" + else: + cmdstr += """ + if info is None: + savepkl(resultsfile, dict(result=result, hostname=gethostname(), + traceback=traceback)) + else: + from nipype.pipeline.plugins.base import report_crash + report_crash(info['node'], traceback, gethostname()) + raise Exception(e) +""" + cmdstr = cmdstr % (mpl_backend, pkl_file, batch_dir, node.config, suffix) + pyscript = os.path.join(batch_dir, 'pyscript_%s.py' % suffix) + with open(pyscript, 'wt') as fp: + fp.writelines(cmdstr) + return pyscript diff --git a/nipype/pkg_info.py b/nipype/pkg_info.py new file mode 100644 index 0000000000..f2aa07030d --- /dev/null +++ b/nipype/pkg_info.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from future import standard_library +standard_library.install_aliases() +from builtins import open +import configparser + +import os +import sys +import subprocess + +COMMIT_INFO_FNAME = 'COMMIT_INFO.txt' +PY3 = sys.version_info[0] >= 3 + + +def pkg_commit_hash(pkg_path): + ''' Get short form of commit hash given directory `pkg_path` + + There should be a file called 'COMMIT_INFO.txt' in `pkg_path`. This is a + file in INI file format, with at least one section: ``commit hash`` and two + variables ``archive_subst_hash`` and ``install_hash``. 
The first has a + substitution pattern in it which may have been filled by the execution of + ``git archive`` if this is an archive generated that way. The second is + filled in by the installation, if the installation is from a git archive. + + We get the commit hash from (in order of preference): + + * A substituted value in ``archive_subst_hash`` + * A written commit hash value in ``install_hash` + * git's output, if we are in a git repository + + If all these fail, we return a not-found placeholder tuple + + Parameters + ---------- + pkg_path : str + directory containing package + + Returns + ------- + hash_from : str + Where we got the hash from - description + hash_str : str + short form of hash + ''' + # Try and get commit from written commit text file + pth = os.path.join(pkg_path, COMMIT_INFO_FNAME) + if not os.path.isfile(pth): + raise IOError('Missing commit info file %s' % pth) + cfg_parser = configparser.RawConfigParser() + with open(pth, encoding='utf-8') as fp: + cfg_parser.readfp(fp) + archive_subst = cfg_parser.get('commit hash', 'archive_subst_hash') + if not archive_subst.startswith('$Format'): # it has been substituted + return 'archive substitution', archive_subst + install_subst = cfg_parser.get('commit hash', 'install_hash') + if install_subst != '': + return 'installation', install_subst + # maybe we are in a repository + proc = subprocess.Popen( + 'git rev-parse --short HEAD', + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + cwd=pkg_path, + shell=True) + repo_commit, _ = proc.communicate() + if repo_commit: + if PY3: + repo_commit = repo_commit.decode() + return 'repository', repo_commit.strip() + return '(none found)', '' + + +def get_pkg_info(pkg_path): + ''' Return dict describing the context of this package + + Parameters + ---------- + pkg_path : str + path containing __init__.py for package + + Returns + ------- + context : dict + with named parameters of interest + ''' + src, hsh = pkg_commit_hash(pkg_path) + from .info import VERSION + if not PY3: + src, hsh, VERSION = src.encode(), hsh.encode(), VERSION.encode() + import networkx + import nibabel + import numpy + import scipy + import traits + return dict( + pkg_path=pkg_path, + commit_source=src, + commit_hash=hsh, + nipype_version=VERSION, + sys_version=sys.version, + sys_executable=sys.executable, + sys_platform=sys.platform, + numpy_version=numpy.__version__, + scipy_version=scipy.__version__, + networkx_version=networkx.__version__, + nibabel_version=nibabel.__version__, + traits_version=traits.__version__) diff --git a/nipype/pytest.ini b/nipype/pytest.ini new file mode 100644 index 0000000000..70f12b64aa --- /dev/null +++ b/nipype/pytest.ini @@ -0,0 +1,6 @@ +[pytest] +norecursedirs = .git build dist doc nipype/external tools examples src +addopts = --doctest-modules -n auto +doctest_optionflags = ALLOW_UNICODE NORMALIZE_WHITESPACE +env = + PYTHONHASHSEED=0 diff --git a/nipype/refs.py b/nipype/refs.py new file mode 100644 index 0000000000..0478d7ceed --- /dev/null +++ b/nipype/refs.py @@ -0,0 +1,20 @@ +# Use duecredit (duecredit.org) to provide a citation to relevant work to +# be cited. 
This does nothing, unless the user has duecredit installed, +# And calls this with duecredit (as in `python -m duecredit script.py`): +from .external.due import due, Doi, BibTeX + +due.cite( + Doi("10.3389/fninf.2011.00013"), + description="A flexible, lightweight and extensible neuroimaging data" + " processing framework in Python", + path="nipype", + tags=["implementation"], +) + +due.cite( + Doi("10.5281/zenodo.50186"), + description="A flexible, lightweight and extensible neuroimaging data" + " processing framework in Python", + path="nipype", + tags=["implementation"], +) diff --git a/nipype/scripts/__init__.py b/nipype/scripts/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/scripts/cli.py b/nipype/scripts/cli.py new file mode 100644 index 0000000000..59d8672cfb --- /dev/null +++ b/nipype/scripts/cli.py @@ -0,0 +1,258 @@ +#!python +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from io import open + +import click + +from .instance import list_interfaces +from .utils import ( + CONTEXT_SETTINGS, + UNKNOWN_OPTIONS, + ExistingDirPath, + ExistingFilePath, + UnexistingFilePath, + RegularExpression, + PythonModule, + check_not_none, +) + +from .. import __version__ + + +# declare the CLI group +@click.group(context_settings=CONTEXT_SETTINGS) +def cli(): + pass + + +@cli.command(context_settings=CONTEXT_SETTINGS) +@click.argument('logdir', type=ExistingDirPath, callback=check_not_none) +@click.option( + '-r', + '--regex', + type=RegularExpression(), + callback=check_not_none, + help='Regular expression to be searched in each traceback.') +def search(logdir, regex): + """Search for tracebacks content. + + Search for traceback inside a folder of nipype crash log files that match + a given regular expression. + + Examples:\n + nipypecli search nipype/wd/log -r '.*subject123.*' + """ + from .crash_files import iter_tracebacks + + for file, trace in iter_tracebacks(logdir): + if regex.search(trace): + click.echo("-" * len(file)) + click.echo(file) + click.echo("-" * len(file)) + click.echo(trace) + + +@cli.command(context_settings=CONTEXT_SETTINGS) +@click.argument('crashfile', type=ExistingFilePath, callback=check_not_none) +@click.option( + '-r', '--rerun', is_flag=True, flag_value=True, help='Rerun crashed node.') +@click.option( + '-d', + '--debug', + is_flag=True, + flag_value=True, + help='Enable Python debugger when re-executing.') +@click.option( + '-i', + '--ipydebug', + is_flag=True, + flag_value=True, + help='Enable IPython debugger when re-executing.') +@click.option( + '-w', + '--dir', + type=ExistingDirPath, + help='Directory where to run the node in.') +def crash(crashfile, rerun, debug, ipydebug, dir): + """Display Nipype crash files. + + For certain crash files, one can rerun a failed node in a temp directory. + + Examples:\n + nipypecli crash crashfile.pklz\n + nipypecli crash crashfile.pklz -r -i\n + """ + from .crash_files import display_crash_file + + debug = 'ipython' if ipydebug else debug + if debug == 'ipython': + import sys + from IPython.core import ultratb + sys.excepthook = ultratb.FormattedTB( + mode='Verbose', color_scheme='Linux', call_pdb=1) + display_crash_file(crashfile, rerun, debug, dir) + + +@cli.command(context_settings=CONTEXT_SETTINGS) +@click.argument('pklz_file', type=ExistingFilePath, callback=check_not_none) +def show(pklz_file): + """Print the content of Nipype node .pklz file. 
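+
+    The file is unpickled with nipype's loadpkl helper and the resulting
+    data are pretty-printed to stdout.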
+
+    Examples:\n
+    nipypecli show node.pklz
+    """
+    from pprint import pprint
+    from ..utils.filemanip import loadpkl
+
+    pkl_data = loadpkl(pklz_file)
+    pprint(pkl_data)
+
+
+@cli.command(context_settings=UNKNOWN_OPTIONS)
+@click.argument(
+    'module', type=PythonModule(), required=False, callback=check_not_none)
+@click.argument('interface', type=str, required=False)
+@click.option(
+    '--list',
+    is_flag=True,
+    flag_value=True,
+    help='List the available Interfaces inside the given module.')
+@click.option(
+    '-h',
+    '--help',
+    is_flag=True,
+    flag_value=True,
+    help='Show help message and exit.')
+@click.pass_context
+def run(ctx, module, interface, list, help):
+    """Run a Nipype Interface.
+
+    Examples:\n
+    nipypecli run nipype.interfaces.nipy --list\n
+    nipypecli run nipype.interfaces.nipy ComputeMask --help
+    """
+    import argparse
+    from .utils import add_args_options
+    from ..utils.nipype_cmd import run_instance
+
+    # print run command help if no arguments are given
+    module_given = bool(module)
+    if not module_given:
+        click.echo(ctx.command.get_help(ctx))
+
+    # print the list of available interfaces for the given module
+    elif (module_given and list) or (module_given and not interface):
+        iface_names = list_interfaces(module)
+        click.echo('Available Interfaces:')
+        for if_name in iface_names:
+            click.echo('  {}'.format(if_name))
+
+    # check the interface
+    elif (module_given and interface):
+        # create the argument parser
+        description = "Run {}".format(interface)
+        prog = " ".join(
+            [ctx.command_path, module.__name__, interface] + ctx.args)
+        iface_parser = argparse.ArgumentParser(
+            description=description, prog=prog)
+
+        # instantiate the interface
+        node = getattr(module, interface)()
+        iface_parser = add_args_options(iface_parser, node)
+
+        if not ctx.args:
+            # print the interface help
+            try:
+                iface_parser.print_help()
+            except Exception:
+                print('An error occurred when trying to print the full '
+                      'command help, printing usage.')
+            finally:
+                iface_parser.print_usage()
+        else:
+            # run the interface
+            args = iface_parser.parse_args(args=ctx.args)
+            run_instance(node, args)
+
+
+@cli.command(context_settings=CONTEXT_SETTINGS)
+def version():
+    """Print current version of Nipype."""
+    click.echo(__version__)
+
+
+@cli.group()
+def convert():
+    """Export nipype interfaces to other formats."""
+    pass
+
+
+@convert.command(context_settings=CONTEXT_SETTINGS)
+@click.option(
+    "-i",
+    "--interface",
+    type=str,
+    required=True,
+    help="Name of the Nipype interface to export.")
+@click.option(
+    "-m",
+    "--module",
+    type=PythonModule(),
+    required=True,
+    callback=check_not_none,
+    help="Module where the interface is defined.")
+@click.option(
+    "-o",
+    "--output",
+    type=UnexistingFilePath,
+    required=True,
+    callback=check_not_none,
+    help="JSON file name where the Boutiques descriptor will be "
+    "written.")
+@click.option(
+    "-t",
+    "--ignored-template-inputs",
+    type=str,
+    multiple=True,
+    help="Interface inputs ignored in path template creations.")
+@click.option(
+    "-d",
+    "--docker-image",
+    type=str,
+    help="Name of the Docker image where the Nipype interface is "
+    "available.")
+@click.option(
+    "-r",
+    "--docker-index",
+    type=str,
+    help="Docker index where the Docker image is stored (e.g. 
" + "http://index.docker.io).") +@click.option( + "-n", + "--ignore-template-numbers", + is_flag=True, + flag_value=True, + help="Ignore all numbers in path template creations.") +@click.option( + "-v", + "--verbose", + is_flag=True, + flag_value=True, + help="Enable verbose output.") +def boutiques(interface, module, output, ignored_template_inputs, docker_image, + docker_index, ignore_template_numbers, verbose): + """Nipype to Boutiques exporter. + + See Boutiques specification at https://github.com/boutiques/schema. + """ + from nipype.utils.nipype2boutiques import generate_boutiques_descriptor + + # Generates JSON string + json_string = generate_boutiques_descriptor( + module, interface, ignored_template_inputs, docker_image, docker_index, + verbose, ignore_template_numbers) + + # Writes JSON string to file + with open(output, 'w') as f: + f.write(json_string) diff --git a/nipype/scripts/crash_files.py b/nipype/scripts/crash_files.py new file mode 100644 index 0000000000..b7b83dff5c --- /dev/null +++ b/nipype/scripts/crash_files.py @@ -0,0 +1,88 @@ +"""Utilities to manipulate and search through .pklz crash files.""" + +import re +import sys +import os.path as op +from glob import glob + +from traits.trait_errors import TraitError +from nipype.utils.filemanip import loadcrash + + +def load_pklz_traceback(crash_filepath): + """Return the traceback message in the given crash file.""" + try: + data = loadcrash(crash_filepath) + except TraitError as te: + return str(te) + except: + raise + else: + return '\n'.join(data['traceback']) + + +def iter_tracebacks(logdir): + """Return an iterator over each file path and + traceback field inside `logdir`. + Parameters + ---------- + logdir: str + Path to the log folder. + + field: str + Field name to be read from the crash file. + + Yields + ------ + path_file: str + + traceback: str + """ + crash_files = sorted(glob(op.join(logdir, '*.pkl*'))) + + for cf in crash_files: + yield cf, load_pklz_traceback(cf) + + +def display_crash_file(crashfile, rerun, debug, directory): + """display crash file content and rerun if required""" + from nipype.utils.filemanip import loadcrash + + crash_data = loadcrash(crashfile) + node = None + if 'node' in crash_data: + node = crash_data['node'] + tb = crash_data['traceback'] + print("\n") + print("File: %s" % crashfile) + + if node: + print("Node: %s" % node) + if node.base_dir: + print("Working directory: %s" % node.output_dir()) + else: + print("Node crashed before execution") + print("\n") + print("Node inputs:") + print(node.inputs) + print("\n") + print("Traceback: ") + print(''.join(tb)) + print("\n") + + if rerun: + if node is None: + print("No node in crashfile. Cannot rerun") + return + print("Rerunning node") + node.base_dir = directory + node.config = {'execution': {'crashdump_dir': '/tmp'}} + try: + node.run() + except: + if debug and debug != 'ipython': + import pdb + pdb.post_mortem() + else: + raise + print("\n") diff --git a/nipype/scripts/instance.py b/nipype/scripts/instance.py new file mode 100644 index 0000000000..1f44a43bda --- /dev/null +++ b/nipype/scripts/instance.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +""" +Import lib and class meta programming utilities. +""" +import inspect +import importlib + +from ..interfaces.base import Interface + + +def import_module(module_path): + """Import any module to the global Python environment. + The module_path argument specifies what module to import in + absolute or relative terms (e.g. either pkg.mod or ..mod). 
+    Note that no package anchor is passed to importlib here, so the
+    module path must be given in absolute terms; relative names cannot
+    be resolved by this helper.
+
+    Parameters
+    ----------
+    module_path: str
+        Path to the module to be imported
+
+    Returns
+    -------
+    The specified module will be inserted into sys.modules and returned.
+    """
+    try:
+        mod = importlib.import_module(module_path)
+    except ImportError:
+        raise ImportError(
+            'Error when importing object {}.'.format(module_path))
+    else:
+        return mod
+
+
+def list_interfaces(module):
+    """Return a list with the names of the Interface subclasses inside
+    the given module.
+    """
+    iface_names = []
+    for k, v in sorted(list(module.__dict__.items())):
+        if inspect.isclass(v) and issubclass(v, Interface):
+            iface_names.append(k)
+    return iface_names
diff --git a/nipype/scripts/utils.py b/nipype/scripts/utils.py
new file mode 100644
index 0000000000..f4b8a86fb1
--- /dev/null
+++ b/nipype/scripts/utils.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+"""
+Utilities for the CLI functions.
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from builtins import bytes, str
+
+import re
+import click
+import json
+
+from .instance import import_module
+from ..interfaces.base import InputMultiPath, traits
+
+# different context options
+CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+UNKNOWN_OPTIONS = dict(allow_extra_args=True, ignore_unknown_options=True)
+
+# specification of existing ParamTypes
+ExistingDirPath = click.Path(exists=True, file_okay=False, resolve_path=True)
+ExistingFilePath = click.Path(exists=True, dir_okay=False, resolve_path=True)
+UnexistingFilePath = click.Path(dir_okay=False, resolve_path=True)
+
+
+# validators
+def check_not_none(ctx, param, value):
+    if value is None:
+        raise click.BadParameter('got {}.'.format(value))
+    return value
+
+
+# declare custom click.ParamType
+class RegularExpression(click.ParamType):
+    name = 'regex'
+
+    def convert(self, value, param, ctx):
+        try:
+            rex = re.compile(value, re.IGNORECASE)
+        except re.error:
+            self.fail('%s is not a valid regular expression.' % value, param,
+                      ctx)
+        else:
+            return rex
+
+
+class PythonModule(click.ParamType):
+    name = 'Python module path'
+
+    def convert(self, value, param, ctx):
+        try:
+            module = import_module(value)
+        except ImportError:
+            self.fail('%s is not a valid Python module.' 
% value, param, ctx)
+        else:
+            return module
+
+
+def add_args_options(arg_parser, interface):
+    """Add arguments to `arg_parser` to create a CLI for `interface`."""
+    inputs = interface.input_spec()
+    for name, spec in sorted(interface.inputs.traits(transient=None).items()):
+        desc = "\n".join(interface._get_trait_desc(inputs, name,
+                                                   spec))[len(name) + 2:]
+        # Escape any % signs with a %
+        desc = desc.replace('%', '%%')
+        args = {}
+        has_multiple_inner_traits = False
+
+        if spec.is_trait_type(traits.Bool):
+            args["default"] = getattr(inputs, name)
+            args["action"] = 'store_true'
+
+        # current support is for simple trait types
+        if not spec.inner_traits:
+            if not spec.is_trait_type(traits.TraitCompound):
+                trait_type = type(spec.trait_type.default_value)
+                if trait_type in (bytes, str, int, float):
+                    if trait_type == bytes:
+                        trait_type = str
+                    args["type"] = trait_type
+        elif len(spec.inner_traits) == 1:
+            trait_type = type(spec.inner_traits[0].trait_type.default_value)
+            if trait_type == bytes:
+                trait_type = str
+            if trait_type in (bytes, bool, str, int, float):
+                args["type"] = trait_type
+        else:
+            if len(spec.inner_traits) > 1:
+                if not spec.is_trait_type(traits.Dict):
+                    has_multiple_inner_traits = True
+
+        if getattr(spec, "mandatory", False):
+            if spec.is_trait_type(InputMultiPath):
+                args["nargs"] = "+"
+            elif spec.is_trait_type(traits.List):
+                if (spec.trait_type.minlen == spec.trait_type.maxlen) and \
+                   spec.trait_type.maxlen:
+                    args["nargs"] = spec.trait_type.maxlen
+                else:
+                    args["nargs"] = "+"
+            elif spec.is_trait_type(traits.Dict):
+                args["type"] = json.loads
+
+            if has_multiple_inner_traits:
+                raise NotImplementedError(
+                    ('This interface cannot be used via the'
+                     ' command line, as multiple inner traits'
+                     ' are currently not supported for mandatory'
+                     ' argument: {}.'.format(name)))
+            arg_parser.add_argument(name, help=desc, **args)
+        else:
+            if spec.is_trait_type(InputMultiPath):
+                args["nargs"] = "*"
+            elif spec.is_trait_type(traits.List):
+                if (spec.trait_type.minlen == spec.trait_type.maxlen) and \
+                   spec.trait_type.maxlen:
+                    args["nargs"] = spec.trait_type.maxlen
+                else:
+                    args["nargs"] = "*"
+            if not has_multiple_inner_traits:
+                arg_parser.add_argument(
+                    "--%s" % name, dest=name, help=desc, **args)
+
+    return arg_parser
diff --git a/nipype/sphinxext/__init__.py b/nipype/sphinxext/__init__.py
new file mode 100644
index 0000000000..5f8ba4da2b
--- /dev/null
+++ b/nipype/sphinxext/__init__.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, absolute_import,
+                        unicode_literals)
diff --git a/nipype/sphinxext/plot_workflow.py b/nipype/sphinxext/plot_workflow.py
new file mode 100644
index 0000000000..7fa0769401
--- /dev/null
+++ b/nipype/sphinxext/plot_workflow.py
@@ -0,0 +1,768 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+:mod:`nipype.sphinxext.plot_workflow` -- Workflow plotting extension
+====================================================================
+
+
+A directive for including a nipype workflow graph in a Sphinx document.
+
+This code is forked from the plot_directive sphinx extension of matplotlib.
+
+By default, in HTML output, `workflow` will include a .png file with a
+link to a high-res .png. In LaTeX output, it will include a
+.pdf.
+
+The source code for the workflow may be included as **inline content** to
+the directive `workflow`::
+
+    .. workflow ::
+        :graph2use: flat
+        :simple_form: no
+
+        from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline
+        wf = create_connectivity_pipeline()
+
+For example, the following graph has been generated by inserting the
+previous code block in this documentation:
+
+.. workflow ::
+    :graph2use: flat
+    :simple_form: no
+
+    from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline
+    wf = create_connectivity_pipeline()
+
+Options
+-------
+
+The ``workflow`` directive supports the following options:
+
+    graph2use : {'hierarchical', 'colored', 'flat', 'orig', 'exec'}
+        Specify the type of graph to be generated.
+    simple_form : bool
+        Whether the graph will be in detailed or simple form.
+    format : {'python', 'doctest'}
+        Specify the format of the input.
+    include-source : bool
+        Whether to display the source code. The default can be changed
+        using the `wf_include_source` variable in conf.py.
+    encoding : str
+        If this source file is in a non-UTF8 or non-ASCII encoding, the
+        encoding must be specified using the `:encoding:` option. The
+        encoding will not be inferred using the ``-*- coding -*-``
+        metacomment.
+
+Additionally, this directive supports all of the options of the
+`image` directive, except for `target` (since workflow will add its own
+target). These include `alt`, `height`, `width`, `scale`, `align` and
+`class`.
+
+Configuration options
+---------------------
+
+The workflow directive has the following configuration options:
+
+    graph2use
+        Select a graph type to use.
+    simple_form
+        Determines whether the node name shown in the visualization is of
+        the form ``nodename (package)`` (when set to True) or
+        ``nodename.Class.package`` (when set to False).
+    wf_include_source
+        Default value for the include-source option.
+    wf_html_show_source_link
+        Whether to show a link to the source in HTML.
+    wf_pre_code
+        Code that should be executed before each workflow.
+    wf_basedir
+        Base directory to which ``workflow::`` file names are relative.
+        (If None or empty, file names are relative to the directory where
+        the file containing the directive is.)
+    wf_formats
+        File formats to generate. List of tuples or strings::
+
+            [(suffix, dpi), suffix, ...]
+
+        that determine the file format and the DPI. For entries whose DPI
+        is omitted, sensible defaults are chosen. When passing from the
+        command line through sphinx-build the list should be passed as
+        ``suffix:dpi,suffix:dpi,...``.
+    wf_html_show_formats
+        Whether to show links to the files in HTML.
+    wf_rcparams
+        A dictionary containing any non-standard rcParams that should
+        be applied before each workflow.
+    wf_apply_rcparams
+        By default, rcParams are applied when the `context` option is not
+        used in a workflow directive. This configuration option overrides
+        this behavior and applies rcParams before each workflow.
+    wf_working_directory
+        By default, the working directory will be changed to the directory
+        of the example, so the code can get at its data files, if any. Also
+        its path will be added to `sys.path` so it can import any helper
+        modules sitting beside it. This configuration option can be used
+        to specify a central directory (also added to `sys.path`) where
+        data files and helper modules for all code are located.
+    wf_template
+        Provide a customized template for preparing restructured text.
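+
+As a minimal illustration, a project enabling this extension could add the
+following to its ``conf.py`` (example values, not the defaults)::
+
+    extensions = ['nipype.sphinxext.plot_workflow']
+    wf_include_source = True
+    wf_formats = [('png', 100), 'pdf']
+    graph2use = 'colored'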
+ +""" +from __future__ import print_function, division, absolute_import, unicode_literals + +import sys +import os +import shutil +import io +import re +import textwrap +from os.path import relpath +from errno import EEXIST +import traceback + +missing_imports = [] +try: + from docutils.parsers.rst import directives + from docutils.parsers.rst.directives.images import Image + align = Image.align +except ImportError as e: + missing_imports = [str(e)] + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError as e: + missing_imports.append(str(e)) + try: + import jinja + + def format_template(template, **kw): + return jinja.from_string(template, **kw) + + missing_imports.pop() + except ImportError as e: + missing_imports.append(str(e)) + +from builtins import str, bytes + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +def _mkdirp(folder): + """ + Equivalent to bash's mkdir -p + """ + if sys.version_info > (3, 4, 1): + os.makedirs(folder, exist_ok=True) + return folder + + try: + os.makedirs(folder) + except OSError as exc: + if exc.errno != EEXIST or not os.path.isdir(folder): + raise + + return folder + + +def wf_directive(name, arguments, options, content, lineno, content_offset, + block_text, state, state_machine): + if len(missing_imports) == 0: + return run(arguments, content, options, state_machine, state, lineno) + else: + raise ImportError('\n'.join(missing_imports)) + + +wf_directive.__doc__ = __doc__ + + +def _option_boolean(arg): + if not arg or not arg.strip(): + # no argument given, assume used as a flag + return True + elif arg.strip().lower() in ('no', '0', 'false'): + return False + elif arg.strip().lower() in ('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + + +def _option_graph2use(arg): + return directives.choice( + arg, ('hierarchical', 'colored', 'flat', 'orig', 'exec')) + + +def _option_context(arg): + if arg in [None, 'reset', 'close-figs']: + return arg + raise ValueError("argument should be None or 'reset' or 'close-figs'") + + +def _option_format(arg): + return directives.choice(arg, ('python', 'doctest')) + + +def _option_align(arg): + return directives.choice( + arg, ("top", "middle", "bottom", "left", "center", "right")) + + +def mark_wf_labels(app, document): + """ + To make graphs referenceable, we need to move the reference from + the "htmlonly" (or "latexonly") node to the actual figure node + itself. 
+ """ + for name, explicit in list(document.nametypes.items()): + if not explicit: + continue + labelid = document.nameids[name] + if labelid is None: + continue + node = document.ids[labelid] + if node.tagname in ('html_only', 'latex_only'): + for n in node: + if n.tagname == 'figure': + sectname = name + for c in n: + if c.tagname == 'caption': + sectname = c.astext() + break + + node['ids'].remove(labelid) + node['names'].remove(name) + n['ids'].append(labelid) + n['names'].append(name) + document.settings.env.labels[name] = \ + document.settings.env.docname, labelid, sectname + break + + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + options = { + 'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + 'context': _option_context, + 'nofigs': directives.flag, + 'encoding': directives.encoding, + 'graph2use': _option_graph2use, + 'simple_form': _option_boolean + } + + app.add_directive('workflow', wf_directive, True, (0, 2, False), **options) + app.add_config_value('graph2use', 'hierarchical', 'html') + app.add_config_value('simple_form', True, 'html') + app.add_config_value('wf_pre_code', None, True) + app.add_config_value('wf_include_source', False, True) + app.add_config_value('wf_html_show_source_link', True, True) + app.add_config_value('wf_formats', ['png', 'svg', 'pdf'], True) + app.add_config_value('wf_basedir', None, True) + app.add_config_value('wf_html_show_formats', True, True) + app.add_config_value('wf_rcparams', {}, True) + app.add_config_value('wf_apply_rcparams', False, True) + app.add_config_value('wf_working_directory', None, True) + app.add_config_value('wf_template', None, True) + + app.connect('doctree-read'.encode() + if PY2 else 'doctree-read', mark_wf_labels) + + metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} + return metadata + + +# ------------------------------------------------------------------------------ +# Doctest handling +# ------------------------------------------------------------------------------ + + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + + +def remove_coding(text): + """ + Remove the coding comment, which exec doesn't like. + """ + sub_re = re.compile("^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE) + return sub_re.sub("", text) + + +# ------------------------------------------------------------------------------ +# Template +# ------------------------------------------------------------------------------ + +TEMPLATE = """ +{{ source_code }} +{{ only_html }} + {% for img in images %} + .. 
figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }} + {% for option in options -%} + {{ option }} + {% endfor %} + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {{ caption }} + {% endfor %} + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} +{{ only_latex }} + {% for img in images %} + {% if 'pdf' in img.formats -%} + .. figure:: {{ build_dir }}/{{ img.basename }}.pdf + {% for option in options -%} + {{ option }} + {% endfor %} + {{ caption }} + {% endif -%} + {% endfor %} +{{ only_texinfo }} + {% for img in images %} + .. image:: {{ build_dir }}/{{ img.basename }}.png + {% for option in options -%} + {{ option }} + {% endfor %} + {% endfor %} +""" + +exception_template = """ +.. htmlonly:: + [`source code <%(linkdir)s/%(basename)s.py>`__] +Exception occurred rendering plot. +""" + +# the context of the plot for all directives specified with the +# :context: option +wf_context = dict() + + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, fmt): + return os.path.join(self.dirname, "%s.%s" % (self.basename, fmt)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or (os.path.exists(original) + and os.stat(derived).st_mtime < os.stat(original).st_mtime)) + + +class GraphError(RuntimeError): + pass + + +def run_code(code, code_path, ns=None, function_name=None): + """ + Import a Python module from a path, and run the function given by + name, if function_name is not None. + """ + + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. Add its path to sys.path + # so it can import any helper modules sitting beside it. + pwd = str(os.getcwd()) + old_sys_path = list(sys.path) + if setup.config.wf_working_directory is not None: + try: + os.chdir(setup.config.wf_working_directory) + except OSError as err: + raise OSError( + str(err) + '\n`wf_working_directory` option in' + 'Sphinx configuration file must be a valid ' + 'directory path') + except TypeError as err: + raise TypeError( + str(err) + '\n`wf_working_directory` option in ' + 'Sphinx configuration file must be a string or ' + 'None') + sys.path.insert(0, setup.config.wf_working_directory) + elif code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + # Redirect stdout + stdout = sys.stdout + if PY3: + sys.stdout = io.StringIO() + else: + from cStringIO import StringIO + sys.stdout = StringIO() + + # Assign a do-nothing print function to the namespace. 
There + # doesn't seem to be any other way to provide a way to (not) print + # that works correctly across Python 2 and 3. + def _dummy_print(*arg, **kwarg): + pass + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + if setup.config.wf_pre_code is not None: + exec(str(setup.config.wf_pre_code), ns) + ns['print'] = _dummy_print + if "__main__" in code: + exec("__name__ = '__main__'", ns) + code = remove_coding(code) + exec(code, ns) + if function_name is not None: + exec(function_name + "()", ns) + except (Exception, SystemExit) as err: + raise GraphError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +def get_wf_formats(config): + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200} + formats = [] + wf_formats = config.wf_formats + if isinstance(wf_formats, (str, bytes)): + # String Sphinx < 1.3, Split on , to mimic + # Sphinx 1.3 and later. Sphinx 1.3 always + # returns a list. + wf_formats = wf_formats.split(',') + for fmt in wf_formats: + if isinstance(fmt, (str, bytes)): + if ':' in fmt: + suffix, dpi = fmt.split(':') + formats.append((str(suffix), int(dpi))) + else: + formats.append((fmt, default_dpi.get(fmt, 80))) + elif isinstance(fmt, (tuple, list)) and len(fmt) == 2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise GraphError('invalid image format "%r" in wf_formats' % fmt) + return formats + + +def render_figures(code, + code_path, + output_dir, + output_base, + context, + function_name, + config, + graph2use, + simple_form, + context_reset=False, + close_figs=False): + """ + Run a nipype workflow creation script and save the graph in *output_dir*. + Save the images under *output_dir* with file names derived from + *output_base* + """ + formats = get_wf_formats(config) + ns = wf_context if context else {} + if context_reset: + wf_context.clear() + + run_code(code, code_path, ns, function_name) + img = ImageFile(output_base, output_dir) + + for fmt, dpi in formats: + try: + img_path = img.filename(fmt) + imgname, ext = os.path.splitext(os.path.basename(img_path)) + ns['wf'].base_dir = output_dir + src = ns['wf'].write_graph( + imgname, + format=ext[1:], + graph2use=graph2use, + simple_form=simple_form) + shutil.move(src, img_path) + except Exception: + raise GraphError(traceback.format_exc()) + + img.formats.append(fmt) + + return [(code, [img])] + + +def run(arguments, content, options, state_machine, state, lineno): + document = state_machine.document + config = document.settings.env.config + nofigs = 'nofigs' in options + + formats = get_wf_formats(config) + default_fmt = formats[0][0] + + graph2use = options.get('graph2use', 'hierarchical') + simple_form = options.get('simple_form', True) + + options.setdefault('include-source', config.wf_include_source) + keep_context = 'context' in options + context_opt = None if not keep_context else options['context'] + + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if len(arguments): + if not config.wf_basedir: + source_file_name = os.path.join(setup.app.builder.srcdir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.wf_basedir, + directives.uri(arguments[0])) + + # If there is content, it will be passed as a caption. 
+ caption = '\n'.join(content) + + # If the optional function name is provided, use it + if len(arguments) == 2: + function_name = arguments[1] + else: + function_name = None + + with io.open(source_file_name, 'r', encoding='utf-8') as fd: + code = fd.read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join([str(c) for c in content])) + counter = document.attributes.get('_wf_counter', 0) + 1 + document.attributes['_wf_counter'] = counter + base, _ = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + function_name = None + caption = '' + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? + is_doctest = contains_doctest(code) + if 'format' in options: + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join( + os.path.dirname(setup.app.doctreedir), 'wf_directive', source_rel_dir) + # get rid of .. in paths, also changes pathsep + # see note in Python docs for warning about symbolic links on Windows. + # need to compare source and dest paths at end + build_dir = os.path.normpath(build_dir) + + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath( + os.path.join(setup.app.builder.outdir, source_rel_dir)) + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) # no problem here for me, but just use built-ins + + # how to link to files from the RST file + dest_dir_link = os.path.join( + relpath(setup.confdir, rst_dir), source_rel_dir).replace( + os.path.sep, '/') + try: + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + except ValueError: + # on Windows, relpath raises ValueError when path and start are on + # different mounts/drives + build_dir_link = build_dir + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = render_figures( + code, + source_file_name, + build_dir, + output_base, + keep_context, + function_name, + config, + graph2use, + simple_form, + context_reset=context_opt == 'reset', + close_figs=context_opt == 'close-figs') + errors = [] + except GraphError as err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, + "Exception occurred in plotting %s\n from %s:\n%s" % + (output_base, source_file_name, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # Properly indent the caption + caption = '\n'.join( + ' ' + line.strip() for line in caption.split('\n')) + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. 
code-block:: python', ''] + lines += [ + ' %s' % row.rstrip() for row in code_piece.split('\n') + ] + source_code = "\n".join(lines) + else: + source_code = "" + + if nofigs: + images = [] + + opts = [ + ':%s: %s' % (key, val) for key, val in list(options.items()) + if key in ('alt', 'height', 'width', 'scale', 'align', 'class') + ] + + only_html = ".. only:: html" + only_latex = ".. only:: latex" + only_texinfo = ".. only:: texinfo" + + # Not-None src_link signals the need for a source link in the generated + # html + if j == 0 and config.wf_html_show_source_link: + src_link = source_link + else: + src_link = None + + result = format_template( + config.wf_template or TEMPLATE, + default_fmt=default_fmt, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + only_texinfo=only_texinfo, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.wf_html_show_formats and len(images), + caption=caption) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory, if necessary + _mkdirp(dest_dir) + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + destimg = os.path.join(dest_dir, os.path.basename(fn)) + if fn != destimg: + shutil.copyfile(fn, destimg) + + # copy script (if necessary) + target_name = os.path.join(dest_dir, output_base + source_ext) + with io.open(target_name, 'w', encoding="utf-8") as f: + if source_file_name == rst_file: + code_escaped = unescape_doctest(code) + else: + code_escaped = code + f.write(code_escaped) + + return errors diff --git a/nipype/testing/__init__.py b/nipype/testing/__init__.py new file mode 100644 index 0000000000..9d57ba87af --- /dev/null +++ b/nipype/testing/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""The testing directory contains a small set of imaging files to be +used for doctests only. +""" + +import os + +# Discover directory path +filepath = os.path.abspath(__file__) +basedir = os.path.dirname(filepath) + +funcfile = os.path.join(basedir, 'data', 'functional.nii') +anatfile = os.path.join(basedir, 'data', 'structural.nii') +template = funcfile +transfm = funcfile + +from . 
import decorators as dec +from .utils import package_check, TempFATFS + +skipif = dec.skipif + + +def example_data(infile='functional.nii'): + """returns path to empty example data files for doc tests + it will raise an exception if filename is not in the directory""" + + filepath = os.path.abspath(__file__) + basedir = os.path.dirname(filepath) + outfile = os.path.join(basedir, 'data', infile) + if not os.path.exists(outfile): + raise IOError('%s empty data file does NOT exist' % outfile) + + return outfile diff --git a/nipype/testing/data/4d_dwi.nii b/nipype/testing/data/4d_dwi.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/A.scheme b/nipype/testing/data/A.scheme new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/A_qmat.Bdouble b/nipype/testing/data/A_qmat.Bdouble new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/A_recon_params.Bdouble b/nipype/testing/data/A_recon_params.Bdouble new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/BrainSegmentationPrior01.nii.gz b/nipype/testing/data/BrainSegmentationPrior01.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/BrainSegmentationPrior02.nii.gz b/nipype/testing/data/BrainSegmentationPrior02.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/BrainSegmentationPrior03.nii.gz b/nipype/testing/data/BrainSegmentationPrior03.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/BrainSegmentationPrior04.nii.gz b/nipype/testing/data/BrainSegmentationPrior04.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/FLASH1.mgz b/nipype/testing/data/FLASH1.mgz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/FLASH2.mgz b/nipype/testing/data/FLASH2.mgz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/FLASH3.mgz b/nipype/testing/data/FLASH3.mgz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/Fred+orig b/nipype/testing/data/Fred+orig new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/FreeSurferColorLUT.txt b/nipype/testing/data/FreeSurferColorLUT.txt new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/nipype/testing/data/FreeSurferColorLUT.txt @@ -0,0 +1 @@ + diff --git a/nipype/testing/data/FreeSurferColorLUT_adapted_aparc+aseg_out.pck b/nipype/testing/data/FreeSurferColorLUT_adapted_aparc+aseg_out.pck new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/MASK_average_thal_right.nii b/nipype/testing/data/MASK_average_thal_right.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/NWARP b/nipype/testing/data/NWARP new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/PD.mgz b/nipype/testing/data/PD.mgz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/ProbabilityMaskOfStudyTemplate.nii.gz b/nipype/testing/data/ProbabilityMaskOfStudyTemplate.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/Q25_warp+tlrc.HEAD b/nipype/testing/data/Q25_warp+tlrc.HEAD new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/QSH_peaks.Bdouble b/nipype/testing/data/QSH_peaks.Bdouble new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/nipype/testing/data/README b/nipype/testing/data/README
new file mode 100644
index 0000000000..550854c57e
--- /dev/null
+++ b/nipype/testing/data/README
@@ -0,0 +1,6 @@
+This directory contains empty, dummy files which are meant to be used
+in the doctests of nipype. For version 0.3 of nipype, we're using
+Traits and for input files, the code checks to confirm the assigned
+files actually exist. It doesn't matter what the files are, or even
+if they contain "real data", only that they exist. Again, these files
+are only meant to serve as documentation in the doctests.
diff --git a/nipype/testing/data/ROI_scale500.nii.gz b/nipype/testing/data/ROI_scale500.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/SPM.mat b/nipype/testing/data/SPM.mat
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/SubjectA.Bfloat b/nipype/testing/data/SubjectA.Bfloat
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/T1.mgz b/nipype/testing/data/T1.mgz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/T1.nii b/nipype/testing/data/T1.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/T1.nii.gz b/nipype/testing/data/T1.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/T1_brain.nii b/nipype/testing/data/T1_brain.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/T1map.nii.gz b/nipype/testing/data/T1map.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/TI4D.nii.gz b/nipype/testing/data/TI4D.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/TPM.nii b/nipype/testing/data/TPM.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/Template_1_IXI550_MNI152.nii b/nipype/testing/data/Template_1_IXI550_MNI152.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/Template_6.nii b/nipype/testing/data/Template_6.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/TransformParameters.0.txt b/nipype/testing/data/TransformParameters.0.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/afni_output.3D b/nipype/testing/data/afni_output.3D
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/allFA.nii b/nipype/testing/data/allFA.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/all_FA.nii.gz b/nipype/testing/data/all_FA.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/anat_coreg.mif b/nipype/testing/data/anat_coreg.mif
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/anatomical.nii b/nipype/testing/data/anatomical.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/ants_Affine.txt b/nipype/testing/data/ants_Affine.txt
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/ants_Warp.nii.gz b/nipype/testing/data/ants_Warp.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/ants_deformed.nii.gz b/nipype/testing/data/ants_deformed.nii.gz
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/testing/data/aparc+aseg.nii b/nipype/testing/data/aparc+aseg.nii
new file mode 100644
index 0000000000..e69de29bb2
diff --git
a/nipype/testing/data/aseg.mgz b/nipype/testing/data/aseg.mgz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/asl.nii.gz b/nipype/testing/data/asl.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/atlas.nii.gz b/nipype/testing/data/atlas.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/b0.nii b/nipype/testing/data/b0.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/b0.nii.gz b/nipype/testing/data/b0.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/b0_b0rev.nii b/nipype/testing/data/b0_b0rev.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/ballstickfit_data.Bfloat b/nipype/testing/data/ballstickfit_data.Bfloat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/bedpostxout/do_not_delete.txt b/nipype/testing/data/bedpostxout/do_not_delete.txt new file mode 100644 index 0000000000..9c5c450dfa --- /dev/null +++ b/nipype/testing/data/bedpostxout/do_not_delete.txt @@ -0,0 +1 @@ +This file has to be here because git ignores empty folders. diff --git a/nipype/testing/data/brain_mask.nii b/nipype/testing/data/brain_mask.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/brain_study_template.nii.gz b/nipype/testing/data/brain_study_template.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/brain_track.Bdouble b/nipype/testing/data/brain_track.Bdouble new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/brukerdir/fid b/nipype/testing/data/brukerdir/fid new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/brukerdir/pdata/1/2dseq b/nipype/testing/data/brukerdir/pdata/1/2dseq new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/bvals b/nipype/testing/data/bvals new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/bvals.scheme b/nipype/testing/data/bvals.scheme new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/bvecs b/nipype/testing/data/bvecs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/bvecs.scheme b/nipype/testing/data/bvecs.scheme new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/c1s1.nii b/nipype/testing/data/c1s1.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/c1s3.nii b/nipype/testing/data/c1s3.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/clustering.mat b/nipype/testing/data/clustering.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cmatrix.mat b/nipype/testing/data/cmatrix.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/complex.nii b/nipype/testing/data/complex.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/config.ini b/nipype/testing/data/config.ini new file mode 100644 index 0000000000..d21b828212 --- /dev/null +++ b/nipype/testing/data/config.ini @@ -0,0 +1,2 @@ +[BOOL] +ManualNIfTIConv=0 diff --git a/nipype/testing/data/cont1.nii b/nipype/testing/data/cont1.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cont1a.nii b/nipype/testing/data/cont1a.nii new file mode 100644 index 0000000000..e69de29bb2 diff 
--git a/nipype/testing/data/cont2.nii b/nipype/testing/data/cont2.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cont2a.nii b/nipype/testing/data/cont2a.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/converted.trk b/nipype/testing/data/converted.trk new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope.nii.gz b/nipype/testing/data/cope.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope1.nii.gz b/nipype/testing/data/cope1.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope1run1.nii.gz b/nipype/testing/data/cope1run1.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope1run2.nii.gz b/nipype/testing/data/cope1run2.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope2run1.nii.gz b/nipype/testing/data/cope2run1.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cope2run2.nii.gz b/nipype/testing/data/cope2run2.nii.gz new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cortex.label b/nipype/testing/data/cortex.label new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/cov_split.mat b/nipype/testing/data/cov_split.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/csd.mif b/nipype/testing/data/csd.mif new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/data.Bfloat b/nipype/testing/data/data.Bfloat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/db.xml b/nipype/testing/data/db.xml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/degree.csv b/nipype/testing/data/degree.csv new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/degree.mat b/nipype/testing/data/degree.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/design.con b/nipype/testing/data/design.con new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/design.mat b/nipype/testing/data/design.mat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/dicomdir/123456-1-1.dcm b/nipype/testing/data/dicomdir/123456-1-1.dcm new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/diffusion.nii b/nipype/testing/data/diffusion.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/diffusion_weighted.nii b/nipype/testing/data/diffusion_weighted.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/dilated_wm_mask.nii b/nipype/testing/data/dilated_wm_mask.nii new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/dirs.txt b/nipype/testing/data/dirs.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/dofrun1 b/nipype/testing/data/dofrun1 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/dofrun2 b/nipype/testing/data/dofrun2 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/testing/data/ds003_sub-01_mc.DVARS b/nipype/testing/data/ds003_sub-01_mc.DVARS new file mode 100644 index 0000000000..bad890e227 --- /dev/null +++ b/nipype/testing/data/ds003_sub-01_mc.DVARS @@ -0,0 +1,19 @@ +2.02915 5.2016 1.74221 +1.54871 3.97002 
1.18108 +0.921419 2.362 0.784497 +1.26058 3.23142 0.734119 +1.00079 2.56548 0.787452 +0.929074 2.38163 0.828835 +0.741207 1.90004 0.746263 +1.07913 2.7663 0.779829 +1.2969 3.32452 0.73856 +0.767387 1.96715 0.772047 +0.847059 2.17138 0.774103 +0.984061 2.52258 0.88097 +0.852897 2.18635 0.794655 +0.927778 2.3783 0.756786 +0.857544 2.19826 0.796125 +0.780098 1.99973 0.731265 +1.05496 2.70434 0.788584 +1.32099 3.38628 0.831803 +0.691529 1.77269 0.738788 diff --git a/nipype/testing/data/ds003_sub-01_mc.nii.gz b/nipype/testing/data/ds003_sub-01_mc.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..d322483b946b7cdd3ab80f6a8842efd07eeb9032 GIT binary patch literal 168086 zcmV(vK_7zr?Cvg9jGx_i-uu4mUF+Vz?w@y`b=H}gwa@H5bDrn>{d|XMYWyFJ z85%koGWB7oVWpw@Kac-)$TXb)|JU8||DK8JQ$zY+=T(yW^Dh728>5E?ZFpso>VKP~ zp%FBoSF3<#|0nzUzjs9YHUFR2hf;yG?rwVwo?wk$EBvrvMm{DVS%YX*pfESi7iHR| zV&2h8@!;KL;T+z8CFa|ZX&!_H9Sm@{+7`#m^QA}aOc40U8!s*;p=!RpxM?0E0>2lE zx#x3*NteY!Ub9sESiD6zoKF_trz%COPTeun+8HrJ?Qv^foa8sa20l+lpx0b`F)jU= z*m~29w(bJ0boneCGH(deecDuIKY@}8TqtGcMzJNa8c_`bcPDsY@TWzRp@AE89;M^= zpxc?pFQ7LeO=L)msP&bH6`}T%hvHK%aVOw@dl#C`)qh6^W%OGll!0%VJis1I>CkfE4E<>1$9vonBQ-5giI> zWlB8d57WdoO{oJTNRYj z+_xswB5?psY}r5)Ex*#MuJtthxgkA{IDyDkEwEvGt~7H_SM(Zt8&U1F$o%RvI^{0o zzIR@buAw6ZPD&NCb?Zf-T@p>X{D^iRyG#a=DwkM1Zd zUxBha8^my(7+O9(nL>Xtwdi0$U0nSrW1%@M{pd*l99mHAd2b3xx**PL7NSEZXH1lKzB~qAXrtoJvc; z>#faE@M^5IM#l>6o5kb(r;!-B%UDz#&leZJ&ljy7i^cii))e{pvv^wATu?u`NQvE! zzZZistED%-57dM1t2xq&`*!e|?2j+;LAY{gI9{wsN3f~}i+Tl!Ry&HsG!Uk* zwS;9rE&3bYhr`MZSbYvSbj_D!g3E%)Wy!!Ov_RjPl`m(;jW17 zo-JL@3C0||k9bm=CsuS`PN^>*k^AURRG+EE{!R*>e@d6vS$(E218z{8q%gYqa+D}& zT!owFO)y%cArT&HEZwu=!tz`6DJO{Dx=$BAW0H_{bgi^w z2%{wU3JOPi(eQ#Tv>@y!MfA|)xoZ8suh!nX3%^mnML%hSyEexuFVN1-9jNiNrnnz9 z5@9}t(uIcpsJ^-$nLSpDx{77=tM?c3SaE?mm%SjphZbyZtIZc957WlsH)+ZKwX}Eh z2J!J<34Z4WL37JuDRsv{6t}Cz$feaVynI1?PhLeoG&0HX^CfDbq0j9$zarJPg@d=IprpnJf97XNbI&(HX!%%7yPX2L>MEK%NERuJt3-A+0PD=44{U_esIR)3dcT%7s!$LEgdQItVLi| zhNx5A5bgH$rf;V`X;qOi?b@D7*^8oScJ)m$>&jnT9JU3+LxIY0M;I8)mhQ(Y(6)UZ zcH8w3i!*F#*6A`zvAaka@&n{pyqh|eJ*1s0E>iJ>4dnOpm-w2hEfn|1z#-cLiVn%r z)S_1C{`&~brdEh8_e!a5+3B{;;cAfd~hi= z8v`-QWVvM4&>OQAMk0`!(2zE-393dqGsA>uO)}?*C6?SPM9$^A^m$v>H?kce6aTsC$_?u4upyS*?4ve2 z_PkBcIu#N==7}+T(=fYBjnoiv5+_&c(3?Jo=*oYusPv982OYEJkINi*T&@c@JE`D_ zS?2tGu`WN$`bGU;U7~@yr^V`T1t>XwPTFtr1S1Z&CCgqqd}X{9n=Vtb>Y6e4|7^xl z=at;`pCMl~*5P%rX8ib_9)}J*Pa)f?Ma0c1*llr4`hNF0Caf{1tx5k;he?jyqO}iC zy=}<{ip^Q^q6wFeG2_S(LoO?}v7v8o^+nm{dhm5zsTSvLM>qTW;fBamuQmU=$jdL%S z2=Dn7v_fIWgU5NYhOHkrY;@;}WGCKJ<;axn$Y1kpxz#}vc6^&p`@bv^_D6<6<6x#V z-7f%%PgLSr$`~3m;5Wt1G~>j#cI-ICnj7<-dF3!${?^}|Z+jVXeu4%EuMHu?F)qTm z?|8iKyii)dDHz9emSRK6UJ;a3M>^Ru&UvolJcSbNK`98|`3+OG{)%z)?$DuMw!A~bmb2!&@~bTh-Z9aDJJ%nh zij`Nyn3s28t}Eb|>W9=t>C(VnZfLeK6mOf%!%y9x2#vZgdao&^aF;7&H0L-yG~GKG3YqW22HxfNjJB-z+!M5*7Pexd%Hy_e)1HV&%(u!hMi)v zS-B|RzDDc{>?Hy{|6%R)Soog^L1az~{AZ-V_+#>WZo8;htG zRziwxg!So%kSw|j*%Ci7AwyGiep7;+CAlal=mgn6LkwG&BpvBwjozJ?;@M(9k&)O> ze0X|XxJGxNI|?WIJ*Wk_{mrK%b_*!9PK&JFT*QPorx32z{CTm?@O+Uf1%C6wu!YNz zdMI4%v?RquQTy%EsdyZARhs7e2#0NZlX9DaJFRly#1vb$U*XE>fhu;^mve_5DyFM8 z-1nx8&Fsv0i{&$Fx9p)n_bWKdDLD4ykaX(L1}v{Jq?R+ZI5f(Uk1cTJ zCtn;{E82~DcT?UkoY}R)k{g>@@<6o)O}xL4;yqT04IYu`6}3uwv^Em^Hf$A+L(k9* z9T)!l)}3#(aOa8FJo$1DKXwyNJb8~9-_F#;E#Mu_`^S)6ml_k^+7G&c#xEh>@oCsEur8bnhPswjHLPP$l_PFkTJbbMML z5iTpogzQM{@6{R&HI7JamLheQd12|eWE^gO5I;Z7z*Dgu3wBzGo^CAk0_TXYdk2dH zFSW$`XBkkcd%thz2wXepgHK8;%xhUHJ=1YQYyFuRdHOZZ`Bvc7)|&`SGZYmiDsjoD zv*_A6S%lmAi??oX(M>NKg$0SISrve$r_9jNIa$)b?uL4=5%~J=E@ICsgk#(;vHVPb zTKPDUE?rzrYrof0-}fu&R!1G$cD0r0|8@uF`nHGG9~JtxjFI$zJK;&!UTDAW4-&1u 
[... remainder of GIT binary patch data for ds003_sub-01_mc.nii.gz omitted ...]
z80lvO>--#{;I|pPuFS&dkSZiB{SWqCB9R-Ai9_daV@BXFcyzddqRJF}ozNdU69Zso zJPYa{ov`+X0hTVw6HbaOQQTmMu(Hwk<~$lXMd^69<0&>J8?hlFCTypR7IU+I3wf`N zm{&RxrED0ktu(=a1yZETbA;Gc3h0|#;)oD|+cOWr0k`WTaHi0d9UtjJlU3f(&fW$* z=zuam`x^1JkL0}7)RdP#)#QiXjU(#-5i6WK47Wu}-f>SBy4JV=)AvAPewA5neM}!b z&3MC18GrXd=>e4-KG4vHpX{U5--E_Hu;CN!nt6tO^je0f2abpym?P+QA$T9rXB}E{ zsAiZ2Upn1_2lkS4jU8s(B)yNI;fcL%aLhtFp z*z`D%Rq}hZ=$#{Pbh6|17ftwuMst38v>h*D)_mp`1+R#c^HZKmFWGl1wf0wKV;8$& zlu@Qo%EQs)at71r@{BgQTk)&WrhI0&oR^4A`S;P5-1CJwztt$^$Ck)={y-`Bu`Z^U zhpk!QvS0*G$`Mr0j7EMSvChX2)4YAg{PzF_PtQ{FMsGQv5pBVL*_!d+yQIA6y^L>k zR^g{V5&ie;2U z$%pJU=GS^C_^#%5@|ie<%=gQgR;nK^d={gRMTRhHqAf1ZaL13&pD<#f9+~*p(*9XS z{B?vmzvyShJ#Q)aQzaJXEleSrG@i{II~N5roiOg26n%Y`2oDV9m^;-5RV~RFsrd(Y zubpJYmvZP_O$$vQ_>X49J)mv%W2w8xY&QM#F0>Bnjkkf8K#&;M8T1B(a?t8!DJz3w~|W^*(0&TY}D6Tva)0;g)b{3`M`_xW7Jbxc;_qW zNgC+yoPjj?qZ|9KH65@1vq5~dHD)f371TeO;ZUrAwBaw&)U%ac%0ERRmkjvLnNq&+ zt&GbRmVCfsW1e*SE(urkX}9DG$}&6=wa*c2b}SV7^|VG{$#{(Tw3tnC-A8xNi23z} zrhK5(f`9*E!*9n~@XR<<-g}xJxBR$@{4FC{YT#&;FS5fV_tk=$mJ@v5F2&^CO)SCT z4cTw7;8sWETy?C1zgAu!L(KR2FKrBX^*kf~(9ww7ZISR8{ZI5jCy->LbeXDUe?*5` zprmz~@Ik4A>emTH`6jG=whcX+a*AG0)Zr`kNci)^Qr@{q$wkfjeB9Jxy49tSHM=Z8 zyVL<&_h=xZ&oW{B22(87amT*rYp}C#G&|`rin3olrZj6UKI5Vq_k$i+q}`(^16O+4 z`!Tj#`NE>e3e7jfSh^)ySYBp{v$~GhWZDb!7aT^aT{d$Z8b}AH6jR8^t(16WE497r zN{a0cEk1P zJ}?+C7Fjy;0C6mgPefzej1X8<^+WA|-bj4i6=4l>RE8K~M7bDlx`{&OR&87xppQg_ zF(xNV;PcWKS6%0!rFaE)9qxmDEP-o~0saoPLeRRY5Z;zxf#Fk>ReeQ{UL&?lszBM^a5Qw6V9#{}{Mao*(Hs#R zyG#+ze$d79N*(;VB*xJ!f8?J&3+KbZEX}QuH7z*8UZhnqow2i7^ULdS_85rG2_mSu zSt0a-1do%)2|qVWv8{^)n>1Yzk+&KiduOuS$9?JZ_c*dHk0+0B>&UszkK#<1u?3y> z;oyrN_}1!x6I~^E{We+n{@4sQQO58wABBpCPE0<#7iCR2LRPo0(y%ipY2KuNv?uu_ z?SE@dSzXOo&ZP*PxM_#F9p(_C=Lny!nc`-m8RGt?&?T=^6yO3^Y%0~3dtBGYEM zaMoT2pC5=}y-SR-87??ukO}QAtq|#1GusE|Z0iF@=CneE75!R?E@L~wS1d-!Sv@pN zSAZra3%^T^v7u0T2CH(EKM}Xu7Jv*ab2~OQhKOsRO2Xd1L+a2(%52#@rd9m{U9zXV*-IWN0U} zJD4JHjv0E`$*{dzjNqk-LgaKA_Q@33JjPh5^&PSF>0mtSnuu(^6tj8{!qJyQF{y@- z6Dx&qNQyiAjX)Q~@Lg?$Q|`?REdC*!GgXCT zj0jzlw6Mxj73b6SP(59Q)$R9%osB`l2azE{F6-c>SR3k13LJE`h6UHbN(UL%d)vb8 zjxin=igC_J9iQDafR|eM8l!<2Uv)_LCJGnV=;DD?6BE{}q0e41^cNZ+f4>;lB0E7V z)(&$N)_7|yLGbK9LfB6UjDP+S{=4{F@UGOyu!xyLl!-o!$LitD2{qU))5p%sQY;?U z35{JdkTbd(hodtvcy@mTFS3NZiw17SNuc`70)J2I!DCXQa3<6kHzpgvY=<0M*2|HT zI0-oq>+vnhkOkceXEU39S!l&$lm<>gsxs@u85?7soiTp;it*=5qF}hg01tks;L280 zR5&R)BcwN^;b-4&lS<3Nev{;#D6l=b;o<$g0vZi_KS<}ElOeO6cB3gSRDb57t`$W(i zsDsS;V}y;*)v#Mvf|PYeFq}OI2R~IPbBFBK;1o`EJOdpzi` zi~gV0alc3(FDLdF_TEuL^<`t|uC#)+xf$YThau?o0XX-Tv4b5YY{ddAc5vw>xP?Vv zb&)w_w@q*;O4%!Mx~M>s@b-x|)@UoR#9e~MDKbn~^umJdXiVN!i@$kyaowsEu4|I9 z`(f_9%f{Ju8S49KSV=dr)O$SpBsNu!bV4?pY9sJlNLuI`L$_~X^GKJP6 zXRPXLjRmU4@R}mWM3`e?jUL)`WGHi%VTraj4%n$;p?t1jn4t&X9!BW*SR2ojKA^CN zCGs{~;)1CI#>_N@**;^mE|+03SK{uWvi9YFg#I~ZnAu4It5@m5rxT_qXp|%1KCpCL zG1S(Uz+|K^ONm&=UUx5IC%=}l?O_X8?nO1$OLPQS7=X9#7Wfz=Mp$LMaAc1OW)|3? 
zsM7-6h_YuHHzu>kmvUKM!b;X_hi{(=>a*_>uc<~JD{98n07l7JtzGsgXTwvxMf3h9RB5ChJD?0RR2NO@L!Chl_ zoc`VsanF_u*Ee)T{je+y=<*96^vapvq!l!z^c4MDm`;j_v!pA}qdmbVXhPgnI%N4@Ogg{IxY9<751!qId+@-u=WS)lp4Q_-VJA4nM zWT?)=GSX>b))!V)pvejY%kg(}2BejqShq4(xKq;;!S;1Huc*QE=y%9)bYa`l+t|mt z3Yr+{z*l*Hmtor^1dessR(ng9dNs0Zde$_x zeHcCdGL|Nt&Z6J(4K%|!mzH#hCymVQbhd9#O3W@{gJ(D6LPmG&>6#<-Ivs##4-J{^ zBo!)rcA7M|Yw?aFw0TFNkiM+eqri)UY1;jlq`3KwDwa2p*g}gum!8G`<8G*_*dWvv z`k{atF~3@s)=YjuM?Z`B_0dg~f5DhE{$(1cu>q06x>LyZ-^7f~rpDZb2#mPt>ufE-skYb~W4ZWkCe zs!%uk3g)7$VTV7lC@=Fyj}V zvjgv}n8U2ks4v-#}M-r4BgYvqb1#E=A!EM;uf2gIGKbW2R>zv&$K*Ug*eH zX0KtIdIH-tq8>?u*WvNaF)--qj!vhn@ani2A0I{wb3U73Pp}ib`}(8oMl=+0>1a#d zhFv2ovGJfDTRQ$X0uEkA^`vKbv#1z9{QKi{jtnWA4e_~urf~0~9m=lyg6Zr+@aV^A z*LG&HhFR>^<_31nyP93=TEgy)ox!fxxUiC&NAR!1WQ6uIL6x}zC3=}c{VOTnzUYsb zEjQu+#gC1AT*~x@8BnN}Ds9_wg!SC)M0w`kG$8mI`!Usw9s7`hN!_in^{EKMgH{SN zRypAKxnx+EcVu&qYf^P>IyFi5)6Q-i$gx*AotwI!{;aN~_?*@B+T|9T5q1Z5enYTe zq!sqhS}izQ_r>pq(-?X*iLJjfpMow_(b?$R>Ls`he}Dl3Zqrc&(ZxANEep)s35^dZQ0{ zZJohL(78gxEZyc`FgFT1JA$FcjbHa6PxK?q?9+Bs&mPq zD)N4Gi*6Kdrhv`c80%JqQC^`qI50;j>plwlw`(65_0Ch5cqp_8OoJFcJ7ODsWW(}Or2 zb{@Ig9%Dh6f{9(av8r${rWPe*@9%xb=}YGjxg!=X*BsF)$r!8imkASft?_zrGP)jX z#<*z`=Ako;#dX@ues|u^?j>?|>H7@k;pNNTDK)3vp#TSs=b~XlC%mdQgE)J>z(1S8 zt}O-6he+A;Ez4L{)OGgd&R_~y-hr&GwCR{qPZQ)*sMq91HoW~0x}MvCvG=;;UZygC z6sHOC*X{9fbSBunzAS#QDJ|WJym5h zwnm_Ft_<(TWC@o7d=R|25>fAVvfQ2P>8pP|^-KIj?^b_R)=G^_@2T=hOTJL%_-oW) zl1T~U?ZP{>v1L7d7?90|N3OuH_lJbi9hFeOsZN*2R8p5AuPA1)9@o(_ z<8cqn_}SJv_^Oq_Qtv29|>oj@&jtlg4@<|r#ump)=_k_U4X9$8J z#U6V{w%!(em19RfDZq$ZbrJDNk2`by@dkYLZEfBs%anV~(dGS56;bK)Y<5dyB7|Ed z!trxSIDe&%-91}FFGV)|b!lgAGToAo-)F%qUv}fNJ7)ZFmKhJdYsI~M^!ci!)%4}* zX4dIs7}ic*CCrqBVbhVdY*6|R+TW<)AMNdV(V$LzTo(ua;fe+K(zD{CT~^%0)0E%T z)8z~Mrjw*RkNv$i4x1BJ2t{69uqn%&d47$g+oE5zafJaNam<|402AJEf(`#sXT~o% zi1~!++B|vBUy3<1kjl%fS5V>pQn2^rNBH17`@HlP{q1SS zZD!9FAQfM5kIoy$8pNp9^ufx)pxB zkxeVir$>jc(&L^d=&{L8(ws1k>>SIK-s3$w#-(A*sSrfobi`HH#lqfG)`+?_1$yCY zaJnW1Gv_to=D;9U7O{sd`?!O(T`6MaC;VAu)hA2~nT;2%Ly$Mg2RmO`pd#cyp;w(d z;@wxE>4XmZs`4D0T6I{!ZxJgjehi)Yr!Z(`AJ&-Kk>zb}!J`)iFpXZW%qyNqYBI-= z2Z_S4?e-X>x(ed$9hi1%5VP5GoY}waN~aGSQ%sIM?QTw^yRYKNU89|;TbeNO%QI-Z z=L@YfwphA7L74otBRoylV%w!*>}5u0D$Y7Xy{n&5I4;l=my`6Z(%qN>jZGQNv4nN*W#Q*y!;d5?F zxVu8k&(6{2L$v?W_|K}`_x3a@j<#X-KL)_=|u?F(iH{ z{fd$ChNV{gmbw);c9SbFQ+}n!h`&){ukTk8@4ZvO58pK8+wWba)BGe$`xB3MDsP0_ z#(%L?YEJ|1Yw>q2HoWdv2kySxn7hG*zsNS>W1q-*-=jvnhD^Emp*|mY;yfj(A7Xb$ z&dNKVxXe z6Wy)3{~=r6yWNG~h_~W1VkG>xjWO4*7V+{WN2xw3pY>~+g+s^J3bo;r5!&ex3o5)s z=jAp$G17tG=;Xj>esJVRYn-`Js};BGYQTq&7V{hD_4zLC9CCb~z@B*kM#qx`{p-HS ziPU2cBSYxTA8jtGw&1?MtoZ!Zc0A^62d-GJ;9b=9`P)(*e*E?;`ghWp!f!mslSIHK zWrgtdqB~|UScKP47ci~2`E=b}!gqYO<7%e1{LW)X?$ck+CtNY+opS$>R_6(H%eV&{ z>lTX8)(*(bh!D)nI-t7$B%C{c6;^F2%+n;Fj29X4TOkVGV5i`n+6?*1vF$W8b}7Ai z5X+*L9l(^m12OJ>2YmPzD|{Ykg}J5EFh4dGoAZz0YeFJ3N*h9#dmp8`g$L>UfDc424vcwCv^7oFDGV@beb;nZDQ?7h4KGgVHYV(MIMYT1vnr-m%)y}%BS zjAcK21ha4Ve#0Oq73m?vU_3q;gI0KB`y4552CWnhJ#s|L>2YYi_5oeXit+p0eMoL< zu~=UtcHnOp=KeLF1va~~FPGk7)VB;AH;uv{hfWBbY=~9z6yd~3C%B15LB;bS@~Wil z(3BF^|HWYHnl_hui}UEq-AZyY-#{f~iw46oq$4^MtPqSE z`oXd43|@cQ!qgTXqCs{BeDhXao^Pwmhc6WK<8$@-LoIFo#6z1OFQ}jw-rLw<?-H}XT|)iM5&)M^mu~q z5L(~sHoENUi0qGfLb+=Ux*p!gX6ye)syl7DWuYlQby>zIS1S1ayHdXVf;q3(GvNa| zO1Mg$l>Zococ2BJ#opKYK7aw`uR-%|RVHs4BQlP*le*?&6)$>4kpkhszyZo$pk?0EP{IWKCq;sxQB ze8v?CfBVjgH^!Lp$^DJ^lQR-NP47S2Ez4y;-X!9u$5uhhJ{K7S+u10IKA*ip!87Jc zx!*VwKHEdV`%SXsw?qmakz>SF*XVPtATci(evV8JjABcFO~u%PEyC>lMCAEwWQh%R zRJg&MPd1YAVR@#Ut+(LXVYd8Rww(V7GvrZM40u(A9xutwp<@`v_T3%@#l^M4vFSsx z>WqYS4M?Jfoo0Nda`#7`(lq)2GopxL-MnP)%MAv`D&`%-O!K+d443@n{g!C 
zw}I{c{uFXqB<3kR5D+a#|Er4y4_gKL^a;YotW~gDHUr}>mEke(!BpRUWF1CzAn(ad zZ2lTM)`xZ@%)395vuEH!o)>-{F+tkS6~f*uEBKuc$C%WkPW4C5U0uz5r<<^^?DDzgRtE|B8lm=%J~1Y2w|w*yTILzdzUj_>NI zWXBse=$i{|?G{0gVpq_P1~Hk<9>O%;%kV5&fcai?=svW z&KMXnOJ@D16Im~QNBT^}10wYKMHORyIa$g>%4J;hqb3hKH;Klj>ah03Bk<^>1;&0z z5=6aRaqH=R=(yftV?CdfcVBbv)Z2tlQ$CYYq2TW2*4*)`j9+_X#^WY`rbl5HnBKWX zxaMY!`!zX2A6a+o3fEzuW~`&Ecnj`6*Mht0Nx7R6SzYnE$8I5P%Poc}eg-kQFH@$o?hy2!9lZNUTHs-`del$@5+#|$cYt)_hw!Xw3&+iVHD=h zK(fggbZF(6uW5&s2lbI-o+R*0Q#^Uz8~r+`;N>uX>~0By_1;8m8(4%{TlV4Q%~S-G z2B3Wi$HYw|(O^3eF}o$m`Dy^Kc{7B!Dmv)uq=U%Wwy<>WgQ@wMxLSV*RgHbml9T|63UgaL0?2r3iJAdWDG-9AUuMxMaYul~&Q z@i(^B){{I-W|E%vA{zTvjY{i>u&w{)!^h7GXLifbai0X;ekTb|D=l%j$`Vf#=OHl2 zn#sLQ=#^gyCDL`;^W!;X$GjuC)PfJ~E9LheO8J&fro2#Q&Tk!)a^uxvuJYv+73=M1 z{eHzDah@{@BU1%~Y+F<`tU=bZdiKld8U2|o<#AC)T)SSM&k8l*1>20cbfpo$dQpp0 z@6D9vyPdtcz5;4t3f%3MCCr$iKr})HXU@k8M+YP>730U6)k5b06X;HNLGptP+)a#R-KUJEmf)xK z`hg~Qzo^Fd80hh~;yRkS+@0*dJi&`~o$z4v|NjGEjJEIb!hoR`m_F42CFeObCZ55E zfGqZR@ld*YVF$6y?X<;YJDnKPjnqSDvH9M4*d1&S>tGqC-8F#i&;;R?o*@oj(??0A z0%nIEim!r-a32{#vQM$dbz(0AiJ-1<8b_Pdq8y)?(-b~$?8 zH^i8AD+IqLV{|z!MeIg*^eN*oPn(F8+E~<1i^S|FGcZA?60*&IZEaHf3(N zH^-o55+s^T60(=6V~33d;mZxt?}a&hG8kMJ#Gqx(Dtyl!gLk#waMg0f)rXp}eyW3` z^TiO#&2jjW0dD?|7QB?ZU%tQyGXo8ASJxbkN^i9DW)dt;?}4yv4a$bhMewNr9M5%t zXM_$0bl1Vb2r>2^(u0LpurQ>b2Ch_ypz%@{Zud-aV{#;Ne;k6!z31>={29X08d%y_ zV36e$tj?7~r-Lb$=ZH~Us)05(NffLc3 zMdmN0>Yf{@Hm(oF#Vli6I~_!IQzs-GGD3%GVmzyk5&HEt#AADVxOEwVK?hw}#Gby? z5hrQe;u<=nTS@OF|ET)yS?Vy-f&$l@Gh4$5RJWO7O}sI*`XmSk|B9g4Y6XEO!TmlITyQ%74KWzv23Cgx(^gr9pnAo8_B_z-Oz z_Ky&XW8_euX$O`?NX@%YCV;IDxm}QcGR)Poz1FQjYEGe@#T~* zW@hUEVL+b0;@9DM!bu$ijQ2K!Pk|GbcTdH6EgklA%SLu*w*k3-GobD^PL!~CDYK}& zjyn-SSaI46qVZbr3pK>1&GEwQ0x_oA>Y#qD2?iK?;BR3D5}jHw=A<2K(6eOQ#yPUh z`s!?9S~mI^c0<2#J+v=U$CwA|xK+MN5Op*`x{4{rKhZ&knuhj=O;nuJ>G{$ld6L_d=BQYRF2<;}tAm#oh=7{j-o*aSidLXWMFg)kR zp!=I7nBB_3m+rAxlhhj?hi&oTjv+pdRN&thBlO%IFW6qyhJ}+M7Jblz(>F_GHFw9U zebbP)AQpM9gs#yOaQV}49O&8$4{upPuhbY1qs$;*DMIOs6~c;51_&4_!nfPT$nS0q zU!}i3u`&T6%U0oOXn&+l5-|B(Kjgi2#JQ<5gf$zZ#|cv;epeuPUzqS+-`!a z${OmctTD5%9+vgjgG);%y!Ug%-{(p;=gZLA!2}hL)KD~E2M*Wt5OTl_16_YuZQw?`WVt8!pUZBv}%t;jp|KU{xD$er^d3@aebM5_;aj07mvp}oe{ug zP`@QX)Olk>8>R}a8zmU|Sd7iy23YCg3c;rl;p=^wd=N2}{iB&@r(#x|ID*OFD&1X} zA37H(FzmevhAcG2jFzFo&kSR19Hxs|8&pxzaXeZ(88P|uG**+gjul&nFrm|R_Pj$X z3pam@n(}Gzdt$E4a~9a+se$Q%GleNuX3#G;!>tB$6#o~4l`gt0Wqu@!wkl%sPIyhiFNceY84e?{O(fyh#SYK243=YPt zs6EKqCt-swNtnx51&h(Gf{Ixvyhqz$l#2}ClXb9ep%_;^rwM%v^w6`rCOU4{f}yQ3 z4z2RUvcFS+KNZ+{@IFrdIEWpqmZR)sUtEF#7JGdc+}!^O_1ZF|3>_^@HE9#NF4jTQ z2oYL-D7}=91+*6`GsWL7c-DUcHqG*cWJo6rE%ZVMB`3Rl)WXs|`sh(6Muh%w!RD(P zYAtjzxO83YlNhcmWXjQfX5nzGGFLo(h{X_ zd!dJg#Uh;WUnSh0V1+r2nppW)8!B1t!o?SIRFv3Y<{B%MuT{r-6>YfK7-D{lA$IpL zgIe`(!E~$&Ue`(xSG-fWGT01_>M|Tp>514xWoC%nhmT%8*+S=hCiX32JNlQf@8z-V zH9(@EPiaeZ|TI5oQhjZ?o}qfGwLc>KrqR5>v$Ee&nzD zf$27uGoA8pY;^Ku+NUrl=hfSo#fB3&Hk0GT2zOX4SueOpbj0=ZS-7(5FMha{u(kQg zbf~D39A2i7RrG0kvL%bY>^MvN@@LYw_kG#oRfkbEIt-T{xniR%L-5@$pk|XEYgqpR z6OXQ76Vj9D;fhDpRNq1#AyS_3OOr4oDb<`*Vr=@U*NreZz5N4vqi zPqGmE&l|5M-N4gbm9V#N#N2WlW*GjNInOytBV=}bU2;1~&EK#b&1vkxlOT4{Xe|3o zs_avQ6EZ?dgvO`QFufANUQfHjeuXKh>CG^zxjK>lE=r~Gv(Au6$Rxj&9@bn;9jyGWAT>r9`W#=jHvnFp_rp0B^dE`4vjdmORkvQ!Y zxefkGnzq+yQcq1X>~#jLzBi_IE)rUP4TjmhM~Hu-O6M*$(Wfp3eBFcRG+k^=llA5^ zaZ3qPyh|XDAx~+)#IvB`P3A+_37M%&g8#Kk3>^VXilLcJe2Ulneiv9`@6N>HB($YN6p6va@mX(DMev{IE2k<26^b zHAX>4y+6hTsf?md_Gajl zzDRI8tkkTp$}DNq4T)>QkXe#}>80h+K48nHU0cWY*ZQ*TH{=5%Ykd%H-O<0`|1dM`YWxQ=0Qy;xwvL6)^ZL{?i==y&r8RxWm; z#E2fG7FNTGr^#7v%PK4y?1JW0DXzuG34OoX4&KJaT#T$=g_mYcbQIZJq{X&V&ns7w7Fymna1w;=ywI{dahtYXU5a3Q5AH3 
z=tJr~`3c=>e@*JwUsKb^I;y#Qf@(+irhPw5n2llc58;yzMEs2z20W*P49&5xk-YWFb8`l z8nUDrvz2omLP6cH(DQtCKIWn!&o8&)XTKWq^ORG@j!2r%;!m)wHqK zMke{5j8lV73TJGx;hI>_f-mkPQA!onZhuVwj^3u|$)8AmLX*EA^NCz@?@++?MjGFt zj=CU|UcOq%yzY*`i@_HF(>C?-X(Q4LQEm z;(yh3`Q6Q5>Dae;8n(rSO=$)k(sv4;Ge)E4{UkQUHG|$vGvtfYE%~0WrhMJ`M%sKZ zn?BX-ps#DS`S+VLKKaZ~63YDO(uH6qojes&#p{K)sRL0u#+|LZH<0$vZ6rEs%6DCs z@>7MiRGeK(2HQ$#?H(;Y{h*v*xqg?1cj-r^(`T`@UI_?Woh+P8_QKqY4S3-1OktUO zD7N?)y}qf%Z*BcW6O*<1Py1hFu&a&Uk5lJD>P^b4^QLnn|3RfU^!}G-)P3pih@K)6IshOu8!YQLik2{6l;*&U=eU&vuY@(#4t0~v#4ynG;;v;Unp;Yl$T4Je3TT-^Mn_Vu#%qkSI zXPmI~OuUf%$^o^NGf=6MtjzDX(Ij5Pw2k(#pZi7htJR0z?$)P~3nJN%k^0Pc-+655 zmx5l&Bftm9aHSzpu+A{U#m8PyotlIn_QznIRE-U>kKmne!prT7_V3u`Ai`sT+8W0LK#WZEiZXd1Dr?kQ~gv3tztd;m>z>OjUP zwJ7@RaynHQK^IRxV8dp#}k;MyxgKY64Ar(ue3+(1S>^ z`V?J7YrB4?2QOPGB1?@=X;bB=#(klw4!6l5a4m(~EMQxmGT`mr4fdUP3vc!Z;#;%< z6LsiBIx}CBh2L9B9HPe;2a9=TvJrpgZp`!4wD>6X7Ft?;g9_VZ^vCE2K3x-_vhs+K z+Bq4LrYsiLe+9h``$*WQ!|k7&bITwrULd#Qi^p4W*(77$=&QyxRO=|JGN0CLO=BMm z;$fb8TXW#*@njm3%AgqoL)7_g-$t(mLFZz)XpC0|o+W3R{WT^IBElV&_VQitcyQR8}Z z#r(aF7C&H8O%FF#va|0N!8_`u&|`2jUgsN9;iC`qXs{*c-<f%o`o$+IR{@w7%O{@Oy$?L&0KSdaKx7PP$>A^Ifh*GjW+);{U4dA2atL{OLim^IUJoo~JLK1C^#Mb!r)1!=lh*u@PAC5ImJJ^sV9dWd=yIP!C&)WuGyBpx8sgo6 zpt)SAYVg9vp(*Ixs~HZnq>^b~tv)RpY# z$SG@*12y=j()sdOlGy!a%dD)JN7{MZdhU7$e6Z80QMttElBffZ<0bg0C#g}>hrKChP-cvD$;#}=mpwJJ8 zeryre^vOkMlb@`r`3p^PGU125|NlE)3Vz{^1?S7Gc*}P=pV?c=2iEIw?IK-X|K}cA zf7!=2KAVe-8PA1Y&t9Q9Z7{8|l=6@BEO|FWb8i34md9I~@B>bI{BVYpPi~jP?KmRA= z>Vb0J($kc?UohsHm*l+Urih>1Uqyx8N?7&iG~5k7D%?3;i~*^-R5q@ibm}a5%vw9{ zKEaybEw$s8|2p#b$E>+ij)ZsUYs|xa#e7%vF)9n#$d+r*#rn1l!in6e`1#`i`#$6{ zB_6ZklYcvKLr({OV!RW-AL_yl9IW{uKjj_l6LbHk`h1IN4%Jn~v&Wr>;daVKp~$xn zcCA!rcYlteJzuqXXrv`~|7gYkoUr5He>wA#mkNHySD!ELr^{8n}< z$ags6KieWuWw9VDbi(m&GhnpyHs<|HVZFNNQ~o0(Ub0=m!#66~6Jf*`^lhgX*B4Vz z&|J3Wc?ryp5H?M;0o{%gW*J$d%ZRZ^ZC(O{l2XjhS;nR}1dxg2QK}hvke**yO(yPE zq;}b#X^+?otA-%_bn<~#wG>WgqXk`w6nhd9VSD`;>~_sTVaC$KD;0VLcj~r#*zei-PI9moUQ_ z!>GsZc@$NbN4uiVlil#91xbN4rGdtOVFE+O}`J=vLFAu!zHf$bCGg;|EK@acUV zBQEb?@mhzeY@Go=U!luC^w;H?SH%3;QGI@0qRmAv+T5$6g4BL&V;l5xkUqI1hUF9s z;f-U_(Y+UYId2k;4pe&ezskK|XTWwW?V&P%^#eX z^4|$Ee#H6(jm=7AvldUph@nS>@3lqne&Rwd&Xzn=%bpJ_mh*4tthiCACGRv_!bJ|J%}d1R(k(*avm#XY{ln(W)#oo>D7gMoDc{)5gx@n# z@I5w`JX`5S_IMic@yGP}%3Lw;JH3)-j0;xoz;q0mn=4r7tpx5BuwNdJsE{eR&|=0< zXPfe`WWjfSvf&~}rEUis@})`!Ei=~V(9I>i#9^#)#W4J~DiZ#CJrqA`joIelBr1Ak z#-B`b;5W8d@w=aF`QX+L`~xX?-E{-rPt%yE4Qi%8WJ`rAw-I!vH-`2}7fvd(WX*pm z=%)RGWm(qIzddrk($Ajvs<7n=dG8QOr8To~)@v`qj^!xP_^5Hk3 zm9h#ABLfheFbs7AW?;8hS1kH$hSMo?glB`Cu=$`pmK)5&i0E;!ewvOBBkG~`Nz8gk zG^^}9>k^mq<=d_xfL?Q5Hb)+f3Q0KHc%4(g}%r;lj#pGJO2$f&tlI zFm07C<&Ql_#hs1$B|*Ul9J1sC=b7^3GF5(PYZA@AF@f2hj=-%^onSRufv!(u1ixA% zIQDjcG`f$J1k2Y?T)^?U+SZjIC&qXf%^Mp2rNAFvzxgz`EWP zWtYt1Ff~Cq=&peY$6etyHwpuN12A@G7~b73!Ig2g%*5Y^9m&*XbB-K`7?a@lb})XO z91NpoTLfP=#e%u3g@@WEh%fJj``MZJoiGNs8-t;DbS|b|&V$XKBbX7DjxV!@;Ydvn zSgjcVr%OYS(cKP}8%;3peY|jIt^!qi^ziqaJ>vAI;*c~42R(M5 ziN=jWFWgw!6RcJfTlyJd_Ig8nxf>(gzHNl3K}LA})*7oE!*MJ70=C{zVfhWNOr#UR z5`PV3(fexfY*{iKEBe8*(i9WdiSTs37&=?#2r&nYP;PF4kx`D=n0X8(QGTrC%{SJw z$%7ikg;Q~80&NdbReD}OmS43Ak!BW%6iLuOK@S}_B?+pwCYbTb8Y%KftlVwGjLIam zvgR;t>Uo_ieHzL8Obe;_U!%A#{!}II%*M;+p?j7Sd@9Y6ry4KRg*YMWsW)zfH$iIk zlSP!AC+%(`Uc6Mo_bWM&ZfL)83!1?w`+aqMWSF!rY#B7X%V z#cUZ9%|1w9-x>1#O7CoaNx=s>*zhIy6MjHR4w^l>Yv15vlNP ztkh&FT7KA|ge42#kIP~CHy$N=o7wDVMU-x@!QBJIJm-}@|5s?t`?nkMy-Gh+uko3( zHw~kZF+_#Kc96^KO{~f<9{z*m=)Xgb&X<#fhFLO%zI4IwP1#6XFrCf56GTUN7cJe<@Hg3cPYz*|r(W_1Bqx7;5+&{-aemf6bXAAC($iS8h(P$q$7Pb=uu*AU%-<3W4d0GZtVUdub z^Ib4*5ySkJ8$#}IoJx*FyZ1t<%!|gcW3zFfaRi2iG1Nvm;l;CVSlZnMw=C_UWIUWV 
z#|TDa6eyO-ao)@f8QG3d(eI0$BWB{Z{&Ebm^+oMWKLpMfAo^qh_F4~$&o+oJwS>Bw z2#xEM{{OfouI!efw@8eP0&DCy?uO>)@z5~biKS|55UCpjW-%Q5UUfjfU51!cY>4V0 zLzv|oqMvM`(011l^<5S467PXkOY^K-uuTYi42{J3LBkSZ$DqQrERhB=&8oy3ht!jdqN5t5mKVL{cEr;T= z4VokSAkgp{0#`0&P3gTUz9Nrq3hODarG(_!wp5ek#;&YLLH8&t7-Wj!;HmU~8u3C@ zk`aF9nIT}Vl1KNJU{$@gk}m~pONofmw}?ou){(9pS|hYt*z_GLT$BJ?eYvX-*$xWW_`pA z)W=d|5iHh+2+Mlt;84ZwLU|!G%*r! zbs-Rqc0p69IrMv&L3^(eyiHBeGcZE9yha;mFYBWBO9*bYb;g4Nk z^pW{seZQ_Kk2Qmht{hgI&EcFQ!+j3}q>N1ve%k9HZGjbZ78~I1bT#z1Gs1>%*0{P% zimkiF*yU)58==NfpQnj0r+y0d`agtiU!>SLNP(c_M8WTh4Mr>9-S4yV+$;K!KQczP zrvdiNHAmME3gkPR!_vhNKFJ#BI$j?d)_;Xw9rSR-P!Hx6%Z2ZK6$n?eKnJCNxSym0 zZyQ7O(Y96A*A0OqyWvxTEk^#5BXOVxuI|;v@3mt5wiTno-V|@6Vuiy>|KmB&7}9bv z><)hw20H4)aitf`vQzM){W1iTWHetA5c*k;=4eg4UTzA1C96hk)`k6~)k3d)3F<@! z@b@!Apt2X0yMhrOaSOH?dhGt7No;;l4>pL^`Oqfndcb4@*n4reWv6E1_CuVQebeMOY zoJ9v+!daUzbiZ!|Hd7BKE3`4-qY1tq8zj)rn?hkxH-u3Z$eM;boB;yp0V}@lu5K^@hlHj}W44G%>MK$)c+wJXaSZ z`kNW%m)oF7Bt^+~0~DybVqS#;ca?sj_=q;5e(9t0P%YT{>p{{tUO<8b-bx-$*d~FG zvjR&#D=@gf4Q3fDS(f1n8f%XkXBDUk6(KOm6o*_*v2L9hGv-O)T9YZ%rkTL|p)R78 zJUHrNivN~aA;QD~F56@%Z_`7!ArCyexJ{bqAaEdj-p5ayHB_0AK$} z(Cx7ZtB%hXI)uycFERob_FJ*~m6utrpCO(9=1VJl-m@dQRcy)R--vvl}#Ref*I8$)1!6gNoQ&*Wy>zmtbgk$``cLx zJ2Zj}7_Zi5M#+l6UcN1}Cr9(#26C8p`GWbQgEXllzNQW`+gR%X}& zBOUJX`yQ3}2UCtl5IcVPB>1Cj7@Gn+d&LSq4;iN2QDc{Lk0I2uFWXNVbRc0jjjec2 zN11|8`~H_KCa02%TMM(+*JEez??;YLIu_0BfseKcLgqUMRBx|FBdk*Z( zy;fE$I!;l!*4$<5H!A%3fo(3I%Oul-S=GN$Y_+cj`?1j*$9rxQ=06F;M)L?ZvGgvh zN>k9HE`h}V8&AWu*V6Cuv-Ghpo&4l+)bwTtdF<#(gAN>E1Af0n-EB84(JK({srh32 zcq2AKO_jcMzd*V7wfNQ1+T3(o0hwm1Q{9aK`W5_|+G|@$>QPOX^R(#uq%)ZP%nMGU zO@h^Wjsc=NTwSY1%?(X7G0%`cc6mX+?n>$X$wdrPDw*1}MRdsY868}Fms%#9(jBe) zu>a$Qtt*m+!7qG}d#)O@#%R#>2}dZ`RhzHfagm}H)UxI?_u1;WVixh)nob5Bq2nV~ z(ae7)G@|S-mJAq(?ANP==oC-rN;hG2;ZC-C>Rd9iTuNIncu`5X4viDL(XbW~X+Je1 z>Ar5s8UmVIUAX$#@<*?#izp_P)V92L|n7S)%9_B5fBE4d|h^SK^lv7oyDr_ zbJ;M@uC%881skO%WB2=h!Rpb6aQ)~YB}bhxyNf=;l=DUmk6;o_!^3?NT-j=j0||S9hHnzQd^{a zG{pFwS%T&pCzvFrV)K4yCXdym_j6L|)|-R0x#K3Pe=wGY>KrDCOF1oHm_x$Z`|R}1 zdi0DNhT=yKn9-0c_@8k{uKrbYvP)uHi{mIkP?DZsrJW zy+@W&ibpAAy!vw!N&K_PW91H-_FbEw+bidheSTB6u0IW*JenOZ z4Z^?9>x6=2e|UHDV3ivOQsIFY+i2Qo zb^gHkCe^&~qBsBjMbYOm=-xS15c5%B20O6tVLr?LlS^f(t@P97773T$kVMslYt*ap z!*@=RexLg^DWZs?hHqtimAv^MN7o(K)A#=y+UwoB_iDczG>K%C=swSRkx~&+lD#Y0 z5lN+mmPjEqY(m+T$liPJosX53jOzFP{{FcCoqNygan9?x?&FTJR-rImlqVb=GYrF= z4&z2u4YZ}_*m?aDuAi342Yy9>- zCA^4rgwd+eczSm!`aZja%{58PNWO#l{x;-=JA3hf6g@ur%T$)M(2$ixor1Do0tWOS zie>N3k=Hy?a4a!{RZCwSyO#p@bJa*cbPG}Eo??=^l%-GbV%70o*t^{lHuu|C)bl#r z@r*z(=7A!a6h8as2sIs@(d))C3^w?PfiWs<7>;1a<2JGZ#aq~wtiG%wbvg?%@nw^v zj9HrcHZ(t9fE-IpwA?g9kk>e&(ZvFHmn{K#m@xa?OctGai=DqZfKNAU&%bWd;$1^h z_?RD4xyOWCOth{U9h0|UQJ8?^zs&G-Op1_j*bxI(W@G6V#&mYc`Tf4-+-CO$9=y1k zue!36kNNhHd+A={>V;)|*0moj_qH0F>^u(deZ*+DGgk;g7u>2k1`o@v?Ecc#+_LL= z{?(_EcUS3^`a%sl`JX!3T>Z);!mjg2;mbMB&SplcJU6l08})PYg)RbBav}v%~cRo1fGS`lg@v;-Yu=pCG`}#6r)XX&e zJD1C*Z%XFpb3gMHCv@r3M{|06(25$5D=8z=n({82lI2|uid}Y}cQ7yJiyCv7)|M2E ziKr3IMV!WbHBH`d^Dtj<>n*>1T%S_@nbYC578Dg^OHVz`=(V#6UA57oHS8~+9dwOL z)X%UFFLE))`n>RN-U}?Q>BhHRRi|i$8U2|orI$VR2@{RTZ;OL<;i%jWRgE4)b(~%s_jOfu^ zUF!PWob;;o$?)t(-e+STQzVarXj7R`yEh)|{+wn>F?IZjU`J~A-09n5D=H7SqWhP+ zkx!ik`I}pi)H=#M3bZFAwR_^^VfV&o2vZ%l)j2)XRRQ(Oa$FO3I zPdmU8JTCFv-X`?qo1DHF+LGQ5H*#8~q>>M&Wbs;;dW0P0{{E$G&c+3pJ+QmVzZ47F zZvF5)AQPoMf54>h7t7bZ%{v)b(D4W66rpENnKz`Qdq|hE_Z{Q<{^yzO)?JLX55SsH z9=H>qDExlf9&XJM$XK!%cGb;jSbUQmOjyISo?hp=QO9{*`WC)kZ7hG-c^~so`+_AQ z8Hl+bfq{dp(2f=guiR|#lI9|+cpWzM%Z5e$SG=qr%9>~GWORHB(`jDMbc}=9neb*b zVG@ek_s4iI4}6Rg!MA0akh88Enws-)Bu|sgrVn`Nq{~L>8?sL8UgPny^JuN`W25Sw 
zn8N}!Hpguv+;X$AFv%IyS~X#%8!PBtwn7i{rC1eg&pfXUWqTeUV!Lm4;X^#-e5suy zPio8Jl@I3fa=m}7`mz;sdT|=PM+RV0h6Fuk

S6u6X!%6~@~RW+Pv^@HO%EToU?{ zH|kvAt*2`Fg10ZZ{;r#RNY#F>DbwOM^?~f`jUPxsNG%TIM_YFA@QZ4=RLJ5w?;G@-^$I^^HCmDhLIpo<#Qc+wXu_R-f1L!$Nw zBI8wfy!t0AaQMoX4l$$7QYmRHv!Jm5ENS2uYnrGuC-={0WazC+1G?+c%Bwf|QoZf0 zIX?z>X5JHa_`ZX3U@*V+%Z%m?v!VuDbNX>YNtZIsXzF);D%O?Kvw8&;eljK&Xh?PY zpYf>Kk61|GbWCk`R~Tc^geUVu__hsZ)WO7t&VIBe-x(4zovomkUBz_P)`AKj8()-pK}*zsDhKcvoXxH#}~4;`|rQqu0DPSpOMj24v3X|#)iG_|GV zH_w#(^Awb~!I1X;JUu54EFFu%U-KcJ%sz z3%NI1)ADLDl`2i?qtuAPAMfMZ;cM8!wG)s&x=3h9nS}TDmF#W9X&%aKsq0n;+TiCv z?*iJ>k5Ff_YqF-%la1+StPw39XFzxF7w~{>3Cycw07Q3kg%OKAFy??3vwJy=Pg$-* zMOIey<&ZV8BX;ym-I?B2srDkzfG$e)XobyN{`atmFKl^@-j}_h)h}Im*WCrDUc}*4 zVgkEqyoP5V5K{xUrw92;5?gnm16|B1G)YWL{M-0~UnBX+d!1NTaRer4y5rQBOkql; zJC=SQi|ToI5psPgicb-sUYJt5%I$YI{VpE?4`1h|wP%k-)t|8X*$s!h;63JdYXhZzdHLw{U zgt2D^VU)KgLarENcTuWvE!hT@qbDNW{}E({H^~_W++RH6+9o9!`U?y1HL`Lh<$Q)rEC{l5*^Hsmd@m*iA8+; z(33prMIqmjsK%f8yRq!Sl^7r4hph!R2*}G9Zfo1Yck4j32CFf%(4TDF^;10X%U`Y^ zqDJnAw5h{MO={`-k5AunmoL2|=I$llEPi1qvaWVOkg9){Xm!QlxGL=T*~kv-?&cG6 zjp^SDJ^C?DkKWlEQ@^b$Ef;j?eqSBh(XE!d_20z2k_&NPyEC+Itrdo*4#N)po{Zj% z;-hR#NNcT#R^1g*lSoSAKbg_^Dg_n4FrpiohGZS9Px}jlcyP`Y%+KqB3-lDn90Uz5;;{;FIr>%nHVjYmY- z86ntkFU+hh`MN(6N(ynLt8XnR$jXKkgT$mM(IDAe8TBL^nrJVjxIS`fJ@jeLJX6%Zpd?@{~|EsUZZ{Jj(^Ob0cx@vKgxywuD#pQq`GdPtmc~H2gm$`Al)5 z4}lhR|DG{fY!K0?q7QtxmmL>s?!eZ<4e4!#!p68xNQ=!u_?(w)&EEUGO04oJ#`eVT zDyhR12fA(}C-oO%GKx_rJW1qdeEe9*#sTn=R??aclDP7t@ee?WnPPK95>a%Jw{|!t76uc$Ol=-^4iKLRVu%oA*KagDm(Y zw!u)bmo2`X&;OgEMoq_zX=ci2{zSHb&zxAu!mm8VfvvM}y{9WyXxk$3@e-l@(Esl3 zg<{IrWSn|31sP8&(S5WR8|C+b8DzKTZ56NB+c!#fYeXr=ybutO6M^aOT_8)9VAae7 zVcjcpY#KTnL)RR`^K}ytq&*C$$1I1{<6CIXZ9?VY>lpGc4NroAEiQieqA?X4#hr2N zsUi06oGav~8{zeV_DJcKjQeYY5TdmL+m?PnV~B(~i!5043oTZY^&iCF3t`%MD8?M& zc-2RQabvWxTRlQp*;9;9+Dgn$j>cB?gV^@RiG8hl%fdc5^YR7J{P4e29n9(?d@<0$$^t((m?9xznXvhU3=dNsk@&U{-M>^B8kM%VxHcS*Mw+vNqwf4u#e1Id&w%PH45)+3 zcN_gsXw6mw`bn!_b(h0rv)2YMFoL|*VUmZ$cdm%LEW!>4A{ z{;q?lNY>4K{7_q~vlVWTDW5bUYsq78L*m(lX85_+JNQpmV6zVv!4l1+P~Wqg?sKQ9`) zCgiY!Td(*DKWjSOUqP({RAXUIeTQ4qs&q?wWMM{Io|%!dR7`hHm2+K*lJ$Ey5|z7D z{2LpFuuy+yGx{LM6ghcKkkXtfV%on|Le$5K@}taYQ-zqyf+f_@`oHIYh-cgX!lAvs zINL5$C{6EzIHRKwLi~7!pB6dKlT%BOjAmYyk)70*)K&O%ye*}1QZae*T0Z#C3D%)t zB@Q~fqWs4UA#Ab&7KIbA_rn?%AG?*G?P)}Nw9M&9rE1M5TG5&(GFoG);?(oMdF_`0 zT199Wxo_#hGz(~754D_>Iw?|20b$azWP`lpQG|CdGQvszQBrN)=O#q4>g*3 zcnOccI*Luzi^9#>Hn`Jl0ayQd!kTwhm=)uQRbS#^UD1k*+w0hz*DLsJkMI0#S__xl ze#DLa$MP!OnXKt!Ir!$@;Aa&Wt$$3;T!a4T#IYY)L70{0@PV_v{9oE@`>#oy`4 zx97$38g9eK9gbp4Pn^PV|B<-=!Uwl!h+((Y6od3<3%$DJ;2H<_c3>qEmkSs3bUgfAPtFx4jqYpQ}U z=~y7N^`;^F=o+MVtAu4mHg*i}jhHi?an@k~$|J%s>Xtn`YNgnHa+Y8};e*h$HqtS&wCw~RmLK&dmX=ru%@X_tpCNSY{GD2?P_n~c6ttOF7QLoA{i3C8zMf_2t6rN z_}14JDJ`bhmfRlBT@K;2P7rIT{?1yBI`IidCUe&f^ZEWpHSRkkn6_?6C52qHrQ$S8 z%2rd5l`22ZG0EXdqmit^dk9v(v%}4jcwyHsI}EB=g7cf7vT;cbJZYgNEl8LA|94|Y zsgB%5=H%fZrC+wjG~&upZZ>Tj`*|}S4;HtFm$X=LE%$(X+;T{!J!00wpK+f#5(+#e zA`e$X+L&Za9j}V$&MZ^9p~}DNe>U)h=bPA&1L?3B=8CpANkU;KD|`%`joV@C*-!P2 z{OHDZR90$4$D9qR_jXn8{UV}{4@LAbvynSC4dlNS+AJus8>TWlmCmFKikk|sL_*_3 zDRVre7=G@bjZ7 z49~e?S#>r}=uKi{gNE?>-_Lk<2OY})twCxV^=YN&ki-1)2Pc@8XeihN*$K1_7Zny zq(i456q%7qR0^i}nPGwz9wUU|hT2&1LV|O}&L|3C@O|G0XI76#RK!Z09bbW^#>)^o zYBD}79EgRl0$!3Z0?K z=|SjG*9+E%dZQ@A2kLb;=u{v@zO@Mt3wv@!3`i*8|yt{RR_^ePoT@ zPu<}avl?oqGuZi=zPz2*JTBgtzY(7KIFQIAv}8I zI5uxl3QF@FQKjNhXP;g?z@nD!wJ!o?pA}#*T`Xpz@jn_9e>k{hkDV0W*d8a05)-CPKBXEe>nUMgLYU z7Vf;3J$P%v*Y7srrsYn&c4G?Laq=8G-5v-NxfLcZmLk|#f(nOJA?%L~HRB{I{<1(* zsSIu(m*JgJGrEL2Fd@*I4c+I&YB#E}gn24`yy%GT+a$0vG{Du@rZ_xjiST@$1;QqX zVR_jMze5$U{^5h)>k9Fpr5slj*I|}&7Qdt`aj}yx*80oQ`-(o&hZ@1{lp%&5nklr( zO;NDL3|FLvnDJhMK`-rKb$2*)8pgqI!y=@<%fpA-MM&^x2Oi<=Ry%rsMB 
zdw}p}k`C(B75Ern1d1}p(cfJWx?&P4%BJGKiU@q_I2kMVjK}3{chotUqy0c5yd7bN zBi6>~=rCPqy=scH>&y_5CPAQz7wh)>VcymSaH>g0wo@=fqXXbM%>$X14v5V*!QW@b z(3XpEQ8o4l4-v9Te+aMFX~Jo+7=Gt$Ag!~-)4Lr}{K^_Fq5W|1p9kU$ZLsBt93}H* zXp?^w@avV}R;PyMDTzW)BV9zOu6t>u0_mT%p%tx#hZ8OFDc=#kiuSl<&>l6qHt@67 zM!@0U!uMxig_1N41gtQ{H^mY`FGz;L8Vc+>V2r-k^kF~J5LVxfaXG*ZJwG_(OJ7Iq zZI)xjXH&Fu`XgL_^-*Y^rH9NXrg*w=x=@#9h&L+k`F34~GIbGZA6jF|98bKI<{)-{ z4N7;Vqj+IgtnQ@1IAe9xUK692s^`ka>ft9%7Y={X!ARBGxZ9}k5GRN4u|eoL^*SD3 z)o1xO!O&myrW;cS^07Hzr++h7FUmt6&ogTDm(P#HY-m?C2E zWNf-*!e(^MW~aaBv;0wk%w2JTO$p9mj*=%Z)Q*NzxB?qTS>wiFV;Gjl2o8b-9;;07 zO>Ba!)F^Db+JfuJli04y#jGJ&$=p+mSmcioHazPbMn3J0ujl>=n(Iv=@iD{7tEqy= z4h#GlqT<4hsvcSyfQChtNQRW@Uhri5F6ptLb|abXFeuwmFS3?6t>rMHK%x;PW%Bf26& zY>K}XgO(QT40Y~L6l%+L_Gm)T+HdsocZ&>2heM&swG{`fDm zH)`_iac-L#9*#GFaj6Ptf$bpax=8qSLV*__WH^#6M#4%lQl%m+vFd=I*OaJ;@ggReY*$1+RH7 z7&FQdX=&~%ymiOYXAX_;WSYWVXuUAQpUN|iqy zFm^=>+~Q}3@--Tbsn+b#$YU&OngO43rw?D$^q#etoMv6N|6wa1jpGw~Tk$P{o7m(1wOHG^ zHy+)VV&>e1!uZqQ(c5s2%5wORl7`(QV^8$0q=hc9Z|&YzEY%@1$2 zpt&71X#bhzyhrReHa$y+{k(JnGu{?JzQG+^CuRxij|sfUyCmoEDZS(QM(+w<=+K=nTD6Ng#k|6sW-GYf%oTW_ZfM=7 z$Gl&*vR{kpx&K6MTK7$xG~2IHjs49^f&+O_#T!0$><_+C`yzLKufs)NN08#?4E^yL zg3ZwGaIClwQJ^{x|L--=?qNdnZ@u72ea-mU>SVUl?<^Y}w}786c*duP-Q;zjC4AzJ z>o~o`13iNa1RVo+qpE+3zt7^6b~CS?O8(cknxCAM z&P}>W_|(4luv_AeJ`wps=df-_Y|BA+$1-+%`7FNuNg_XSvJd~Y)tGN~>ckJe(&s6U z?7404Ui`sDJDzTEkv)1`gy7a zqWWyK>YPF(b6_DAdF)4RPiA<&7H8ASP`P~wS~~TD8Mj0IGL@IepC;s=F~%A#CEPc4 z#^R!hC=1L$wb3SMn%80Hc_S9qQ;%gxFC$3j3*1L-Lpy~(ru28fL=79X%$h6srb=-A zp_7VFi*dl`Ih-sU*nmD+tU&iPyZ_`6yAr#V_1_f7zF+rXp6?G}kO#`21_>NcH zywBGe9OA`AK0Kz)h+Pej!pR6H`1C0ke*YebzsEiy5cVx6w>#r_c9p zb>Lmaf!r$cEbmB$WZ*9*gZp~)pHCHU9y5}QZ)NeHJ2JTYqBTr%FB@?!hlSaLGQsj5 zvbY!9d29Fce3<1Uezx%%fA--E@4HZoO8h?Y`mQ&4ABUIxNx^+?y)%z{#iX*uOZ_og zx=oODS^%HY9n8h?0N=Jphh{d&Np#hg?wYhCYlrWA&D7VtUamu1ob_mV+;=`xw1A(F zc4FmMfn)y)h4Bac5fvZC#`MnN%^D*5a@3lB_m`1>w>QJX;a(X%J z58vP&$TcLPtZ(5^>|dkWzZZRx3U_we+n?WYe$Cxy$*9jc3Av@-XvIx!%XgQ*FS#zQFttipSb`r4*7zHRd39XxQFbN_I%FNZ9LuP5AS_an>LO5&9`*Y zp(Xu(^YVj#xJ$MsO?i2RZ>{j=3EqD&VRaPP(kj2Wi(5; zJ>3S=Jq2WE&4Vz1KPGOth`6c;c=E@Xg=cxQe-^$>!%W6%WNPgB!D|>jV;U~{Ibw#b z9B<6hginzUP?;vUr+$L(Wdo)kJ(O8o+Q9mXOW8x-i`5&AVqXq;ux}ad*quvTkbFHA zmv=j%S(SGdZA%v{U&*lWU^;l5DO(be#(rtvV$B1CctoQkzi~vHm%dBkBCQynpK^!Q z{rHPLo?G!Zi*S674VwBc6_%)3W1KP#uYLP4@m2*dq6+@F@*;n~rs`; znZI~m#+%K4u=IACtZ~~2&}D1vjLj9Mbm#&ae-xKPwlYzE5jP6Cz^lR=d7AD|{?AgA zrU$9?_tQ6CaN;`mTfLm0EStri56?qDW>2Uk6bY*jh9G~N39~=y!jqo7<)5Da$Gt)f z=t`Oqg@&2ZHFGfy`=!#P8{c^I{LB36PZ^&&y9wuq`6F%T8lmz{A{-@oZ0xIKzG=c| z9ym#l`W0EwmTlJb;fEcSI@r(xOEHZ+sX_k+-RFMyiufgm9OiU<5#m=J7M#b_p}Aa> zw*(*IyO+J=X7BYW#>IlRZLp-}E0xrA%#6BxFs9|l+mVan5C6li@hy_GEPmN)h|JFj z4P_6Jf4Vz2ovlHy?93>^LrUbSPuenLI?z={MGJH(I7Wk{n~ccngckMud6E0E{?Bzozk>R3_opKkO-Zb|m(7SzSh zmcl0+lJST_p0lTv1xAL!rErb#{A>^gDOa-zA2#uzF)~`Z!=A1jcA@q9jue$+P1Xyn z$?}vnb*)lR;4?i6ov@6*%+F`Ki>P6AzfC@`R%Zr zA~ozNXt@QokqIq5sY78K+qn2gAm6>-nr-es3&YDXai>Qi`t-+_};GZeSI|l^xxm*})$wh@ zD?YpACJ(>ApO4$D&6Sfw*k|1|%=l@Cmc5<+fV;nf2xa*nPYsvrl!uB^_;bq7i9wx^_`Po_)>f&ub~|K4wb36$&bG zGa{Rp7x<^2ds)h*EDVuX3Zvdu;AERFUl-HD&%Cmt*sFFVs<9y<)}Ea1yHU1_ElG}u ziF=sR)*>Tnw`f1#bD)?dsz+n?t5U&g-xy5N-o-Xqp5fD%+tRw14z%{H1N}YLo=i_W zlX0*Ot=n%*Q?-ogxRxQQnXcqFmn~rSPeO3$-BKaP&K*BnHJFBD1b-EzOZJXd^k=LM zZCAIa=#S3yG1!tEHW<+11U>4~?H!N5D&k$bJVxLam4B~XCNwU0$MKQ{h;Nz8o@5ns zSE(5pf3zpDr5$COIMMVNbJ`YRM)=;wDQG0GHSWlQuS8(&F;_T#NDxX-IHO(e1k6}| z4d%x)*{1p;ZhFC#0vs)=w}TZuUMZrxFIu>AX%dgyHj{a6u0&&PUwF)Gk27wmLS?B0 zx9OcDVD)1E)*62LQv4L z14iGFp$|XRo zOa6PFl)R2O(&w(0G*^XTUDfzvg$9jXAfrEPY{+@4l;lZr`a1t5Pi#nG@q5Rj%dUT*-S)n3YVq-?B@4oQ#pM}g# 
ze=(dMZV)tX6=8-$D;x61fXwb#Qtk%{1!R~}wXX#|-f2a(Pt1v&MPxI_fQp70)8>W~ zJflw-d;ef8)`b-diIW#2e&t%$W8Xu5y~%>^S;@(Gjg&(3EGcoEl7s~cDwCPe%@@Y> zz|(;4)vn^3QwFm){Rd*g;pM`(U&Ao(ohb{ry@ZD+$Y^<_1Dz0TDAd`G))zTZc!&j^ zI%G^jlZb|#|GkNU{3vN^w539W=ZbzX&&ZOukAp|ZYf(&eKJPiVl;K1r{JyxM%u#`7ms5qS4{UK}M}H(&FGqB43mk3t zFthjh{Fs9}u?NPq!t68eBTwK>YgVzzvtFU+!D4E^GttkFI0*BU*$I#G@n9?N0Ki~Pnk`5Lq zjE}~?@*{}2G7;5ZN8^wz7k$F+VEc+cIM8qr5iQGL`D*|M&Ki#E3#K7Fv@^^tWr&R! zFFcS@V)*cyC45}EG55>w&xV&B z#9*lp91P5G^O78M4=)rBJXApO+!<;8*5Y$Q2=gA}#pBhV^EbO%_~;Es>?-FhCNPe$k2Q{S;#E6fkR0oU~0+s__^~99~-!$t0Bc38`61OF=aoI z(A+a}T9~Cxy`GNcr?m`OOVnVvDP6E}Xo8S>NeQ026=NjV*sIj%{Ck^%W~sEa+e`%w zh_$3IR<`7OTuz(PWK=Swkq-!k zmQdUua}owRP@k9P6uwGEV?+uHk2WLso#S|Be*Ua zg;@q2vEXHnaD9_K`kjfwdEK?lNqZ}gayO!74(7C?R!%<)tmxE38GR2|oj>F^ALbay z10BR{!e1vya&6&0EnB!hQ-Swc)^NA|hD~|~{PCSSzVE9krLVD|zsA;7rJB3`n;PZC zEa9@vBiNL`WAW{{9R2T!AssSbF#RIN{7!QC-dTu2>($t%m$hui%Y5Ex`**Inq?O-# z|A^-Vjp1#D)7kjdWqA9`3o-{=yxwnthGB_n)?#Sf20ZYM;D2@MmVvm z3ktIbL4Q;(LgdFFbhwDP-Lo1T7W?oRc5++9N|T4_eDYElvnwxW4oq~sZS zpLcII;Nuk+aPE;KLR_rS?nRnV|6K;d)?tt|E@1CE@8aXUMU+sWpz-Z2=}oyU4IXMq zx?L3%8Kg&Q({lOySrb@fz+lYpVU4{`nZn!q7En{pgY(sgEb+-Z{@{@+J8YKFbx6rW zmE}8kHmB_NQp$)jCg*R*_*0D?tk`A-ZpS;~T8h6Jf2%4j85N_ND zj#)AmU8LmQ*Ph@XOLQqoZc6vf%_z@PN)Adx5*1hQ;W_KrV3#>qcF!1E!E#h2rwL6~ z)^OX<36fX&7!*E%*~bpy$2FgGx};5KMrzWMCVe^=@sJ;w z!yqsV(XN13loC$|DKOndf+(#_fsK;ma~CQ0w0472y8w)u%`p1IREUaJ;m7uFl0#78XFSgoE*Ob%b)*blUtHm`j>BcoXoL?=!M%W4&>lS+ zGt2yOwZRV;IfL1F0m^$07&Tvvj|+7$)?=X%xUN;0@k52l|3nybT#6kHUGOtF7JEI@ zFftM7drj3LhqaQkqx;QYq~vxm+WtQ-u{$pku9!O|3mo%PW5fj&-8vH%^A6Wn&`plYHKj`oq`C43W=8#Bisz3_h`D% zhbvH0uRuhdKhCdqW^r$P`J9^vx!2K~{BHaS{(YqyDSDpdU+OJ+a*2XjKAVIkJ#3J> zQig6vGKCF~QkL_p9q#cvC9il)&`w_a_zfSuu%3T5 z@#6(+h-JIX#aTxsv^3?o-F>ofFHekb16BIDEFI3Th^b9>0Zd&cdO#XEq46$s*Y@pkdFL&miSU1N}bOVEHLi=yVtT|2M$iADbyy=qZrf zV1kNKCfMXH#+JXX=ul7-Wyb)JlLXz z^=f(;mTmyc=lVGQSsx?1EEV>97(lZ~1I|h<q&eK6di80Ea>%b>|y0GP~9xS}%@Uf8L(pfn?61pOBwGYO;$%2||4W8Pk zVaHZK+|ZR{2Qz|bxG^fjOt95ngp7(M!Y^$lWKCA^?`RD7amKK+9gF@k7hssD&l>v- zW$_-pn8Sd($eB0>gKBl5Xln;SZi|@-avTjz6PzcRgXxJ;x=#f6JI=U2`U&oT_F<)K zyqV}kFw1|ifr)1hVDm(^xE$LR-!e?myVV*uRh-#dH$zDLWe&45Vs!3mh*eqR;T~{E^4SF49) z(F`)7`1O7>SMVg(UF%)aCzYJ>dYps&)sUOGt15#!YIU}17k)qXdM zk$lSx*RzLU%%w{7yeeYJi@LE#C-m66F+@dtuQf6>-~x@wCbe`4{w2 z^gs+(EmKswW(W)W>!GMzgq@xAP*7tD|13wu>N`R0yB9`}pMfvczG%4TirLHql~WDz zW0W!KLychVFF}Wo(}hG;-S*?mu)xOvzLg5}SzwB22P;T3^)Yw6HT+cm`ck$HM32p| za-RV@`xz!XqjvC^bI7bL?(SfVK0+9m@;4#z?QH656SZIdyUuM|6M;A4t zWw_EN!Yh^LrM}iiMe{#ln5yP=tHo&dx4$s_?_Z&{eP;~n;)4rs^3heg66qz~SmM0` zW-@#o`}M7g9r!L zf2fDR*Gg#V8!A1nRmDb68z>vbVui1faJUsJiW)P;@Ykz_enyH=mai7Ss<(&-!3pAL zxs`~bAXuL@#(hsMjj!wCQS#opD>F+4(*3vr{IXPin&Ws#38$I+>1quA~JkR#51- zDtfgdlWy&;pi2Y7X+nr3jHmCx#WllF?kq>Wf}i3K^udl|EwN$FLo9GyBJKw)rKvf! zw6e`N3iL8#zjNxG5PzG>T!zzNk5OX#+X_5+x*EQ}y5iW{S<-~Q0f@M$EW$1wf}OpW zc;Bc(M%mlw&Bb~eZX@TitN&tX-WdX)5(a=dG0( z?No(KDhz#B7Mwsq4IQ;_^v%d1evRd5Bq`e6+5K5 z#k0|FUAS<}y(RYEx1wp!0>~_SJbit$n!a=^ry&( zq^Z%LoU?GN=#2j7mrHms6vDg)H-7#Thv&Z_-);Jw-}r(4j?HM9tME%W@OC zZDmGJ)G9^zEO&D6Z$Ms?P7C#iO=8F3a^e0%S)8+s#`|_Xaqw=o)YhpZF0G5j=Qk7K zw@68RKC((Ye>6_i^vDo?p`FOu@ToXH+Cse2{)+m$`*C>CP;8YtBl4Id>f+<2kcS5F zer*U>?uz}NL-3+118s83@U75BSUyb?#k0DJFN3SlOT^J?b_k#J~Ox{TVTYZ zD9PlFg2N}6qU$9e%!wHTwWxS3*_?;ZGtQxVjf%MN{Vz(CE+X^t8yGJw#3nU?J*o~! zI4Vb;Q@o_|*cuZK5T+m4hVyZcFnph@`1vqNJaafNN<)u{D+jiVlmS!4IqObhyR;9( z3W9O1#u8dZ+9+STNE*IE;fvjX|Dvwp`&w@ia&wRPYp6qWWv%E$|0BX?UK?_kb*1;g zcLd$H5LZVoMOg2)$lGs-#PONZyw3LMBrU__j?SX*MOErLc?HR{i|I!8divRZ42}L? 
zOl~`m(uWh-G%f0uxH91uo@tGMiM0)4GPg;QA9~~Zz;g&uN)Y`l7f`^{Gk)A zWc%?mWkxG;L4^^oiB;u+MqlXh?ixz&97GS)!$hxk>(TI`6RhT^ND)=DQN2=6*u=~c zgQtz6hSTTBbF~Ve*{{#*jcxeTJ_F9!|C`vnfP$JWX>aOJv2wp6<7ZBSi)Es;jv_G2 zcY?T(t4(F&ooM3A|0r_UIhxf|o6E);ajvx%2d5n%vxu=2*CCZUl_k-D&3U4!>ndz~ zRUmEen~JD=&E0n4geB`umxA?R85Eb54=kA^|-Z0LN8_QeshHK4~aN;HX3{mH+QJP#~ z+eAy3#gcGz6L&-qMyu_S+{#B{c>XBi2fjKY?yE2Q!5{cuUaTts6yw-kN0RP#~I1j$r`jf^^08 z$vbf8>UfNLVUM3*wQ(muUYaw}7_O6-BlMlI&?rw4!?)iS$~*mOWl|fu{6meJx+KuW z<>4f~trZ>{|Kh>ALU?KQK<+X{=ijnSTKCx!?G098@^}dUjTY3-bSq7byhMFH%P2&7 zAKC7_Pbsf1Q*>w{_22qUtWIbxJX=kLm8&7FJ+q`8&D~HGR*8NIJB9a;b@bTqBK3Un ziI&f8BBNW%%#O^Ne9HGAn9u~NMt|&xw}Q zHITKglpL%dV`O1h8ZVSN(aD&<%s1t#-CF!>t}Z*en(^Ck4c`4rnJ<^?@-GiHPE)-^ z1#?b``G1!{=X0IpJnSRp1?baM<@eNUffa9g>%t%28}gZXdc5_R8(+Mo!)Ws+bOboX2)-;+_>Pc6(@JH;&jul>^M`- zZ!+W@s$t7*A8Ye}gVs=~#}=XaIuyYk8>C77{qcSET9Na1JB{9G#{1G7c)_{09Cy!= z*R8bX4;QU@@*ivN6C&f!L7M!2=?eNhGei8{G#202WJ>nwy)duHR;YChBb&}GWMHYw z*KWx9;Q@0V@YIg8yyaZiSD)u+sq>WkemvG{w+4k&1=5G>Brw0msm1YDT zo(}hKzSy&=3lbEaC41j8>4}vS4t>wYP21LD6dt1Kfrg;@n!+sRK1Mw~gSPLwiqqE} zg(@kFnCcC%pRxd#bK1iz$Q)j$Q>3>y?GP|=4gCJL6PGl{ic0$v;#sCA)pU?iz;Oq9 z(YS&tQjq%-trYy(ZqeLwn-R5OuLbc313;IZN#v znn~~eKlfF6Ajf^J`OuE<)JEeqHTaZ}Pfe*9)3P5W_nk59$0q3x1tZyelCW;O zkZ$c#XVZb2?A}q2Pd+l>q#p)+bho-O*u=wzwN{$&O_>F!xES-je-^9~q0eD2&A5E5 zKHr>JLmlo`h?X4*$Qb%g+OObV*GW!vcY`{Queak|byp7UWzG#AGB$cDW52PcJnx(# z|Fg1Sr*K`K9(0N7oJ&RWxMXN;FP0X6EX3)hTC~3PKT1-u=1-^Wc~XQ8zv^qxkK4E7 z7JVBwJ7~mj0}Xjgo-S*4KS*smnI8kF^lh2w;CR5#m}KXq~9k;5GM zTc zw%>{$$!z%XDtjK^*_Cg7mb2|9ZGN*zlRsy?r075c`m6E;?W%xH)8nP{|JtF0_gowp z5h-TA&n3-tBVMx8fkz5EmIpg?h_eOPHyH7j`oB~+F_`@JyNOk$!7v!*f|!-D(vi7N z2s;>t&nvH@YViso;?_|?2SW}rwB)i+mfZcK0b3RQp+KcYr0+3XaQQwg3G0hUEh}Kl z0?D(z39RppLcoSZTs=~Tc{AfgT8KZ@%1ddiS}_%>CsR$64LNihC^la#!j>JwFl3Jp zMxU~TL1uz9WuqLf6Xz-B?*PX93PH%;B0z2^dM}a0uX!;dw^N`vDgTK-73pYL)DOl+ z|KY5X8{*oTV(!z0(%0`+2-`6ga@{BJ$=ZvOBlj`aL`6K9tSu6sdWe><(IRO^2l3PE z3-0h*JUXxd19rKi-)95#2wg1=Fm40;hl64J@fM1V%*4-$CE|O@Aj+?eB(2l!>um}WW-5*Qu1{#79YYg@Pt61j z$=WIX*Lougf4k7KHI}T@(Sd!I%Q(874eQOc;+Ts@T-j{R2B*#V(kuhkGd5;T>yNa0 z(>me!egPuwizHe1T-*r#BTi`O@R=%0{yp7<*R3^Xz2R~$oMXj3J>{%&%YYMSDmdVm z9&cPxMJ+s3Tv-!_zYBLrrWX=n<+EOR?tDNwujH)v%8Z*wD?D(ZB{!zp@uI#mR&~toIu%|gpNJ0EYH7|-8Owh;aLQRb z{`ALzodV4HqJ=TPysO0LqT=XBm?UDGhQj8x4GQ+Gl-k|3!}KIZK-?d+%4n}O3Q@AuE#0-ICyz3;ZsKmfo4kuxG#k zY>Hlnd6AJ=`>_mV=RJklqjy4mjWb2XJP~bIS&L3Sn=oU%Kh`;ng>QOyJe^{K{Fu4Y z#My=ze0?ImBp-%y%TOdV1fw~7C7yJ-iBi2U$ga4gcuvA)|Grr7OZXBMfoGbXF}^?# zS;xjmK6RR?e`^lw9TQOBH3Z`{bC5srA&k6@#NGEcB5#PM81m*ZHniV?@wMY%Jh}_c ze|5wbUoAw~&XfMw7^43~CwS`4!0E$>;APiVsIGr4;;Y(Gq3?8h_C0~B^K|K@UmwxG zvJ`i7dgCMOqJFwMVy-7h-hRedJl!2@k8MOynZGE@^P;VP9@FcMf9POLYfjA4WYfSG z^z_g?Dwz}`Le$eS@~JbFr$SPs)QC;L znDEC><{WfajWyndk#n}D@Ta~wIlB$+j9VpTj_ZW*?A=&@>!vWA`-JiF<2=qUqB{J8g)9o#CUUNy# zeN9YwVyFe*{NTW5iu~<8!Hh?KF=zj)MjRD5j;u#i;!;KzEbhNt$_oub#D4+8??@em z?04jWEA7~Aw2aT_D6%8fp3h#g;Z{~=j3p-QuVl(^_iv${fl6ZUs~#|VnI^3a4@dIM z)k68mGiqmP%{v2R{A#rczZq!31M93=^O_|W*%@=TfeFhM=Re+9NNa0s#L)?X7+X;w zdFhYBuR4e+hYpkDT60d%Hs$azBbJjX=k&AYBRLj4wakb&E;HevOeNkhybm2c`WY85 zO8DipLV6;(BWm+L*oI2fGNCm)g_`rt)@D5EtQkuKZMjWHbH3YR%7YV)6n)|(LIhTDHN;WIB@>ibItxR_lTHl>rDH6oA74cDS-i3q9W~k}N|FVg6Vb zDIeQH);tnLR%!XBi#I@kBrT# zSUhr$bfL-uXDbb$S7?O~`^&IwO+T^B@2dzh?LhtyrqIhv3n-yfiF&s27rac7RU|`B zzGBb5wnA{;Vrj-jYc!s;LW|BEyeqa9l`>5sXbn{TOp0kw?)Uh^WmSn zN#s{;qBE(g{GeW!qq22)OR*6vIUDkWN&|NE{Y0PC187RFnz&Qn4e61#7?hbRrFdB3 z;iKNTz0gc7yJAa?DOIF%RD-J&%n+jB=KX&a%+svR`8M0gSbKxe@|%o=A=U`!t%rZT zQ>6d>?#J)pZ4eQffdI$xVu8~TYF_Y&hD_LHl7wh_AB7^2z5V$LxHC*D_R>Z8Nqw9=Vgb9Tju`)X45H+x@UW-0h@9>y%skaa 
zk8@9OZAvmyLI)sgr5x(tw81hx#56=p(lIMU&$q<(VQ%O(!5eSo5|#xoLi)vA1m4+* zZx(5AOPq>IYIlUQcVFzjG!(Z0NkF#0 zx6Xkuj=;%3Un9c+w& zO1ilFN)OShjtCzefsspgBIVd)bZmK#=Ee&MxOW6MR!>9qYy(KU^zdefDl(t`mRg5S zlZvJU8D z*m^A-yPYEWz1PRpP)j6rY>(WwnHcFjQw+-JNx?f}NoU<6YJR?!f{%F9xT=NXdF!3v zM0dz1Xdo@f1lO;pNRoyLjtq2w-L624OK=v~w)LQH9*4;_?G~y0tD=zktytc*l5W^p z(Xvcr*2^AKPqMB2tSlz*;( zHe_F*w~0sSVZeQg7?VjoYF-HS-%k)Q$_wT##^_gXjMC_pl9`1J%DatF810BBv1d`a zcZs4O`%raRF8y{~OPfm%P?(()S^VrMJ~X7l^{FkARrFDEQl%H6*-o zM9)?WFioko&}_^V-5T_0N3AZMv2dle7E6V**=1b1G6bo?MmRG@2WuKMu;zNCv^!ey zdYD2F&8B!h#tn&IQeb7+gnh1dB1u;+;>z2Jz=5rVLH89XQ0VwzjTw}ND>P86gJR{e zl9#7BiqlPSf37ZcK3d_KygOF6q~m*80XomRj^oAWa5_32!M{9^dczPM`f0*es6q9n zF8Y^DlWbGWkYs2C!(K+vSF=FO5f6pF0+G`^9b0eBhU(Qc^weJjnVKtrK2AD8zu@u=)ACD%Qz;uo|jL*w(pi5WC;%DHp<{Vt??2n+_C^Qz#M3r$jbe(L2 z@ZT~_3^7FZQX^Oe&6R#sY9Z{S9_nipnW)ho1!KCwXnrg*&nIC+c3Mzol`vzEYSQXKQ=J>nO7=4_+NqPe;ap9RVRNs7) znrlpPD#RE!Ej2NCqb|;l(ZcYpKc%6+7D%xxwD97i2|gKWpe4xwr(OR_n|jD`-^Kzb zK4@T}i#^;-Y*D1}t7|pt_z|pxwYh(!hopt>FJ?(^8nxg%O&_td^zcftH&tos_@$!@ zkAY5zkGDr_{|?Y?(8C8;4V?d=06`J3$6@afg2Z3qUUyn2Tf#LbdzC3kP2SR(82L*h6rA&fzhdo{O~ryTMuJAI;oCg z7fozX{|{3apF>JtJ+bQf7!h!|v*>=~23#kPMxQ)6B6t6k7Kf<8?U)I?)(1*0F?!JV zv4`qVYxtOVL+-=77&)+ukXHDJTy=k;v8O=PHT)+^a;mX6)*C1K8se0~FP$F0mqvD; zAT3Naz!$cKeuNCJA7?|7nG2JBY2r#|hR7WmD4I`Ji;uxcBJA8FxK5ji5hmJLb5R{* z4;esRWrU>kP6cE8o8shZBfJ}*h<7^bLiT=!$T^!YWVh{vT4t_zK6-#?9&rxG_VmV# z0)4CrP=`^f4h9=9k;FbD4Ev>m{q?%A@*j>l{|;e}ubzl@?;_aVK>QdoQfz*vB2+9g zpuNcoixj!{{*flu9MZrk5iQN1tc&-HRq@-r)@+e)mbzSp z?8I*DUAYurrU*p38o<68}DAZAT8bzLytNk zGaw8HcJ+lrsv|TDWq7m698>+BF`~Z?)O8GS@M4G*a9b7nP(fYfSE=4!1D3Wr79Mj6 zO=#IJ&ZM5gz7#ivOlpIfZPrM3_8qaWTO988Zh}QpsrYvwnbQ2L=WeF5#b)Wv8|3ip08E=`y|f};Km6NSEqp|6pT=6gWPs2IuTM;H8i@)zMM2l4)|mx!!VB{jn$ z;=-py9&&bf{X;#JS5lD0XHnZmL%5b5Lw(d5e2sI1a^@oGMO;TH*`CMNIVW(W_B!M` zwxUbVuR`b^B(DKheErpD^3ixJZq-Z`Y&=q|4-FO5&uNHsS3f+9*eEqsgkqG>3}JA% zMr>y*+7}c+>63!#fn6#s&#a)bvNZBfT|kBU+sRYgo3>0U7PFgM;J>~-5>IDJxwHMC zUZN`kBL4}uYiFqFml~&xSLdW7d9)x`jb^6}q{%if$+~+JnW4HuTsoXQ6_wVa4 z^@TsO?roITb?gI+@74Ghr%VoB2gqWCI`{vsSclPVaZcrtXt-V?{w%kqo~mUuYT$CR zTwzRYr#!^dRsFEiVw*JJtv3Sn(=nVjiv1CDXm>&!t*YrpQxP+pXwk5jV&^wA5qtF;9Bhg)*U%rIK6pS^k+U-T=@cwuntW3{fw5o;}bNw$)eLjuod2cZBb|KoH?1`0nRtOrQiOMmvBoAc`JpX5> z;EQ~Gx%Lh*S!Uvg+e$Ii;DWe%M!%tuk_h4ZwlR)tsY z)aQxaZFrBh0b6fxp~%Apw0VamwT<{GR_!+uzRSbW_SJf6^oV&dyA&oi4A3Uq97mcE z@E=VIJV$LZwK>DVh|kw(a-?D|R+)!T?ynVOl%Gt8@8^ly^~p%6*du9fT8+h@?h6C4 zhsxGpqyy#kw8QZx9q#st8ogR`+u#rMa&irgl%7$Kf(wakF2r=rAvhE;$--sIv{9NH=H8j2BcMoK4o{+sJUc23wu7;EA(ZXh-M& zsK@6(ak6*>9(!g>O_TfJv{zekFV2tL<~*nJH)d>EV9I+e>S$xrA$l59N@gkQe9KnO zFXHczc;B1iQ)h~EF3Vu`XpJO`?TnhP*U`AvnYMo1MVUdr$ud-(Vi;Z!U(OD-|+IO!%-Yw6^JoOw=oVqctI zH|C4lsysmD1Z91^OIKVs)6a@6VpwAyu09WdQjay#&f8<~+xi%`eK-W$`{%^26&on& zSTfB&RYR92XmHnduW6d)7@}$|s=l{b7;QX%Fq+n-rvCa$tf72J{)Cga_ro&R2o}hiQW-` zc(f$}hhOZ)1Ixu5k$zrRxD>P0IqT?f->Fa!T8rnaB_6(d!8-L#x7bkqc zyud=ZERf)$=;1RwmP^hNcG$8x6|o(Ai-7ww3N+eA!+%^NdC!BCT(OVzW;`J4IhU!G zR7jJXzlq<0t;FWx(@+y{hW4-Xq_3;mB5uhsTz|Afgj`!kL#!{-y5djtaNjpN)K`Ta z^p$zOA|%SIW9}mE1?mXh&T$x_1KVPwtdP{YgTgMw%!XvXu0`d?1rs z8r;QE&fjgU`S24vK6Tuht9u#o4m%Z=hTJ1QpGO(?8KQNcWSmzkm9Ek$g#T5cwpz!i z$KqF1SEt32o}7y;EIHH9mfda`bINx;?s`d;WB2@~;N~0DKJ2UzW7c5cvZK-$n}@Jc z>qZCbmATVgW8M*J%H3~j@zyiC9I@S$cZF;4#+}OCbV-+WlGHeP;AQ%1SS8kOo{hYk zQ<8n?YaE-PM;=A*Np{hSZ?G%pHX3qhogN=)aN{HAbogDq2Aj{2v)NxQ*6h5Q8YiU- zRjptg{#PJ9_gsk7$n#=r`zi{%YtIo~+pTm*>J|+99!Q>}87Gs?T?PWW3DI zmapo%@ylF0?$FJM$L47A($K>+)1g3gZ<~lW$9iJWtbFOkzaFUGpMdJ;pP`i0Bz&T7 zlY5h#ll|me(AJR?rkL@NGEJU4<2X&KtQPmGt|9d5X%lD^&FNx9#{X!pS~5&OCs;nPyFId}vzyV&C8>Y0+8zbzKq zPesn+Tr3M+g}N^w|zk3^#rd)+ZbZ619+)3n>w!kEN9llqkBc_u( 
z&aBr*LtT`#>7O+|KgmJuepeAcY_PaF^r$#x?M15nOzEnbBX!qYNv+jl>B^12BKn<$ z&>2(#`AJ`Rw6Q~;E((rsCDgv(5`TxCrRJ6&^kCUP%I@-;7N#`Q z;kB)~(~fU+rhOxQzg|K;EDi{}C5O;5-vi!0dD6-|qj0E8m@r-vOE-S2^M4C8Iem{F z$E-BuyVnibahE<1&QxbxkH1ttUzzh8XHzf9PCRYh3*L1_(xMq_@p|45akk4>I$vVU zo;OYTvX&)#4zl9dqc$A2&VqfNO!!l(2B#0z8hB6k{Wu~wn{Y3 zOoUI~TPY)=l^F2bNugN{E;($^ZQi)>Aro`1-zVb%`WD>lfGIzDW61jyYuGqWmzT>f z)4~O%!aySd`7@77ewjPb*+G+r@A*ePqO3XLm_6GT*l@X_1JAE$$0{prcz3Z8{|hl> zlL}q7{Bn@wu6bf<*i^Ldm@7?+n+WFt`-Gm+dGa^4<`D6Jjx_;e&l4>6zFt7)j#->ZkY4{~PDbF}fDK$ibNVs4usE2V~ZPWbz4B8(dfKDTI3D8ZrU*SoFOgz zA%o7G7;Kgmqr5%>10U?i`UQsKlLw1-9p;JFKZc10mA@#{OT(k@itaweAA3`~;=HL0 zXW{~+^^uBoNTcvz+B5X%SArhf@4?VSO$;hE5;giAM96Xl`=`5$nOU#ULRlEId^)}^ z^?}D|Gj#d9NV-1A3S-m$Ll>Jz*fY~iobWCYdiMsCTBmvBb2gVwwmCz4D%O!>UnQFM z%1vz7+KFzTJHvjK3C6oFlbj4~(QZcoR#yMP4sZ(p%O6!(|s-Bnw2iqwdsWUtIJTHzgeu`UQE?ldi-gU7FU~U@%aEf z9=1$}Bg@syXW26nm)w=eFvIU`#dX*C!2vBjcg=|}3BZqL`> znsTAGj2DGl^4knkexu0pO=iaIoMps4kC?L8f^yn4&09R|Jq&(#4oerWmg3T8OG;>M z#WQ}JgJY>7VH}d4H`q_$K9+Y#3QUlg;)Zta#^?A?iDw;QL zsJMQ4GRjo4rDId#vC@5m=)S0)uJpC!nM2IE>ZK_coX<|N z;&XEpd#7T|=gnL5ERjy_)wYP~|Mnt#mn-()bimB`3DTnVw%9nWH}1Yo#>RGk@%>_{ zNIsN7zHUlv6{yc)dp=OfuK84>u}0ke{u0A~jzNgA6I@}5s>VbqtU-ofBS+)ojO9q{ zHwWwWj=}bONAYL0AHgugVSur>cXq{$&iUQqYW^C{*` zjM(>b4Z2Ko!f`)ibib7b}s{0!><$CBInTk+r`6JEYT z&XcA)aqxLL@2xcB%y0`Hy4-~4s*R%t)uR}H)fGm+*GSQ$1JOYqD0;oDqbFrfTsy&@ zr*!(?J%?8Ou+V|`x3=ZAbIo|`JX1DUZ^~wVTj=@GW=uKX4Lh6mNL&4*aHGvC5%l{R zb+lKxBTC&j!V=f$Q!s`?~@MB#el@GBLr}mD)%+4F6K0ii4 zYl09X@(GT_G~#G)Q_iTc=67;A*V!7gm8mKF+)(0XE2cYFexq(z59}*V zmyS1g#jZ=GQ0wGN7d_SZji-!5*O>8bMUOAJXUiYM%=zP1Gd`{GqF#m-6dqA2H0Q0y zkR<2T2ZIO0(P z#j6F2=c?mzU|?HxceaG1(@g1JxHblDbB2a@G#2&wju!0`LN_{-E~Yh6N}GRl+ohg9 z%EwW=pEHH_o~?-cOt^5&6_LH{aplo;>7k(+)_ib9{=zZP_!5JllSk2YX@t7tsp*g!3iWZyW>=+AZYD7y%7MY=@u_H#M#^7_pAk52%#gumk zFxbdJSXFt8{hB(WF8UzUMgje+0@3yg!E~V&!mH$1zB@_kA8LkAJqbg*B;(&df25Dw zXE>hhO@-c-V))*yMkyc0i?`cR@m~!5hxWllkG@#atb%6fBl(mZ8%m~1nQiqkYMvz) z{pSMt&tTjgc?t_cRfX}}_QLe(H1WHZMV>_+eEMf&dQNW`N9iJMwJwbN=^=FULg{>0 zJv`ZAjx29?+`dqZqlfy7lU`rMH=Pdj!fz^_SRYHj|0vOyk^hM?(prqGv%p(b1JDH{ z9GE#K+CLg2!)(MUO(VK;wuBxyUL~u(&**H&ZxsIG65Vm|r&iZGh^Lol zBjtfUVkcYS=D&&3dwV;$1a`+umA7c$?uY1Da+>Be>hOZ8#;o#eb66*i<(cdBiUXw0k4LwUfFSFy{X_lNc zM#j&xwYVT5oz5;8EhZ@ZDq^J^_xEH;OM5vZ(Q7gC-0H=|r?08l&FcUEckFzq8E*}sNX39c19ym(*Y4lX_C<%N5l+B#Tu1*(fRjNx)@^0FMAmB zxtBVeJ=}oJMjCS0%Z42BQk}0HETEq^3KibG6uXbRV$-oi$!vlJv_H&5yxt~pyn6v< z&1lU|A$nYYTZb1aW>!iu z!h@R~P}!29(B%}7Su>Pg4tYd&5o&B4qr$y>w0T_r`}8YM;q3vB(eTL=U*4I*;D|0F zZD&extIW{gX9l?vVW09bJg8qKBn+T?9)(mqWHWV&+d{fZp7eC{RI#o)3(qduA=<+P z8`_v6d}*X)wATiSI&ut9)BwHYHVEz-D&^nMME-4U{4sTbQ=t!fJ@kiNm!r^ruPdfIIEl>{)WzT~&(Li` z1`2XVz{|!SHYWNo50m5j(pl0db$!_UG)6#6TRe{SMW@UC(WPiM-sNSX+q#X=7_ba+ z=~HmRVH7TY8UguWSGZ5HLTaEHzIRHHif(G+$2M~$_wc}CKM4uX#v=Ac4CW4}d7c_V&&iPdj&!0z1Z(A8a?~V!F>kRQ@t}8D2b;W_wIVi2m z#-Omt_;Ic$rkZxawmzm%nQnpZno8I-SOX*GxI(KkMyh|Njp9cNKRBy}*~iTw&FO%f zehJvoxCItpQt;k?Hg?zc#MUKtxD>CAS2GRpyh;=5awF^upCLt@)Pl0MHhydApd!Ko zKh{pgkH(FN+I0`{OB=DZ;0*TdFGHW+KnEv5}L#QOqGcv((^a~_bvh*e~(@k)d>zrnzYwpeXt55G_azVC{WHf!i3 zZ;B;6&UM5W_4Cl3yjWDn`%~5K0@9VQrjh3Rs8hZ(EtuF|^h#fa1IlOJxWX0=u_m~) ze7w}S+ZdZ)TVr!&3p9P3kG_psLMy32RKy$7(L@=|EOn<*y%&q$y)I$0hz9jcr0aOV z)?bE8O$O)?BFFBI^QD|#a-6X-gu`nYvh(aw{?Z=?)3Wi}Xghu_zllc^Y7n;~N z3o!FUPgp*;hG(cY)DCH3&t4Oy-_4W`ChMS;sT>>aH8FRXBkW)HM%nop7$lpAOCy8f ze=H3D!oy%STHy9UcWnP;4)2Ld?JO`x_YX597i|rM^)$gKH%;6qw}sb2Am~8?_GTm^ zcttQS?(c)Mjf~_}U$jtiI$Umqg$oT(Yo&tajfqlEGdnceYG6zc9lRN4hWG1DFv3wE zHl7~%@X-u!T07xPe=8K%*PV_ZLAfcEXbOCQWEkuf4!ij(PKra}XLcZ_gvnJGBO z3J;qda3tCsCRY>)pJ0fnVr?WH*TuL%L*R=cHsxtyQ(q(Gg)Ei!{-=uCKbm;4Lmy$U 
zWbi*{jLXl=;C99dz5$M?oau?Xy5^`I+9bt{Fo4|$<^P{*BfG0Hs;!qu7grhMQ-eJc zE42_eK@;AKb?~KaYfS#03X{5H_^>VkyYo9E;)fi=&l}>w4?U!r%HUCK3EzbAl5wy; zTs~T(Y`p?=w(8^buwIya>I9w^$i$lMBgCU`ej>x?4o)==$H%E=Sn|spu{zplIZ%ch z8Oc(QNIjgKtc|t1tk6Zp21V`9BJ#Gch?fab&=?}-yjm{`Cj<$VkQ!(wc7o*?8G7B- zg_vQALvtocG4st4GusAX-K3*39U7#HnTMf3bA;@{XJVUyx0Y|9J8>j}YP zv)>tXS=SqFB)J2;encyKdXd)dPGtt0?|FrSVf~BcPv~Z)fDKb}@;a9#TZqMk2cJhPxGC(1E ze>D)zQ!Isl+-1037>rNdl>Q#8gX1;ESoKvG+lB>6tCidh{9}kYDaN>Q$r`i0e6gf# zD$ce)g6%)Aq0a08_N6AH(%c(1E8Q?hM<1I!^bxgQ3tweZC5-tg+1mY*iVJkGAlwRL zQk1!7oe_*1ZSbhFFN~7=Ac z0G(T^V)l1U^w?^QogSAa!Fp-)nV(YHGY8Dhu!CWR75X`9;rLK3oPA`9P~~;apPQhwjuzmi zk0GATlI5)^>2P;*czkfeoL;@Lufrbf8F>PpZ@tB=y~V<;Zk^Ds*(Vmc#*0PW)Wj9n zGdNe@1v+J>kd4_<8^_N#by{X$^aq8DN@N58)Utkg@F5xLQ}0uRJN8=;W>#!ueZYGLZ!G_ z^+yDJ9Y=+0Eh(|r2GPyo2sT7`V5q0E|7}Q;4D+3#p1VxhKN|3H)IM=7WEmxCoTQZc z<dRL_tEpiVo|p+ zi443RkoSl0w9SP@3`9p!=W^-YPjfRT7wrY3t?yP2g7Xz($+t{;gX^zeB^uZ z#>8K&b=IQT16ydmN-b^KWX&oaRCsvAavIjYASV*1xH;+p4hF+BJ;*8Hb@ z2Xj_ScXoxMV~2?%%H*184z#7dn?oq?>jWBalR?Ghr$|*RgXT|NKtp*GC0^=G*XnkQ z)8X&2x7HbtV~QlxSU-3qXo;HV&Eov1)0BNoi`DeCxpQ10Wo=WXK-(bNGx-$_@%u^3 zi!V~2TcS3j##rvEWPug4+ zTddZL3+rvD)#H8Cl%7maXPA)Zr3bLp3qhb}v1GQYE5@qjp~vP@F}rXUwF-_S-GiMd z>a89%zimUqJmfSXRza%qEof(|GkLbUC|W#O1Kk##5dCJk)Go*k1*@0h$94^2sW6}g zs#etXkt5xTJR^2@@uAm471XNsnuvU-N|}Eu#jig4qN-04G8(+_c-%y3>^5tho<9xt z&Xdv5Q$ysAT`A5_nMPMhKU zjHQz01QWd7=!vT;HJv*m_TF=+BX>KJ zbINrgWm}0sE0!Tzw8Ic#ie0Uvq?xy^VDvZ{iB<06qOlg;`jJLmm+v8ub;VSaHHsR) z@1Yg($7pw54qg9xOXS(!z}XSOP(;}%ds?2fZ&61KSX2!JB#HEtg(O>6Ngl5ENKgL> z$%ecojgmK%)=!yXI~}1h_x-7MqLCQ!c?>#aJHse=gJiRR5T<|lf&>3$h;<3MWOM2X z^&kD6)}K=4$$F+-S*yYOjepSIemCjJx`7licBF_OwGsCY`lIn!fz+@!94p=R#pxH* z#lhH7G;PZ{stDKMYv<)W>yjNex|-{(dLfbhz|6pJ`tI8#M3wGuraz9=-dV zNn2(v6=A=Euuy-Uf2Z@X>+hY~B9_ZB5IeVY4^U z>R~!OIn;tPt~F4H*FjWkIY`vc4TXPEu2i(AtFn802~&+;6lwjEeC{jwsfU8KN^6PE zAEL2d`^m;bo0l!I;`GIL=~RD-YMx9M>;J`};ZC*`;MWDK{@upRlWs&mw$ql2GK~*{r@w=DP1FW4S3utX1b>CC91X?7O5FzMgXJO2r;%!DX*yQtqS>Y`wM{ z3!YWLJoT*bpTCB>`Y)$nJ8sg@N!pxj^p>mf7;g3e^I{ZY*@%uJa7zc#VVQ~OHKPfg3eICl zyQ!Eu$^~C1nc}N7QJU~r0e^#NoG*F;$MGiOqW1`K_xd{V*KUK@F4sXKbN$W9gW>mzQ^cdc|%kBAbbqYBaAvO6wm6-#GV4M5N92IAkqmNc#I z4atAJr{u2stX65r`fkP?z0H`1TWa&jJ3mOi@(M*|S<;@;dS&()fQ)zBrO9SX@wqrt zRCi4zqm(bS+eMe1Mp*Nfk#?*)!-)gG+ws@C#;jkc##7ter&Ie2Y092-5$CWJ-vUoa zmvgHCH4Un_uAq61Zz!u;kIz(EapFg7ZWZOocTbw~r(-hictnGz?E6b5%dXRU{nMi7 zg)9u1c~a_U^90d%{b=NDb#}XK$~W69_~JG_zTQg4T6@gdP)(PgoKfRH^9^~rqZU61 zy+lccRU%d;9=_}ENI%LyV8~`U8K!=wH%o2VN!Nq*yPEJH2RXOy(u(`08FIC&F6-R3 z;=s9jeA#aU8R=(;3qez1IAgQ4v3M>#EzXMg=O;-IPVCU3HJ9(S<1XrU{N-#1)*Wuc zD{HMeGSq=f7aMY)wX3P*_&TAobp&?qSSkIA=?mMueBrWrE4|OM;32(S*xa`zJ1=nM z%Ny-j>$x58bhc+R$%;8hpNH;VLFK_&qEDxB2)w^svKibN+MldNuTSIY$%}u)iH5vJ z$A&8|T5$0+M?Pw7&24+h`M=xReA-5Z-#P}7NsOZyZZ!v?8w;gDxBK9fK_Q%V4vEN} zmuTyIIp14k$paJ)TzKA#KYKXxoiE0$m#xbOKOd$R4>t&>0ZY&@)F0LPD-d#s}8CP~DnSot=4Jfr1?_>99(51%+Fk7wHMtu&CoeEOPNgSz4-e z#@QWZc4LsN9*Z8Dzff0jQ(RRqq^LvJ$;0;q{T;Z4{xcm<@^|}%j`|0rRHS3BViIO2 z+GAE~s+2{psHmNWK1uoLw>%9uH`U=V28!VRJA`%qMloUkT4ChWM^tM4#$UfNpur(f znd*(4t}-lFjF!5LcEG^V8MwbxOC;MpNB4<3BGgGPWCJfEpX;3(qQp==`k^HlP~@FV@{=}PW1W1a&l~3 zKs94k$hFR1wBC6f>61F6*wqC7f0L!5o}Spyl!L)VgTpTO`t~p~=c#1T*$Q!Zgs=`(Gwy^nonmWAuk9KLQ z@S~M~sjOuKjs2m?+kAghQHOU_@N+MPJl!wC`|Zc-_P+Stu2@Qa)gRT5CyFCY3#ss} zHqUvd%Vtkxyk&?n$4@ur@_adG+|%arpN$lHNsU*mokOnWPC~sl0A@Q5O9dG@@c!^y zbg2A66E2wY`Emt2_Oaogu{Nw9Y|n7D;!T=ntQn%id9QT&zo&P|^X7I@w0II4%brSV z2`}(Z>6a@M3RYWa$NmvkY#-vpt6H0})@%b#-lAZudP@#zA?Fi1hTLh%GxCXgD4rcl z#23%|(%z-@2sqcD+)tZvc8~*yO|$0&69vEAWW`UqnsU-5Yvx08KCsf96E@5F^2uA2 zxc#Is8k~Zbd%j3Hp6cS%C^sst)#1~(oH%oe2ahYZ;0hHh9zVp22P`t@*HcY6*2{_w 
zwj1)fkV`bH&jDdno&vpf2c)IZrMS|dONsqe_~1D^?m5hvW$*3T>ZLP}%JkxqxemPZ znF;T?Va&d9GB(|Ch*W15i&>bC@`Ms8;OJxsuQCy{`z*CLaAL!6u6%!^3vcyz=kYH* zcx{3`ODQt$k|pC|-3<9wt6aK!Ctf`84MgGCEQybI#3MfqG4{`JO6#f1f9Ke7!W?_{ zRccRTfd>x>vfiiG0G7y zwvNX2i3xac?qah|#8wmf)KEg3IM*J{qDZ&%pV~t8a z?2Be0Y_caRRvV+~d73o!m=pZ23`c5@=WttNBWA7KFaGQfp@m21Q~8KOI=HNcnx)mW z$5o9yZnhJCse3TAz6+X_x#j4lREb?|q2b>H!wWP-YTaLP_v#t4b!(*cO{%OPtHXQm zYw&2_M!L?mG_!*lX*zWgzFs5nTJD0QchjZd03R5&+>gY&CE|+J9!gSbw{)Q%-|^Dp z(cfgew%mYgPieE>er-nP`^FjYNE3Z->JvhJ&fmaXjh0x}f1{-KZV5~#?-rkCeW7DR zop|gM1=l{e;0@cXIn!RL=~OE=H8kZDGfa4Nqk@YIj!@CxPGW&F!+lP?BALV=z>h~Z zbn>r)_qB23c2jLwd!{{KO*i3#Icj{^*PM+y*mGuo1%Ewi!G`tEDK>nG2y6&LfOJIK z8MzMX^F67=%~rWaXLi)J;BvbD5{4-g_Wv!Ytg zN51Q`?zmi1)D0AoM+V^St~Jt`g#)3!%~Xt>lth;tE!i#KmDNAk@uzGj&I|D1$8)S% zt|sS^`X;Qk@-t1`;!O6(9^(4xj`-!CEai6h#)BgnNQize!m=OG&3jh7d73l7JmbhI z<6ZgI1q&YE!jxY;QRRPH2^3H)iCiZjZMq$@hek@Nk=7WyJP-+is={lwCat(uK@%>U z^VI7${662F&uung`zz|);c^BI^(_@0B6q=NfeWm|t#P<$mh^er|Id*IIwBRRGgQRG zBl|^K@G4q)LxoS}$#_VIPh>N50p0(dCrW2M!`sm_P-@}^T4IXJ8&^mjo;V@pcnE58 zmf%iy6bjxPg^Eu{5j6RufSWsgyZc<^+;9=i^;;mz9g5$pMx)i!Hkh$hj?k2N=}Le( zWUnUU@r!a;H_XA}=M&K8z)CE?TnkTG6K=P?f$Sbj5pzj`+q)nfD4&evk~XOSV2fjA z5z>BF8N#WVuubqlmAy9h z%rwH5u4z)4x-I60*&*-cWIS239|dWy;_LKRBH~<28rC$ON@pa}`89^5*4R^g2sntv zx7*{=02>^+Xo34WDN^%cQ>b-uf!VsXXxXm6=xpAe?mE^HwP~Wcw=|imbUFU*OS(i+ z6n!{S{8nZSQ%742bM=6%FiJYG!U;2XjK;u8_Tth{FB(|&j)wd*;OY?ueDReDKU$>V z-dUFXFkg$qLdH`?Up;a9gut1W4rs1fDlPPDgE^Wzv1isbQ9kG?Rh+QmM;px8X@(V( zwUWo(9Qn@@Oa5wS!Ee5Pr6YaLh^e+oIM>$;pZ2ec{P zjU$#gaTiVH{SMf$Mzu3%zg6&yro0>tC8mMp^(@- z4=H-ucsf`yPe?74Y<22@9$HqA-?qkf-(+cGoh6(cJ#Z;;FwSQzM*hzloM{&&y#98g zQ8hE@>LWXHEe#dNCSSzKnGx{0E|8aDf#7@AsL@;^RTx-f>pMSqkDQNuMJoKI-+?68vFS9{(gae8O&p}%phq^BV6(S1{HM8sqZUc}fu6`+J_af~<3xY&G74>yb1wr+-gv;8PpLX`$A8xB zVQ$GjU-UR(`6?>CFj~Y97>dUwUZ{JMC?(doBI-#JGFLqj-4?#5*GFx5{&F+kaz(+? 
zNOQJ5Zp9u@@ZiNVR^M=(u5Q{P9<+0r#$Mr(7a@dirnw@KtbNJYCVuJ8(5Ce)oKo5Evh$A|FI8|wdFIOX^Gh-~EvsGY) zqq$JO=1iuGPtmS%I{fgd5x=@;$|3b;yZ}!?u)lJtvd5Tip|~e&MH{VX=#Ek#Hc%Ih zEfz`YM{VJ})(e?WbFi;?im>+>O2L~SQ`AdME_6`mhuihI3m%aCm^WojeF6k^LU@8b z!iUN+?L?9k*hY?BN-ZtC*b!S+@5LOa4B?gEkB&bmC8v@N)Zudp`K9{rIPnb?SEA`Ai;5o6hGPw+sw_eqrL^y-vuD)u|jaM4a$nzqhwWA82<`FhyJrL)-exr zs!LH+l7>ggk?7rJ0wRX?#i4o!yq{!`v8HA))rpZ_te0U+xjr_0Zi&km{E=!h61g`Q z;In5O9AqL0j@@z4wF_n*@Pog$8@fHUK<-du4AhU467D#`WRL=H7wKWXfdf9T z3c<&LOQD#*3f>Q=AUUcpo?U5+JXaT7Ze@;x-_0??Z>bLvZY-A#%3pB0p9K&q*68ONL4F zzcooeHW}jjOGEtXKM13HRijgSKN0shSDZ}PD;6w1BDQavCg!ZWg5<^?=sv~@bNlGy zpPB})1kac1N(}LQiX7fbhWuxphE5}4P9_lVaK`?^e@Jm zUT9g1s(}-+{Hi$=aSmv=EmmqhSve2R9w_OT3ez2hVuWuNg_pjdGVd#NC2l|c`rJrC z&JSp`b1>a~Hb#uGUyAy}_J}XGz`K-L(x-Li=(x}WiL3HZ*0f19RIa4Fm9*!)Zuo;eYOT>>py~Ly-W#-(s zf#!6(Kw(3U(eM2GG~8zu$)~&&$*xZ@dtPf8Y_-C}TV}}RMbi5N#<V_R|aWGZYcZDIV*5>2O!Vf-aRdU{S1?k{98 zsdt2PuXqSsEup%#KwQc*pssIav~{pM_5GM2Hom`v(@w#N7-NZqpB6|utc~W9MN;!~ z876-;#^1xr9I>Po*3U~tTG9t}H*geaZzpDL@DMI#&2ZLViSKfI_$&84RxZQeCo(wo zm?cGJTVVEl1^mZb;?WuhEI8+lJ{{9hx?n4A{kVc%w@<=MHwXCL9v;FLe}w`=?#kh% zoVRtZvn8W@#(0%zj9hlrZvk9n>9{aC8Y**IqlJ>$wpoU# zm@I>4vk~^1L`wDRjPY)R4$ju;z{lDZ_L)6UHXsZ`x<})7*f8XHMLwkM(_I|--u_QARD0zvOOV{wEh ztZGcK_`Dfb-!{UC07GPr>MQk7=BTY}4KTh$ALDnJVD45gv{IO&U$ql{?lna71tUz0 zR7Ka`PbBGyA#NQp!~x~rNk8qN8NWy}*l3Ehu3CsV@<%GYZUUoXJ6x|fgH&pO^J7~= z_RJ9-b4}4Rph-GBLmNGBYQwmj0%lKa(B}9u>8s2bZb7=35TlKbN3`+elLC9A6gd3R z9=E4?A?%X}#%n75`kFCLTUny+oh~|iH%bFHsbRp1Vbb60MmX@FHpWj<$J$&yR8291 z0d>OX-pg_I;TdfGoQ3x(j5H$)+>Ox1(NjjqZTKqf9`af06qqcXayNngf95zpRgSZc zI+(Je2dbOSU`vIzNJ$wf=4N#j87eoh*kTOkPO!nSRfg#ET>}%&G)iBWjFWDBlOcVG z9G7qF;P|~(IC}3gel~Rw)f2jjb}f2{UEUi+_l-TojwO}2wW1s5_cKDmGhGxYXXapf zl(b({4ZBZipnU5;NiAXq)O|g1dAE@ry4ez#QVzTHgDt&dtumI4?Vl>^=Ys4`PhF`He>bJY1#V0cumsntIye9e@nW4@{4<(^;Y(6|v`mj;Sqd8j83Nyg0 z0ZL8ow}D|BTU_+C!jDt-IN8Y-%jcS6_X|CkDVgUqPY+{f>cBrl1MLlGODVHVaKF$H zBa>BeR>_6g79JRQ$_1+<+^}G<9l|!qkP{|@x{U#9eYG%mn-o8F~Cr`9`nknka(}i7qrcfWWN%UBgEcWW! 
[GIT binary patch payload (base85-encoded) omitted: this span is the machine-generated encoding of a binary file added by the patch series and is not human-readable; the original patch file must be used verbatim to apply it.]
z@z4yB><|U-l`GeJLj6<;E!hZMr%U{%Ii42S1gj&p?`-{|ma?>hptA3##?Q7z!k`&g2Fob@7a=kz_zr__QB^`87uF# zF)ojfj(Wex&1Ifj4sCNi2mSfEl(owCi7Bi9uf&%BC@JTysWij9lxcNMr*T=M5x!x2}KFuj~~@CPAZ7JK1rj^vde&MrVcU-YlcN$IccUMz^vQ zYmt7XliI#0IyAny$`zS7^-Paf^!OoU5TOk0Z@p>{EcvF$+&X5?nXB8o8a55sWIujB zNeugWKX>3XR)s}s{!!8TgF$AFs5dRh`Ec-1Mc7p%jtPbvg!M6TDdh6csc&8|vIo|R z^+JVxzU?Ehs;Zo^xKD33;YclhOwU-Pj7jxCN`Jpn91}~H>#zrxPLq;bqGHGVdSpK` zDM^Y2vgSjc!=AG!9gcovgP;E_-%HJtik77Ku}S361xHi?7>emK;EpuwF{>>H1{X_u z*=F)faJlb?ADI@KvBh-H*u$uX>??Ho4~^z~$X){94c=Wv?JcQOlY<$E=GvTc*cZtG z0HlU`fyi0?zjXN%pcn|oiV?lYcw+0J8JjHWOZ(>$pm~ct*)I?C8f0khTN=MEDmaBC zDZ=cBP{-+Z$GETIC!oG%X)krI_wUEv;P@b>-&!N!{Yee3Htueyx2l+96n|&ntvE(k z@{8gvh9SS~PVq}_9cyNns}DGND>mEK)6PkDBHh6t6<5Z-h%2C)1iog#l2X6bt1Ej> z7CJ=k`EbYT9zhl@AnIjah<$ySH8=FV!Hjc{Fe#JIf&~w+;Cz=>~p(G z>5TH83bd;N;Aw_sdqe~qO2=8rtpiO&DkDJ~)V+MBu>p{F%0BEqZn4pGy^MN>m$&dRhrKJ-DP6b%z%zlZ6#6KfdBt(erk-AMbO zREkvw23QSEo}T7uCfj>TtEnkK$w z$C?%)6<<1>DZSpHPDr4IjHjSeqLvz)w6Tk4YNkiM_kV%08~NN#4M(AWplCzx@ME6OhI z9v_z_2fk~Ua#z{^c6GtYUs_mby;o#r9&%b8(eB+Peso^_MGiEdx8c1@^vykWXnzV} zFYj{8fl`+y%kPvAL}>w@a4^lJA5@br%5*{n-H9L^pPY`+bji=wR~yI}ew$c(l;2?B zRdLH-4!_C@$y{+{mHBRZaNbt6448Fm&bSzZT10$VGc9)fCKCou?Vn?Q7Jb9CpVgX9 zD>!V*Junxj?Hg)kMl986fI7Y3CDeZuNW#sNxO-B`Wn%-YhiDBhpPIn(wcva{gOe*E2~sF1FdA^dm3MlwpaRWv%(zb3ZGD z^6iO13sA0zYrd#?i?#DCrch%UwZrLpRZsJLfZC2^!ip)3I!YFu)@*qkTiM4_9c2bz z#BmhR(I}$Tg&x!Sy1s~wBNV$!0@Fp_E!=-YR{!3Fu&i51*`qG~1CiEeAb7(CSepjk zXua(^W?*K(-}dX-0U9F(dET4Nl!xLeH!rpGbY7{@hxXgF^z2gJRYth+dgTOIbL?}n zeDlJ1FVuVvtoDt8zyl+)+|>xmy2f@O35k{60GhJapU2wov~s=ZaxTo9#iR!U2@drf z30~6xJR752cgn~uAuI`y)AC%<>9+9(VxK(Ba_|K3@p}6$&4>ZbX9Eh@Nu}iUf`wXx zCWLc?PKf!d;v+N(g#As08!3f9+>-iHa_L5jeeSsF=+}L@6caVWy?JcRwoL5 ztyg;Xw3du8RFK72F^jK|Q&{mEek*@{U?KSf`pNwL6PBE0TF!Le)cRXwX^3Vtsy81L zhyF^UXN~VHVz|Wx-&&uST{ked9IUMsb!OKaebJ6kCD2_j&+!JQuRU-5(~{&ux(AjA zq`BFc(|#%*k)rIi^d-L0d`5!o?! 
zPqCMOi*80c8|r}6M_C4#88aO(-nMO`V{x>>&wWM`lLwgbW=&J@AMr1Ue;u6Am}S&C z&{I&Un|bz%C7(sin~?*;`Lv(3(ozg9BH+UMO&3R=LF^VDd`Y0z3c-=aO04pk*cAR& z6=r*NOzy5>7cl@YIE-jQFxT-X`Q$I9BkIkWaidU*l<%qj>&5!oW}Hdm)5&-WPG{Wp zH*NC|+M}OcOIu3uSwCkz#H_UpPwcceK7vCwu4M^BS~eyZy$aLZ7yof0n6lF1>RhMD zTr$K(o^=!f!t2VFnU`!9S)L~4px0krB+5hj=ob0o=6C5K`P?gH?^rvElO9jIHsN{} z{2CT-r7FiiemXgn*(o&Of6fwnK(&{Kxxu-)Bx)tk*KzDpa5+%^OL)3$`O_5RT$?9N zvzOvjN(YYmgJh^SzERCGOZLCuCjWXl4T#A16AGxnpa zPw+yYTMAm`#a7ll-4xg!KcFLjGQFOlHrQOrS+~D6bLcAAdKH z`YHo#dG%7JQBff{|1fa(e28>73yfRb_E&d1d$xq}QOQc7+!x-g4Q@P*mvBBe+!I<( zOtDyHg>bx5-jAciO++z2zWqr{xX(W7Ox`wVBd9Ir1}7_`YKB-0l{yMR(>9xx$5iO< z18HyQNA`z5FAOAAXx|tIf89^?y0Rh5FXqJ~9r39aX8cmHy$j7vva|Sd-VQoihWuq) zX{=<*^tJGD&7ir*3U^ifJ*KSoe{Z_~=I%f5%UejcQkU{rr8Btma(TY4_|XZ2sVdYv z&nFjXeaV@z^7hxc*n134W6dI%_NP?-a@^$>&B^ZuSz+)G=nz%LC<=*@O$=4NdLnwB zt3kitf%C++Px~n^+q?w1@velUeAF>Cx0Gwa?G<1H8=%g@&~E{>HlbgUhKucHn??+@ zIMR082j`$GIoQA;*Wk_2f%Axw6#KzCRnd_I=!)=Y#CA~60uE0cUjS>K&a6e@e9L4Z zB_pW|g3QMhOQdFM;dv|ZJI|AcnFgN+>GC|AZM&FukA(wv=Gb66Pb?md#ge1I=?)&U zHPTC=Hoe!&sefL(Bbu9sAg+CO$^#A)l{$14nnwD!j-z(e(6;*&W=<{No znjh9E^vqA_=r{diGy{}>2M4-q3HsfHBrK-GyGPCYMujTYp!R7>`rnqX&#wp7exj}& zWnMB@^U5vmqe0#q;4rtx#;b)hUrBrViS@EmOupm?(Q<~`i?j3065wmhP_O0f4*w_d zErs;D&-wyu>_Pt+sy6L&0{t{MPobTC0~b;LX<*nNmq-p+iW|wO^Dj!(P0XlA<~^}% z(g`Wuie2SBK4q;7QbgE3wEX!!YR|lzO0av{D71a-wJp_CqMLBlv!zKq{xQLAZ%?g| z{JvYdM|J8@kwDf8{2l?m15#)`cZX4bT+J4g|Cco6fA86~_v`67A(@*lI9(u_{xg{d zP(FiH35Kak2mLukMU`OhlW7G(6qq{+V-D1dEJI?!4;-Fn;;eZKm zc4t@d)`K=Q`@h8!jj9#|4;uyPw196p^txF+#uWovecW^8@h&OWr_{EhH0yJt1oaPsJT5G z!A8NMs#Vi@o_6WN+kgk}BlrKC$MA2j{r~^(@8RW7|e|Y@0i_yTeY>v2E`p9c#z7ZSUBg{?43>nK?gTF4t|X zs#QJz?I@Sw0itKVCl{QoG-pKbzi*yFXt8KQB+nW;*3vp?TIf1rL|a*q(p z#JO*oeJXL$e|Wtaecpb$5NmfHfLx6EfW+EpOzsTv3DXyrGQiEO29yL5}^1838(*9=*wzt+H4beTQU#M-C0__bA6$P&l2oeuBoGW!M zlPLF_OfvW}vF$g*oWz9KyngZre>Qm*>^&}RFnZdt+9|&d%e4nC9~%jLG@&AhK!U^3 z>e_uo*e*E_Q=|GoIhM1+{W%dzM1Xz>lPQ8jI5Z?*g}NPf%M-Hs zg}m9B!y|zA$vSA2kc%+5r_0{VXJ!|E;nA8w&SOhpUF*mBrBiN%1mVe|@C9~4K-&B4 z3ec!vitU#$dLWqSo$Uep0UZ-M#ilf=vi*Nv_!}0t3 zijSDQO;c}{4jGFQ!wdmfC(E_7N5&TM7)@Xe(bM@-)HhYu&}Pg=($+`L_yCY_$fI_C zY=+!zzaP}23Odm%mRGdA-4QO!_0)JEqGgun_HQA!&x10eG3l>_aoX>ykAVhTd24^yR~kwXF}w_e-)t3745)!UaYmBfh}A zz*l(CSJGqj$3H?1QKa7)o}R6FKe3OFB=Fb8`^kveUZ#LM@06<|-`mLlM`j0xKnH%b z=*6vCPeQ+1-HV-qwM31)+h0P+rp29DdM=-0tBK1d8HYfXna0=?Y9^3nxY{?a5f})a zc-*0iPOb1I;{3|=ylrZk(I>Rx{yU%~Os28vn0(N`pH|?`TXn@N*B^ zYGmojN05HT^moo+Qa|#V$0d@KE#ixwc2OR5#|3m;&tt5*D=t+LSKb|xCq+TfJ0&8d z=XOkMxsT*KX&dyBRzlQ`g_axYmg!V?f>(8sdP_InEwo2C8s@m~9BJLl(;yeOrLK9B9t>ynGwq*Q&Xghs2_j<3tX_=m_J9!DJ1_U7wKtITemB#XT zi4=Ze?#KxuI*FILQDeIEy}E5brk`K!E{I#MXa@yO5#4uQLCa+>hKg-vE;_plRRrX7 z)0|r0SaOGPzE!nTK0dK6itw$UT!`Vv5B8Z4p95A|{9t;rzuc$l!t z+5I6pcUead(#}?XxoC&N>KGyx(|;I-%-$)**%Pk5L-~C zn}`yH8@?uMuy21S|02IWp#Q!>P2{apkk5m^ z_jLOQaILFLFd{odPpZ35xDZ`3K*glU&}6|!Jlv~)m8)>*yOw8X$}uKl&CUI?ko(tm z(LDF3iRp7apgKbk&v3uP5vt$o-y9dnv6d=M|Hf$!XN(Pm~f$4Iy9iq4Pr$^}UQErS3Baw=YluAUMO! 
zQAw(?Md^NyB%OU+;I({Egr|M?Kp46g-ZiY9iyVt0%G0ny0Fgx!`P%%VhOJ`x9U32_)mLhJq)vTMC~>K@O5s=d4cR`0pEb5 zpMpx|yiSi}Nf_ljDe(-K`(xD+MFaoi3>gdTV7mR1Ko%ImUo4Et^@GgZI zE4Z}j7#x>K29yDve)~T`;|ZQ*yQe5e=>3T+N5bg#hIIh4BjchIYKYQ2pNDm1bNlr; z06-ZZhj)jmhdwRH4F48Ex*X;CEXa6!)soJeYJ}p=j4m!Aofw4TnkT(>gqdR=pv@j$FUsLjOn-mc9JUKbOKy_E5O?gy-d#MJ1$a;Q}s23Vhs%&Gfu^ULnus=YM zF8?rN8h%3d+9Mb@8>VbVo&e|7G|z1O>EWs10-y9Y6!P+C8#y_8XU}0YBqCTxQcPs) zWD@S9-J!h#t9gK1X)_sww%w7QcsJ8vc^>#>JM^GFY-_gEr#EBge-AomeE- zY4kO5Cpn(MPvqIP%h_OKT^LS!oRIdgu1m)lJYSPEjU<0z4MT7ROHug`HP`|zLEGc=r@|nO8NKQP~;HmE9^owVjXzjEzAdT0f z>M6sBGAMZmKLAVt-quvHyBewaxnbsaAt4(w$*169 zbZ)`<7_&!FZ-gz+hih*rdD6+(79Bkb0WThR_uF(KF|>V?8T3NH1KZwN9Z5A=%}}C! zC|Ob*2G?8CDP$>u+JtMD$1j)*i~zpuH53 zTNJ+l4w31+#Q3XId|r6ST8uk#laCjoxI0ld6OS08aY zX}7Z%4=$l{GQ5a6VH*Sqy`YGXe1iWNZIf+Fq1JsE$wcdn7;P_>e3$vEKm3Dr?2UZ0 z@u0Bi>p3q0P&9mE$s&h*u2U2duQlG-_q^oTAtRX?1@EA;ADg`tH;w(dJ^ZFmxZ@4O zIr_f)63PoMk-j3~v87@`f)KPHXCII^gAnRIu5ko?f-!J|rAf=RsdH&@WN5r2e%bF} zY_%}~&iFRn>$&G#IJTk!Uc)YnDSz`dm9iEsOpFvFTxKc(wRED3G{R;dlIf^i57iXX zoa|_>nq3{`Gz7$c;Guy@URX4HMW|klu-{mkBo!|Ht7|=_dITMHV~d`Zvc&q3Lnm5c z3mN<0iVs5}t7;nwZ(}X3pSVZycnvMwi~8SY2_x~F$Uq8x^lz~FqdJS&-rDn8Gsg^G z{T|^ctMkqo?|XsfUlu~A9Fe5F#T!MTVy}X(4@UM!CdHTwkF?Zv8c*I>4gudnbXz!v zk$|KwTaUIk!XUDi=sn7OM=CeJwOrA_eUt1);dye*{Ciww^Ubw;;$fd;xg)Dr|FUa= z{`Ep6vxA^#+Y8$jY^&*O=qw%Mup$;mYLg3mM<CxqrD~G6VHB#Ec<_p7tndZeAPF_V6*En)-IG$lK`S%rt;N-1C=5Sqc26KN@+k2o`voVnILt<9YP zvBi4iS$N32;x`Eq%6a|QJTgctSjxom`p-Z}PEQ@v_BpM{&PayIHJSW|JHI3`JTNd{ z3`65`H*SLUcpOkmyX3J8S(zs*Qc^^_V1Ds)2|^k|#VTmhs$|FR$_hRQ?~r<9g7z3z zWA2mgX)5_P_^rhGOXKT=2YY+if6ss0fo0GENUnp`_c5YV1_5`oSh7L}8^TZ7STq5i z7cgX9f^XX{d_ux}XfIe*+OIvg>H6AG66%q&W7Fj6oWHqzLY=-PY`A7tgr2P$ObG=U zY~%Wgy&41*EJ-8_;)(3c5~Ff?d$)*P_IL}XD1tJBb3p<>GYx(-WOCRQq_|IM+7P}& zFz-ldmLDIyhuraS32+BZU!P1Hg`S=NY)&ECfLX!~oyZ-8OD&TDm}U?>O+o~!U#5kd z{m>oHOP+&u&KSFRWPw%hNUx;)>x9WO4wng1HF0H0KYng8Sp>9XWRR>xqHJz19MZ$Y zU0|GMZOEQL6j+A=WYoRYD1Q(1bOv@pws4yy9b~8?5l$Z-;)4l=$38 z!@BlYw@;5X>^txwm>dx+AqU;y0*IiJ^!2xMQ6kJb;t@f?lq)3=8(&z)Kn9r-b*VM* zEdJ!^Qg6T>=oFt=7Kri&%%#yE%CD1(>)Z1AOapg+yicLNmN-fMci$Ej0FelI8Z@`f z%{?v&>K^a(w+I&dNiA{2un0=u7mla0j7KfNruBGN6?(EPar8LG{khjq*}-#z-7p|6 zhkQo73c9H0dI}=_QCO#tQ0MEWNC;sk`}~JJ!0EY(c`$Y4KIxo7nI~4D9T5eAcf2p6 z{gYv2*kMlB(a7!rt?HY0a^%oDg!Pkr&!ZHPJ`Ac{0`51pJ1fn<=Pmgtis2296x|La;La> zuJyF&==bdT5nQdO+_WTTRu9yI4^kD~{R{s12%8Smk$OvdK3!SO4-!=@MT<~Y?Nkyj z#_7pso3Z6B^9=iQXAZVbNP1Qze((ppt(D6 z<=$1Vc$nwzJ#<{mTcm#BksnWl)GzqRh8*L&qeckyND+tW+;Z>;=YJFv;~b=*qbbJK zWhI8|cDK>LwET^p!JN@-=$E8k5Kc^#IkX!AvYw|{8;h_V3fm}@O2{j{=+z_`2|o18 z`3^mh?EK&|=nn`=5=@>P@7Of{kPQorKjl45cAO=()jkqju0WDQwES{ z(&A{ERjD;4l<;VkItu&5*kQUZ1q%F-cOR$sA#vkvI?ZxXP49D0&(51)UMO=6!E!gf zbysVpiuHR#Y|bpmJKV<=L+36(Z>u{|8?H~QU60Th0$P!YopYP*h<8E%14aFlX&~s( zZAATv#h6Bp3~ffL!5*pZNOjq}{AOzrs6;L9+YmR6`pKdlOj0oapqw41DWXd3rt1Gx zy{qUDG*GBZg?Du!gA0F|Hper)T?u}vEu(UGPQF4n{^6^{qU)sj*ZG73^q8 zE#=Di{?|M`=!X}xQ^nPeg0RXkRD5~PjiaXtK^J~i9;I{!q~4GY)434JGIx5 z{HXLgVu6Kg&tgpJV}dEiK_txj=C9rLSO6S*zsk7Abw5jCjI03<&k@4OFPm{Ve9>@1-;aV0|p6?l0w_P_& zcV^ldH8p3Rzm4MJLxkduG!!KU<7f2vHkyAp2FdrI-46;hxlTiF$z6&p@Cdfm+@9FvJYYKAv{`tfyjMW2nxprX5d zL0q-+;1ex&Q)F7dx9$v`v)RL8sk&A7zof-&TKFg1+1h>)6NY6T`;AVR!>ZTIN6wEh zXQ-h2>{rwaJx7G+CYVfOvTdB8UwLkLwk^IIqXn#GFqPFFns{_43Pw`Bm-Q&8*Ca>Y@Mla0ju)=vZRM2V_ zF)qBb(C74axJ%3pDz=d6!Vo1vDmJ>@h z=yQVN+D3@ottCinfxU-I+|<@NMfrms6j8iv@D6}a zHC)^@N^b+L3|FA6=Xx;rG6!VtJK^O7wxK$z8!3+!d!$&E z^7o>8Md)u?1;A@lJOGI4#~4fs^cJj3kh0bSp%~7LzAJcDc@L~+Uww~;(Hl?4;aUYc z(7-XW-LO+y;<_-eZtBe*O`c_tA`xh1fGg71@~dioA1NF`v_wf?i-)V!TiT?cbeR>^ zYf5v1EP%2{B+WUGQ>L_*R}kcfYWk{7!q-~g|AyKLfNXymR}?jqoq=RD)GiN8FZn32 
zlT;>LOZ_Q^#Il8X4YznH-`EOiRUPE}-4(0r{rktO`H#re-LMB8w}dN9*1_Tj)T|jE z)2p9i)~)=M5MsW;%??tKD%Db-s5tD#C;n59t zfe#4BS6S7ufY4a+A>N7Wc{!(_v|i|r&6M=Fw^It3QU>mS*4$;9X)M>dR&G$pM15w8 zrdAv6nXd1qNzk+UwP_I2gY0PPeD#}~$MNol4OSBStqNheT&0zJxt3pNvB4wGMK*wt zSYxHitZ`x{qup=~V zO?!n!oHB|zDA5LG^+d{lOH%pjO$vX(On=zajRxaOAG4oMV z^bM4X${oE=f#auMq&b$O4=&PHX9Uw$0H@v<21>5j!+1|9>5Mr11LZEtA!{8knnMR! zHZTGjCr6&VzlEYKXw(z-$n#4n`ClyFF;M_(_%t>Tp84K9-`90dvtWd>C9Cddh(qt~(%NK+}@B)&V! z5JKY3hjBE9Y^Q>_oA2C!rTI?2hWBjF_C$Kt{=+;o9uP`Ex%OsbNp<&Fz~?bP&41Ks zAomAdnho;WMei1w#Y+w3kSxV$Aq#8$v6?1sCl-!>Z-N}e5{Fdm z)Shs0ZL`%lmWx`xXa3rUq)diBh6K;#HxZKltvC1VR-P;}VYibFj5}uV3THkAG?PyV z?87iyMss_H#v3dv&SnP@5j4hJ*sY5;`|;-em|M;mqPql3$345wU@ktH^T_7@f^yQv zBuPUAA$>hvCs%@9Dv#t1<=O{!VN)QV|IQAbtUHXYVx`9KcE3{jmgO}h2${-X_Ur2wbES4zyhlP>Zw)YwgH0!zhhC#T*L(I@@we#2cfFt0jg>n+Ew4XS;$xy0N~+Gf!XLMX3?76>dn-buOLPSJ0H z&2(qg%%KQb;bCQ7%l=|WJw3POYpDrWBm!aMJmLEOgoxsBQ*gr0VoRSAk z9S2+bpx4cL4Wn64yC9B*$&hWATZ(i&bwS}vue*;BH>#b+zv=aj_~JvNX7hk0gEF66 zlUx0P2zI^p%8-vtsmc1Y*gGrmQf%-uX`D=SE3r1>pBcR^1OFEMhN-sW_xmhy+yR`L zDJItH&k*WVlDiXwla`71_~Rni$QD6ogX7nMCGMMmFMXrt=pn-G>ja7~uYS4RgM&aJ z%ji@G)UW1YvNLJ3w?;#ko#cgSKZvzMzXk24b0pg0?LMK5b;I;svtmU%Th4W&SZS}Z z6DGBvnOd%o)(zP7!NU0SCY-K3>ye1~zrf6~!BPvTo&$evgP};~CxmaAX8ma5x=lP| z*h3;2bzwZ@tt6V%$~^nDSQXKZ%dizrCKF9^S>`EBYe2!)EeYmz1*bnP_ZYv^+BNH- zR&h3JqX{CRV-%seyCrSpdno1HoWNW}20Juk#y6GlW<3QlPqNwNsyZxy2U;L6r)w&v z&7xzTlibV~(`?3g45BdGqk!IAE)dNW8MIufP_Mb`aWm|<2&5f|Scc8|vsZjK$(AI9 z0<6Bmu2GP-t;TaEf1c&tAUrMB-cllSaWZTnJ<(IJt{*JxWzYc$W!khYAqOF{#FO+S zxQbbBe3fSwBFhJ{Gh){g865Yvp;Qg729XNQr6XvW<>aW`#*Hl0uNKUDb@TA&9JJC3 z!y4{o#f(s`xD9$E)_2a~>Lr-OBZGHx+Hnbf1%w-Zq-o0JqKwzFK*cHbTV`oNgnXPT z5yE_(jt}5OT}>OraFHA-8F%!)=W@qquK@&>r@-E$H6pQk4bg&skvFnOupX~EKjAu( zqnp4i1m!GJi;R0GWU4uQ)DaB#tx!tR<;Rf55;fidGgV(8pA?yKeZYCz`>Jdne?5oJHv?)(^ zhUx3r!;%9Zg>$c@`E8=Mx}B-_B`%5gC#7U3{%E)4kDG|!vuyOPDBkXolSX&wOx3h= zKc)187l7Ao?BG4ImUIO914BAWgZA+7Vb?w*00nDn3cb;Vl+vhPA0>PL^i@nBp5`~- zyY;;HgZ+XA%9sP>=)M7bVJX@w4Mt(;#>O%Bo2<%WG9(UjC+t#Y9mu2N491MC?9E@j zS@Lw5<@^RcgSZ*fno3BuKWHs?70egKo9Pi5qR{4;hmm?cU$e7 zN|4z&URQ@Fy&67vw&s5p$~$yJU_L90)}?yy>NYt2*;^tC`LU*pfA+idnHk*O4HK;X&(DIA}KJ<}^lp zZIC2xZgZ+2tL`4%4A>-j1JYzgs;~Hg299HGPV1^fJATK5lGQBp1&L-Bahyr^F|ijM z?=oOn9?IR}0;6lO6*_dZR20Ph===v$v8Ad$c{xXuba$q*2Enp`t3>wbaT1{}HOxJ9 zF{dL)XPkJmz86v&8Y@w%30EfF3|gzQY{Z4K9g)bmc;nmDAH&|W-iwW>hU!oSItU6t}OXpeg7MI3$lTTXCd4V4>| z+*?;njTd|w`7YUS$xsf!an2_Ldgt8Gz%-aFjF%o_qBMgZ7$P2!PibeA?V|m^aOGnD zEZiBX%qADB<1+^E3yVj0vzEQ*dCF|OXizd3DhNqv1^%`2mPDdTt8$-!jspkTfq|h@4P}1aZ1FmL1YDh| z)rLHm>xNlNQ-SRXs2U>=irVmYqMQxau>2RH?%|ljCieak|1fEao|1gu z2j)X}v2v`RR`sA${Z>v>u`YmHRr+pNX6S$9vAj}`t$&MM+gyab%?%8 zm1$Qbw2Ru$rq%Te+q?j9O@BFjMDT(A#1=dYvEmZi5KH z7Ab+m7u!6TD4ol&o&vlE7TY5LjhNyx-L5C6dSh6;$MQ4AtQcaDSy@fp=v(nNkXCkK@kY4 zN18TLvKK@^f()%L6MZb~&BeWoj!N`bih*;yonjCzZN}N5UDnGDEIprPZ%>Oc#hery z3(aSNUZWPe-6H{S9oG_Axr*+L;OaGin^YIxX@jXf2-!(wfM{mgsaINUdx;W3oSl*V z=c9aD0uKVopcMn0=_uik(B&5KfQdkYw>^ifIRe82<7Q5k8^)rZ2pM7`PRro-b&tx0 ze}47s1$y=*FQkfDE$Xr7(q+rF`XA(Uw?X^Go;B^MEIsa3yMf~tA%%kcT$e)J{o2qSIfrE3_2xbl@v zTC>U?+sCezP#%ZzwsF=AvYPGd?0;6?o!3ZTB!bPiRC)V7Qn5@dw1)_OcM@^8j@kkE zes_e#2!(QZ5p=S8t`DL`BRlFE_4y}ci&c{Jz_Z%Lz`>LDypust<~ixHVr7^!9f}%( zkmTJ$D~VY7xbK{O7cZ%-hMKz;kd&g%{KxB=8r(AEpe-P8OAcL3E~X&UiGZ)Xf1)w= zbLDGt<=*I5^M~G5K;n8k=_^qNdvI{99h!w@EEVs}$HmSLgVPf-A^iSnrbrK^Wp}~M zz^_w;of}(E6T2`xA@8IV5c+I?@>!aM$?y$ThD}p6ng(Z0ES+?punms*(#8ngy1A=N|D$c7h-JRZ5rI-E&Co>{X zTMO6mQ2Q1e;PfPX*PHD`pUqHQv zu^dxsS`!zGk_=yv!giQwcqX>`1t&(J-u4nPqL%J|tvT$R3MZK6@7JhiiE2jKcIyvu z^5MB!{5rt@AK#T1WK}1OB@)2-bDz0Co6~ciEWmuES_X0aVr^RMVH)aZXLnZeN#Bv~ 
zUur`6Kxpld)!NsdgtJ(Bm*DJBzsva_$f7gGjPb85^AWhC+F;9pS&9|ch1c%|KRThe zgmAQHaJB3;Zngv19au0opSr|ylj5AXu%Sz$Itk$%PleOewD&SsvojYgYkr2ustneA zDZ27(GVaG|bCIQV%>AVT>aEWt*7Y;#3Zk9=qnXvM&8Ac@C)-d$>?UQk94{^jvz|H z4H~X;AGwYCV5CKdvAftfeg=;?TXK&~GRvR?PXr!E>@6zZ$&#rx#N~gwCI1WJ^TFd6 z6-pH-iK?lB8TEY=!qnt1TyrxQr*Z`~uofU*1m^xA5+)GbVW=1`Np>`_!N@!JYef{t zFA)vchpYo7kT)b+2Rp)}I~@5Nh@o#O)2 zat(dD%P$StFVf6r2vccJ!(*n5w8!Bce^gabVR2i>a(yjWr(1y4j=}UGC`bL=KkR9J z!=x1tM-~;xkMAt78Eo2WFqcB)*XzW0bIX8pG3$ zdn_5tSPq))2PhR}W#r!&RaE@GG+$ zxOpa@u+L;??@rd_;Uo@c4LBIU&qv^X?@a5Z*usZnVL81>d}?kgJ*3EU z^CAis%}i4lEAg!#KSlZm)E!XbO#VakEF5c$YT=i=#U=AY{@<#e%$Tj1e0wNLaR~8i z)s(SQV?9dM3;dOHE*%K_=RO29-xBHBnN1?*ZXS|iHz?*B2Zwfrd(ITSIUnm9xNnWW z4Uh?vJ-(1?zd;aSJE|r#fYQ$Cp(=BvnQe!bzy-7rWyQgpy$Ov@w&#h6&>l16t&NYA zY-4+idoKNMOpje_*ew68d8g^Y1sgICXZvhrobN%6H8C@P;@lMyX#%$?E^1!J9IBIS z1X}OwwfL)cl9>L>m7fla9kOpEdkS*c*mE|9Ja3%Mxu!KBg>WSpzdhI>=?k%ln)3cl zn&;mWC^UY_Ofe+xsGb{WZX#i^#`~i#XKNFcE?tSuSy9WP{h>3(BycT`GopY@{R}ZScr(Hq2kRkbO8J!=1 z0v+I=B;QtKLVSaq%lwO$M;^U82pVb4lkrOml3C!v0?~>m` z(q*DN2-w0?gXprg?dO&$=TIb{%`GU7LUuheU3qsiMmDT)r9*>ZYY($z8cx0p46Nu~h{Fk* z1^R*zU+X)81D&QVlfJp$U-xy9nz+I?g#WT18911}nkIG$a_jW60#>e>bSgxUUYR#_ zn2iL~vAOj}IitJtpja)uQ%3Lo5@#G#vs}%oJ5(5f5-4d|$5d8Cd5y5GeJ?i$A;Bn^ zXFq2b1(BMk5o-ZB;T(Z@!s}u&|`sT{t z`;F+O$xz_C>oRjkvfmSEQ8~e#QCX~!`mh%PaHZR;jX@F2;3s{H^+K)P<3W59xW2I& z|Mn>`_HSJ?NTSl39U-(kpD9`$e_Rq^yUC)^noGia&I%Rl8cjjmkba}c?2u>_Z``=X z63rTXLmUt1J;R!La(Q1v2ANkVe1{K@vGpkP9^*?I7;cTR6Zt}lbYLsevThjz8;(uM zmU+cSFlg+mA1k}iHqVuja7uf_W&9*^84?Y1|IarhU-*wJj-GE+Jm~&a zU7M1<$U$qQpK;!PGBYGLwa4Y>uPJrwgGs%gk3aEUrH~p$JE$~rFt4Yt3>08p*Y2dW z$>yx$CB8c97Md>*n}XzhB04-;%wEVdd{DB zR@qxOMh9umM-w(NWWOoYCGwSRLU2{p#sXcO=pwOF78^Q;x1L89w@+wEP{HClNgGDq zk5N2>oAXgi$C5S8P)d(y{a7!uPVb8q<7joyR{q*xNm<$#ts!py9sMbog# z9TXWrPW*ydASdpMB))%%#~)<1KaWYhgjLsIc8OYr?43MFol%cj5ME7 z9#@+adqT4hdmW@r(}jBb@Mdf+LC9t{suJr`=ZO>F4rROzdZ!}X{MK(ABt@Jr>VMPn zt(EJ|A$Kw9TWTT!_+!qHOT6isB_OWpb#o!dL;zKA`Z}j`8e=o_ zQf)3X(2vfuICtkW~ zjCNIY7fqj1jfPk

V~Af&-evRwKNY2z{yg*ZiUcZjUNG^kaV7!%l%A1Xw}-5V?%7lJR>pSPFM4Ezjyb zG=VZAyBeX^?FN!B^fN-%Gt0~=IliW!hC$XcDTiM+Iy0jv@Zy=iP7}ywR-UAni>r?D>{4_o5$d4 zLQ(i!H}IBIw?=RH z^B976;HcCua@YIkXP@WJVVH2B>!5QP9tO)hF(&tO)&^nc8k2_yjDQNrj0r5zHN)wAM5_o*N(7$d**ahwX6WRC zEIBr12HUq{6KQ;HKI2R-U4wP`*sFCQf9<7E~Nd)JkUc=#8VHygL>gWT)T8{itdzs8hH{{O*QpZ=nUK zS~ahf+z{qrZDfd1PR%$g4wOMnD^xnJ1RfPvG=C{=QXWtSrFg=|^&=?6Ql^PbIA@J( zHfLe(z=9z|W4g-@@iG7j{A?zqQlXYN|1g_FO*jHdxA~VWB=Ma_QyKE{T2MaPWn6mC zHtq5?r9ueOY{bSr9C5Cvvp*w0iqhMtlQY8BSB=N6==_Y_a?)-$FeqF-jqWbVtK-FS zezDGZu9IR@)9yR1PpYEEI!3rpi#?TqIW(o8E?cOsUihLMFDMhE`dUc)HFFr~yl7%w zAAgdfjRouRg(Yc6t~boT_eCw6x-R4g93YsiF3Etb298qCr^|TnNz*<3k0MJ)j;2kc zxHD0+uyxHmG%7xharZP$Nz0hi1!$`-_UG2?q1AqzB%|Lyx$|jPR5-!t>lEAi+kN1R zsEB`S_Kyo!=%?EDtl2o5egD<3KO~*9Q0Bg1JP(T5EE#aLl zc`CfE84v5ft*mfpOU>(pD4+x#k>U`3A;;FYaMl~Ts|btZ68nFu;bJJ6%T;p7-fQI>|fx?*FSl1*7^2lL|(S=%VsP8 zGwg4WFv?{0*s;9E+@EoCJHnUmLxoDxz=uOPrmAXboA;+-SV+ZrXJ-Lei^aOOBYv?B zKbtr+ns!0x%o0UBV>h*AX=P(tjeQF3`Ivhzek|Y|ma3woV>d%G9nC>cI$sOP^3Hqi zm?pvINmOgludsIziW1ApawVZ)sF^}PZRFd%bTvUVLkQNgNIk})oc0y5L$9n~{5N10 zmqkbp=t!x6=+e>P`}BC<&&jID1~51m$ZrpSp{h+i(!P%3mAdu^gJ=0#XyB^ReDej_ zpcvXCj4d7oYoQ}lGpQ#`=feY8Rf|H6^=x(@i73;IDJK%g`J2uM$-(&zK9_Ps!i>Lg z(G@%0%F(Zaz@37wzP!ebV6KyAO#pbHrqveDKhLGYpBt`bjZtI!!LUSwO!_Hlg257O zj2`Ktjx*}JmvYOsj7!vFvAjsM` zF@voW!PCW8`~2xn`lThzY2cz&%lAK-ta1S13Awd14Vt;!62>p!`+7GY5c$ zElA%)$mjg|o#KNM|L_WK%p1$wk=lPCyp3rYCW~zYzTJj96O|Xu zS~#7VFeOEbj$c)-vyh?=(*I7}JjD8>v*26$Y6jVo>;@$}+2+glWiK1&Fvzzw z`4_Dl`1CGbPlV9=szr%q9%0|wQmxqX^Jd1T)v%{0#pl9D@9Cco@5s=O`sKh9io{Aj zU{Vc`A|4;*bSK;ICVFu#x6Rqcan~hEtGf#*@L6s+7R88zcSzgXC2j5y7}D zrngKC7SytT$NELW)ei)_BUimybw6dSS{F{}5#FW54`^YL+&C7b$jcEDh7h2uV5y4hKF zVDozHc|eU?Wv9y!iLELVJploE5kliW#jN(WG6TGZ?tq!V3!o^)nJN&_$7 zav8A|&!b~Y3Wz<+T>h@;l0eQpr=YI8!co-zJ)15`3JNAIb(c6V4{KLDnDtnc9Cy3* z7wr9C<(*Ym9D%m2A-IR&PJloN?(Q0bd(g(+p@HBK92yVq?(Wt&1P{=-yF+kkIBYrl z-ZA$52lu(&YE&(oYko`IVv5jJqWJ6Frypvc1=pubb8LX;a+w|Zdx_&qb`lMlmx0VG zx}Pto7f_PU@}5MeiD2B9wK(LPc)75mUrbAu$DXtp60_ar>%WD~=1z~b-?8s%{Xru% zCX*gAKgrcC&5+kAxI_o)1xdBilabM7FOpeA@QT!FOB^h5Z=|>wlh=p>(C#3{hz%12 zj)RfR-JI_ge$k$daHLpBT2#WY=6G8v8%_c|?*5FIxZ>1oP1KbZa~$gcOnA0Y{CD8- zM=*cDpIbOS880j*30;v#bsgWT_h$*tK^}5YH0TzWMJi=?m{Q`a0UkW|K}45PyTvw4yB&EupcL z0IUF4>D4ojC24~myI2$H4b8!lgATsT-*X^90){_Yk<7T3aRhKU%QG1j7gpzvD1~fN z!93@%@s$X&;^xJtWtTwRf;?@i^C<&6n}?`}wDEhqS)1a>UW_jXQYlvnZ2q}NPYjcl z4Z4G^>xw0J^XMR}%^utwzu-OP(wK1{iReenFD6QPy3H#7f&1|ySD%{9vtg*ZYFhE8 z?)ve098S@8gT#pa`LkBaOua=(M;C9?wZ87#4yhc(GS{SL9+IesM}by%rZ3~Tsq{RQ z`6wDYZ=>eJx4G_VH(~;K9v~P0*jHrP^Iw-}!Et_UmvdK6FoG&F&N~Ql5X-@%Jr$h7d*4Ka zk<0#7Jbp-2CRqA&j}S4V9x6L_vF!WK{WAp~AaA0)YPKq>_ctjCNH-Ti_k+qqc3BJ_j3t zd6-)ix;XAS|zQPL=C27_>Z&-AWBn!>{0@&%N|Op4Kz(z>VeB4 z8ff?5xz2Nl_hoGjc?#grv;DCf0$}yMh^H^$(KVCo6RR8yYp8J@VUO1dYgl-74s&PZ zkkkDO*;PqSsxywFBwZY#vuv?6@$qL^*9E5ZJ>#`)fTsH+dIGeE6g50YV?v^pl=>D3<9r!&8$P5Ht#T?7tTt7$axIz}d0 zR1xa_K1L72exx%v{QBMq+YwB)r&vGq5yNJRcSkSF16FG>1ZL7s8AT7u;gpyzr5r(u zP;wJ-YB&HUUf~lq@@iG9I5*Vs8+ips+DYoL*7d#OEghWD9la`|HIBeNlJS4I5HU!f z;Ai=#oms@uzgm7>%C{@&es6><)`I56dy?{x?|8KXdY6ZAsn+v72z$_w%-hN*pwC*k z%j`=3n1OzM7#e-VLgfE2s2m9-^2mlA$3ye^@oQsxsJHn?zBb_EtmVP~c3$U;WUcCH z9nrIL)7Rgh2I`c&V`GetS{PbuKr{`Ou+iK0Jz->Gw?n2@o`DY})<%59N;rQH%w}hJ zX5r6!@GyR(1Z7s=s@x2k0^Ck}B|QdZ3P|>rLa1!aTcj)ufxN!%?bSQ&XSkt8lV_C9 z^8+8yIw~4hmWEn=5Yg-iEH=d5Bz!PBs+C_t2>0vysOn_Htbvz4@#1jCV%W(&=^te@W zCv7rUTGV`NXT3`A*<3g|BN3S~&Rl)z6mmxaj{B&pWn*lgdQxyYcyZBYk7Rxq*x<{} zuW}EX?O@_0nra!Hl^~eoKEXXHbhzMl=UP4w(Dnc5ic~K^p5pHbt;`>Z?R4N6ZyweF z8Vq#EVEG@>#7x%{kpJClfu#--!Pz=gBD4LYs%TI>jKOOd>180yyxbLCrF$_!xeL=0 
zu&Ux-rqc!uM*n@H+A;KwcJq+=kG?P~xYr?0c^;W5-tGFK4ee8KOq_yH^s4bwF=j6e zLvmq!aYE(;5Gsyw&6<1Uxp7KC@^c(RK3b7rPllBO3-@m6m#zEPRvbQ?rFyCYTZ`j( z3;#aCQLZRd$Rt;D`?mkY72i>%$YA~^q1$}qkY(|nZFKd*ssks+3#Qb2fyUGUzv4@v z-aWs)#2T$HozG~?-oH#$1V}B=!)kFKIOwi<;yLa5%aP~96~-*j>Z9Vs=)3iaZ2Q%~ z+t-Smj}J@NLT}4|xD4^<^j((EmBq9|NtfuM^DXaG->rKfc6+;>)ZBFH090cS0@4m; zag+EDzrpuXzmVJII{W@*MkR_kLqd&_3J3O-)alX~7Ca}G`x$;rBaa3^peXfLinTbb#>{Ay85T*U4nq|; zHD+2DR=Gbvu8X01ays`I_?kMcs5Idy^kBPErp=1Y?%YO;V;0OgRi=6}U7LCR9zeXg z$&lX`GX!soI5=3MlXQmFrIZ`JxD!!xV$hRPGf7-9I0Z;3zsK*IPK&*}Hb0OTow_tQ zIAiVky}6LI#F4ZT_LCq5gy}OrQRC~Wc*Nv;OhSxB*WYoKaId#p((C9DUw(9nxF{uF z&lV%TS2o=VJJ(A*C4_tf&6m1T?+AJWgs3Vo6ur;Qg)IzW5$a_faj5l2LS`*v(A`b* z&I|F8j)d(8wYscnouHe8V89kfUeg+ltOKXgsA5rV5?dXxHbJFJQo8>9F2nYT7v|>O zDt_Q|?P`Lb{%z)W-GId+QecNE^)89#C-{>s{ovowC@#`vZ{DGaM3!;N%1r`D9RK~a ze)LauH?Qh@O7W}nJQDoKq0kUx%=s;(GCph3;sj~@kf9L0NPyT{pPic5|gOEAc z%tBi0|2w=5TQm=A2?~{MeTb-F5ezb%?m;b+tvJ$SwK=(rMCv?%OYg&g((%EHL{y&GzPbl}o3#LFgOvMvhF2PTu**s+ zfHmSauC(6owczk6Fe;7{+rEHwKRE?Lrhe)~{9Lz$vEE?kHmMTwhGiHaq|W}M&lGv2 zwiD2VmZnU1Jjh-~-eL9Lot_^+smp1iJINHf8hE#7TJO+Oe2{+>4Skn2=UtbpW;pNZ{p+?4 z%hzbG!?eqYb8aMIbD@vEP9M4@#JT(mbVhk=nXJ(ESYY>m?W7xaraC z8hue^|0cdzluj(ebLH(do*H1k*{ji8P(J>nPcMX%!;{;Xj_L~6c2&Z(W3g|fpXZG9 zBo*h!HsbgDuj4=L7sZ>uFs;z1&aqvgb+<2t@Gl@-BR^S6j~R3EyAHB*(@qHwaqAF< zVlhe!R8}=>i};##hA~I_BaDNEczcmdh#{wwdkK~iahpX}nb%SoN?Q^=+~T82Q)RW_ zVaPhtxEPLFhk7P0#t%$S-f)sfb07!GdMz^vd}8JxY8Sw>w@EvBk;~>p!zHOc7xvqA z`XmyGxczqYvwS*ZEYcl`olTA+h0w(^UQE~HweB8X=RZ+YFF|DYm%w(l?e?l}fNy8O zU#b1}rl}X}J=(6(=h+>gp<(<(k;~o8T}C&3R46Gn)a?nAB?U$rUz_Oj?k<|>tul*p z7V7i-wROT-r5+)&YlFbHd%R3T%WwM8kl$AK%}t;7v)X**3bMbbitYY+u5_$Hsq&j2 zMHKsH=J?V50HALlk9_3XyPqRKkR>Z4G2}PpikRS`h>`9W$foWj!&)I+G#f+i*{uij4C6R=aUaf+ z{@7B|-OD|b7Xr{1mU*56mGF z{Soq^enJj5&6cAloO1eZDeOZ!-rHC{rcLYC_4^F#4Axt|hN{1UVl2`XE`T59?ELGw`>lPatI(|2eX|xK)o>qaik)1LO?FoE>J~4JW1%UQXZBKp!5f$CbtG}D~ z=v-?bBdrUjsUMTuMHZfU0#@^eDEv&i;}}$(9^qyUV?#)uG~%M zQZLB{AnaV5@_)qFvzL%2zv}WH-TsDlQa*!xLZpBG)5|ELJfX#a+{$?cbsswqp8vzR zrjS2*iducTs&PTvlr^eDfuh=^IO&8Vwn2rcm!q8iF)mOl&@Sx9GsTc=I%JJ%OoqyB ztt~i9*+feQvq1=bDT34m;k-|}0Y{T1KGBz!RFjVUdR8khmB_hj%r{xxgK0j@7Rx&D zdVavTP@F7P8GA7gwom{;W(!#)pq<9L>mbDANi>`VV3@9NqA3D*uFUXOE#6MzAn>4tzQwyn3Cr6Fp>jK$JcU_@P6s+Deus#v`yjX zVnkDTq~e%?oHnMXW;U~GHuXr(7ulSmkM#%)y$SsO29kH@H5GG!LZhwNAL293{m=YA zZTpS{+ZsVLov8|NT!3tEl!fiHNnzZ^P16{SHc1_9@%c9bjaNb{@y*~CH=nbh$zmQJ z4-IDuE>F378_DWl-EMYYaNsX>_>?ap-#Zn9wwij1*dYh;lfy`i%MCT+`Po^-*)=KT zz@VWot8zk0$O>=b@jZv#2coeb$JHR0+h$=xGXHgPi>pA@7hCJ-|AiR&F(dso8(>O* zdQrNw_fxr#9DM!8?p^KId9&Z%*;Vk@Y}_3}-~o?T=J;}7zGygeesUv=ZQ8Zs@*-;R z#gkmQ(D(DkG&lyy(o+utgF9%V4G`{&bWj3>COQYvLomvf{~&QDKG&)S==6AScx`~1 z0^T=;rN0E|XhSz;S-E4lQ&+mNt3;`E80|1EocmT=t-|(7JY*nKgzDRvq!Ub`Zv45^ z_}qTIBVD!zRl=ycE5vE+%M1gqoKwW*D&Nxbq*iL;U>p&HwoRxml9p*|oN#-&I1LfG zJO};;=4kuflJ1d4B~ht9PNFlqGBWAp2NGtuCK=T!^lo^Thd5&ma;3B8gCw2z7vvAH z_33Q~8Gli^JH{QU9_GQZaV&nXU?R+jQPj+&Z|Srz3Iy>>gNW+$e>%o0TG#nLNvtlC zt8l+!IiBXhk;CEVB_?11J>Kl~p+l0=U&brm2vFhrVzrIV?hZX|$l`vGMN6L)5l2tB#pr;k))ff>h3e{|Qkt0ukK-i1ashyB^|`RV>=j0F z?tNOZnLFqyFta{|Sg#$v2K10TpcvW9gkwIUz&?-ZLBMNwt4h&h>KXiH-M&0!QEm!~ zkbzL%xn-RHii7WbSzsAt0%;gt1fbq2tUaB~?Mr+>be}V|29Fku!hTnU;C}>Tilw6hVKq|Yzu7C)ImWYl7@3IDpyHVPYy(1 z+hXxFTED&)0rRxW6fI?R&9{>=p19q$VM!T5Oy7v$X!cxd+_#D9 zTf%d{XS1gRlx#bY1%urUK0JWfP21nmbmOfM>*Z>+<1Gq3{0Lt0lF*vJNP?vH*(E0( z)bD5_c>X$YuW$yZm2lp>%1cBhU?|t(>rLl+a|EM z-Xf4Ic^z+==2#1i_*9v2=u9jHW|ot=TXgT!G+;D8(ZMkJC-M+|3|>A=jIaxuI_9+*0sW#?`*lD)ZC9cr; zLme9zdUAQ179#HteCY+_B@cqLY#u_AETV?z;C}vw-h^tI;R;4IN8vj%B9H!S?ZdP) z<#BrLRt8?58LNJ+%smm_3oBEr!Q2)#GWpd}^SV*QuhZ?O_-;=Q41fEyf~lQk6p~-9 
z5*Fp=d>mK=H+tmZxYYUp)48#K(q4*|Ij)kn=!ji`i=u3`92#dNd$m&v-Ehgsf0Q(? zPDQ`-%L(wao7yU@21-!Yu(jD*N|YXBjwJ`DqM59cQ)a%dJ!q_3qiPa!vm@ zjA17!&s1?+!NGD2YBPV-m=to!PnS79G0!pe0Gk)uVaUT17`0*LeGeMnlWtsG^Hu5> za2=^^WY{;X`OeHzO3!mQ_hVp*BcQWl`Ge8ww}Ca!3O6#??PZ?NX2-}s`~w88LKAvS zN;3{P@c$Yt{JDU`KfP#GZ|t(Es$$gYiz*$z^efB!jYlfPE_Kb+r-Nq4Ye?QqWJ z%$%glLi8SHz4UeH${oOAK=yX=X<%eaucbK4%qCT=`{F$zGou<) zz6g?la2Tq4mu$EZakeou!&Y#*$e!tt&AojTn2YdB=+sAA{c)UG%~YKC>B54(+wl|1 z(*Bz?ji#0BA4%LN>(M+Ut|vADNbuibeFpK_sHy{P8=`@?NEd8NHa3m^iw5k^Mdq=X zr?`xYcqY~xE{qQFcqBD({6t!Gx%=rf4m36MY2k>O(Q0p$Fzp0Xj1gSq6FFZ-1|8XVy+uOtoE-kcxwzHu5~HbHifeTrJcd|95p^{|Afp&lhe|(rAsdX1U4-0=l;R+>G z^ge2Ds3H$VJIdFipg1wc(gi;$Zy;Oom*PGWr*X5NVlT_L`$`GAamSTBltz2xq)=eX z*Nr4E7&yu$SL;MuI_oomqpg8mb48-v{G@gN=uec+g&*H|HgG=BR)pmbBX~}8j6aXv zh_}zd1@51V#g?hjDa}C>W$1q4E6#k`h*>?Fb_i9qjWm-?ny(o6WWq__!E&W$$0v|e zZrFJ_3>A=~1mdere0sc*IosHd+Q%V(xIN#?{EO*avXl(dE<+1E_)%~MvFQ02BvbW; z6#U7QVRy~0J2I8mEz&B;b4Un)0JQ3JGDTD*=Aux)i@|F&2xnn62!FQEZGfKw=c>uE z7`S`?fC-Z3K*cM!;o*!Tx-+PM+-)Hl<*NmMANy2K8&m~DV0n&&*BOI@PMQ$2#b8B@#NGF*D zlzsXTyKR=#6o^Y$gZCUV;Jo100yGo}XZFqAn7`yuCko_D+0&e9hWNk20%cw}s^{uw6xKR#dxvc!)3&Akw;dyLa1H2d@FBW0pOUaMd^zG2c|Zv$Lx$Y@Ga zyNh%r=i5Z-w87-Fyr;lt>_?z&F;(N-6$$F(#*9+%*rmCz%9YvsJ5ZL(fVIS?uzq|i zKoQ40b6^FQ(0OOa3FLtGdk^nzWclCYImy+wagH4As;3 zkqQ~z5!~cQInFN=x{e2~DR+&3TtsVxvx~c~Ve59g`VCCpb%Cnm{`2E9BE8&@Zj^#! z;C;zn4>v_cg`@(ihVqmqLAA1zN+B*=m&wJgF*}H#`RR`y+9N!!L`(&JdyCNpn|!^A zU0T-012s?){|>*Dt!SiavMed@840;{M*w;0N}CMOM=#A=y~Rp7kk4omFHr0%*!J*w zZ{l(1$%E-=BGEX!g0qQEnrO4ZlC7#EuA^fr#?RCXbA{^DU$iuqxkOal9i1Bx)j1!t zh-rUPryUsVN7cBHr+6N_&ux)v*Bwl~C%FmWV~{9Fsjb>=O0!^%B9#CVbKMVT7&3r+ zTiLYuJufJi*{QKFd`tC)uefWKq$RMHC^KDr0JlnHv<0d#@3clY=xkq2Yd#vgYmeN7 zX*y~|Oc*PwZBH&d;{;ot=2foWbQ>LaW{TTs6XbwYg(DzTd6lU=-SG#RM1c#lv%fBL zjnzn)nyoY4a^Z1T9jddi-MB5lrH~DfM_XVB#GBs)&pM0M1eBhIwe7lfAJ(`{vQT-h zJr$r`o7!y!@|c=X{w^HJ>=E!#_tYTU$4)$WeaL!MI0L5meia(cgbZFM=KpQ=6>0%wigx7=nvF zWM&P-Tb{q zlH^yQ9jU~JtAgcn>M-8i<(IfnX+qzXd?0Z-B`rQSDfVB)Dynj8Zq$X0?3ah=BcpIT zQ0WkH09}vE_~SD^+Cd4=AHYpE`V|&t6$?X%Ta~bHr=7ik-Ory}hWSF5@+VLFC51W% zeaLu~4~k@(P`w(9T+IGJ zJG@GQ`Y%_a5*B|Z`#-HnTa@zpeZm)+#rxYVHxd=LEmvxYtP#{|KaT^UyY!S7hyx!ZaZ&#F#1}-KeZ9^ zWa?pJE)ZT;f>0$%?vXFe?m7?xvy&htZcyUHx57PEClf;8SYrm9xBZCwreG5>1gn-8 z53J9hu6{P;VJ&@mJmO}_l|gb+Z+F>jrK0PDWW|{VJsA^(mwy-`n>9 zV$!zWe?4ns+@0(rNwP(NrqeNbdB>4LVdc?^!qH89vGFqE@(wC&HahnkOy?R~hU079 z{LYvALG;wn5_LN*31L(?P-=kU6QM}maDxMY8j-O5^9xSP7<(;8jUw;~t}%JU-UN5x zv1Yp(v>B&-JL~{P2!WE>_9&c^g5zEyPAR^32y3`N6>nfyEYB@7FqsNFIQGX$;E$v3 zLS`Oe?5#}(Y@LfAkTHw6%b<_HPyA$N93w$5Ed9@GfYP^Bn#sJRb>;v9Wb>fzOGJft zLG|?_5@OVebuN!hazQXI2o*DgFH10V(iesw+j*y6!7+G#{d@Lh|JEg4V51K45{G?ffJq7oAmckcgx#2z(_EVb+ zZcj+F$gi3Y06}3auC_7TqjW9G-4-D114}hsW}8vZW{3JrXWK}Z;UJ%KCwZ@A_rnnc zPX1i|$19V|fh*bOsP;Sl{71a}PNktYt+v#!CuG^$k8$Bq62(h#9Z#!ZT6OfCOk}oQ zCjZr9P4S`_9IX4_XGUIJ_&40q3og9|VqU%J&gps|3H&JxIj;@++Y5T0EiXrH+=G45cvPRnc_!) 
zbksQC?fR^fU_kreNqv4M5;!pGz}SI)Y3v|hqF3G35&tFBDJz*=UH&p*Ns0FxW@@ysf+ZpSzedag1 z^t%@yJxP3h<0Y%DeRQ+W*Jr&Ma*I&EyLXIUK3A8zd{+Z%zIO!HLy%#w=0VlPyUXol z?|qa%(m(&)6XuzJuHYX&vT>g2QrAcCeB=x%fODmRrv!E!0FeScp|bODw6ef{kwE-D zmZ3+6-gWuB?`+>t*^0W&0UPt|H40=l_1bP?!8h?u7L!?c1eax1zKL(#wOcz<9AANL z`!=T_Z)oz5%IU3?C3u=rr&uQQUOc<|;HTFe{bV|Jq(S2OPJ?5 zx-=Kxu9S|qKK@Io%u=tFW^!H*?;~^9t;oAQ6kU)c2%XjHY^uq+$5A%_4Rt~oiTMl} zmCnit5=nW9~c zr;Hx+-Jszi!_uG)w*zKvZ67PJUjqz$a9FZaUHuwXh zBcW~y%TK!n7AElVtAJ?M7+yZ(G;{3HY>x}c9D0NR`~sTY@Mi;VGPId^^Xvya1UnmS zy3{>K^L$Xm!k-ZV3SvWF#5QseZL3?cSa85r>@p~|>Wv#yIrWT!t6Y>jn{Tr|-(8t$ zUs6Wo5op8ER_RkaKD&^Of62{^PN!bBWBBfxZv!2xiq&`c~+o9roe4P9T z@)b6_+7_avIVuH^jpDy{=!}?bTFz+Xvc_owj6Dp3>i=Tin-1+fi1%H(*|ORRcU3Csg?!P0PmRFu1Qm_uWyHq7n01ziw}K5`D~MA(5yr z)}_B#Q{yFjBtc2C$8RDp%X(|h+Tn;K2z7ouk~eXlIs?hwPGI}bRn2oywNmmdV$wr#rrFWHi zH*Mht))QPULr!S|q?S5zb#TY-dsIB{bRM-LBus{4Ymp!yKXzAQT*Pa54D~>q0F>fV zJDK_(5`^X>#rb$X*57}f7nrN#Wfjn2wi&sJV$;_eXc!khRLj+!m8cCCj1QFAROWr@ z7+7(;UCB@Nw$`}setXkzit@x^t8tNtuePyyq9CoIYs5= zx}dLb4tt#hrrJ;Ch5OO0d#O-2Cirne-++EwO*K7MVN#izMKxYx*cGouE4jJNe(Y!A zm5uKnN)o+NuM)swF&NJVs&1nEul(PgujdKzmn$}L!?_y}o+pcvQY-$myi?5wF!pzxw z+8I_nK&@0n+CAS%(gw+qc$S-5Qj3v+k5um`qcA})&I+AZ3o7f{R}NAZ;=79*R_?1z zXqAk-x<}xxsB0K5rmy_O6GIV&*W}A(AiRRy($MXid6xl|zgn4rZ>qBPb#wH!@?;;R zl+tZW8?qd=1PMuYfM!`8amc3z7I9zZ=kI#g&HnMHt{ojBZmHZZ@No{D^cUrcD=G04 z@BLu|YJ#fHr!LL6x#lE;MWJ%Hg9C%*(kt<^C*`PO+YHixbeCzB9HAnhw^;Zu*U^R2`%3D7>0=ztN& z7i8CY<6^iH{37-bL~~lZ=8b0{)cmN`XZy@l9UL-OgL7fEiDZ_I)<1c#bar#So5`XJ z&R2R0+}NgOtx^u&H<2!9lg#YuKOOGe&6DuuKGFDHP)Q$TN^;pyzoSBrf26F98b6Yk zFt*7Wb~+II-mO)q+pEn%qa2*e5yIl9)iy<2Zqy$e#WgDmqIgL2B0*Qr-s(VpB6IKY zM=W$hJ`n?lco=C{#4MzmU)_PM{rEakMv&c&@s(`L@E$3S$A@wh6db=-v+4<@Q~D3J z0sa%w?WV`78dc44SbcVqbUKYf680CU#t$bfU%k&pQdocCS-{<|8`MG9F z`~0Ie5aPC!oLcnLwJeiOx#MspVr7eBGoSSlEQn)ul#9p|Lcz1+0Ae&(|P~@vsz=H^?Bg9q6dj@V7uv<{&Qe z%i%J9z3qLPi-S`TI$tSPUZnEl>u|a@BY76*MnNCdVlm}|S9BBFpHh4-R(*dhnnSvP zMMlcf5ggxVX=cJGalg9Fr>!=H@TV<$Ny3p4VUhvGf_r8;51V`Z zhM9^uX7dGm25ye+1c-B=zS0x>UU~qz(T6m9zpfFiYJz>>6f7xlQXCxWCSRSj=Jg>> zdN$%etTuQ%9y7uPCw61GS^g6rMKX=H5b-_>lW4JuHA)F z6I!bv5$!sJ8@MH7zu5JR6AfhufAcFk4&E=H^=Zl5t31>J?OFDAzCuG&*bY-6Z(J~- zLBE41H29&{_q}`j*K>YW`T*g-AE-3Oyb%ujH1yaG&8UWc6U?z7vk9S3F5&WlnC{A^ zwLXG32;B3Vh!h!IIac(1f#PcP6tVlb<0hLncpNBaN}oEvH~OM5Xif)E1M3a}Y|U;S zOz4-0C4H{vuU&{f(e621TCojLc>#Q*G7?A(DnCl@pEXba3kVrFIfB>Mj0h1G`^IZd zxCWIrm>b>l3INPF6O1G&cTM({3Wt>|_}ne4a_VG>R?lmO8QF@jvdyfqnW5#I5;_|Q z`i?eCf)JN|U|IgO(hWxc=e2rsLt&K?974}ZuBDeMVIz-9O)xSe@{(U_*VeU}|b zVM(AE(xowcTrC?qImF9pA7of6GR{uwityFlHs1YykizXAow$GW^SW^7j9=vb@O}-4 zd=QRXyYBe714XbsGz+k~+Y2l3Owl+I)MekVIpfqTTH1jR2166GDCQtF3!Y5Wg|{nN z2UCsPu4zU}@Q|SH3VjB?rW%Q&<@+_WFRk_?S8pTeIM@vhxhTI1p>| z!zwW0@28(sgOzxgd7m}5vf$0|3QNY4k@TGvbxsaU^D_1jtpCvAZ)vpPD!22)T znT({Ffj7bbeafV$Z8GddXMz-lOexc7qNtt8o1md(nZ$OchE@!}lJIC*AJJ%)43GkF zqTh0E6eJy9SM|?v(UcAn5hh&iazk4NBuVPCBdfZCf0sR<7_a+HBg)`H@G9)|tQ6a% z^Sl1)W+i#6=KcPE^OO@@9?NjQZvV*<1^Dmu0qQ-B9}BpmBJ~=C?CMQSJNx=)yfNd7 zX<}o&hRw>=xa}Vw#Gq_s6wWeSQ zXia|IH}u|NsG1;O7?-B#9l?Auud{r$we%ivfu|V10Fi7-{yKukuoweV!%o&5Zdop0 zA$~ep>@S`o{*uODn8qprM>9mXd(I_u_IVhnJ#2@6669Um7w`5+E6|_C-It4B(@cJJ zDz947B&-43LGmgMT1xSH?F|X+?2Ad*i~hDe>DhT8s7jw4;a6LlFE~Yq~CVAx6q#)6Zmseq%2rSIr_LaNuOgLPxxI zk)VR%#WT{kih6UNwVc>H-`Bc^$rj~vnt0weXWSg?)!G$U?DHG^#QoxWlSCafycO1k zW$i1*sK*mjX-w$%@l+Vquzjky({L?tpPl`NinB`v%?_Z3Ntlz6o|)xFenW=rB9FVHhpU%>;#g4 zWvZO_g$jWq-y_CQ2Zs@0+Xk4dnH7z{TlEHczDg%`cHdd-ux>IuF95pQY0@H5|C||! 
zK&aRpB*o2wi*>C2IA&dD7On?pxIFS#%x$^d6LKIuesrfo;#?Obe*osDjU#T+?dzM_m#qyLDCM5j>Jk|4TdPOrD>K z&+&;o{I|k&oDy`MzAUJJ1t1E$y&`-BK63&ZV#$HIBK5Y1Isp~?{#yk_&N*NFn7o7p zjLPDtP>6-a04tgY<__`kR%;#b&06L3gSk=_GV zqT7N_<98&DH@)Ifpi$azXjH->RQfcVCHKwSzU{iWA$nUqb~NU|t`9oW(>1s;g=>9z}t??QS)BuxUkv_MlGB_`o8Zt*aK9iI*15Yd8}sWlY3W91i1P($V6 za_n_T>LW5*DKF`E{+F|R-NhRK_jLsSPq{?IVnHxn#M~1$keh@>mw#&?G0;T~9j5XWx@uAww}L!&&2 z+47c=f!hJ+fUTpKd zrMEF3)j~QA)whFin@EIx(l;tH@yOFN2SoURvj&lmT5gVnUTgZNoZnUf+J0$&D6LD} zb`yhgoHbkOv4i&%49(_Zv2kHI&;t}7WwCrALm9Dv6>;Dj!onyDJ*Z@<0|+NmUu+mI zC*!xkRK6&QTie00jCl$uc8lQT))HZ-gl?&J7_Qp7iINeKX z;#ss*f3?&;VmX}pEE3(M8y=_Cc{K;RZd}2kSZNJ1V%kB}-d3D$Isxt#DM7zMGR#$1 zur>`wa>)r`b-??CS;$F)aFyJXZDu}<8k{GJ`TpyjPw&Eq;@K2nZ!YR~(H%2H8fp}_ zHbtWtU$G@5jsYKCnL9JpKiAjzFedk03$P<3!*_e{IR36%JYT6 zTe2i<_K+Wu51QohU&2SaaRC-gt#6mQZ{-BPu1@|clCUv2RXID zjY5e3WzaeCJh0^lw0C?WeU^dUUmapW)MZjxT9-GC%h^~Sr(qAf;T&GH3`zEAdgNp) zB`)kz+QjKMgcQBAX-3+lxZ8fNXbCdaGi}s=g5d$Kfvg`W5@-{M?ncApHHYiak;!#{f4bWmNGVd0N&dm0kt1hQ2;iNyAg0Wm z{@ikf{g&i1c|gTByLhKoToW>uIuxXu|2;GGxz$M4Vjn$7azC8Z`7_YqfOr0z{le07Oe2eYo1{6^pKIy_&zs2OUccW86%;gDt1+wEX_nKsATy-n!N`dvl$!dQ%JfS z8JG&Ll3cyj`Z36WniI$M>B#6FvxvSYSV(>KD6O0f*l+J9@~vCOK9?0(?TNT?)?K`5 zG#>P3#P6e%;Vs3`s({}(>%E-QbUvjGL~-r@<9+9fOJ;2RjbdI(8t~5hCeszfX>OkZ zE{PH3#2FWy=_CI&&(o+!>!VHDMo1^=^LHo)!fV6775@j~i&eop(}!73t+xG#`KzGa zwz)vAJ}-dL(6!2&2Y(nj^a~rHQ~Se#1rpQvPfXH52aXNND)pG0xh6w@39qg>t^P06vU2o@Rik7 zy28LD8u3rYu9E>Lb)Yv`HqER=sY#qD)r^6URS(`rR7iJtz9TRa7Ox+u$qRH4jIXLz z3XRGLa(e6E+m%NZRpzOX&SzCk;E#s(A0C%4q!5Zcq)VFG2dkUj3w)`KBoEYhi*K*h z%JDXHs^s@bOU@_8oITCnEiyCF0=i~lUyy;uvXgGb60ejVQr|7xd?hV2V^{}mP0!kn zpM8pduZojt&iH~hvmD9Pw!G|=83hvp&C&~QQ=*o*h)$Pro@Pe3R6lKsO?YkS5nMMF zQ@g$Pq)-VSBGgCLW+v#+oM#7$GtS=y+;hF;668;0&zx<8vZ?(4N&JJ2j_@_j7o|``pB(A7_lY-qz zKPjQ-e>!KwibV`hz46!`k!{G(b1kM!Xya1xl^SC z&mY2=y9NsdGgPq$M&mqn@QQ+5uZVoSpjAZok!Y)+7d2j{6jFz1+0?Eq>{=0iRlI8E zG1mPG@y`XZtZs{0%}wT;kG@_{-EPdt3Cv|MiMV-~b;m7(?{eJWb!WUWJqI`G|5tvkq+%OQoJGxGb+ z7=48BNqyfB8K2+Va$!5syZ)dN|=tg&3TKsFbKgbudv+o#Lg5u^*-DY)`So7S)^LF{^9QmcATmmk)ap~z;a1Q#FrN+@K zP?e?Rj?+}1eW-4tN5QJjBogridDdEySrvW5>TgUMN&lpURUmEx^c(F^`Q0yn=&5BJ zq>tC|BX;{~n2S2#c=>MPlszEB58zt9rgmt1^v|1}K`*X^(@FTLNw!IX@4?gdZBE`6 zS!B3vgJfNlrh~8bqk4*lSFsf-J+X53R(recyFv%fhUfhYrXMHo)4j(p(f`P=C>>5t|DCq&eUcXzs*O4uA#xxGwC>Nam@d-+} zS+NLLZ?g*JANT}8Z_i8o3ZoehkpPxMT+HVoL5UYz0c~tm7hbc|)qfzXy9Ii%bQ=Mq z8ofkN!+N6P0;<&99{|A5i0+T+kE{;Yy*BLYnAI#$2S+Ds9E@3ERln;SX7<}9BzCfN zEmj@@e5^r%L&E+I#{%v3SCXkVZEENEr1PinI*d#Ww~N0sH7mxrCEXqV)mJ*3i*%2vx}pZ~7+0HL{Rw8k=Yzx?G? 
zy|h<)un6uZaI0Qu-neezw&NeuHoh@?PU2ttwgC|ZwMN|(F{z{y&o9;{mQU*My3{;(yHTiekEv2H()O7-9^*gtRm<1d*bNSDH zsonx)6FIpVP&~5Xnu`Sj?&j1iKI8r4j*p~K%7h8_b(qz&bf&0hAcQ-{z`+KVTfD>X zQjn>G8$vs5Dp_W?Y_2O(S~{T~1=M%lANnEHp5k*QNYk_8B;QhV-sol$9?7+EH8kpc z^;E71uNm+G(tbF;G>tvD{9Lhv11ul}A!JhN)|z&)?JsHfM9b>73P-v_N{F*DAy5!v zA!F9{Y+rj<>$+l)ac5Gr^u+8$f0H*bJ1RY1?=&5-) z10Vk>UzU?RL^N{5)%2Fh80E#XVLps<&ZWqq7(dRT5gfIT`*S3?hr_0s`x28%4mieS zZ@P8vS^AztvO8wva5D&xJOW&?VE6=4c=}UDHjS^Zo>AGCk;E>xaxk^j*(1JOlo_GK z3>`Qzmj%uoYg2=2LSoXzWn}E&Gw(L#gWo}~#tuyibodWsTfIwTP|De)qexWOY#PS# z1tJGmb4b#swRwv7e7PE*L*9LfiEO=*`)ZoAYemq+mpGiaSn>#v?6No1HqgtXTuR~M zK1}2Er(3BOZe_HplAitEd*j^USB@xb$3mvs24{fk?kQ`CC1zqe7UMLBR(hkG^|6RT#(Qb zwYohKzA){k4vip9*^>nZQ4O%4%O{B+H7ge)$U+jAlXLsjYNQ)>RIrjT2?uGfHOXX}@{OAlmJLTgX2sc%{|DS4J zki+LOmJfDdl(uuNKB<);n4=IrWp5i z_Rv%RH6?|)YU!V>eG9ErGDn$2fA4Ff8}K0CO}Xzw`K0oNMbiJ@+wTC_(>W!Wg~Uh4 z_^fNMEvvIs`2xQtjcn4Dpzicxak+rfv(;_q7_AG6g+(bKC7X#@?Bs;Pcy8Kv^X zQUyXfB3My;(*<18-?MCnxnI5?Bu^4;us~bS=N!%4PD0p(gRIrh%k-K5?rBQ6>$sNQoH#dD5q zapLc;n@<*S`15~u+PQPAX`w~c8_C#vPx#b~N<2V9`i~{B-j$-|9DfGQPZ)dWwBa8T_ey4@S z1HBbW9_2eL1L^$)0fLsQ)Z)Ho6L5)o?oSC-G&+A_iZeqAG-i8H8%c>!_fF2awTnl_4xrq0WIq#kJ-BB(vnj} zrdGX*5$b;j%baJpc=)45qQDp_Lo;QYn}=d-oGp%&DdehY|J`kM>qlOp64JZ7!%kqk z^1U5zmlwcN)uoeO_p{5zNQZMPJ+3Qdi?sCV_rI^Yq8ORj6oaqFg^=e)=VG>!XX@ z$zbZ~YTi&h)e9&-h)xm67Xmr9=co ze-Phx5#{ajvo#d?GF`#@pi5xf-YlazJSaYR#o?jyK2yXgo{%nps@4*oX8+_elQeLT zc*t-b5MAhs8Anf^%XjA3$d3H}?Uw*Totc z!I%579=pn2a_r)(-h+sR-CT&x$pr+`!$lDpn)HFy0wb>*LUgL0419{)BiD|TZK)h}(CcW0_X~MCR-OL@^u-@EPCED*X2)-`M2@znI($x z>j4*VpU3#4DK2$WIZYT+v~HUk*^|`ohP&zNsx_dRh!Yco7CSsSG*Pqv7)It~>DN&? zB+2C}2%ei&+!_m?0XD7;N+DnQZo=&GEj($!+?E#FN{2!~>Rg|JbMO6`Ir}!{i7rwS z`r3TQ^`eAmTZGky37?9r5iK4p>NLMLBXCPz|d!S zVbuCZ1fyvE>uuF-Ma#;~ zk97oKC8qs344-Rs-{&8s_h*L@B0UK*tVz%Fa>z;pq>}ns>cl>y?HdjEm--qvDq%fG z6McHqc7bcdAWMPmfDU1bl95jQMXNyk{gJt8rk!RUPh_gOTQlmHkI+(ze!cyv$)?N0 z7ET_Fh|Lo7*|f_Lf6q2u(v4oe9nU%;K zR{oL1j=z0yH&~MiC2?irCS)e7hY$5^HRf{o-c>bo-rS6_h-5+&AvRdfefum$mHW@+ zp==Iy=NBtI3B(xzRR`ANu1cIRXzrv0e)W9y>!EWv?ae^L2ZdN@=T>44!VHg=C+aymZ~fT^o7Q#!9Nlmi3-&c zZK-8VJ5MShdy#A`I?n|4S7O^1qH#wrlz3M$I!zzcgC$PuFxQSDN!iH{1|91u?0ROI`Zq z{;T`+Q>%6c5O7Th)V41H^+VNOt+46nU6B(wIrP3Yxet;iesE=$7inl`J1w z3TLg{d@MgslBVZu6*QNuuECf&_1=X;mVjmL&Fosk;(mVaC^MA|GT&45-`yumEi7;? zD~*MWX~m)TN_8n0{VByENr9eSr1R(5O~rgabiPfVR+$0Sr2!!nTnNwE7A{<$o){?{ zv@KZO=NE(0kAS0E?ii}#P8(WZr!CnOIi=NQBfBU6rozw5_a`W`hwqGVPqhNVWV2Ua zgr_+~ooSq#7eDBpB=fQ$bc6qsU8SYw78VnIK~LOY{!Nc)a*58+K#YZ=FHr5+*h(1? 
z(}MiG?zWV_UlT}*%zCTsKr>3mtCPzapw;1Qw{o=GyiD^wOuN%5jhQ1a|6%H(k#3*?UC2~USTofXBFmDe#ia4 z9g{sdIo_UC1_iWgetH8^l0(#+|8?$C$w=2By01Px*qyO(Dni+IjvBRax~=)dROk5q*Z`nj)e^U2ag9& zm+$p0Hc+Bh`&uj9Y>Bsm@}(LP{n+N%f!ddF=|Q}BsPmZr#$4J_sq4=Z*f441Co({H zk1c0c3Coz*yONv059Y{pO}epq>E^0`@KzIIVo2627umR3lZ#7H(*9O<4Ni8oCUYnT znaWe9vJZ*u8c^MkYS*tPH-2k3J&N(Z@>6`SxZg=7-Shs8v>*rd2F`-bcHC7GoD{>q zx*tp7K>-P8(4FOl#nG|P5|I&*UZJ|B_bpjZt>3-)-%)Jrmp;Q^nRcheSR!zJdRH0H zFI(~vH%CwI?EYmIr~xXfd@7ge4lKVwry8?Ns1BBdR<5ktr$q)?#}T5oLb3wXV@MTF zpqdkIUV%|$j!GCxU~9|2H~UWcweGfoS+mi{#Eq&IwJT<wjE~CF6n8JYR@pkm8Iu_Wn8c`w0_cxXi8VjNR7&eGpB1-<4Yd> z=Z#?k)ereo~ZE!ELX|?eMbxI<##eJOMs59oc?Ri9^~$=DJA8f zavmOhr*>Sz0ak3nWDwooFJlDt1eiGjKv6Y5e`NClmTZu(`!5r#PgwkKo3nSFcGx>T zTznC{BbU5io);t@k5K|7KCDCM9_v?UxZcTGdhImscE~6sF0_}g<6TZnak>Vu3f9HN zmXEHR5*T(ZO;7#u-q>%KTDdGZy*(w?RI>T&&mG$(-{$;kbO{-di#=>@K)f8ak>GPV z1AT%*%aI-aAL;gE7nWkEDW7PxnzGBAS9C>gGv@V64DoR`!VOPKDlSlJ$`a{T9%f&>tVLJIl-9cWFzeb|MFP~q2 zsm;+^i*Db@B4Q%HLyp@gO3^doj;*-jaHD3PPXQ?FgDFYP++&?o>CAjY?^mgP62(R_RXvY^Bw92&XWeO17FZ@87kd-s{~&PcO)4tTO76Q3(_ z3()E)*6L&YM=} zG18+ytHb-1>`sENhemNYYBkDup~e9ztTK*&^+gR&KnV#6Xa^v{n3bl0N-HrwlMkY{WVzm z&|r(F^}`!t#U78z`k!-erH8mrPNt}h%T;Kre-MkTgx7+)%aHJ&-qE)wCM|uOWwhAt zhjh?&r}plUhz96&@QRW$rnx_%W?=5o0&eb);FL}>HO0w( z`h(TtfDFIr^tw0=u%|n&dkhd+vh6>ecV3I+Gw~HVwPs6#-*-<}u+W5@M5lej+0h}|KX6=jC=KF@* z@--g%^Vzemv^mL&o(=v>D&X&LS>Ax=y0!+P7;>PFiCK~BO?PVg#&9tt|3g%{VoX4I zFC`b^Ii~Yx`+m0J$3+G{Ii$nG2IRHB`M*dae^>{&!hfYj zIyZW?7b#C0<};lncSTpbvNjSO5F;d8d}V>oE-Q=B%MYEetU5ft-x(kzwGm~?!G9U3rE{8ff$y4_1xIjhFt}fzhKp=(ai>rDbzi8|s|)=6lG_H&5LKHE?I?gRa)P zx|Y0Nzsol6$8Fs!Wl{5Ih2C2^)Xd|Qx6Je>8$@1A%=DA<5bR6%TJ~0(^3nBwU804B ztS;2?D5SlQ!s>io#WHpAi=Uk8q1>0n#sa5NVSk0L6}JQFM+x$Rv?%3rKY=8r1QK$? zO?o3dv(G^}m|*aL}KGF!=Lntf4I0)2XA?8(NnC3g5V zlcLN_r#G%o_75L$tC1r6+#a}V2c;Ya&!#;%Kbq1ssTlTmOGBI~|L1Rl`R+0CZ(O(= zEvVVNXYpKM_RcyYwro1(=5r$T+v6@f9$L#1QsJscYT1qRA}e29plDcjt$RLx>WFHv^3sdJ}7OPkM!TFqr`h`S;KM8)x#6A2h9pqB&w#! 
z`&NjXZnS-p#e{vh=@jmY;*Xc*u`B-s%vmnz%y=tjo&frqaGR5w8qCw_Cg-8j%iSsg zkHx7f=riWuY_Gyva_1$PNj9kwWaO)1#i#3X)%iUkE}JoE!w9;*(S_4cSYKnZ&gukU zghgDKpL@m}5i#K=R}b#-V1hfS!IN3wrvUJGmGdd;C4T!{mmVL{7h8o8kV-(Y~fhu?I#R5ZO z?!)tl#{A&AR02dMh_A9G|GC9As0dum126^|@Y5iF5AfkgcQG3mUUK;(A#u=O`$=Ng zX-znAOCwuiZ(-wU<2x1H%u09&UyHQ$sfr+5^I6 zM-tuF+REWfgC+ZUNB_e4XS|e9&6dSYDXhg3!1S^1t&8*07YW>K&dAJ%vuK{H?E*)Q z<3wnZuUyGr@)#ngTODfjzpf7duK;t%R>*%F)fG6ez7*~6(x?%a1W)vo6n(OW<x>@}G~$UfV-4yjpuW%D@_$W)4y2e`|QI#BW}^!Lb~lMQ>7LdHsJD32;#{ z>;4s-XZb=XzmIv4NefaBVsNo)WKem8q%D56g?V(cf?MZ^IBEVd&uJ^2&HJn~#(TT* zPLN3T2M(v20l1Z_d>adK>iDM7&js_*U z-;fMhUA_P-k@?cw`lOs8ZC{vJ&g5P36C}1@0BsKg(|-1|W&_9!x|Lur0ayI~g$!v7aJAEW$DF)eqJH9t}>JR1J6~QfT zDnh#Zskhb@-D3388e*lH(6@;3(=P(%QdIAv4;lL;CuREmtzeMyqsYPzsX3i*=#DJ5 z;CCwrTK}lDtK^%v`N_A*hOAY$BEn$C3~}wMnP<^=<>VD2hrSY$@Ib@9fVHJ+yh*^a zM{irf(_sGOkHVPPR+C;jG>5tHVDb6h!WwEu#WcVl7mbP7V5C>+$K6iRm^;T)e3*h{ zewVG!`Ia+W(Kj6MZclQ%A?)t6yQL1;_))&mT zI+dY1IPq_Bu*Okz=Nb7iIMT1gGzhCuCHG}Z44i!OO(l2k_^+l)(&5x9I1Bd*60WFuo=X%#p>QX`6{k2jIYc8|6tR%Cgy?56`?s9nx#TsRJ7ET^-8vaA zEI(o$xC{#)mwWv2D1YDT4rMRAm78(lMisO^6-L&MNvEmR$&zpNI>>*?OV=fA)n_!q z0rjdC@6SV2l@q5{++=t+OUo)aB4|ZS_w#MIhT>c5we~esTuvMZ!3hr1&RXw3ULYd9 zgHx!_qE*(1W_c`?FySwZE!vO3Aj#t?3rul;Tx5-xUip9P3<8q z5(aX)H(+xeybgM6A9v|zI2N`2qjT`9;DQec4=Q`1n{wT-eYVSCC@| z=xab5pk4&`7?fmyAmf1tbq3DA6_gL$L+LLlcF<3}IeW8Cbseu9wg`tUsc$>Pl~MC0 zQY6q~YewD`D@e`tc$X#Pv$)D4$bEQO13%``k9H+^*4&%)wzYszFIr1@@S`BPQ4Ut> z6IQ^JL~2Shj@fP2oiPK~IeMiqNJk;EsjHvELv<>EI6Cljor?yE3+sCdtXxi*1y>9O zX9WFQ%g^&8vL^A zk~QXZI8;sP>RjAyvg>{L>nv9g;wM~qU6Y$r!kY0HLF^Jd{tfN(>nZhiz*mA{=hw{WkW`MhS+Ie6V4a-ujRG2KD17Pd-pSg>@H{8N zUuB}z&8C&krSRmohMk9U*l~^({4)J3>LKcx@O6s`&|1tI+VNLeYyRM3YihzjHi;5IQ zKzb*iA|N6lAiZ~xCcTA3MS2sFCMD8)?=7Ks2+aft5PFBu(@5CtbDhB%oPGWG^qsx4 z-&*T_?ng+HEmp?(E{8kIdbez{3QR*CW3j0)U;Ybe-XFqmpq~~Y*I(pt|Id&@in^(T zZucj6`|JHZhL>VyiCT*qYr4;X1@wKHq(}e2w^lgj{+lbX9PQIqC+?UvPNAuX0h>e(QU=P-@in_% z`81Q0s}?!JM?t(b9wXRK{X3p+dN@wt1wpfZOusFZT;%YBZrjyn1Geayp5QPXhctO# zab`OeU9>10RqgcB6i&zib!9{vxhY>D9v1f3|5nfH|Mht+{S)~J~SeD3c!m#F%snb%yE3GZQ0Z?%w} znJ&lO$`%0hR^VHCWcs)%YlNHgVzEav7AhpIV?yR$94h@=ozJ>k4 zxDA)jv{}bk z;8jJFIrv!j$G^{1nglu?|GYTv-wHC6F?!4Xj>?N#Jk{DXPfKWn?i-E&mv25XA6RZczAj_kgf8xJrw#2 z+$*b@>{YxX_5Cwy%rVQKm|_}lP~SeiF+$lVLOkiA-m+_M9WY?JqG;Vkd|$cJ-f_}2 z0ZRwc#exB9sSNYQ723#R zm)tkKXl+Sx;`1)e>1CI!F667UEf>~l8$$2K5R`F$aZqdCpp3!eMU@6pRg2gBSE7^K znraUn_)qu_58bG)h^qVo(Xz1mwBp}XV7zA}Q2k&`Ubu?a=Vc*_`H9&^Hg$OAdY$%| zavEbGlrUsOslt%k%&cyyd3RMMO@EL_IwaBoDJFoms<0`O?;8lW&JZ}k7K}-5z*QCg z^ZmDZ0Ij`>)=8|zJ962lhmm=3jpedw>-_|8>&jLoh(pB_nF@xJ$5Q$`kJD++&ciBh zkorUt*WU&-B98Rab1+X(Qq%p8;E8bVMf$Nq&oP)VhIeeBkY%c`DFM}$oKatAimfqnTErTG49JA|V}=mHP& z$?*>y>EJm*VNrRH@1-Dj=hqWMCOVX43g+N1nj{?ElNi5vCZ9(h}}nE^|AYH_9ym0#oMHt zprgO@C)q?u#ZrZZ6SesUDxNUwngk3C{buE0OIMOEj4h8R3&(5OT727h_`(UHWRIQIa6asnub3mXX~~f?19+=xijSd7V`HWtL+=jW%=HD9CPTaZA-T?v@^`5 zv`a@@;5H*szT^**4osKIF;m%wE#)il7A+jcd{%O{K_i0j#E)S65GyMBO>I4bqE4@b z?T)xF;UA_mBu4hyK6nHqMOq9hOh^`MG|7j?aJ?h35i(b$e2^@n?%rm0c?_Uq#sIM9Krl{F_3zO8JE=Qm|v#IGKo%i?5OhG zJi6KY$!3)G5C3nR_7`dnN)1P_#Tu!rr_D;xpx!#9^M^)vs!~C_ors2Qk^d{99g`?; z4<5LN?Zr~B%Np>9VJnw(28j9*%vsCj#S6**H$lzJ^=-%3|KbZKUuqmrD!>xWHnGv| z<-Q=mb_`*#gcwu|)oJ;^@HTzJKDc2mgw;Ft>JIEX0$0*6ugx`7BYU(gOhGN6I~Wro zENxGVO3*gFIomg%#ijjMKJjdz#B4(`OfWAJIXWcxuhZ4KB0du&YqP+Nc=8J^ioaPt zc}0jj(NrYjRjSB*ISU%Zrr>1cgJl_)7l^OF;La=8%`2lNMhw}!-uQEbv+Z~~!mk1$ zL_69;!r}z6Mw5e<2RYiungaXe|GaF{5++y-Icq-E>u=Gx81y5h1qEj&@E(U=I{y2^ zUS<+M3)`yfHwLDiBQl7`c5}|6a8YXxsq-vv2lvAIreS8fZJVW$%pvTOMFO;KfXZ1% z3LgpoiXetMa@B)ycI~)>b~&; z)@6`-9&4*mt(i7d2kTE;@u?7%9~)pg*n4A6l$JUBv!pC)7g4)-MJw!5TmSP-uwqm` 
z$Nq|UE;a2g%J-YpuFNKfT`cjwHY}mKcWLD3>#(u-`KA17WpBgg>;kEXdd!P#$OqYN zsU-f0ofs=PcH8^rGJ~JHkm&Ovv_g2zBUk0w%&-zu<~e1mBa2JT0zNJR{*G&ez_Qt- zrYmIwnC5_c?we`2avhvF5IIO?TEw}kPypF3TJc&AEL#K}6xnoJ{pMy*5$h|`dOD}m zpq{;Y3)Cp&L5V-HAGq3aP7?jc(CzCfMMCZ?zHEwYIh>-y-1yY`ae_-X;wjAqOg{up zT^{0~_=|}`QImp{bhRfW<)E#@vbF@K8)v6q2PRpVEH&Sylth)ypQvO$ss-n+pn{CN zYW#2QW@^kvoIF-rCuNrq?sz_6`o63_bF~A$LZ|tpX@67vxb6@R!R7De^ZDRRu^g!N zaE`+YPAY=&nngbPwWlZiF9QhA6@b9 z5V_4dz7+o=1wO!!RvFB@gGYmf}zFlgT5wUXGY?Ml45) zzX45#I*5s>ddyBWtp5oQGTXJM&|pwlRI%3l)p};CKn%s>%|D-nkro51#7%Q{={I>8HhgG}{K4MYoYF*+ z-fGB1aeB*pk`<O$R@1xYPHoHPbIV2OHgQ#TF*l9FWo=Uz&aGNHNie$O=SI*}0Dr zQh6-|$B%!@Y5bhw=O5s>quX`DD(n9G{8|8cb}v3udN<(H@XI}(^G4MY`u)$DgoHJ& zHUR2*`dlbnC&%=3T++zTD()-p-^)O&49}>mXwz)WBi!r&-_!u|DIh)x)b5i!x$067 zFG(SLJ5Ju2`Bdr$yb8!V;Wptpp~coRu!#_>AIvZq*R%w^K2`Xk<$sou_PdjCW@{Sl zEpT?1FCtLLUJC2l@cy;WugwqppH`mYc~P~Sv#JAy=R)0!sSGS07dljY)Mp+=^F1Ls zEpj%`D6v$aZERRYP)&`^%$HRTKHb zQOd(!-=pSngYD7tl1dKwu^(C1w1S*(GiO)jf9wu?+@q(tG863B<4Zs-`+gp7iIFLt z17J@@Rv(c=_m}K_n7#bUogB9m;Jqb&1+-)LyisQ@x839an6s%%>G2`4hKB!wkc`c(;SscX710$~(tm3u)?TupUhriJ+D zD|lR;rVU30HNzuL0bQtTdlc0~oHh9uuHDdpRX8A$VUU{$ot-xBqd0qs7ht#?IB>05 z+>S{-OKd;2qG&v~ZEb8cJ(Tu}6<0wax8f;;zxY*Xa+pMVtzR6B?*nO2&fCE;7rYYG z;mDXkUUYkFEx$pB|5jhWw_ig7%9NFR0;h(%l_)Ga{A|vg%>$Azev165)#>3RtdO^g zy^~kTfRy%V-)=5S;?kR8S{8BhaDY$uVmSGC``v5I#C%v=7sb);qe)9=)hR0QfcA$o z3*iCrE<2j;{0*?w;n8%jRQ&_WEgR2@2-yRxl3ZJSFUztB-;M28suLfhs!0)zNqEu3 z!r8M$v4dw7i%s7shB|uS9DivUydFJxems~jpH|>|CZ=NE3zVB^0#OXVF|F%~21jNK zGAX|vuom>*n2bW#p2NuoA~Q(iTkXJtDN|U z9s}Gz=anp8d%Zq3&%!)gG}X3<;QQtN9$JZPzJg7We!UfnC6yZ=FH<+5;%-52syB_W z)G5I~@F^dK5*!jNBGMP)K_+iaG-2AATj68;tNh3<%wM;vA!p%bDVlUa_Ql~259uo5 zO{pKsL|zcpp8tq&9rO4va(*E$}FS|}uZ?*}ZutL38o|&*~)Q}eH(57P$ zqwv#RIhb8-D0?0`l5&S4{xRhp(&)nt@ zOmXRJiD#_%)4As_+14?LgYkO=!TQKyY3bd+(fp8qKFr6`L-l*LkJ*)#U2z6R4mIUm zK55Ll?=;L0lQ=??YgXk{I12;oDhNZr%Qw&E5~0-lTK9$DPt;W^jXa{!Zz5?yntu@8 z-&7tAWjL1h&R0L;N-*YcsMI!?l>RNkyQmA2Tro5oq|+bdJ^w?9BI{8|P00_~%%fHD zN_S&J(X6Qe{qENx=(V>Nw(>@07wV$-nnc-1(?|(#S)|Mx9618qy~R6*HT`rUBmhgz zs2ymn7|IBf3woR~wNs~xDkrozB?cqr{CYbu16lo_5d?OxJ4weM+1kdiZ&Lb5`gra+ zr6f_`L@Ng8l`9?L3!AsQQ#-py-YP{wqJoR{=n6DO=ijdn(+mFVGpv`%KdQt2AO5%F z^!0y;%_-F!g4$D`q)kG;xS4gIyXVFQmY7@7Cv9`ee&${Mu6?a4PHrcu&pX=Sg0Xg6 zq~AL+z+|1?2U%Ki^K2B&wH{zXuZtG=pA%Q#ik1E&bB0P_9d1go1||M`|E$Z3Ll!Z* ztVLkZxNNqpKBwAcIzrt$GIWeZmLs#NffA08fvC!w$Xgd(RlXxTn6LGEI8JB5DwQv~ z&O)Cj(npACb)3^L{Nihqkdt{-vH=QH^|o>YN{S0>N`XmSu&K_hH!5D{uts3Qh8AEs zS0drU%r2ODR|p%H{-Iqmg;DUV%vkKLVn$mQZ({Q_s&;aW{boe&_>my&&?qhu@VA_% zp@(3>0w}+t>I}yH^xY^cV|(PvqTh0sGz!Ch+q9kh*Htbj;F$oB+CK@-^>rot)lr_9 z+LSXxdANL!GlR=&kk1fK=A%JLs~m{`CZKLE2J@?5HiT?094m#)2<$Yx=F1KPSlxW7 z^M@|{?M0QWhd6EX%;wy)U%Q^iDdHr-_Z7%|676@PpDBpVUtG0biWhxXBBC&kqs73# zrc+rhb?DZm@Sj9~=+4j$$ou9hkT!aE2pW;`P8uk&3ejjcwG$}4X-sg6y-;mO)X&xx zq&3+;xS%<94eft=`{h@S$aAnd-9<{APwv<}G)BY1{4ns-N|tzgwQx>*-H29@W5a7I zW3+&))9w}F-p0wJCCLL!{H2V0jdH1T_`X5L~wy_Pa5P3qt}%KI%Jn9`V1{xlk7|?wHx;GEsk1nrybQVg$ag-5+>Rg z^k`j9JC5S=r;v<993d?(W!52E?v(0W??3Z|V0o3O$q=Eu7)YF$O?$bhgeQQB{b_>Ex8*SihGh9KaHBywal+rg3<4fV5n1yMH z2yj`G+RFX6A%b_a9zJRFmTf$3y>dWTSBF!j0gJ@TpC*w!ZVvy z}{j(?q%`$T`;D%!$h&SMjs_ zk#&RiTehW|J4S&Su;Z(J!DicR@PwLcaR0%aN-!zJ{ajW(} zix}N13;8*D`abqjL%X?#-5u!(riaeV%4NzcrZs(lRO!NBNMVtoVt?V`zw#)T=B?0a z2eYx|2#TDuABX?4#Oh{&XcM>M&(z--hUz5fKDJFu6(_@A47b7&6`~l`iT8k zz?|lf@PNPIKQGB&1G>M~PHTJ8Jpn7h55~pTylhJco~??l24D^Td0Y>g0KDe1s6!q0 zrYL^e;(MD3vmWaUtHoLduu@Y;gkaxs3qRnZSV!hEbK~yc5Sv!hQCU$GuPUFOxNut= z8W!;AY3AgnrHaB@^ziTkch<2e-!SMKhpniK(6)%0(BpeXR18eJGiZ*6ZwrxZF>AP3 zdqJCpY++A)0rbv31@zC{ni&o03HnP%XNG(Pgk~N&qeF9bd>*+ 
z4X1yq`$6W&1x3qZAb*?7ci^U(-JhlYYmC#FwIh)sX>SSq?-E@;bCZ>gnS7nfG@!}t z(1vUK!UT>crn98c^AG(jpP=sN$sB42yWAsRj>Nkeb1t8+W!%pClTzgBpL}fUu%;ip zDkvZgi}d||aw^{-xP6bsW5DkL!_mWwh@AZd?3#eWriN;#AjX5&T&QC8qb7Ykd*j&9 zsOZj}L)j&cW2TGkXH2iomc1P$*N6Rr^GnRD`e6A;VWXkc3to26)X(eiv1#Ach9CYg z|BH}|2w(N&$kUG19zcc@j~jIeiXOe5!zVQ%ul`RgGHLo&YQg5d)z-Ho=AAm9PgLS| zUN2SMoBP$z_|AOHM(87&8rQF7MVl*{@WFzGz$BLC@VOK}FMJ+0EH;7GMXdJF=Cdxg z;&8&U;-vUROrHE2$|PTS^7~kom(6Pj_Ex3BC6lq;>B6Z_zer4EN^j%ibM55o*ihO- z36#sf3|;@4vvB@a+<_ALz4_|t(M>N#I%O@%9RvA9c8b_g81W-|qA8(Uim?LwD>JtG zdwX4*P9cZefS_xPKmQ(oM$x`K^vTN4#}~rx`+|)km%^`Jc+Yp+cmZ3>!+k2U2;sIc zjNb7Pmx4Hx{npQgP5Ln^yYMAlJ|Q~cwV(T2UzV^j8qycH03Ox2NGe_Ar1=iSqD;oi ztBGC7!3c>`?}FpcN5CwpQSv^bg@9DUeGU&`=+K8aw4VyX$x*C9&jJ|Ve5Wg8HEH!^ z-dT8@{rr(BR@RG(<$OyJ-Mjq0vF>vZ#hQusp3T9NDNv4E{h|S}Wfcqg*Ms@;VpS@& z>FY)>pws^&c#(hVwDtax)0pIB{*fwACG=o_ z%pwJk_Resex%qrb9*W$9#37I+t!>G{qv@3APb#wbd-V_D zkaGFkL0O~&AAQj(fG>+bzHI+fgnbLp-mzSF}9JUCHV&IkUm5=jJ zZFa!Wt?5Q~kwvx4e5ka|y_dxP?{uBhnN6imsRXq@yTBgenA~eEUat<9t*XyU@43eo zYnHYhOAZ5l!dc%nDC@0%etN2MS-97PERwR#0B|zf8@&&jy_dEoSF+i+ZurLNn`^lw z6=RQ2F#{Fnx=CmJ`xDnI3pC!;;j4jGpT>nEC0R;`YpY5smq7uJ`p9o6)d+J6$xr9UzkkI#LYK15B7K% z+-yn};of^r69JScM5dXl=}Wm2FOf^<4ZV>^?-lxGyBiRjo}4!W{(7G>&23>h%7rhd zwCx_jZ2qgDF_Io9dl-xgA+1KL(siUst2)@bk@@H!~W(R&cO$s6XC$xKW;oiU2IL|_hA~h z7ylbDRVaw1iuB3=K{R8Yf6 z>*PKUQhVASK8_G;jDLq_MV&UsQCZ+m(oFSxSw)HyrHAnWA}Ne?LPW|gx6A%3gJGAf z$%)*M{E$GOc2W0u!NU6>kn82#dzufZ9Gb2sRsthM|B?H{t5ZGg90#?~aqIq>=>jsW zqUQ;Gm**~rR}&ykl3hAR2!pB|8-Ug9cbT&6n60_~fv@1X%aTM;CHs1COS7Bcy--4K}Xr7wrZ-*e!Itl!ii*P${I!s&l6@ zpSPY1lGdGnR5Q*_X#!tB@_g(hVHYaNx7r|4-7g%1Y9JRs{1DY~4De+O9)$Xgix@RP zWH0{g*3bGHrm|BLDgQek{*=7i1B`FgN=#dn=^s2yT7U5Pxzz0U8*20kf=`>C1Nco|Ke?_w zf8(y&qu%nIs^^}l7eg>*Z6XYlrcjG6U~)?Tf#4&`5BfpUyf#MC#Kbn+R)^IxXm)V@ z5!}SMts4aE9w2X4q`U}-b&@H@L=P^voBu!|X(ylxDhH#Rvn!^Pu+^Ot1;XvX29slf z1ze>^>tgF}F;dj-FT^Y!0L-KkZkczSbxJr3(R|RL8?5aj+QobCvsjbj3uC5)<3XrmX!h~6>)lj}(_pI@ z1IdpGG7o2HRI*qaGq)gdqy&eXoZEjwW?WbN*{J|ad8;2}Ep~(~=owp(dMYa|O_5&T zo5jGw;taaiFly#|t%mTmV239vS0hJEpTb7|k+6OI)Ewd8qkd~ZPy1J{<6GGjL5=DP zJOuPTONiHV_mM`OxDM2ff^n3li9c0#l6M}8i!!+vxsLxYWwsFbcX*4Oin)b5X1!PK z{$JJ%pN>`)bkO-2U8<@Y(l;soA)|FpvyMamb@)@-N!JLp@aZwB;fq+6@|YJ(>x|r1 zgqraOA!X_i*}$b$T3kHPaM^mX<@${aT3nef|}3#$>) zt)W5oh)^3=HZ9mP+d?0X)uHIj+1K6qlwM<&3PUp*rEncj@N}&5sn&27cl;;*PV7vG zXg2qA2j0J#wvqTa7vumvS-p{oYFY0X>RO!VA-tP9F04!rG@k%AZZ=0MeP{%a4qdZx z=GrEtnTi31JhS1+zOf1s+1L;0uXXEL8`aJIxPR{N_he2oso0eYkZ2#TpOTVi4&j&o zZCgZogh{iQ*h8s;>fGFL#@6>=`|?qZFNREIQxJbqUCZsq4 z42HYq9c)H^mJ*@|jt4|X{LJsCZK*3#wv}6#F|Ih#Q{z`H!(kE5x;Xv02X?Odd(6L= z9r2vi8JtEeX~P?)e((1z@82Lp~>DF~`_e#7Xo*?M(h+u{ zD{!G)%@+h_=GI&{zZ+aIo%mMYYIvn}tbk{b)g@lx)@q&29;lgoO77eVAqj2sTBt$-ON5k1TGkn^7RzTkS^%kgBRTN&?_Le7ilS7(=mu1{TW z0X+A-u_uS;TsL>tA}WXUxosapuFdy2%zx4{dYv^iMA@RhOw>+rcjE3S=ppZOH4q|Z zN?)Mr;_DQ=KJ@M~_GB6q($W+Etip;;%mlDyA*wCrr{<`DXFHVT+{f>y*4vY}^2Z}x zFs<1zA%={PV1oC7U;MdP>BolA{8>vD5tvMZgCJmdh*n7tdA;^bbBz>bIdMa^YbY&_ z3D05oZ0F_oJw_kI_lx7N={P_ShSfBAy<9kIWV2-KXFx8Bz{hkLnEK{O75K0R_B6Rl zuTRpwvUwWu`lx(VZdIP?C`sZ`&l4r41$(E};JLqxElUdLxn;s3Ml-YG@H?{&1Do7n z*JUL8q*8OnqgLZD-r^b-6>=Z5h~|G+{jTp>0Xk}~MSrf*5Z;g*Np`nwN%t(@Peq5> z#B*nn*)5&jOu9^Vs*-3fcSj=2?$zu&EACsTr;^0Njk}L&2$?J$GpUlV@P{n5)d}mzde*C~UdaYEJBRFLMmD33sBpGexriFLj9G{mYZ^h=i#c;iC>w3_v`1HK4 z$GJk-u5WFMY{|xs*TgJQS(UQ89o-o9`nNgV34Xa;4$|Ua@rheb{nTegJ{7QTt%|oW z^N96=!um_u&T9RDpGG!6FX0Zk$F%ROavPjIpW)G>@H{xXnF7A5>jvTCF8z{dCJzMli;LrwA;V< zt#s#a>)JY0FS$`S(5&W#^Xdtj%ra##+eiX7D6*max z^-mCf>{FM6*LI3dEA)K}IpxQBCXL?$KCT|tFe{xW7ur_Evc&-GuQUcFXvJvC^k@N2GD_33bHW=P`Q-?I3CCYdJD$!v1dn? 
zW_ITl0k0OW)FOjSI0etVGF|w32`DNc?`ORg{$S&R(Y~_gv|J!_v6JI|3VSh=0>q3L z2MHc@9Y2Q+iF|7E24N z5C4j-1=gOXq%`>Z67N@MbXEUY(@vXZl-&a>SCF=~P>QOsPQ~k~3@C*n9HBF zAd~8UZfQ*sRpuBds2xI=jl6nH@doUkl^NPBh>s|hT{w27_)+x1m2>4Gm(tiG>w7 z)e_dvPI7A_oxSFmx*1D`B`ySt%whOjTQ z5RX0hytgL1$A0$apWI}7GbP9;gcDY6ndr8ner#5YdaB(d>&A$bd1#iY`BT54GNGZ)&wT2c}qnuR#e zgHyIB?i@rOwVTJ2ChFSzaQMF%x6Q~KDtC7iNWbPr_^EKUknz_+K2Wdka%^Po46Xr# zj8E}rrU3=ehjw;_O~5|igi_I&a2KTVg2kJ(_29t;59m^0Ke5`P*Wlaro_ecHug5C& zb9|*Gnh)^P_=%lqYt{XKQ6s2tDlx-FA(1Ag{EFxFcYnN6mHzCzzE#sZ4QntsE9_`e za``yK7yS7FNp3E{Xe7Jo1vV;WMCaITTrSSH&E8-?(^U>yxCPOr7^3abv;pl zr_RT$!G1KTCq7ENl~VXR4($a{5W$XiH>NL zi4n2+ya)L%AqDbIz({?wTmc^*Ak^Ix{Do-_oklqsbKs>ru5m_9(BWW8goMM#4-UIB z+`g&jx@AhI?L`J>w?xQai?f;!0g)Fe)71!?-8suXmRSkp8zNgD5Qcu|DqiVV|G%HgLn&m@`TaCWs5YCG z!2qbwg}HG+L66uZ^6PHgj$C)W%*V$;i4y>w%`S z_Uie(G{svNtAV1(>0z&hLnGypzhz1#Wqy(q{nwx+HM|PrXB3^_$C-I4$2r>{HSZGh z+e-5oAa93NjGM6csc(Yc^*mPKy&@A#Mxi-vbbwDdy+_-lA?5_sPqzZy)BI(WJt+=r zggFJQ;{C0dQy-L(Ab7URf1Y zyW9z&uZBmk2*qmfTC~;BKtc#ZBhZKdTXuIAYXExn13gzXW%GeLZ>HLp&^58+C?Z%) zGghA6f<;PnFwh2)Q#0!MB(BvB@~izrKqHP~I2!G+?rPuNtLzvht_kv7@%O*y zwOn6X+Mo2a8~mdR@$78K*$rd`>wXA+2_G^4z88XX*eM_3*dqK*QcI4n=APF2O>ACv z%08ZFJ5&bK1R)o;t^tOfVgR>X!YGzQ+QaL-1Vs+z?yr&yTXsH8NO5^+gkAwSok(jy ze@g)-6C~xUs@vaJ!JpWmmYH5zbRTa+G)--%BnJ{@0(wMoo&^VnS)bta$$V`f6K@Hu z!Ak*?pB8pDN&e-4RjGUfR%)|pVi~sj_fa2J^f`l1_1wdPis2>L1>lX;m!7vO0%8Ig zw#P2&o1!?3YG$P&#JMM{3vj*`h#N?r_PQIG^Um(5>_6Bk-3XCpi!uWz@qh@?gY|sF7@O)a5D2(v%2H!y}N7v;AK^x9scl^2ol5aC@i|luPOOF!mu@? z@h#I3@zg`*9t5YNVm{UqkoIkB;t0{35nlJfq)+ga{vY-@UG9RZo-gYq=BmJd zAvKNCC$#;F%Yj7N<7texioeQb_2|E^q!47}G5+Kt;-{LL#A;Xx&Z3X~J|y=Q&G6a}XvGHqy(6MfIr z4i@%nBU03!`fhP`dIwEFS6UbFtbUqn77<7h|7UHRByB{e4O?M?lUwx#b-z%p=aLp% zOlT^GazbKJT=FbKmWs4*9H<>!!NT?f=aZe*@@YNJXH@>^p6@#ocb3BEGT-zM3?_w>ou#b8`tzRL?&HbUHbIJv%Uz4bi< zaqYS#UH&(0pJYGrGvA3q68^+W_h^n7l;=#mkKNi(IF1YI^lWQ-hW8qbc6y@UZTB)* z{Z!#+G*w4B_xqtv_rdf>qkSq89uL}Dw_%~PuTLeCtc5?nINB?JfEjY`QKlA*=)E2{ zUH!nXOXNPJdL;J9xxFr`ttxg!WM|j*6zL#Qe6T9@uh@P&z0K8M?~zE#O(f4N!85+_ zRY~(*t$xXv{)wP;uMF)72?xAt+-yCqNe*hr5Q5yR<;R+5Q1E$FE2ZCowmzA8{?oNS zzW~J?jR%u!Z2BG-)J>!R6+3@h7ccC36 z>;w6($3(F9NU*#dqP=Xq$riD%iIPMM!ZV-yba^BTWBq^b6Zw{u8ReZLId8VkTVB)Q z4Bnhy(a#FZIb14_(p6$tQhkaFl?Ht)zl$bis_%@VuzonMs^h85O93C)7WiMI@jrJi9-1T@S30?_0Hp&Jh;j6l&-vSbB{Gjt78_v|0Lg*)!Z4P;A z9|InbhEjo{TT_(GfbZGY+Ug&GUeWB_6vH^`%>TYzD{UBkx5n+f-YH((&A0}+Id*{j zUZ?H|c5W|K*mDJMfj^t%P*ED^7|ekJe0oPY1Eh|U+0&iU=mFtai)&%_iIzbsS?!$* zB7P3n8&_gkmtt>Cy0bqPbv>RImO3|#f!mrODgi&YR7XIq39At~zDvYSG$;-%h&;Hu5 z3c~BAwUhnwkiV^T``I(2+6~ShS&U7-lWT_UQh9Q~5k@G3;STPq$Q}FMgD%jffevT- ze}ze_v|wGqPexDLsY1{DaI~5Rpn~c8o$6coy8(qkaP!i}oH0IIjP0T6B->|FOp?O7 z`sqAunWT}Q=KHU_<8j+H={zZQGCTKzxT7`Uo5e@0e(z#3qwr|{W~Rn^v+#R`UDD8q z_z2TH-OD2PTd&+6w0zsow!1|;uj;Y*IKW!5%8hU7`A%ct9tRLIT{+~Lb2)K?>nv77 z?lX+4{pLsulE-4D`i2i21Y`xd1eceJ zE0wF~WrZ7K6Ufv!;lIm;sRFH>zGT~xkb`BYUUf%swUvQoAWxoPWr;Cm+4#OoW@9Mb zoF(V(kl&g@9NRykA7(lRlE`U~@XMMvil}Mrv8x8bhAcz$gWw^A9DaS+wO{ldTR;U~ zu-JLb$@%i03ndCLRd70FYtp2@Vl~TjWAkRgf?CxAz!=`%X*4#~<&L~&aTkAyXmHr! 
[GIT binary patch: base85-encoded binary payload, mangled in extraction and not recoverable; data omitted]
zX|{X5D`ss>fn!kq@P*}G`1?5y?uA0AA>1uT_&`2tcq57KG^eUdej7BsSwZT*Ce8?b z@@>*71~1uc{!Y4i`>;1eQ%D~cKKL%^Dzw#qEHwNeZ}5$((T~*q?N)(9v)uy|{NC?q znkbMNIwK2x#*Q_MvR<-3`B*sd{sJ{KlAE2THx~pO5Xpt0DLK8}PPcQr0sem@Tzadm z(BB}7tt2J~(>WE;$(sC9s-KA^Iwy^nDKIPOQ9Eigs+_|aCZY!n;PqK|2Ti)cZg zR3Q9&(@Jz5JBG;~s$Em7*`cic?|o3KA^ydiV>XZ6ZPZEI2LsIL}owfz7 ziMzUa2f1mms+9etF()PLUHu* z!$_{0VS&$@?NjNlEQC}4G=nx4zKdy z^b`M8k*;`@97&1nCB)=qY9w@4%r@JTr(NnIXC{*hmd7{!D!dPJ791VGnci=88sl

*yTx9aqp9p58!I%M^=^PJ@?UCubS~Y*GjjrwSpG136w{bhN`~hnw-7l$EV8Q7_{8Hu%!tiUHQWsFd&j}mDR=2? zt9r%SB#I7F!NutF*w5x1y##L~PKQDT3H62I^~kBfbY547UKuJM{D_S*OWK#Kc-s9D z&CT~Fl6mCyTYX#{+hAvHD>ML+7wrvF^&YuW+g6)Km*i#Z7kNsSAJe=o!Pq^P%0+^| zi+iI^kpQSqJ&}Mv`YX+RN0{L)Cx~8@E!s+sKtKXbY!92K=E_f5&)YEWZ#>Eo;4=WO zn)3LGuGWD1&FbQbn|_}2b-ABYs>;Q)+LpzHSY_?udU^|rckl69japMa&2mlf8N-vc zT(Vj=qtkp=NQ+8;m!ha)il8$^jz2l!LiXiPp_9%1Tvl)jLnUY|Es+*xXWdcK&xXoNkM$Dme4Ro zy+3xljl%`vwx?RH4fC}O3V4^Mj!?S;J7)&|K%}B4=R-WOt{syLteb$Mrg6hIQNf<# zBKF15UuGiUH6ryR?qa3ph`*!4nz3(llG}o7rv*W~Uqy(EtLIqg>z~mdv+RA z+ql|^Q=?1alT@je>sod!rafe5ArajZ@?)L%%asI0M(UQdLn*nSR5 z8Dk-;n>{X)ZsZmCv^^qG0dJv(8mWcZ(~cyPaS5^WH58{`yg!t@-{{X_?=~feDKp9^ zS}Lvg49y24TC&HtwqA(v%I>L@egE+=Apj-DldWs;921NK$b|}n0#?Xzj}}~8-Z^li zHm18G<|FH>2a8){<4=?~gd{r3_TP$=WIZ(O@Y*``1}zb!Jei+Of%TGQKe|r8CT4I# zrJLWXB?L^C$+34q)!2(%Cdko{q`{O%hva9&_;bF2h_+TmxKn@jZnhImjlSOH~RnS3(kE8d8n ze0Dq|VMzC_?8G^j?Z^1LXqH$rrDhozEL;lk#L`6gyo%!Y&-q(-k1ID_b~eM_A5u|} zw$D2Qo9KI-TZg&4qu<8Q?I?^3SHz=CAS~ltd9c7l8@_!{f>jhibzP)G-+*TkRn`+W ze2$S`$9oxlHb`Xzk75Quh*_LCIqPiy@1YSU=cQ&A^K_4~cbL!+s9OOhI8j!Id{;F- zhIzQKKS21*9&#Y3%r!0km8*@Ts|X^vw$C#s5!!5F-g3`b>&4&)t9TbhJ7lRog@+g} z7HPhgYF8;sG2tIFS1NbQ9Xe^!|Kn=kJ+b7eYBejcQZkY7vO@2$Az%tK>lpq|10 zi=aL!=*y&~+$!TP+JALf@Oz0DVEJvdV)ec)%|N=X4s*H@D`hZ#@vr5z=r z89c@*J*-u+G7UmnF~%1vQ8IA_rexa9x2$2Icc?kh_U%qX={+CO!xDr%uagn>OMsTzTWdEm zy8(y9sm-)&6Vp=?ZXU)h&uERjR6#F(K@FI)rTJgR^VC~|x?q%A;f~#h-%hLIw zo_EH=t85Lws_oQT!H&W-F)~dOt@km;r+X^e`A7C^Ia3Q|J&@R@s*Gd!x^6+3^;XeB z+^MLP?dk6I)9<$0ANz?)wRK&;pqmCWJ)|sCi{i4&d<*CULjtvo6PiO}73{}7;RN(8 z5pKtWA-Pbd#-r&){-Mi+p#UWfW6>_3?W;jQ1Z<4Wu+p#aEa6?DPwM1e{*t3FS)^7j=6BEG4Dp8WDmJ(#@>?%3d(EfCKdVfr#luiwLNxL&U^@~?K3qDDit zqwKa!yMpGl>zj7}+mv3HU9r$3C~&{WH!5+sz0+xA+0IiR4i6#pvW4_9l$_dg<23=l zCO1v&n_$~wh!OAD+D_x11hDzAl$v7W8yPY_GY^Cd&Fg9HJcXJ2_mn1$zZ2(HCgEX+ z(a%}bQMc~Ke#+1O8RlU$Nr+HB(4N^i6z{&0Q9?hT`9%|4OJqp=YAagS-<(|1%p`Rj zaX63ZQeU@o-s>sz2714SJ&+qoP2$+%! 
z3=Er7QHn@Sx?JJ)SAAh@?2sm7KG+~d>-oyFw(4Y;;|=eyxJ30dd^e_ZuCFbp@{qWs z_RqFEJC^|~tNlg8x@dkkQPE3Kxr1G(S@I6VZch#s0x$Vg{42CobSK?K4rD@CHQM#N z%F~m#!|)BwudS_NHrLAmD(@yDb=Ylvs{Gi7<*)JObf_^&sghWqo?R!<4%2mTbhdQh zN`wI}rKbkjRv}B<^Ko#8rmzLeoMKtpN4M#q%XF}q_7)dnqyjK)Rzf&2GJrn?9C=3evm#*nMrFh7CvcJGDpAw`x&_=d|fSPXt-S z9T3*I=Vy{ASs{Hdz;?D^X#95b6+tC|R&^PM)lc|E=GE=P>CStB*@W%E4D`gNw)Q#C z7?#DP_OyuOs(+{NuIzII%&KrVImmH_HphPv{$}|kF(e{f1m|=SUY0JDJ5dLD#0F}Y z@?jtK#qaa2WIk#+q1RvMHJ@Li?}olpD^s&;sK z21dakDLt#K#9jKx*^@Cc4Ng1t<={c}9BTg=VZ%NZ&x%t-_-w_FcVdjDVlG^VSkJCt zSX(+>*wwBpHtlqf=I6Xm>_ZdAy1u(FOtXQ16vk*1TTRu9>sMAsoTZ1ki`aTe^glFA zuU34!kMAkM7U=&(7X0rNgL`o;j~kjG*4?(7$0i_-)f6Qcqr+{PRC%Z3L&byfb=F+3 zR_C;JRTS{Z3!#u2b~oY%BYVd1Gt!WU?gbCAMn)VK6<-p=&Af@kG&#uX)71!+;_S|4KR)a&X~sTR!$TW(GN;9_ zR``$O3|7#Nph#5Kb)&}HZXW(S@4D$}KPy?^s^4qJ;d>`5 z<%J~THInEcOWrRUDdvT=FO}JT=9xMBt$gvsJXq`*f4v~_OR}Db_3ONGd&d(qPY8rI zYfkQ(i!A=b)YTL;8qr-ADrW?gHI$kfg>^h-kOCVxoFWgXhnL?g)ETGe`SMX#TZ5+o zD9oI^5%iHX=v$nb=`z52!dM(s>F2ZYt?bpe`Ngc6+ZCKGFh`i$&WDA*uj`@?p??82_Pla}e=vr`T z-P2?Ev-WR~N-O#O1q-omiwCcNTGjN?5bFR+kF2S)0C^34l@8JF2}vqtv}c)@*uovG z^N*WJ`!=CxRsp$wiC)GIt71=eB{wIZI;nn?l==aL#s%E?2y)J;)!#XxbeD*wU9#D% zDR-Y=+Tn5S{qFP#ZUR%5ram!-4g#jD6@A4Jg(-A}eb;5ZIDB#F^87W*K9GHsyzZJX zc9YZJJ*Ev`Jc3D(njTE>bRf0Vn>};@9OF4H|CsgaPz$2nCv)ZCjOMX$(v|s!qDDkw zsi)-Oz1d<0E$Q;)N7S_O?sB9ov_tehC#+00Q8p$bUH1p*Hdtjx{UNYr@o5b)MN2|W zFaO7iQtx#Y`}E_Wa+2Gntd~F{26+2YEo_1z0-KLo86b*w zg{mDB@EvxON4i1QwwUxB*&l#;yIC*8G|t=D>9M7l4yG^Tg@=7?K zV;?IKHEbbpUF)NO0xPyE;Ra!gjh(AcW~*kPYuyAF+r4^fkn#~Zr?g##uUE7k5saz6 z;OPuGne3<3O4zX#AC28#2;zQy!d^vC*~sLbwx*nIc&Vaz#7t+!I{oyY(_Dc(rZJeU#HdYoPyAo zD-a;-(Q^$v9ek(F+9{&=8z7Joyt;C{M39@W&5@0n?yo;l zl4T2y+io6W;vO@gXeF= z`JCmw)U>WTNo*W8ngG`tZ4&}^L<$YNknW4h&MtEQ=xv|dC5FUN^GO&l3}l z%N@_NS?!X=az^L<53*L(KkEu9#?4K;KvNCU_~Uh3@JqX?H6V)5BHE;jZ5plnS>MWw zS0sHz$TeF?tWFS~L#mt2)W75|<|%C6`K%7{?U;8ST(ThUuMzx$kYs0oC)V!!VP-X4 zf4hl>f9Z5~Q@`1-;{-GO$-x<+o0@0e1(JL<4nCEAv)|$+T{%3a?seg?MYUqbix{hu z8MmBqgC2AJYNnP%$j;ZM=j&ePc{C&lHTpf&>y}S{PiAj8ErLttHVZ6f1s%E2g{dC$Kge44j-S7!Al38Jj7qjp7V|3-7vz5@(E4vx+i`wdOfJ$!w+!Kc93spE$gIU$a%8!mR!)-J>X^CE8s;hw5*hVJ49OtjL)| z9sIFo%Z(>!jSg$U6=a-Y`pTZU*t;tochXrt9QF5TfnX!Rd(}#+_Aoo1IuPyvYNfgch$+Q`haLef6;R(`L0%nJQBEaIt^8!1 ztJ15L_P2zas294~LjQQZLVYWipgAQVWfye=`89QB^t57=EAgteN~QVRs4G=N-X?$> z-?NYuT0VMU_i%f~%qmkZiu9$~+I_d|D2^GFh^NqRcpyuE;bba$He{yI!A;8!DQ;OR zdV-~O#}w6+fg|yon%CD;T6nds*2cLB35!QJ(toG5*DIsUHo}@F98R4HYe@F{n`gIp z88G=mxbE^M5O{dzCf5}^bPYAMLeHF+Fa99+*OH4z*n$JE;3Qs)uW;du`Q8O#>Fw|2 z;!$d4a9+K%q+vk874ucb&N1@p<>WfxBlgQ0&hJP~v#rAiG%s@=h$`Q^Qk6qLan`O6C{k1mGb4R# zz_DRfGUFCu-?t?6Hk4wpG)uuyj4`o8>e%owB=M;zxZi2C#vEz75%HE$=q(@8zW=~9 z@hM1sKXwMpAG#Xo?AU z@TaaHRHmu`xVKdkmx(?XPV3c__2g}ff(WupwHLH5W{DhGkHhTFTRiJKSpPJeQ&Mq; zZPJO{e)WNhDT070`eAeVFCs<)j`ybKBZ{}~Xa+(<^8OfYE;N+3UiH4!MjU?4(XGU@ zN<#VW(o`c>Va`C|yRgnGcUVcp2JtpEFgq80e>0U0&q=IqzMZDpSrr)TBPJ-8X^{l~ zkVuns$(cjUfpqHhAhDbd>)~%*r>9|9qa^v)lX5uv2 zOT-I~N}CcCSLdOpb*wWIh^mF=sV7qf zkRuzeBj!_B%$yV>q@X0ey)wnwB_epSryj&q-Vdq>v2$`rZNTE@GXp*=CN`(dyo1ku zib=nPC}LxKN@b@N;H>Tl%8IrpGEJSEbY+cm$VW>%YS}bd<9PdXGzJ%|uzUv{RTY+J zKO}Z-DbQC`ld(3qaz2kmePt($@CELK^(gK^60qb-ER{iVSieZfLvG2~q=XkRhFn6+ zThfzQ=Q_YUKg()+-)fmVn{b!?ffkVSF|KtCsBDP!T~q<#Ld0!vt5Lmw`zpzSX*G-1 zzffWmZ}ceYm*5#eP0|id0qfiy+;_h3QMggpMD4yNcrW*MyPd@^j_L)Lmc+-x*Bw07 z7x0~Enk6Vl>U6jSNO%jIp{~5sBc};{Q8$V6e`O7EQ`}66xbJ_%{s+a?sdNG949zde z29Lg_C9=o?p4lq$X!A#1_2jw^(#vS&gwve6aE18`45`fvqW~Rx5as^i3^oWBL@qS! zY2Y@Df&B~)prn$9NS`11y5~MwK^A7olI)73Z=3!$44ky>87U6as$~Rr=`5%NdW3Bj zq=r$tB3};J*aX%GUAqGgwsF6HoVs-AIz%;Md#JvutEi3b-?B-1VYcCX59U!e@i8cXAGY8qS7^3IHVpSR8! 
z5$iYbJkX>nT*4(}3Hz&HHn!#5R9WOf{}26k@Vflt!=?IcJ{{w9sah_D*wG&O@bLFTtt9J&4E|l5$Wfqa={A~fYr*~>D;}jrRXKi$v~`3rBsLmBPlJtqw=_SGT>pehrz4o z=d~%z)r7~3odb6q#pI990f;>?G@+}w((3Z;OSRx$tLM4mD^fIac!T{`c(V<*HqX&j z?3%MTA5mE?bIm9>Qa7zxtm}8D`k6AKp*K%!E|<8cAn3i~lWchq^w}xxG{CAd+-tuc zV>#v&qGdLd4PZsm017dRp*`DyaYP{TsxYs#vN7+WD^JT=pS1=9mm7#wSi#01^*$qZ zcC$}Gw{~-y?Tf9Ks!olr;V&5nqA6gAg41%Dm z*P70Q#bPHf+I?r7R=W{?mef(S>_nN-n{QaTAq!?-)8sZ>l}%@N6>-dGHN!pJyi4WA zUf5?%h`D5;Sz|Wz50ipZR%(*#AO8QB za5zFNa?V}yMTi{UhuVpHZXQAZgSUa~+G0HcQhCcdO3x1Tb(v+^9UfJ>JHvnW9Suf% zynL=*F%bMv02;Z#KRs4)ckczTokodpqI0n(JS*#-9OTBBtDN3Qebxy>jQ(v{gLZ*@ z0HY54M=5s^F;N|HUVbN0onpY3n7NBKptZOyFTOIg!-4BNQ(>CL8a${Mty!db?%TC( zmBv-A`Zn#E2kx8mTw;elv0_)fG{iH53KV0pxyliaRL&V1PfryPM%+x!;NQ%e6L+wb zr*6RJ`5YY@bQx?`mNqLqbz_L~%%w)Z)dK&YO;)|@>yY?V`U?;ye9ocG`^kKU_nJKx ziOYVdc(uC_ag|ih>Y7!pkex|f9S3yg#240qP_GVlKm5IRGd(~3Zmq$dyr$|Lo$Shf zg!B0rU85Y6pv-n(aaiR+BIlFRLONF})>jrnT*;c135FtOhS9Ddd%bT!qe_O6Sa3EH zrfDu4#s1jK*F^6BicztPsS*4_?;~Yby>R}~T6-l{^Lq(-aZoj+&GX5;>*BfUS*po* z#WGQrRS^eLYz$KkCrqMS*X4U7GBs)+%u(kd8z+Oe1rDJrbjMFu#|1ha9 z(em?1aDTqvGh;gK1$=AsB(#pP9V7%TDezf;U5e7QwWz6;6*uqkb3dWQOUxAJD+4)j z*V}mRAR`UX*FV#(wE1n-=Kk!L&;!lSx!GV}AJpuVFzFd4Kg!DmzCx=G^#E%5o|j6; z6w6cTH{A5L0Y^;FZwtiDDwk}lQ6)gIk^0`Sd7h!T6Axo}`)rID3~bWo;<0)82$tp2 zoc^pJ{*{?{l^C*o2>XV#dYU;mve@QXo)J1S`IcZ7?<_Kk)N)6Jiv8ze{9B(P6+|GK z_=wK{_=_92l)!U ze3=rndf7%rDbQLNZM_CF&gSaHkHe-;61NduA{+fU&$_P2j4!V8brlB;u;TjTaK#I- z3wtT!x|34t6=;jO0DShphqjo0$cNQyn)dU5p~8_Ni_Xluep#clT34GUu`f@87H@9G z715-l)k28UAg_X@6jX>{cy`;|9aE#Mh`Un~B+HO_lnewzkoDy1rN!haXj;(I9matz zyRRCg=rUC`J&)SJm12Ou(jONrI`y#JPgY>?5`j>-!>gJ}#C8G43zD!}+Nf3;eNp3L zw?H`P&bV-wzv&n0)$@ejZo~P_QO|T(ZV_5iRAG`neiO10@hfyGTR&9rgt}Wy6*u>) zC1WQ5i|Ej-1TW_}$UmKGYWI)csinJ3MNbNkWM=4 zTlLOiUhvm(-b3Pp@@MEt9v42NvgsB7_MPiLHWmOAvh|gWOBFJ^vA(*u1nr18ku<(o z>d`MBc9gnJbuGW8$;W&iot5Ku!ROX9(yR?;5b5Az6543ZR=h+zm%n>K*DrJUC?nnN-v-i}l?^S0?^kIjgJ3*vh5}RWWlXB|&L@69G-KP9oB-l{J&s zcF*OK@F@b5Ir-IFVL#`&kyNIg*OYU3f-!r%(+5bOUVZefit!<2uU-(qQP30)`@Bwo zmd91+Z=^I580i4$=eWn{F&)$9HYzGNHV7^cBQcW)z<*LC>OL?aTd*0shiV@?s*=yK z#857@R{hB;;q1#3Dh!?v>bhm`>IV2Vxkmg0^6hkM_7LPHOSC}sg94>1O_&?p5GHJ? zspv--A2A$lG?|%&jh&{i0hQ+CR%EXUSW0=Lz-xkKX%H&bsN7nF*O81k#y2NsSTK26 z+N1t^gtL$FmtD^zR)nT02Ky;D);Hgbb=un!RG(w4_ct2n7*^h|g!txL@*91BU0&eF zK0bNN5eqFtafC%Y51fg{3Wg);GlE1&88qZ;O04reMw5szweG;{i*;Rv*okAT*rW1b z;5@cETkC$>{?CcuXTDS?&A8C{X!_}~N!mB4*J`c$WUko=V`h?lu)aWmA6S6;o6DvK z{-o>-hFAViTe|n(c}A>{cEStBfEJMuK?OY*d#1a#Vb{=(Rv+0qKgLCj9_smFfyToO z6atd<%={0&JX-_vMYmPWTvY}P7!5?V1Xg0;T3 z>3@^dFnkwuj4w*vaQ^#ziccSjYJ{+Ls5o$G)^F;L^TPBpZe=!b&ws!n$U|Z@vz7J9s?z6K!u?{szjS_a1OV0BMK` zo`_^Qdc*1cdImdkj@`L1q2`#gm<>aX}T~#F%YZOg-1aO!*Cl4X5zi4yx zSagQ4*QOS{D@jIM%-dcu87Rx4Y3Q>vwIh-or>o`hM0HlH(8Vv#ins@JrQ;&&j zZ%jZ<7LyV0Wj*m7K*6BVa3QaqiPiB|8u^(tU+cGp-SB&zMvFPAI+p`1|CCauYWiVI zAqpXJmbD#M_`t&9PIVJCOM>S?c%ct}TPwL~w;O)-R2MzeUdv|5H@q6!*nCl=5-Eoj~FQ3achL1hWbj5Vg@yt#v>0*z7&j z!lx?qt~&`mv7zRWQ+&8Nhr|k)4{hq5PG8mc;c$BYhlWyJz0>DDq&z2n4)f3%sf7HL zPWeiObP+E)c?Rv%WvQjc#J#eVc14+ae`*X@=BeEJgf@k#qnO@{BXa$1+%Zh|`jzYp zeYMyEyJGdTu>2stn(U@;JH1SDx09lFKkSQfmGieXl`Id(xfY=!LfYo|`K~UUS~qeKV(QkumE>K|xw*OTP`D;R zH^^G4H<3Tl)zEhpYy)ceLDTKEBWAr?N}x$c&czh{x=%89FV)F5Ip4jAha9`2GjQcq$c%r|WHd3)f? zeV!tSM24gLavGPuf30gp>Y!}zxr!U^_=<5_oOXVSpy$_RZTYAW;Gw%n@v=8{(Gr?F zN8rSfP3}TsprLVNWOezd@-dzKrGuuiL!ry)p+eM)vR6FK^}R178%k5M>vbZ72WWkx z{arW-ih(N_XytO!#U7t3YC6qvStAcV-TZo4@+7zBymXBEF)&FsCm*PE@O9Y8#r&>V zELAkU`JwU^J!GCL*RUK z2a}B4%e|=@+Yu^R? 
zo<3M?$?QMnJDzR8z-5IGc3X8GBKvQnE92(ZQUlGTAiK&tkO^I;=VraG70*I}GxUw2 zkKN7PeqC45!sgFAl4BrB%ryQzMXVq&iz0P7%XHVzf)^!@n2MDz1&-lzOXj?uTy-25 z89N-2V7Nb=zZ)t>f&>Iac_;4XQxREh=41xnniQOD@IUxEudt@l@Y|0vf`u|xI;e~y zA|N821eH-#nu-)@L0W`Rq?d$5WPqWo2uKSmB2q)|Bp?!c0s^4~2t7a`0Rkzca5(4U zT>a0v+c)3U-p_u%cfG&0mf^LGL6h&IkCN3GNDidqEJ*t1K}%0!e|WjJG}EN_?zh^7 z0X07X-Uiz@+=KkcM+{t(-!o;Jd+IbCq<`!D;v_#eZUO2S>h4pxWzLI!EKfsxKwMwZ z#EjVdjenU`li5&OF1Bzg2s*Sn#3DR{2fhfx{Afs5L4fuZ4BV;j;qJ&EVGWh7M(;Tid8NB?VJWzR!+2YwP=dEX?INzq7fR0jw}U zQk!lY__y;uS(AP&`4qwKx#DJS=R~emnKd}qw-H;~Z@1)wR00l*jFXeJEzY*oet)e` zrJmZMjkexjQJ#JKtA)Z9*uMjUMN^|6;_vf#d57tAyCe~vhdI|nhu+k^WZC!;m2vNd zOv^w;{z-)SQ4gf2IwstGDZUE!iRa$xi@I@Nxa;S&7-}8gjQuz6(aIYoq~rM+Vz#)! zcNtp2$S?E6?Qa*b8waUvp+``)CDa44gayfSx1*U!30JbROg}JlM@r)P4$Y0L?&92NnucC78R0zHJzgZ;QFDsD4w25dyBOS4qti2><;I-OoApG#-?yUk$P!|79i~-^Qr;I$*{}h*Ym7T}MwWm^ z{rP4?TfEe6SiVXm{JRJ(jl-(P~vizakXX zFP}!3nu7~h!SR(ODJJ&({x7)W#<4AF#4m??(yyY-r{5v_C2s3-$Hm41KXpK0^Lut_ z2OiD!;e)@7dPJV$4iojKh}FSB0;S5mDm0Chv)#5U95;ap=JuLNp&IgL0^MeH4d0w{ za%jF|T9i*biAbn}q#coc^<^4FLQ>RF`17=E(JB|?flwDwk(W_#~U$iCT^wH`5P>W<<~Rl1G%&Nkv)Pw3i4Ir z(wlO)QkNAo+(gkEYSqEQ%`hicQrP+YCLu=;KTWJB_U{Fgt7Q1Adby_Ka@PfIcLSu= zZ*EjkVGl^1=GeA40a{jk>RHT3Y~4_(0?tXpGj6@|HE65Gwq@qkUs?d1Iuq|()0Anu ztT`5N1{xkU;?jRT@6c=;kUN$*IDlm|Ql2=-7XmT>8Qs!a61i=($UR|>tX7te+Qzeu zf^Ll{={=F5na@Xt3eMKmA1Sb!ogD@ngkGy0XppSZP_;p0dZ2CwVx`It{%|3z{)U$!wrPlH>{`RJNQYrq{$!D`-&Cqi@KUceCW`Y8;%&bbfoEd!_pAL6`W$30 zgh@fmQW6^~a&IO^5q0>7Yd@K4(`o3?ACeRD=wGg`sQTUTjg8q1D{SDZiGi~LDa=vw zMnQViypptd)uZsXd+2tA$HMyW%{F8XYJwm-HoeyDLcN-13j+JLx|)9;MuM0lt2(3th}|CqWtp&Kmn%VlzcgD1ZL?>!Kog{uAq*k@LUDzBFQ zhpyJF&1=x~%g8Oy3$NH|@x9el5b89ZtYWtj!WoEe2}udZ)f{~2@D3xrcF?Vf2cp@L zG4~3?4D0k~!wGXL#mGOC-)GVjG`n*AJu({}K{qwVs&NIrZ>~(~Z$Y$FFQ)EBgXUR* z9@(_Q5i0ddVzQckw_aa#1FVZ%dDw`!hKyh#!NNA%4bRxm4ek}&;)_nOLd4ok{B_k< zpQW((AN%8`W&lfMr$Dg)qc$;oUdVJjCYdb8XtoaT&plL%ffW}{mXEpijlBmKR+|$0 z-oHT6andgIrhFEU%~vc9nOeKDHHcLi1D=L$d7}~7soy$f>Yfv;HFbNxoj;M}V#p?=e z{s?iJq2cDHry^dS(cP_8lFO{OfKIzUASw?qoh&0CgM*i5MRvbAy5-5(&6bKw3FXDE z(oE7!#49!vf-}~8vWjIK_gA`=;YJqWhR^7Kw~@@(w7xQN&gJ=9nt!3*&h^eH+vPDjtyb?%lp3?FxI+6yV}Xw`e&CnU zM$^6d%VU+g#kbKzUORbXLE9|YD?G@Wn3Q2%;+xy=#>!ZAPm16DA|;*R4jEwgEf$5rtf2SO)__lXInZTdd2q#%kqy|?$ei*E*A?X!Fx2v04SGN8R)((LVEm}t zM>l&%_+Bd)7OgLS5Zm24uQ+Jx@QvH;+KgYz(QIQlIIsGv>{7 zT6d<+*zEf!aqbXGY8fh$){pK*0sfpFGKA%9-~X`UE3FD7B}24kyZ`j<)ua?ldk8NrchLqlNGaMiV4

g@Tk z_oL>l8uM=K1<@tTU!PkB?-0H$R=giC(t4n2*AgNI(A?Cv0|U?8wu#gns~vPPX-X1snN#;5~7<@e7`f0 z>r#Z}%EJn$hStY*6>yutD5o)NwounCQtpn;V&%MnCZ&$F3dm~yE{egPOJ?ew9iOwJ zmhKc9X5Sdh%+BUjEf16eZFsuf^X}Est;CD@KyN9%4$#DD488pT$_pi{Tcp*rMDUk? zx4lf(6tf&W9_Jo_qg<`UbCU;8Oo3A}lWdMDHpTjxxL>}w)#UtT>&WE4HG=Y`PpE2B z5*vY{7!wLuR!TqwygXQ>1CcM|{EnH`^O-TXNq_?bw6s%^qEsm~S!>LBLYL9EBb``R z(a{knC9DTASS}@m=vy^h*~|Ryao}JObP$D%$=O~Z@Dm>oSbZzM6-W$tW@YbJWzc}yeMu344cKw9$duv?~`j)S*+KOtA#X$Tkan6X% zwq7X*0uAs@-%f@`7flyUEd;kQ>%RQ%mDN&>4-E!-G>-uGE|0D8`t;sw7ApAsu>KlQ z5?a`d4^47x|C7q{fHkycj)xDV+D|AhuU#!HMw}B(Jww8vqwLyt>GNxfMK@hYk^Ei* zx={QML>RmwvgL`KxX6TQ-{kfk$(A@Wv^wjJ-PWiT~kA2OVmY<^2p$)J|4&cJ>yy|4;=(MYTs&% zOA8zZ1N579cRsqL6lDKZkHMUQ2o5VO2GrZkhiP1RZy%WG)*p$!_p*C8#<8#M(Q+y| zvMnt%_ki!S*gv|F6|zhWibB(fu@AJJtAKxQzs_7toqhB_san*j;_^ib4nB z&O8}~u@T6S(U|J`6Q77><^j^H%lw7Pnf(PZ-4H{Zp~hADlao#fijiQ~f4+3{Of zzAc&zNs{uJB^_VGv621r@-CoAK%8bB=1E?8nu+~BAK(e#ots?l9;GmE94CnsR&`lq z=OMFL&XSyNiF@|<#|)F$?)9Ql05#^iZe>~KlemuM0pblawjGHdajm4nmcb*QMTrUd zojisM!wO|ph2ISkr#(O0xo*9MM1NMt2F5~>)D4=aI@ zQo1tyrEQ8lFooIv-A_2CfxSgn6IEmZ)2TD5?30^ycrJ2MrOV}$DQ#`@p}*+lAmOKD zekXn@lt4mwR2{Fa|DZNl^>3-yoFt+~*A#uYPtAnEe>%%c_@?2x&7+K;%5JAwY*+qU zM6_zX^`XW-J-|dbk;13XF|vU3knb%{p?+7Y1{w;b`n0r~&35*>CJ_Ej{Ui|@QW?g~ zcJe}%tJ1U7_7&!3Aiw8mA>xS5njJ&AGCQrAwX9Rq;&iWd+l!zH^>QUCGi$HNhSLHE zA2ZePPeXar@;HOLkw08iKw!@x4d|azBai1v{(Dtfd;h@7p4J@c9et?HP=yW`=&Isz>zxv{Af_nO=oOiT+*!b(- zPA#2Ab+%$Jj0KxJiKwGyAcI$!BoJP1UmP71Q56=fqe!m~!TpoJmV?7jAhlX9~S2uu-EOvEB@ zyFfG7@R{r;#&kX;w@EGS)VfF)m?d70XNGzq`2lH?gICIv--&I-0$DXfT1Z{f@pH3p zTAp!Zdg=9D&{E*2zK`iw!9m7azMb&9AWH}#bE5Hv#7JXKxn4JfxNxO#Z7u)1t9~%m zm(~`VaJJ4wR?V>U)a1{rGFKf&e1G^zvG%PfjVmBlxD-yGwT@zmOiQ9J6D~818!07| z#)b9GA)Lf@J#$`qT;?k^WIygsi8lO`D%y#>GsynSE_!ovJR{ZD$tvfJSdo4~hLe+d z=I@QMa;vqte#}C)rT$-f$pAVavGd<0c=HJvSMyGt6AlCVRo$$A%fuO0a=z8NxpSfa znv0*zwYmWLSwgq}nc01q5j0JPmctP}z3_IO4bW41P8|ZZxJ)<0B@Vy}g}>#3yum7h ze5&zoZ{y+&OhfIEvMKL%>UFSkL*>4mhVvQhKH&zb5?u zJN(;<9=puv7~7E&B1H~VA?bi?g!i`6icaoLU~907{A_61o|v#@;Pe$rX(-%?uW(D-|(KUKxxKh__N)APn<;7+<7@mu-)<)SL% zv(RT~oS|1$Hk7(SK<@PxuJ=IGh{0-`JL2k!+mls*fUrQUROK(edU)B^BB0ttw7{q* zbhTlvc1se}NWkSK0L-WfHGcwB4;zn;T5KNvq!lCNcjo;s%<|{=5m1`(pwqGGiOcKG z!-$Qnx22s5`d8V)K;u?L(uq{XO&R_ErRe&1Y2|t#RNX%J-;nmexpj^mXoKR9_~o^X z|E*_msJGtrTcG{qzt%zq9F1uq=0(Xfv(axsN%)r|lLfJ=8^i!Z@;`yRS{L1pON^Ch zUjzBwe=kgPYU&Sewi$lZQ7iYkgIUM#lK?@1FSZ*K1lmTi7Z&st$>ulNcp9-??bRG7 zMXtNs_B`}c!b@v(Zoc22$ONc{<7ZUZ)g=hQOIPM`k|&WFH-8>2(ApmRC9bpA@}tI@ z!2+%;gH#o$upXrJSLk1?9Y+B2;@*9h>V`bN8{)+%#)iU;8AeVnrpBm+StEautgnbI ztJeCR@|5xL3D8h*bceMpTYzXdGeYz+c)*|Zi%q>WZh)89GH}MZO(Oa`F)Kf9?1`Hi z;@LhVb>8HyruPZ`KSpjKn2*t7wlF_~uv<5|;_k~ve~?= z#v2V~J1J0VPtW;4c}+=C*o>Pf)M&R62H^AXn=U@$T~7N7&Chm@DN2kUC_nVG`oSaF`uf@F{s&)K|#UEHCF zMXmRYSRQ6dI@sDpGI-6w7yP8URi=G}dtRxv5<*1k^ z3x4#0xB(U}#UhhO3{qusZMY9Bu(lTz%%qjKZq54I!)lZ7o>u$GdYpoD&^?Y{y`8ktAqkmg*!#vO17-!Af zdvWG4PdrcEcT)kHr~)nuXxMEcaRcWZuHD4On!Yvkx#suHl6Hioe?xRHd-7yZc`UZ&W#{W^xV-E}3q=>u*(;c*o8MNsw_lr@2Y^)C?o=f~9m%I+ zl=X##XbtnY42VAU6+QPD%-37qpey5R=YaMW)YcM8UvO9A)i{Z5$cCh=8^UH>Hs#Gc ztSlAc1M@gCvSeHY?-lR?VkL|wk<_+${$b)Mfv)~&ah4xR^(IYvn-`B~hCEq^) z1e^pq$H#deZ6?F&e70wl%p{rb#DXMr#o$Jz>6NaR`iHcz3tylP;L4Ta(6yhg&pz3e zDq7}T9V5Tn=JKlXds;|`Zj~_m>wb~6$cXP+;cXe4BQ)xSI?uGmPvfkt2+!KMLiM!# zDQZ?;e8^54Z-v%2>)H84j@nNa?8& z1(DXK1zy8R^RAk6?SaG#YUach`aK#bxS|+cS$cc#L5mG}OKZ0ia2P;Qt~+CEv8qXu z-8iM&J9UIPaA-|gSlrQ^{ar|3He-A=s=J#AqQs>Mo%nvl%kZg~^T;E5w{%%q+~eO?jA3m1UQyJGEGY}<``?IOhd+T zPb?Kd-D=JOy3ZWzmPWT@e1n>7l{Z|Wp((sC!*?|Q3%Z~jd}~^VYGwvSIWwIKqs)U(tta9Z{^ zvR7tDRrbcu2kKJ{RL8cOGnNDL$ZIwfm@?p^gd69#;BYS3++{~uf 
z&ze!c&*(E=Im>|>Q@yy3pZac*tmI!8%wLyuL^<^8r8@eu^TcoNw{Q`-bA|k4n`lf2 ze3`bt@hf*Z?*S%Uf3XRlm7j^A&dsL!5+vUh%lOF1tLy(`UosC4!;3f;w&E;8Um-t7 zTozd4ioNJenVI~seCF~9w*xZuI{Y6Ga$%8v*=By7W065#cc^zh+AA+$j-p#HpN7kw zy+dx7=DeN$j8{&}Tx^v}%Rjp#P`15Z>M*IB+uA$#d$IzOit@2H)T9J^cc!A9x5du5 zg@tU?EmP`LOJrI;c*Em0jV9OSrhrZm1kiXdV;K|rgK>X7up~r0yen1%8kyAnm3Vcy zFDO};#b3U55%8{*c!I0Gs;k2SUq$G5f5qlB6h=PWo^K4UBa*yXbQOk|_cm~+G zb$(H4@ghV`xo$-zAMd>F-+qbjJX2IeYCb4y)yBUVV!jyMRt94}O}Qd3IJ%wE zE>7WU_Ex+HP5PB2kJYquKQm_@$d#xEkSS-!wdp0vM;nL@3w|Tb z88o_n$Dt4M9peD}OwF(4jiBJTTeZ_=GO{>n;OF-9_L0Do-RPdn*YU*@P@KdtY9 zOlHc9-E@$2jF3A1h&Gp+zE$VhkN5Ovrvx+G(;))@*i5=2BhqKB04^ND4!4F}KH%y` z(yB<$R|U>& zzKhukU5#U(as87QkutHmVq@Yv{ycSfANOzA?W((MvQqwSUn2S(Gk~wYALnTFASBr# z(;#%q(=Lz|SAp&mxP8H}-|c=AYNlk^Yeyb3`9Aaf?LTDaz3HV<^$cE8jL?6tBcL1V zPxQZe1cRe7FG!=GdH=1)8MKz_1^kYkF(TWmL_cCbr}NZ``zz=PY~z9PEsDog+<5Em zDLwtA0Jc0{3&z}~J_ocxdvRjL8NXWsv@Y3`gZ0gFVg}1b&O=7EZHqI%~VNZK%{BkKlZTswYmAXMWcrU1HWpSG?3_^vDS-idk*; z&_|?fASG&>#ph5DIUvaaV@;~zz1s(Qw*sa1yHjqOdx6aMszJq=yzhxS=}F!C-PXZB zfaZ>Bkut%o$eEGIlX2#O=h-3M^QGgF?@TCo_poj$jmVvIJ3w9LY&Aj)juI}}v##DA z+ti|qq3#Dh9lMQBucT$m z69HY>9F5Rg0B)+;!)3rr_!vr7rpRa@isiED|QE!Sf|k! znGSwc&KX>u0C8#-Y?hRrO56GtKlBHSF}snM!+r*0@8F^rw7q}*p1lnkp2#a791599 zDTGIH9+>1iG~vhIgNosdZNWIsR~++`iQ8U)?syMmFPxSI`&aD>+V`a75w$Hz87?Kr zfUGb)#zii$>uGR#-piTx@j3o>R{m|hSLVNEcRB*otj}WebsY}Giwr7G*q_-i z00wFD5NWCCRf(t^A}ZKzJxg}8&)KzzqZ@|kyQ#KZ zI7uq*cUj3<`O>##Jwrrp$f3a)y1}CQ=FBYT^DZF2DwDen51<+abLl?vXy&|jfosXy ztHZM#QTymZDl1>nPN85He1>jv(G7V9KDg&9{!uU%1P=TC9VP{pcdaHu)_vL*{#!UT zJ#Vqhd^0bP7K&)C+jIZji(20BA21A`V$1EH2LA0T!C~XZ?_IY58Q!}k^34P^AQEMW zb)4-KcWG#1j{3XgEuP;QB-;u(BlvEm1+f;%7Fn54G`$I0>mTG`C7>T4nK~yAySFm0 zOHxAq0A}nvsgt_wgk0r-;*fYT`J0zP#Wm9b*z^0foQQND|+bLwdBCTK_dsws^k*Bx|fSp*8uoG(_y*mbz zkZ<97PPPb<_6^(*S#f1{E;VeFoE7+cIyIhgY$U1;WvpnPMulP zCs;kU#9Z6-33p5wVg0Ns)Ss$ovESLH{=>OiW_I$n($Kd%X-EI(hu6DL=6NZ-e(Ia-2cwb&iC_c>tJXKFBvtARbH1d;wjk#$9bKUSc;I zi{TZVavM*u9&Sn)+MIw#l*_V?A*VuXG%Z|=KZ|S|bu1b@IGxxvsbe4w`H9wkhoNs_ zQUQ_-&_|NZ4&Euym_uI6Rt@Vyw-`z4PMM1{)LD@MR6qpQJ)d%AN+`aKvB7yeytCy> z0br5B`Ay`HootdsOUSu3072!GGHk>9oWwkfr3W$3iu6~2&JTk?PE37?W) zX;Xo<qAb62+FZv441bWZk3SxeVnSdF1u zjCPpJe~O}GV~cvP9HE`@3}O%=%o^MYt@|XuS@VIj%J%KZfgu~ZZ!`0E<_*ave*K$dFs5u;KT~_|En!Lgn)%^N${&>^sf{%M7XpLDk;Y&9mJySe)UqGTcx@0svPAD&vp zd(s!yZY1AR%v^4Ko&Zu(ihBq~q@fm2Cg&-dH@WaJucqH#-pAytgqFJR)UeE}4nMBr zr=?NAb#Pvub+`$6Jq}Q{wAjeZq~-9!yXfRF`M=73jnS_HF8mC*xUMVzYiFQ@Ka*%O zr>>#?qPgziQv9pRm)j3eU4JCymP%|RVWPYsmMn1Ts4PhbZ3j;0mG6*g;O&9lQ86j3 z4>F4SQUF$Fe?mr@*weYnn4}CRsm&?wL-x4jK4?VfHDi{BrwkK0iy436BW5Er8Z4+C zqUan9b3E{)ZE88G)K8DL*XU2BSZ!?lBY;Z|JVU;Z3^{`DaPmXu5YNE_96U)rifl>6 zt27&`UoEDgi$Z;#;AdZCXDL!O;JXN@c&{aqPj{8|xfWYS|MYWQFGy=cFVDwMoznm! 
zy5Bl?`zNwrb-irEBSW@q(RF|@NGOE-o_4agc>Q`o9 znz$s+LIzYxdvEW}YU;K07CkbZdyZZ3jbHrZd?a zYXHS`rMfgMT2SS?2j~MIZLm>8Kh}U|nz^qCQT4OUiJ$)AB!K(6+S~!w#=sA7LsF_3 zu$i5ZD}M`p6Wlr#_LCM$laYR=x^1t29#jx<-q7>i=`r`oo4U+b`kAV)pQ~ZTUY$VM z__i{_uI_iNFsyt{RIMD9^O^{sL!EWe@5Vwt<#I}?`u2SN^l+p*tiyO4rqa;CDp_LL z7*~wE*tE%kt+u)@D{emaIqF04jwrZ0@no(;+(tEeu7?;%b)rIQo#SyI~w-wNZkVtB^%^AA(SBRKeb2ABc7UL9pb#K3Uy8V zyEpr`qu>+KwyB>1e%e$!+?codN!N*u|JEp_4lj2>fJ3uYkw?w6?y?{S);q48-)5a} zqC4{+^As^M$L-AObHkya&xDXNua3NDcHZo=-iPIzdNr6gh4 z`-h_52i4>&A|Vg zA6&O7binw>%spL*+WvF+gObI*XdCKocY6LKbL?74^i3;|yx#xKAWT3!@~sqqEvXs$ zd6n~Xr5-97hNSR5({JY`Q^Mcw#^;_pSqGhIF}MjU?g~8{Dn{kLB!0v6W|xe6M4JnA zrhb$UZ1U29{eC^GJ~RBLWuIbzYQ%2D5)_25GR0kF=b8cz9V8^5zxgyNbOKcS>ITNMyHYlX(ojel=H)Gf!R z#y8PIzG8;uYX!hu9|Cg-z@>eHmgWVO7om0np%`m$WPfHIqc&vXDZ55H%RxELPDjX4 zfe%3?+0|(L7S29(6wm$=jIu}`$x`G9}`CU^in&r&;9G1NL-@J}WZEwGQuXF;2R7@VV26bO84C^AGvgJKf}E^cKZM<@gI(b2;ZNkx|IF84hqSydX!oqSV81;*CZm!gV!m zrzu}iK1lDSgYoxbkvD+d;Ina+J{~`}W&+tU1TFj@?8T%8-m$1{#Z*h{H)tUKIsFN@=`2Eh z4Aark@I#A|NP*=v_*-rHG3b?!zz)SN3Hw%W`Ih_g+!!BGsd6O3d`7z$*OwC9`N~h% z2FCnU6O$_-L^3PE$+Mg5>gh}NPCuj+oV39!9Y)o6?QdJKN@WgY_;VToZ{^o30i`B& z7YBVcf3D+)Km%QGMI9y;q(Rqgc?o{h|LUdebs8;RV7?f$;Wf;O?Nmt+!qIT<1a1ho zJVNs>e`9loUavPFEDc{sxiPtg`0wnew~rZ z2VYY^`)_Gh4Be$j3UYoU-D|YH^ZA~ktQ-S@RNcimW|F-_r??V<>K@?VHLb9IYvjL% zDQ))We2M&UCn%V+iBK{SVoY*8Qa+L_3o(^53pHLjk;Bf&TK@Vgw; z$WSLJ&mTr7tW!T)i;fXZCR+F5AGb9&Bp3DcH1lyY@IV96CG$y%5q0k?I{yNOrQ9Q7 z0ka~aP6gQ(w1-#MHM65M+$P<_ZVys$Sm4T~B8cNbod>E(jp4DXycAw+HVNbj#ob9| z7M|2QA;WzXSEtC{63msYxHrFF)$NIKPv7hc@Pn%u!ld2Y(D`*Iuna(RL<rKa6RKrCNj?7l`c3b`#Ej81bISfylN2abP`eg`k|#l~0Wa(1TZcb5DNk{2FR$~dG2A-C+MzFgwoL-q zshyoRSN6V%=`>zPZ3Fz>f35z?JuTJLmY9(;kWN=~Z9QtfS(=rkfO?5?kdp;6{G0G@ zc6*S?h8_QFZdsf|N4FX46|5UnhLDx?u5l9bV_VOJG#j3ekVT)W#4+U|%ytX!>AAVK zKK<{Cmbx8cWYeR5nl2JwLkK@6flOXCJT*2{9e%63muTY__=WjzbCL)6axoUYb_9OE zJ(Z7T@s>v)bfb*-##lla%-@)wObyczGoX=q#~%m!v@{*QYNyR0@@sJI(>$B4g=QJq zF&%MVIX%orX8d+BJt7rdL&%z!2IlSFsXhcFjxDQ1VyrUv3Z%z*cN_e2&sJ@Z7xGkjNRh$JN@^W5Sr z0#utsWZoUyS`-5Y;xmA#u{*8Ds?DI&nW%%;jsAoCbRE-W@iiGb!rF-<*D20&MF|>Q zo_*RBwKl8F{MGdQ5+;FdtSKq0<7~{02v`hF@pmW# zkU!xU0$stxoj0OWID+YQjp!@a?~Rju5Rc>Uxo?SBiIAIhr^>M>S&F zWt)(zRNCo9O_7$}IkuPbQa$})zb?FHoZD(K{U`U|?&MJ>bwhg=WT^Go?k|!V@Vi#n z_h-}Ac7TJeXMC7#s^(Xe5$E33_eO3Xx3w6l!ElfG=GQN%KNeMHmjbq$5eO3=h_xOElbwopPx6`x~xxIjY}ww5?vZhOv$sXvAGUnJQEQ@WF?C2qUoEhEF=%SM?&k=SBN_bMWMDZe=)NVQM>nBk)C>M?$FH0CBC#y5!$ zE7H(w`B-X`dRBFEZ0}k@aP8@FOVxwEg#Fcom$hNy8>hER+Z#$4C%dHcQGvOJAF!OE zgDa;{)g)&9w1W6ZA`Vmjr}-#rD@<3u)HBhhd^5f;oNwg_1Ul=U>|3s)V7FD`1O6Im z9Bxljj);5}L|l`0Rl2P>>$%6x45;tNem5gturPIX`cSNzHw^cyscI){>0+}YWOb7< zNsId3v%YvoLxOJ=^?u%a>gZ?HUjj)N>gtQ804&YEyPlPv=5b*zS1SB*AM>F-6A8SN z2c@W;pSRUY^a8vyVZVdz?JaI+1s#+;y{bSsj}Xvfg=KUUOC0QV_Wsw^jF+?Dd{%+1 z!#uu)cf3SS68iP!rLlDY@Rs%{I6xMC1b8XrdQHRVnVe@8Jny0fN&WHXq?g?HK2wzq ziSvPq&V#Ts`V>O&cCUExg|TizgZ06%TZA*qjN*>@?I2T>Fp{TrIYY}0 zTIe0t6IiG-aTca>uA*$*DG7-^DFwDWrEoJlR*nKh4feI=9vR8%?l^}#6?$6!#Yru6{1Sm58ev+Uq!a5a$m z#NrWqR{En@&f(7IbZi5Q=-jf}QWpMs=__2dAhoJCy7jnX&57!R83;JVB>120a>|F| zgU6mfay6FsF=M;;HB+AD+|{2~4CBu4RPGhYYOOtQlgB&$OQ2;dQw%y9zK3|f-de!* zHk}f#u(J=DRdE#$R2Yx*MxVf~YGCTDF)o_}A~T(@j7ekf&GR^io9$y)6GEx*(Sd(g z*R_sj#Y?Hdtv#oka!VImA;0KCUr|}{+R8!)A3qlLrBUQ$Hl9@s+g{JyEs?tHxTS+! 
zl&Y2`vBT#jlUm|@tvA!eLaxadxLVLod|gz~OSkz!GZ4qq1~x9kL^-eHU6N}uM5&HI zLKt~MOr=4_@DV##$q???A9)@{t2+k{u&tb#H`!FfwLF_1{SskBA#-T?zpZ%Jn4Nfa z;#9M8UJKUYRF*%7PPw>$aKKM!QXCsURt@D`mp|mV_8xVIYtI31a@U><8Qz3?e=yzD zlE31u)!Nmao5c##^Ch#V_4(_ujIY$G_ATuVPM#~xoFjPMTLSWI!UlVx$W@RQg-%l& z>YR`aN>V@4=!wv2dJet>G<*uM>)nM8@_%R*_A+o)nV;aIqQIe@<6CuECa&O;z1EI* zJ}Y03*l(!=181i89TqkB!tnz4W7{e!*qi&61gnlp6AR{I!Gr zV6_sC&m`+;#daLxi=I}AG^4xNL0C9{$lsV~D2dZJOMc~~n|4w8Qg`rVmHN%8rxk5$ zPZh>Rxg#wb1{QPlCb{6ZwbaZFnMBxKKT9xjynA@KHst=Ty`OHx1*->{t=@r70TbSH zKKX19J?zL-272s)(V&Jf*(b5|qZnGt5plegj+WirB|sE){^@6SP*_#XS4Lk1_eq!u!U)8Kiv7;*;qE6{W!_VbCGf&jJu0;Uv(GA#0-nJ%I!EpCuWI zP>VwxX7Wvx4bR_8+InoZ{!&(i@guNk4!nnF#!v^ig>m+>@=OnENz%( z)djUCf*Vg77t{r?nmZ(=VpFy_yJI$LMB2Z6ejlrYJVuw-XfBN{bd>0{-YCGbK2VtGZx?8fonTuH@_ zQ~K3D!xYy!qB^p2c1d?~nDf}7rml2;$t;#Uo57B4Hg~eea-;ZQ`m5HN{=_@6Tgn< z7Y*C$A@>z(Uz^T{x=d?1Z43KUf_fW+owh^l`-z%6cgKop6(sv7DIATl*&n?=NvVf( z8#|@xmY%4AWUVq2=lSAC_g`UV>2Eqv$&tMTFBM9$Yy-nWk`!&-qXE$gJx&Wll6EXg zzsGxx9pnw3cTdyiWRX+U_55NUBwy%lfF!3`=gBEwe~?U5)Or zL3=S0ugdFp%Y95q?vV<}TSkC4?}L;hV+`BlRV;lT6qLeFY=RB+bX~_4o70MoOOS-U zLYK|Lox9U`QHj}0=*EM2Ek#15gSeyl4|2lH(FQBc20D2cp#9X*W@PiTHfT>P~Iy`*O`R=-Dl0b>o$go!DWPXOy(5WTpOX6n#ma+?Q6c$A#@~;1=!7| z4fT)F6*EFpBA*8jAf_YJ*9vJ&-HIa}KgQbQwRUdD>x*t`?p~u?y*a|^Wl<>S;i+D3 zy>ft?W9|h0L_wm&ErL&CouP_|B5eq1(d%|)%SGm>*wiU~S;}oEGW)wAi8i$0aLw5$ zQpAx9$x+Y3)Tj#B2`G+FLHS|0h;mHj{SrC$zu9dt1b-?xEfuMsv# z3T+dBpx>9qmsbs2^tcN#OVB=6S}OQny&sL|*clF?lrviT7k?xNZz-|XAYW&1ZTf?T zYEV0(rKBX$U%6V*^IPaN)VrzA?N>%DoujUnt%2Nj$doLlypNcJ#sK(0amsYVxBXTv z(83FEh32DxAi{(7=tlFQo%i8`Op6Rxi^~MFqvIRvm7X&b*vTQ20|M{ipqituf-92bE2budM^8{=h z*ZP7VkGc|9Mj1#N)!qiDZX6ui%DjJAexchx;}w2?ekYV%1M;XBZDplor*CPX@N?NH z@=tPTqWLg(IEaTGl24yZJM#VU_W2Y1uwTg=GWPD(L^~d$gvbdf5aLU-enm`3cBXE; zC7K}A5NsR&3*R0O03gwT_Kl+Z&Dy#+|9p{7FE`HykN8Rz2cJ@(a? z%kLuN{od!9bN*(d`4J}nX}EG3WF#D@po7>;`s!>aX~{LCRU^G~t^bMkz|$%lH<


wwiMhOilV}g z{nn6%6v9yd@{!Q0Oo`yrnQZ z&~cMJNypMVB{pGvYoz&4HT!&4 z^LW5x0PB|$u(|UC! z|L~Chh6Tp3rfP+df1M1Qxdh*`JdRCeW=l_a)W$Euu2BEUS<(u=7x4ZQAmF@a`?15K zf+}ZTfeuDw>VlU2tt7cQU&5VeGrt{MT3V0e{{HVt48E!D@RFX1f4YUMYJ#95T;yk? z^B+U;rgNm@Bi^0!4vSY}qNV)D++wvHt?JjCOu8EPc89CDQ5(%Zc^Z`+bS}rGvR6>u zuW(C*LP5Zp+~twq8Twg)hr@9jPa{RsK|d!d!|nRc%?GpFPLL-OAkp}+PAoYSGR9s5L1z%PuTxUi`ibHj}v z_XSJ%RYZf&%`fzBL@b0FPWxQmkMa^OO)%~RX7Skg36CspHh}}5&3w+w+CtwO2HzZL zn5#2HT{P<`dhf$E{a4{)W_HKD$Y@-@ex}q%kFwPJJRUQAf$v^PcqTY>ZH<`83HW$y zXfOWkc(|cG{MCIX*m#kdq5od?^Zw!t7VSm&-<4wxle_LCi)S7b8MSl7hFB=fNTB8N6K!?)8sT66}?lm#xRy!bqUKy>p?RHgErKCjA+X(}HARaH+>O_JT}B51n>7E7 z?N}ty8_8L~B0>%WE)Gk=uBn#3#9KNEoc^nW1efR!qfH~vGZu6-!Y$S%9zy=X{+KpJ zd%8thyGjGYA1DYVR>_E96wJi$IxC7>f8<;KehcH7x^mKQoU|^w%poLK30Cx%Osrv> z|Bf6c##kAAVB9pr4{38+$XNya73KAUbE(~V|JAD?M~g7 zWh23n|896KB!>NP>srh}jr366k}MWTDso7zF(pT5pN$vHnKj1p!k|I^NwR1JcLW#ET2bk2CG`@?LRl4jsa zKh*hY;XtJ>{f^#~pQ=~48XOY*6>|E|2GxJcNlK|Hr!id;Tf2c3I5ojznjaPcBTncd z$Cg6~6HOtjhccJ?Q~i8Tp$2&~VGn-zt%{8EV@=KGU5}P^6ln7w)@#x;#^20L2ZepX zs_BP)Us1FA9UnW;bJ^Gwv-3Zm{U;dXrHDfh^*iDI<(sJofuPw`O`U?XOtu>RRLbbS zT(<};TTK48^&phPY|LPAKO8|4rrGxRKv#0@7FuGQqvV5ItZ(I}^@#Y1&GgdN4{fUX zT_DR~8l~+6&NK}wQiiB~j)xr#SL~c^_!rhbz2jkx1tA~meyHPawwnF8ZHcH5-^&G* zCqb${^w~#rZc(4EfD@op6$L`%{5uFdD`F}haNf*%dk@(+a0h3q5vEpn1c07}z0vW! za;_&x=bMfq?$Xj&rOGjP-#d zAT*rOXE(Bll#8hg!&BAHGQj3gOG9xxmm~xBSbw6M(xGX;q4=+(p3LxKX41M_`MLuU zyh`Wf2l{rU;gnay+pAygU*UC}s+|CYZbX}+qI2uy=(e>~&1rs2zwYmW|CSEnx$}#B z#ToW@Fb8GXNU==$BOlxjrn%56srV?~DM_LE*ENA!o6=CZgOnV38Hunh>R-22?AR1) z9#OZ#@q~J=ODQ@T@$XdZJQTAC%V5lhc1#z{R|*`&Mq3)r`KWpms;Wi4N`IT08ecb4 zof!(7uULOivTXG@&^XF|&04!O4WsCrslS@VO;ICPLMq^V*$!GK zVyelBYc`ksB>F<}5e?FtY_y{mBUqVV>dtL)CyxOox@>}^Vx+EOM}sVGf-Cw&U`mR-f&-NTA+GO|9f;MDhD z!K?B|EAmylTXu@>snr)PYNDP!lZ&?n!HkiCZBO={)Ahn~)>IGnwa*;A>>M@%t)Z+ z{<|0QYsY1+&RyXZABs>YbV$iWkU=;sALc0oBd*DMG3cR7Zs|N zTCI3>54oo1sl#Ux9P;w|DyC8@~3<8tE75 z1HF~=5{GA+Ri7TEpxcgR7(os31YNS>tP9DF!w{liXNY04v0J@A|; zBX~g!ayc4}q?{4)AlH^XB|t4oV4oxeKKm`VQS>?EzP}puciVNG%EqT^>l4b$VP8(D z*gw7Vy^BPwuitUzK-#K;8KJ3#1KY|+k89ra^HR)q)Fbz7YsmuVU`4#dSl**6V~M__ z=jJh@rUQjUy_rAG z$xZ0?&_N~5C8QQ05azjhEx~d|L3AQgP9$JVLBnR4s_>5ZeMn{#IKAqi%QuUjy*rb6_iZ}`GdHb;a=FM zDbGuIZM9^`1N7jgR?Q6TwJ6IRjrlev_zZ@ZC319uIvuvG?9A}K0A9$ecFwUtg;NE|GjKAgZ=e+G=fU(9|*%yUeSqm0fo8XKqKWQ zEHn->o~}(1lJwqiP#+zaDs#-+4rfUnMuteuE6xSNH-762sT(Fag#JRbi=IAC5jHQf zME#sngZ0u*%cL$c?fO!a7fAq%*Ln-?N!@xz$WlQdd}M+Rm1H{{Ctb|*vV`p--zxs) z81YF=O&vOSH(DZr4sO9_bMFB38-^7i7n?uW*>sP}?A+hdg;fV+?z0S@`F51%dfnQ% zibj~FPZO{$cYv`xDY<_ha%Pj~QI`Vfi-0hx)?3ck1d)4WF;I)N-!+xzKoQi2qZ${J z^ho`7;#BMxAqOeN+>T!PswSDH?_D))$9Z1-K_vL&owG#TtKKcIagaT@QioSMf zBX%_Zh@9TU zRSxt!Zz@#7)3zqX^Dgfw??gMlJ&Yt@uOmLQB$r+8^#@&=H^%>pM#s;8XH$G!N1o4u zrk~#aSX>qSiZ;6+lq%k=P#cWUVcw%^sDRvgK`i>L!2k~6whazxG3I93)WwQZuda*BCp=tao}2btFWaU*Oj@c-8=dyZ{ zI7nnSpjBY|jeL=1SW%V#gi@zn0C)5ZDF8aFJvw|rx*AO9Cfj@FQom z_}t3zB?7D3i+B_Nq+RKaWON7mwRqPNhdZY6tIXI32R_c&)N?6=w_LZS(yg(0vgGm@!D^@r-nG8fTelP1p^u^;cKNi&sozx2Vz(Zfx{KNb zM_OAm`Y3(q2WCTV%cpYw^Y>}#yVAf@LG0V)2B(kp6eDFdLXT4WxI>HYYzWu$qn%_A>qec_TPu?E?E71}> zXjq_+xkr3d6Ih9R@4@W^a4srxc7jA468Pc2H8VcKA>ZgncIShA4hhA+QWieK5i07` zVeyb_)DA&JnEPMXY)y7ez3HIz)3D=KG5ozPR!KW7EwZ_|`+LyMuV^oVqa+4oFK-@w z9M{7(_fy$IN5P!1Erhb}+gh7aXWp)^+fJ+J`9AVSnS9MtsD3TCiM!j6LY%54BH}L_ z-KY+5&r6*%n8^H|frHVD^ z)4;0!U60rlrDW0T+-7@onef#-l@y2d5L3R}u<}ly|aB~6{V{`VRKL(bJlOK(p zjxzg8J&)bjDPg8`6Ub}f;RDf(y@xc5g7+F&yy#JiN2nA)-#=k}>K9!}IS^8;8RQo< zw#nH$$g6p}w-%R@wH+N1H&6*rNwU!nOnScUapip0tkg*$HcjayOK=taTqlDiB690; zBIo5Ei6{yLHyMJze5MO-f5TGdy&_60%#j&RKdy?r{BvV|X4}%&_Gw~p+HJ5}W8`x{ 
z%%H!9_0jDqPDqQzSnAXLnJkGtt$P@Bp+ZNN45g|7pZ%eVdz#6_p22dvZ?p(S?@C0v z7jI$PChc@=Bbj-u{+i#8#J1xk&JRK6dz97V*&_-ELpR{80dq&=Ppv_41FN#M2VfdJ zyJB|h3LUlb1J%?hS*Hh=-u=4zywh+D{=|(@)1%UfCg$u)x;zd~NiKjqPD)EU8JgRx zNGYAVn_FiVQ#)gQ+m2hI(>OX{1zZqRSS6Ld){&tmsOK&v{xb^cvq7O|p9iQ!dvLp< zPEylY47#@ep#V`-;&0;h9)ri0FalpWe1{SlFCPzB*1Fr5^+E z;{*Mf2ui&S+|G(-8F$H+h+6*@& z_^wS|RNzVxNY$J3aE7HiOt6b$!tC{2kTikQ)@wpOwAH}7xYYy$9z_FHh_LSu(3(2$AuGQB#=c5BWX7Q^34uL|K zHTUNS@q%?!C)&w}ajhn+vD$9Zrs7Jrm1h@_h(pB!7y@jd;#r%G zoEy#*9QaR8tJa@!fDYXhw{!FJ_dAXstK~WhO+4hD-9=CP&3e^U5oCkzdn}C3wJ}pLCNi>7_=B zQkZ(gu)6il#GSMB(?YewhdqnSCmLXwa+d74)7Kt%aw$5Pq2;Y%Q-g7455~{3#=h#X zLic$nHR?g3{*3L5@{HddvCy&uW=hDgrzqqWl}8#_rKon*4;Jaj2E&6Y5Vb-K6I9Zg zGV{{Dqi@W_$NCsxZ}IJZSG_Xl2qHcKpX}f{0R4{XI={8~4Wh5P`pCW)39)T9NMJSj z4b*ke88d^lapwZ6Zkq`T6E6ogBYqY2f40?YI@Va7{cHj{9U3nezNdiL{SICQyOZv81Sf};kA?pt^V$s=QXde5 zTNdZbxhiN@M82GkD`<;DYgt~NNv`Obif3&*izW91LJ1Y0rNc@(KaI64iTg^#Ja5w8 z37ComVlK-oMVn>EWDO)h07c#_#n7p=WGU~qRlqB@mPv|&;n`H2cwbxd3}y7^soFuW z1ZBLUZuj;NkPLxxYK_j0h__Mx4s^Y7VmxuzvxQP}%~DrQV{&IK!cCpMuMhL-gP>Zy zv&w%XNA2NaKdJ9zJUoW!xd%G)F%U0cZyUKvBrRBCm_E26{F0r#wr1f6KR59v3*o=B zs?+>szEBhI6fC(JsMKN0)f~+nIG*qvYmir{4auzOgf08rWFTdZ^~i5TrpF)+Yn9$P zW8mVC;O)}pN1+|!lR9@d4|!heFi$(fMD9XQR1eQ=)a=ouR6kZfAlGV71*9Ic0b@1$ zH%2rW0oDf64;uQnn=&eViudX15<#L7^3SFt-KC2y0W zjM-4<(5&3b{vb!n;szkiW#&|cuW>JQ*g=Va{jHiiE?&>71KZ{j-3Qe~W25Ce_eqoL z{dEZxL{Z&r#A(qgHM)$wbC}5GyLb_wllpBnw)}lNAD@*<3l`^Pn6qCR?Hh&FIoa1c z0Vut=rCsud!m=I^HkTGVG@>|X*-bYsc23*BBVp$r6;K%k-#h{&_Cycma@phl?jl}| zfR-_o>F5~)IE_a`*uDxe^P0I(p@rFYBy0AbjpYQ*Sx0^>mwP$Q9)I$y21EpZ@E*?A zTMa4}=4VGwzjIxW3!Nt1{a)Ag&%IyBN&pAP;*Usg5GGJ{uElm!Chhh1a9qr%qoq`oYWg8{%d-(`HTgTV*@N&hgwaD^O!Psh zleP7T%fWFfV<))p`Bes$1uS#T@5rEgi;b80%gqWIVO@=yDm*H-_6QDm%|o&b4h8zOIP*v zVd)XDJvp*hT7^h{a~O6o10G+Hc{<@0(4MfT3E+Ws{xB&@ffWXKK_PXY7ugER2C{rA ztvTq?`^^8lIq!c+)C-TzUssz*5tvQw!X&jT)U;X(WFubd8Fs#==NUpY_WQf*cECaZhKX;&anm2WEb zqxvn+AJIQU<};2(*S}OIaBxqQhS1;qz<387MkME-^1+G(IZ+|&#XTIS4MLPc#_)gt z*hsDq2ACanI2}Oea+3J9F!Hr_{E?d1j8L!`*Bu^F7hrnxg}VDhiTa(O+(D0R>4Blp zQHpz{Dmi;mCNOTsu!2%Dwbi2`YJ@$#I{Ch8hkAyzxWnXyP6`=h_()U@EW|l#szn;u zA`FJ3p0~%J(bJc>JOaIdnbh6S4~ z2CQxCn`DlV+@G2IzRj@RWs2L04w)DET`XE8HE&<^yKIIMXz3Y>v#F+lbAeST7!v4l%8>5zut4^mfm!Ps`4(`6?Nh= zod(OEeLkl%>VX#u5f>EK{1?@c@Q6wz(q`m4Ao*g=>$=UoO_N4@4smA7fDF%B% zhJB_-qNZNM`wxrx6(9UAf42U*g<=LHdC9Wxv7@G_x_1> zjntmx68{RsBoV{zVZ0|iO|zRZ!y|! z&2pW+(eu3>h+P^u^z*!th00j$)OCBhZ~p$mg44>OlgG|>WLM7CdDwT`E>(cl=(is4 z;Rv8+PDQXc*}^e4mhzO}-pL=kZ`9SjPO~?Ws-!Fk{~-li0|5;uYAMb(A8mYz%`d_d z5%{Rz9#qGnnCT^H3mPj62qG*jNUIw6L%pk}Cp4*CNi3FqONdH@XtZ$5Zc0?rgr~HI zrLK6I4D;=_U!upf@}L=Qp{FCy=&&0c5b@`Wf}TJoKL|r=;BBT}TLj+XM>j{IhILdJ zf-8fw!wm%_kp`Q(Z<1GDW2wI)6!9W7DC0ry8gqF5uipJIC7N>-t~Dk_y|1+up+i&D zSQ?v!Hf+N4B2BaGxDHg611;;QJ{nhgK$5`Wkk5fqA*@J;p?qrdGxfp%H-c@f!fYi2 z{xPf~k_g_pBq}5zDnjDZ*xg-T2y~$T!nmYNHG4+CGLOd3xZw&uF(TXv6Z6?yv#hXQ zBX?N+py7by@>A}%o#$Y2$6FWXAx?p5L6Rr^P@i(tZzy)*_^Kf_#-X}u62#rKy8G)? 
z_g=-I`jLsKAMT#YwaJ0?hU6>qZpek<{*K_R&H7FV_^-INsfG4s#VSz7vlvoQB&oLnwdhIq88H{z5NJ+sp$Y$>vjKAl3V`(%EC`eAHU3u zZccXMjNAXHuq|rpzrhOB6?NYyCg}0q+jGzjATWNPqar9(yh<@kC}hQQe^1w)2|6-g|{pGhJlMYBLjW zv!=`;NEAJ*C$u>=dIMnLI8zscl{sw|bWMnDF+2${g}a;k5m6L6qkS-4GCB0%IN`;3 zUaH3QO>s#8QH?cj=6QdL1vB(^fICkfcwl9iH-=~%R`ynac23^>}bYBt-9lS z>E?J83%)|B>nxh8MXy7%>TULWDV!8ikt#>74t)3(@AQ_+X|ars3>u<2CzoMGnaeO5 z@t_<)YL^el9&PjeoeZ{)6*c61Xauk~*f@2{!^s_|{67qA@0St!bQJtg6=Q%OKG~*y zNgZtmw|~czFv}xeoKn}Fa9=qJwwHmj=k^#IQUho7QSw;!fX*iB9@mdfc9bIIF0VwC$2;CF0qoqotYf$3^rMMGY4N5~i zt|S{~uQwl4-r6GgHn{?EY(o%>zV!jY?k$)khbp}sY_a-KnGdJZvK5?ZW1-POzHI|~ z0ByN4Y4dE0Yb^S6zq%G8l)I&Zm|(0BD?qG=TSpo$g6jM5KHAR?zv!l|-@A*c4m9&Y z%#;#+HGZRiJmL?Ku;hl((9n(*_I>@_TnJU}d_GDR5o_ikcj6wPaXrQFZy%-v)Pa@4XQ#zkK36 zV0y1M##4{hc)#z;UqepxE!XhgWC3E**EI9I-s!ED`Oq<}&g8td5C}u*yUDN6ME!SB zWHh9KU25|-_1#fNHZhzYGgAd`N?0ldk2Ntg+OVBJ2Q^CX`j z8JeuYU%4Q04V3hVt!=tI%Dg7j- z$GP_SRn8%3&VKv3fr3VL(U|ueAMMXNy&<75REd(Ikq0`i-s+t;abjQ51@)CVUr;@9 z{A@%Y(7|HZ1r)p_4%tQ59VY$eP8ewA+ce!#_MIu2Lf%uL)ddlQ7i3W%Da`Cq+GFY} z)3?=i)u@&kwCtw&vd;4WWt=am94 zF(GnCE1+q>EU=k~!FeACOAe%kv(0U%nwNT$e0I12-sX-8jQh zzd5EI5)`ebVJ$OTfk@G=5%`@E9_m`w7wMKI5$U(yv`j*3q30tKaKPr5LmTYd;&iv4 z$^wlVz1!PK)_mmu`w@!O>T{xVIbv*od+H#q6y+T_U1hyXRPZ%Nb-A`pF*8tV-RX^a~ zqsRvX5bO6wlv|mR8M}*S2~e4ly8(ZaNF=e5Op4 zax*%!{?0_!JNuLX>ujzO#E_t}3xO`5^+f`kni5Pc=PncK88R-e?w9S`gF*H!3ZUDy z&pc*yMwU+trA8ZMGc#3a0)>+XkV<16=E zuF~A27nxRC?=?l#cpY*m9cz`RmlY^m z+d4?Me2&KcUt((O3v$Zt8FC9klhS$CNJ+GYEzHAI^&EV{@ha_}W3 z$4SrYsKZNK$mTcC^R?zBnZ_!|u8UiIJ_&j6cW2gOhyfX8V*sZ=+2>nKX0n6c%IZqg zJ4lg!Xs>#NOIb+Uqcka}K<&L}!Bi*pUtv!)_VRw3g%bnYQ-dUSr*)<3eLEQXp$4p+ zQFci44^-wrzN%B>Z2SNZNBsphRrQEu*Gml$V3ztIHBUy@p#8!tY(eSDo5e66xDCT% z#Y~`obp?;>9!c8w0&wkY5Z#X&^;^B7&mMGvPuJNN6f4-y{iA(~cc&FH2HCBCQMeJ= z!mW zb10>7wc1Nyk0Xh%A@Ca&w)VCXxv(zEV(PoH-P!09Pp==8Greio^?JZ%(f!;j)I^0q zs8mFKigKZ!O0$ZtZ69-C;qW#ojMp(^4+l8M8KF#_^;&TjpwR&Bl0TRJ-(WKFBFhbv{v3Cw4Guyl*=W@p?+rc_;u zngAE3Gn!T$%3MK zFg-oEXBAUxp`gXc4pZ2QOSVqT@q_~3CLi87N#a29?va9&&%*NVK2$j)GZ=WYG!_%w zmCxvddYs<+Yn5vi8GkeYE2PD4r+P%h!3e?04^Gk9kvb=mv7iRhY3sG;UyL1HgE&ZQ zsgV}~lqH;WVnV$ZFdd`rb=cf*^v`dvAvRvI{kT6T9_J*ZT4SVJxT1?8Y3sv|Is{KS z#puiqi2;~FyM21-;4)0a`P3b772;H4?-dOkE=PV9);pI&@~!F{R7kBRNVt;&3EyCE zN~bbb)4W9Q3cK*NY@PU!92O*57Q&ckWdm<-o%`rA&Y6Aol`_HDTZMC$o5yNO>Xv2f zsL|Ucw?S`{nz{Je=hg465)#v81&)<}-?IH}$$=P?y{w>rR?(=#d&k`P)&sG!I$60X zCXB@BW1Vrb@4xi6>+aKT5c)Jo8iYs)b_G1IjNNZeGK!FDd?Qx{M1_RCq>aly%;*tu zc+Zi|6{U-cXssQGZ2FQN^D3@{nHVn%XcqoG z%&R6D9nS2eM*_#}4-o=q|W&w|9dSrLwlZvLb>*O<+-uF4qs>03qE#zzPNsBh4+Z=v9 z&&$9Zo?#$0Xj)2Lpy*c-)rg3M_F@lUn3JkUe&424T)eNDD~f8cvQ6cvI;hdl-; z$7YpU)o%N<<@<#_xATx%?iKvVh|$jl)XkS8h;Bo*4I_8mH-uM`REGIct^0A`V|ucL z+^(ciG4&9=5AAi0HCZgpvd|6p^c<@GE#ViQNplrUywXeHa31~0x3>zJIDhPSTi2}9 zCZR*geO7y(627J_S;4#sz_E6VVg?@Dn-NxjK`JSdK1jGuD6LkT4g3~YG4*RSg~E)xZ~?>%%zdyuV-Q%Ek)tQ44dy_ivH>s_yF>+N|~?`o(C zzRf$-`^_1yuwIfj@el3TiYqg9WYhE@mP(YB_N_vtFO|Q7z9Ppb`+IJRU@2SEqp`Bs z97-KE2(JPV?*K%|`8ah2vrJQ7KiF*D$Eu3Cs!Iggt7ZGXZwhbsY4VH+wJpXm- zeE3LJScY}4jw_eo&cH2OC^9(}6ta-j^hDyz*WgTy#{g1euuXK&B8@k7Lk|MhhI z)JUEM_5;w%iPxQ#FyGdnySt$%C@+IZE+v^nU|$7O%CK>nx0FBz190HF!!|Yc^w#8& zA>r&QE&_`LQI{q*|I+El!m~6w(T(=whaR`C+(mG4AGo>&X|L#X+o_sSlKV|LN6)@F zJ{BeMuCTfQ=!N8&{F9;;V+DK{mb|U-ST0iwv>=9djlybVH9bBFwHBZ5%-B*>9Jtc! 
zYz;j&Z(7s2_kXZE2wAFE)4Y|1a61$#?=-+H%F3#Q}{1{<|ndA!2rM}!Zj}-># z?hT}vVVYwrH@b`5PxkqMmo+w;*aGx1Hu?`q4SOdE@#1$#O70_#9aLdFEqe&aAr|fQ zTcil!%g770D0lYu?Z2TKA7Q>lThsh6wjhZDIUIQf|LKH*PewPvR$k`(b9<{BK- zcCO(hlZyKB19yXUi}X9oz+2gEqx9zMu9fq4G@U|hY#t+tH4{wQhtXzEaQdO*pc&Yi z+R97%M*Dv7#E$Jb<2HD{BeFUUF+ynCca%Wh-2v*YQUI&UNvz`fT?nf}E%Pte>=H;N}5 zStWo(4EDfxWA${`Z4nDNb)dG#w_Cs`hd<7RP6>@DaV&z1=io@H!_#CYf0_2nMCz=I zEv`aP7Az;GW>=P+Jej!%pQp{5#;_1^xnT^H&(cvP>gVn&{2hF(obI=Bn##cpB(o5H zeVa5&?OxL^pN*Ii(McGS|Ae?nJJ#Q{>ikr`mw@XnscstEn_n~;(qT;Zwh_EQ-&@7$A^vm8G{&R$_48Xvlm~N^lT4A4s{Ur zyqKHuayx%XD(&ZdohQbQk|Pxl>7cx_Q*vnQi@>75+lkXP8+F{(Y>`W|-_3%QPNUmX zcC?gsIr6IEJlbd_$b)FRZRE9qb{A=?Nn@5?P3R9i`X*toX;H0N|HeSgEOo2Ic`m7Z zFc-p@y&}Fa;__<2B*q6fe_doY)w{<5f3;zt0%;0KTnS$r7j*h?)>q^ho{B2M1s#4k z_uAjq>|Q%Ou0Copz5aT{)r`KqQ=i2UAIf+hROND39UYO&c5nQsxUXRIL#AEUw^%z6 zUR#wNb#+6F2Ch{X>XNH2NdM={yZ0!5Lb7>Bq^VPN61X0vq9M&s0hQJOJ3jL3T&k0)3jEOC z6z>q2n1grf0&UU8)uop-l8%c0Dv+UIA+W8;?#F~6uIm14s5*h+*?A9~ut3R0>v8=-46oo%w3^Pcx} zZv6}SrBr?A_#;`0GvI08IKuzKkeq52YuSkF$##Qtq4YkA&l#nNl6_U%m2IY<0$y$P zq)&SH9n>E@;yb76PS>!Zzwk*4dQX})*HG0P4=rpDgs2lWCIsQcbBr+Lv+0<$iCveH zF@ED(>SJ=1n6G%wX^yQE0q2J1`5NZ?6;`Fa{Q13hiNstslTDa|woHlc1m&SP3_ z_BDsg8B4>j!&A#K@zEt|iK$Pzz?FgL{8}TUmad%AZ*#eczm93N5l|g5Bp0aKEF|x; zlaI>ZNSx70DHR#1$_o0an(AAPs6TMZt4_G$&H<^s761FPgdoaD>^(GlS@$Y{?p;|e zsCkgfcq38eft6_dDI{58=1_5>okrl87S;`U&XmHC$+=pM=<7_w4gB}C8U3wi!A4`G z`QxV%)6&9bw7auQPF*k*Hsrqp*AI>}#}M@7#Y4x5v@*B68CmUY-Ra}+BScn*(&K@g zNM8%cE8}wVtp?@g$8W@r+G5Kw2{6x)6WKBmBN3|b?Qy>TV(TPNEQ~y<9oY;M6@UKW zW~1ASBGuFoVbUbh#dgtVA+-J2XFn)*HMTUwWAbdyewVHmNo^?AA=C$5rd}597C=M^#Jb3QIIahAm`& zo$Hxdt;u*#F38gBUHyEgsh)0jZCe=^;kkp8?7(%32v6&a3#*#MXz1*Y4)X3iY~v9Q z;mH^ZjG^4X=~D0dekAlI#&nO1;hO5km9q}LUg0C_&+(3eXpn#?RS*3dX*q*xna9yxz}q`!2XVhF88s!v&%cs zz(uq_PDH8~QJ$&<6X89){;4>rC^jVzwUhhOXGd$EBHO(Je>dZ|mXqomLdwyrjrJP6 z=y}i4)%YxW5ZXWVby!UKByBu+@jcmDGetSe#ooaOVs`klLA`%Fc6*2AhB@26Y8IK$ zro0Q}=%33@$@wEwo$E=(+jY(*x@pTJy5LX9K3`XKx^q(08>pKv;9I(*r7yMH!?)^` zkoLDVf}M{P>Jt1ZY^1csNfGArC{iof#te*R*fXEO5djqvHonWDTQ0K|hF#y`v`%Z0m z5K)DEjve3#;|#iDZ;BbeZXuL&^dbe82<8CAyQe8L2ED*?(Jv*M60qlwn?|keUYHG6 z{zT3uXnrPg{IP=llOnU*y*@WF+uh1e%3&ufXp@e4IctV_OH&b6nzaK*mg9zRU@jLQ zTeCNe>kwgRgxz-edOpp$O>7hR73GR+uU(iH0wSi=NmQ>k z3EW+I0Vi|t7JcoU5=>JgW_4!QFeMahFb*aePa6(Bii>sU<=Mkp7lAIbA#v-YO9Myo zyRS`CE~m9H`66V61-s; zpre)@UBfTdxK82n3Z=l2uqv`nbzpI z@$nZF((+a@YuAr_tp=Db9HX-`!x#y?gUf;ssHlE)*vg0e7g+}=tt&`j_tAV^*KJGX zpHrIM&OOc@O_nLr?9H7xN_mz&rTl`WaQXbs^b9ttY}H%0JBvA^?VXk>C19qfP=nVP zJ}J;LwSV3ho8wJBmn$QIy}SB*8<$`#(~O0{D*2i#H=doyR=Z1(%uL*ws) zXRR!HzgtI1b`qodYxUVj!qfdE^h2kKoT0aKQe(-HbB90cjucT3otEkZ*2Oj*&zAnRJ$uE(*5U=)CX)swi%*BPL2j2lYFgV(eQ@ z`xWZU-AHe1n0g*c8ewCw|BjC*cCOcc51nzxl#WO^JwlH?MQ-Py6{nJcAd;K)ptG{7 z)2GIT)w(Ua;$0i{hI$Kcl84H{Xx)=?@-xHkfL1fz7gVITNo2eJeSB(9vNiCkpEKuk@mkRd(&_zA2@9EUy@2_ku@ZhvL=k( zREiLavNPG2!Nk~&Q3;6|BxK)0vW|V9WZwy6HyDh4W~^h3nKS2H=fk<)bDi_P@2C0x zJl8zG`*;8Dg$Pe=8JVtbt4Z6`KDmZS=(V?_Zm7x?#oxhIJ`86}d8J2(NcBmo?uPYY zFRHx72vvNLGV>9X+qzX;M7xM_yjY>>L9Bvq^gR(GjXNgoddLCN{~T#=>ABfm-fwd? 
zeaAG$U5T&4)f8DLm@|V6?6THI7hERV{X(UFmBr-zq$Js>+O%ZWUut;Q;7*WHz%a7z zPnx9_`(O`;T>w!{XjMv16^914_Kp69>TotDBq;)|Wl%+Lkv0H#0g~NCAj=maeTbb>P&(03MRD!xeOdCI_J`fa?I#{ON0}ZNqk9Q6q zAwpMN!kY1X9G9Bhoo8>K#KciD45%f`H`+BzX|)+fxTgz{Z1W4y$TDlKW;k8(Eb@cM z!iE3oPkFh#_Z^&ucP+DVxVkK>6-Y>>I$$CZ#d#va9 zlbl_yegU011L>s#x(G&^_%nV20!w|)8e7aE%aT_Jnu6^XQ1cM%bS?gHz{cuw)bqRf z;jkA=cfTZuI?)70svXYjIz=cxMku+ggacqzlIUg*Hnkkd&FxLX=Cv7dl67|!c4|do zHL=<|_l2F-cWyH~@7JmqExqQKf@6{buTZT^nzTkNyby1I&W#u$goVf;ZF-d=s`DD` zp%=U_&B`b8Qx(jLAkDWB5C0;QowU=AoHWva${#&=&{hYPOzL#ceP*zr%9~VG-Ez^0 zS2#;Op{d(`wmGrmd!F~D>XhRv1uZDr#YKxD3-o|_wqu!FJ#w;eoAo+r zY%Gdz`Q4LkBRB~Rv@e&M7|GOuT*%W@V^YJi}*DRB&^LijMEB9N-SpU_& zEuQ18AIC*mgc6yof)L8?r)afL)-T*YeCKduPX7eSkdbR|=n4cn|LPYI^!_i+WXX=y zbx0lcQe)m5A=|(JL)*XUmBqsT_BwtJJH>L4bAj&e+J`r4rMZ?iwEc3e67${WHTEVW z&paPY5h^-%(dpB9Uv%_ykQeY=wPuX-VnqeaV_R!$s%A9>UOR0wtY!VF!@+6d*Q^&p zH-6OXuUf5A69U=F^l*hr;kq;C+BhzfO;x-@fxl3ecT}{bZY$mQ zd<(vt&96E%SSg=bTaat&@6j!4)U4d{jD2M{q+7sk134$7JVCISZ;)cVxXi&}(IWmBRAUXbqx_{a(kibC?-J%WIb=kFO% zMQbKyf_@f%Jo&V0zBMIF5rewuot;h&i2!Z{R#c-gaJ5DaQ#*eKWr~_zZ6k)RaRJ&g zZASuEcDIURUMurDIRXmz=KGTb?riFK%=D2#W#cGrxoWtH(5vL2)!SywaAu3?N8v%X z+Y9eOhZft~glbjTwACslqo*>(3zeILePq2BZ^=iwO1*B;6SNgs1e%rr&*#H(Q~TLQ zu2ypJgPxRa{3nAhdr>wu>iKvQ?X{E~IgPTlH%L19*jg}*aR>0E)|8DkEnE* zu75n-ZCrX|d@Wu8RycSu=at`oN+8wKfPh+JMQqgP{<@?wrRcjcfCDDOvbCOX;`%#Q z3vBhf5@dXe!NnHKg5Wc|rpX*b({GA6y$00tifD;M+C6@YHK^8CLVpY=eYJb)Bj}tU zSKov9q-LWJ+&z-I?Hq?+N`Pj&Zw*1WHgyYXIIs)z`=`Q?8W$t+zk7#a28ZU`Pd6de zzMpJEBF54_;#5=yL1P9|-Y~z)Wy6mBp?8k(7kjSXLmoET?s|R`KW!ce&1*cX|L7jb zJG1ql%|&I2Lr%)E66rzji9el`*S9l*$AZ@T_CCRdxyg=ERvY9bib>aJcqDGoj1Ca< zc{`7(EIiD8j&rBFfzi3!*GDo%+q)7on*y?NzXnb;aZfO7Up68Qfv92r4Y%8nb)PH*!ossAXslg9(LxrYC%Vzv4w}W!23Tj%xikp>xshXAUd~k2?X+({&`nrZ* zZGhOM5tKK?Tq343p`Ky{R;oU^hcOf@|jvMT(Wa4#EXw;v|<3!_Tem-TC@IB?mM*)hnhAEK5_80N{3J^o)*a&du00Ut%iOF zZggA5CwZC-qcO+l-lXx-6}zY0t^~?XC+HVO@lqVd9dM#equ81t@_7=gU*}{*PtZ!; z%t<=xdEpVa{Z};|smiUr`ur8EszFwywEIg(7M+hy_72L!zv-cQ5n-GBb1ExGTHAXT zCyJ;^B&Qvd+b9)ME{l3z zLoJkYtiHTmH_$Z@;t4q%3D{e&O2E6xr~Uy_%TL%ygN23Ov~itL9fEF&dR*pUP_@uM zQS&*@6@(pekRs&QPPW*re5qw#mM1WoJTUD)?Z0Z>#BX2(1G+K~m8vS0SWhUtgb}P-PWSxrR1j~6KB*D|uFi7=*oluG#4S~~ z*7BAds{h%L-AWTXf3veekc*mttp85I>4I2mfTonav!~_jU((o4%n*mwOTHt}RNAvm zni&=`|F!tKtFYj&5i=H^b|A zoZIOOL&udg7D2tiz#$T?Cz*Z`U1G=IG$hA6!{ZUSgm53Iw5cq>p&}8I>qf`hH>@p! zD@gsG)cS*F-GG{)nv+D-QfVXy*H*LDo|=`imK3g73d>b7F`sQ+<$a%ScA+A%b@LA3|iY4{FBBH zrB2;=XsR2Sv^Ql~AphFRv6MLd1_F+M-GELT+6z+pUxdW}Cu*j1U-hTJtKBE8wC2-j zfgL2n$ZKz1>`5CW zF6c9GnwTnQvGn(g!L5i$MX^(>KnPFu5NfV1;gl3lufM-8`%LQYb`B)Qie-*;8b4 z-O6zEshw-ah=l~*Wr3Fhj+cFc9B7J`%lGqL5FTjI=`HIRFY%FhS7hZoIN#gwR#<3w zIjsLq*KGh$^D>)PrQ*>3t;Mu2V78f;sZXsgaV)`e}FiR=pZM& znY?JrH`=ANtPgAGIOo;B&lUZvzT!LFcCD&8<2bxMw#WL5%0KcjiN+t!VKq)MAO76` z#i6oJy>R{2EzI~D;YYLaa{j@mVa5lTyLC`W6A7?5kxfSWjt6})Qn;*y-JnNf_OT^O}sH;QflMvYVpDz224&-ejx~DYn?HRzC3% z>Bwa=CW1X@6T5~th7_?%CnN9p#rr%tr}ivxJ=xE3(cL{u%rRPWo3BBa>$|OZ1Fnd9C|-_Gw4Bmb}O)^SI8>6<~gc~a#? zk(fG4*59o^Lg$5A`G|={t@0bJNvUleN2!#_u20tS@lhUj6RP|{h$8HRHwAdsZpl7u zbPlSN{-sCs4`?J&HG1kJG&MM3w@+sDx|x%Fnk<%`xx&Mu{A%CphXS(2Tt*BoeT_fv zddQk=#q6-Z=R!O_muXGN?h!xa2X?V(J%1|V-%GuEE%(aVNB8duT|=|wV}NEBHTws% ztiUWvMH8%MUeA8K$|51=piLFqd9RyQpw4CCMp*FePX1}G%YVKQmeJb!8nL4Fo|Z9! 
zZodV1mZ#Qe0U1F&nu!)3WKZPbTGBi^p?75tD7dBcVN26qV^qr!Hvw^Uv|FuxCqQW4m=fqPmq_WY(dUVpJPf}G=n{d zrU{05a!g1ZIoUPes{aW6$iAtO_1wz_Cw>oj6XWJFa1ohP&`U~oq2ZG zRMb48g7s4a-|te@=(?-SRa*zCBs5z!-t6*1pf3bQq_yIeZo#1Lj&w`eZw@h9oQ$$4d+wJ>9Rq7i$J-WP`%Ce}Y z%IRe^b9zNhU-g_RJw6uQR0w!bZB(2w;{WSL4)mFuv1>e_Mkj+};z0Eozr8iU@}o>k zbc7qVQUVg317Y}@Wd_pu9=N)5nL$jvVPKxVkJSgB`wptQ2YQ$v+q9V*i&ML}tT=ot zlb4faT@ZOk>33?z?&Ct0s$r<9`|cOY+Ow>S=9RR&jIPXbtqr;DbwwaC9qcS9KeT8U zEN6F?;#KM3{n?IXkiGcAkZS6QU3{u5ozM02yQsAlJ*LcrgP6=~%a;W9?;>^}ckHYh z6?BpI-4oJ@Aw7XZ7JBpD$!H7-Gdz6bBi^t{gkCW_Tg9{t^!X%L zANJdkz5$!!l2EIz>!F^1zJie>`)2Bs$CV~8BRq)$G zkQ3G6(Nk<^gOqDwi3Yl;K65Py&uX29Dfp=0`O9!ZzMHifFte=w=DPQoC9-iha4#uNFWEHuq3&4H<%L2ptSR=N3=$!K27!l~50Zll zAbCi0^bHfp%dUoDkEXxe!4G!lT871gr(~betBq0nZILUdzx%)UrW-{7f{BVhJ}adm zp!FQ;QCRqz2#GZ_lZTyxGE|jnmaaJ`po|`?w2h+fT-${&#>lV&%7e2nNEf#B!hgkU z@l#JbDYc)X^8`(3m=>nMa;r9WY`)(Rj1S9_w-YG{N~haZbVf9HwZY5M6vew6idxjt zS*K>R6K-!AZd-yEUzEoGL{1=&sVI5QaO@2&;gDU#hvpoY`pn=0eGN}4S}$6 zJFnJs@~6yNRV}mkDcGlP(scefafvSWf-N+wk+@GO4UAoGrOpQuTba5^WdG9T)E7VRx0fWZJ4_NrEN5v_* z@T>#Bv&a%hwn9)?!SGEg!;1y|Z333Pge6b58iqW<+Bx{4l)ed$%pPeTxp*3$I!v2n zbC97sAFW1N7G#rW?ETDVnlY&RfnPFh)otwvb8RLp<`$dRL-N)$w@$xqgi;)-0K2QB zSKrS_qEAaClcF+Pg6a~nqa&@;7h3(Eh#!z9Z%<@rqc5N}e(1lSU3D_9ysGx3#mzm< z0f)@a#<6yo0Dw!sbBrid-incA3yiaUHrOul^C5|I!nhXt2hDK~dGor4;L@$s zDkqui4e;WQE80k7(?0W|wu^!9EXE@p^^kB3VGO!`K({r+$D7`=&6Frj`ypi8CY6%$ zrS=W&9>Qt7PG)7u_Si+YUx@U2t)|n^#f{g!M%tT`{q!&9o)maU#pijfq|DcLL^LNs z?6GW9rEzJ;J@Gup31JU;M7nT(a+-y0b&pE?0ma^Z#ptPoq7v9VM03K@DhQeTrB2Fg zOcHz6OazJu8HK5MbJHXvOl&*3I#`8q)5~q~INg}_goBA|{w%ViOf^-D>I&V#n6Cu) z+GdEj=c|PJ`)iWBX@nr-R#)yTWfuv<`qs=Oc+!_rjgy_ls19j_E)}c68k*Nrj{VIt z%WZW=7t@EKz9@P7*GEIcz#~!!q<8L4S=M^doru!^KpVN{F0X>`NMD4P1cQsyIw(Ga zG`$R;*Q7;ZLGz{mEGEQO6nT164#P11t(bz6Ww?*|roO3!ShaFiGe9Y>d%BEy4>UZy zHgEYKtGD2k%?Ww=UfICSLeD~(!}=>HqTolXk!+e7%ak0TW$0Gx#`01tv#W3Moe*6@ zvw?Rsd6DC8`*cyW!)bK!x`6H6&K17wsnT)U1l7ctn_Z&(48j6=yC;k=)$+b&7WccU zG$!!XpO2I~h9iMJ1SYiFdyIrks z%xNWaOdP0WDr4JR{FL0m?nRs271vPzt!^-~m6jgGks_%8eZ{u8f)%uYYj+Dd_sWnl zhE&`uEmrt z3BjQClIYA`uBg(XUMi2>!(|qZc1i3_YHDc!>*<<8Q=9liPA;rVlatjI)AT6+@JgLb z#&O5(lO8t>DUL}S+{v>9PL4d60D50wAW3~!)fN(IV4@D<^5(ThDe#AyO7R5z={7oe zy3UBMzM49pQ?%w^>iHxE3p7+!6^^M`&upt6;5jVkxVH*SN@@#gw73WI|Ex~CBlPH9 z2AB%1zdea8un4e_<9jH6<2R?GxCV1_5MdIh;JNI;gKb1r39^5rAdU06I z{;V>y>5c2HbUtj;Ux1q6M)&}`s{HzIx}wc#V4j}mLuD3S?shwFb@fSE{{>tuo3aPk z+>XAl`^Z}Flz>{9*)2|L3?dKv2igjk;jXUm!A35+wNe>Sq;z4Xx?`7)6|<9G8^k~+ zrms%kZRyzOvnH3~UT^JVsBlEg7xo`|8X?94j+wZ-B2n*-hh>zw3@$8#9j_0XPu)W|FdsZNkeWzE8b3h02O%|`OQ_4GcbJ;LC$b2#pIy9h* zhM$v@nfRdJrVVw-UE8kb#tslZar+L+Hf$Jh*a?>A5u7qyPlJ6Z_^9`07v~offQ2(x zR`I>Kc*gfIRWQA8JQ>T}aZ-C}A(noOo7J;ipBKY&M@Ul&vf{zqY zU2ea$@tCVEZ%WD#4aSXFI)@B6n=Isx>u>#IVC=fSBS{d-SRz^Fnhp?0Bb$6>cnKZFrPgzW@lBJ57`#izBV zZ-_Sz^uQ+PAu{^rVjOD~N*!OCMq^bg7>AD#iQQKjac^tKTvt^H4gM4EoS7}}HDbs= z1$i0CD9?NmQ#Hq4NHDV5s(yE7Myka(wPoVFlGhK}=T9=TFP;v_*F-1*?A$hio4Ev} zd-0QWmettK>zo{k-bl|kE8hj(Fy+hXKMp&p3}=4{b=OvtA9K^jIz=d@pQo8rY7C>a zT|Vib4s$h>@d`)~cP9L^hN=M#3DeJp>g1bDmNSeib!VhK>M7W&nj2nIZ-Ums$ju~%py zkzQZPEtE@8vlo2e{Z(~$we7917-Mktcq-L~{H^Qr#=v~_C`WdENu855Htzsg&#vNHTuT&d@imyc0)z=4$qGA_0;$9XPHay>gJf{}z*Q*Xh^t}4>AVn&b;qtyM|jjY z<>BR*99xB8ltWqv0Xm$s8+NHs`dX)>vd8S*Jz6v%ctQs2_js1#@n)zB;cldXF0iqVzf+(O>SX zT{A6)WA}@Qa;f1Eidc{VYsuQ4=|pHe8^XeN8~UAPbS4ZLa@p>|ye#SFc2qS&jXT+d zq5H#cPnts_qGnRj!n`GJ$7_Cti$DH>8P%NIn9Yh)T?h5M)j$5pM9o*8(r^*7zdPHg z3Z?pc{BM3J{AP0&D*Z96d@}I&uxJz*Pf4b*%QS*ccDC0iIXBKf1vYp9di}O;S@brL$Lnhw;%P}g-f3IODX8s9$ zv~H%!t*3eBP#m3_Gcif{7x1xHrO3&DY53}K>p#y;z0#l#>v`5y1F6Iy-k^bl1j7(b zyAr>?#u2=3GrosfAUb3|ek2%l*FTwk_r&4>-2Zo3LjAuum4dHNCLB~V(3QAf&jZmf 
zW89-=x(Rr-t|a7TN@D?TPU}qp-Iwv`Mt|ySPe2f`nX}HKfB%2=z5g!>@JH;mb1i{$ zw9^Un;ax)~-0)v_Ezp&vfJ|}~O0HAE&dAOQFivSBklKH|Kt8t=I<=vkz%VsxM?2L1 zdW*>G!j4~WUa}|LI3TpnQD~3jffPr4OUzFhtM+PuJ_Z;gT59xvz0GUM;lvy`Er{x*P(oW z%U%=HH)9x()Ba4@L>>1{NG_$6$z91|?Gb7_MI{e)vV6_Vnh&g+40K)%UfaeH>D#SB zT2}*}#0t~$ldl7$HhVL#?S?Pb?xE?zYoADemA7toh1g)bGwYOW|BP_kj!g|JK=pJ; z8bKpB_FG*k#mqB%!Oc&Q`;yByc-U4W9`R~j?fMd|VZ3%?I?)z*H@~s|aM>RSwlC9XP5P_rAjNIF=i_$uxm#S+u+SD)a^jMKockNQ zf7zJ;3$aX{em^JF%xlJitR~FTw$)ZM;Mr?z`&p7BZ2Ep8xT#`xV-6%LFVK23{k)(3 zCre4E^E6ajbRvlcc7Qon;1nyExV;Q`qTCB%k_&BW`hCG!yhFvCkbe_ zJ4hP{GlcoVPyQ0{dF;CJLlL6JP?wXZEnx+A*C>xN5LUPFD=S;`g#3zez*_o2BT={} z@!!*Vi4wj!0tbzIYLh1=w?xT|yrvtPwpip+`NW{z_!DfL{(JrR|M@g^QJnt=oh6wJ`*UN`U-nS zt@oGtlp2iX@N<94?qz_kez97>#548R$LnN!-vt)09kxP&VfNx!H5ex$g{?)b(1Q&{ z$*WUQf;d`KoHR0;k!lS%82GI0qI`h=J?av-{oKj>!Hh;ji)0-0G`BKx<1IbMz!v2B z&I4xuw=T+Dg<TQE+`~0DT;+ZRq~9B0}u_LdFU2cT3r*~)LFUHs0_Y0Nds4`U@S#4)r^l_TCsT*eY5s{j1#oRm49YfHL-PU^BcVCeh49pw{rvSMkeL4Q=Rg#z7v zHC~zTLn>XUs>*|Pi4DNUdxLsDU%)2#+|!=Y$m?yhJXYb>_ZkkT!(%;wD295?Jw8#1|zONUO`dE*$kmFI$mK(*~5N6y}aM%R|28>1ohTgxTf zzX|MwpoQ_}tnJQ|zWR{+{YU8;?c=yq+6*jd4DXB){TQQpd3I;}YsdP7hRD_}%;o1|C3KIy%0C#S3=wcROsZt?bgl z$Qb6y(8g75_X{YOLgv_wXGNEd?J`sJi7vR1L%pM6Ei;w>PA>;@?Y}4~8-pLSmrct2 zsG0^~1Dwv4k(q0#Q;m^ExVb0_Y^OR!XmwDPmMA&4h*M^N3; ze^alR@C2Pua9`|nO)clTm_etj0$bX5ivRqPC;JKZmV7irNa;L`T^<{{bsZ+_#n63Q z4~LNS{(?BS*Ztp8tVfsoJ-yrZKM@>7wxDSN_!p#4Z<7s({fuutig|Y_*N*k@7i-5M z<27AS?uVC-2psY{EizSe^p)nZ37u|h9?firFz%FZ7@!)9B;or*`syR{yI6Fw7f)0i zi#tnO#HW-F4&Tpq+F63-&2!_WEoFhhpofl<%!Q~(q_L7*rjIn$U)wcz786tKqc0CP2dfb!2yj9^x+1*)_aHs#U(GixVavaVL#%7=g3+K~*CRs>RB7zlfCBi*mX7TwKHeTULWGC6-c*>;-b^N#)o;c=J}*ILDn6 z`S5FAH@*n;>PpB4*mI?VVdVf(lnoc~`OAWpomXv+M(#rmUy^cGksZ&sQ^?-2lB7Xh zuym}!OO=%*7{Tvat$TQm&_T%~7d*#-&=?Q10(_b;kx*dd5py?5CldiaY%0`-7gk-E!9$OkB3>md_Sstsh%X&1U3- z+Nzr1XOPLYFA&K_yJVhyKFR`atv7ABRN2+%=YsB0HX7M28 z+iSZCav>k9zi~)9V+Vt|H|ZPiQ`Pbq?j*`J?>DD4lZDG;e>8GW0$RDoL100>MBQ8^ zmi?(jXory_9D^+ZT*22goRp})i6z!gq zYIXHSTc4xYp#a&TOAP`h{2w)WE%Ly0k603b6T!*yO}CwH3P11V;XUzJDFOY@N+MIt zbK59#MI{s)-nn8pw6M51Z#?wGO?d2ssh-DOp+^QZ4mr}Sgu+WGg80_3QY4vhB8S6! zOKutAMBDi{L-=-!tg#0R*1Km*IJ8>waSI>Ve+Vg7=h2xs3Td}^UNJ2vpCODdG^EoBqUj(4Skxew`;ATUNn_9n0qJbE%}x-rt|w(mLpz+O%!pvDH(fF z*^(i`C=cOBj}FPq%h2i>ebS2!qH)apF((@!7Y zzgW~(U8*Z6k7u2d?P{36zNT1w_%{~k!Wedx6#t2K4!g=Jux8^^wsW1%#Kz!7J40y? 
[... base85-encoded GIT binary patch data elided ...]
literal 0
HcmV?d00001

diff --git a/nipype/testing/data/tpms_msk.nii.gz b/nipype/testing/data/tpms_msk.nii.gz
new file mode 100644
index 0000000000000000000000000000000000000000..e8ec4d9d44743b7ff0bcc02864e54919fbe9688d
GIT binary patch
literal 20495
[... base85-encoded GIT binary patch data elided ...]
[... remainder of the base85-encoded binary data and the header of the new file nipype/testing/decorators.py elided; content resumes inside the docstring of make_label_dec ...]
+    >>> slow = make_label_dec('slow')
+    >>> slow.__doc__
+    "Labels a test as 'slow'"
+
+    >>> rare = make_label_dec(['slow','hard'],
+    ...         "Mix labels 'slow' and 'hard' for rare tests")
+    >>> @rare
+    ... def f(): pass
+    ...
+    >>>
+    >>> f.slow
+    True
+    >>> f.hard
+    True
+    """
+    if isinstance(label, str):
+        labels = [label]
+    else:
+        labels = label
+    # Validate that the given label(s) are OK for use in setattr() by doing a
+    # dry run on a dummy function.
+    tmp = lambda: None
+    for label in labels:
+        setattr(tmp, label, True)
+    # This is the actual decorator we'll return
+
+    def decor(f):
+        for label in labels:
+            setattr(f, label, True)
+        return f
+
+    # Apply the user's docstring
+    if ds is None:
+        ds = "Labels a test as %r" % label
+    decor.__doc__ = ds
+    return decor
+
+
+# For tests that need further review
+
+
+def needs_review(msg):
+    """ Skip a test that needs further review.
+
+    Parameters
+    ----------
+    msg : string
+        msg regarding the review that needs to be done
+    """
+
+    def skip_func(func):
+        return skipif(True, msg)(func)
+
+    return skip_func
+
+
+# Easier version of the numpy knownfailure
+def knownfailure(f):
+    return knownfailureif(True)(f)
+
+
+def if_datasource(ds, msg):
+    try:
+        ds.get_filename()
+    except DataError:
+        return skipif(True, msg)
+    return lambda f: f
diff --git a/nipype/testing/fixtures.py b/nipype/testing/fixtures.py
new file mode 100644
index 0000000000..6d8b3b0874
--- /dev/null
+++ b/nipype/testing/fixtures.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""
+Pytest fixtures used in tests.
+""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import os +import pytest +import numpy as np +import nibabel as nb + +from io import open +from builtins import str + +from nipype.utils.filemanip import ensure_list +from nipype.interfaces.fsl import Info +from nipype.interfaces.fsl.base import FSLCommand + + +def analyze_pair_image_files(outdir, filelist, shape): + for f in ensure_list(filelist): + hdr = nb.Nifti1Header() + hdr.set_data_shape(shape) + img = np.random.random(shape) + analyze = nb.AnalyzeImage(img, np.eye(4), hdr) + analyze.to_filename(os.path.join(outdir, f)) + + +def nifti_image_files(outdir, filelist, shape): + for f in ensure_list(filelist): + img = np.random.random(shape) + nb.Nifti1Image(img, np.eye(4), None).to_filename( + os.path.join(outdir, f)) + + +@pytest.fixture() +def create_files_in_directory(request, tmpdir): + cwd = tmpdir.chdir() + filelist = ['a.nii', 'b.nii'] + nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) + + def change_directory(): + cwd.chdir() + + request.addfinalizer(change_directory) + return (filelist, tmpdir.strpath) + + +@pytest.fixture() +def create_analyze_pair_file_in_directory(request, tmpdir): + cwd = tmpdir.chdir() + filelist = ['a.hdr'] + analyze_pair_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) + + def change_directory(): + cwd.chdir() + + request.addfinalizer(change_directory) + return (filelist, tmpdir.strpath) + + +@pytest.fixture() +def create_files_in_directory_plus_dummy_file(request, tmpdir): + cwd = tmpdir.chdir() + filelist = ['a.nii', 'b.nii'] + nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) + + tmpdir.join('reg.dat').write('dummy file') + filelist.append('reg.dat') + + def change_directory(): + cwd.chdir() + + request.addfinalizer(change_directory) + return (filelist, tmpdir.strpath) + + +@pytest.fixture() +def create_surf_file_in_directory(request, tmpdir): + cwd = tmpdir.chdir() + surf = 'lh.a.nii' + nifti_image_files(tmpdir.strpath, filelist=surf, shape=(1, 100, 1)) + + def change_directory(): + cwd.chdir() + + request.addfinalizer(change_directory) + return (surf, tmpdir.strpath) + + +def set_output_type(fsl_output_type): + prev_output_type = os.environ.get('FSLOUTPUTTYPE', None) + + if fsl_output_type is not None: + os.environ['FSLOUTPUTTYPE'] = fsl_output_type + elif 'FSLOUTPUTTYPE' in os.environ: + del os.environ['FSLOUTPUTTYPE'] + + FSLCommand.set_default_output_type(Info.output_type()) + return prev_output_type + + +@pytest.fixture(params=[None] + sorted(Info.ftypes)) +def create_files_in_directory_plus_output_type(request, tmpdir): + func_prev_type = set_output_type(request.param) + origdir = tmpdir.chdir() + filelist = ['a.nii', 'b.nii'] + nifti_image_files(tmpdir.strpath, filelist, shape=(3, 3, 3, 4)) + + out_ext = Info.output_type_to_ext(Info.output_type()) + + def fin(): + set_output_type(func_prev_type) + origdir.chdir() + + request.addfinalizer(fin) + return (filelist, tmpdir.strpath, out_ext) diff --git a/nipype/testing/tests/test_utils.py b/nipype/testing/tests/test_utils.py new file mode 100644 index 0000000000..798f640805 --- /dev/null +++ b/nipype/testing/tests/test_utils.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Test testing utilities +""" + +import os +import warnings +import subprocess +from mock import patch, MagicMock +from nipype.testing.utils import TempFATFS + + +def test_tempfatfs(): 
+ try: + fatfs = TempFATFS() + except (IOError, OSError): + warnings.warn("Cannot mount FAT filesystems with FUSE") + else: + with fatfs as tmp_dir: + assert os.path.exists(tmp_dir) + + +@patch( + 'subprocess.check_call', + MagicMock(side_effect=subprocess.CalledProcessError('', ''))) +def test_tempfatfs_calledprocesserror(): + try: + TempFATFS() + except IOError as e: + assert isinstance(e, IOError) + assert isinstance(e.__cause__, subprocess.CalledProcessError) + else: + assert False + + +@patch('subprocess.check_call', MagicMock()) +@patch('subprocess.Popen', MagicMock(side_effect=OSError())) +def test_tempfatfs_oserror(): + try: + TempFATFS() + except IOError as e: + assert isinstance(e, IOError) + assert isinstance(e.__cause__, OSError) + else: + assert False diff --git a/nipype/testing/utils.py b/nipype/testing/utils.py new file mode 100644 index 0000000000..716b16da78 --- /dev/null +++ b/nipype/testing/utils.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Additional handy utilities for testing +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import range, object, open + +import os +import time +import shutil +import signal +import subprocess +from subprocess import CalledProcessError +from tempfile import mkdtemp +from future.utils import raise_from +from ..utils.misc import package_check + +__docformat__ = 'restructuredtext' + +import numpy as np +import nibabel as nb + + +class TempFATFS(object): + def __init__(self, size_in_mbytes=8, delay=0.5): + """Temporary filesystem for testing non-POSIX filesystems on a POSIX + system. + + with TempFATFS() as fatdir: + target = os.path.join(fatdir, 'target') + copyfile(file1, target, copy=False) + assert not os.path.islink(target) + + Arguments + --------- + size_in_mbytes : int + Size (in MiB) of filesystem to create + delay : float + Time (in seconds) to wait for fusefat to start, stop + """ + self.delay = delay + self.tmpdir = mkdtemp() + self.dev_null = open(os.devnull, 'wb') + + vfatfile = os.path.join(self.tmpdir, 'vfatblock') + self.vfatmount = os.path.join(self.tmpdir, 'vfatmount') + self.canary = os.path.join(self.vfatmount, '.canary') + + with open(vfatfile, 'wb') as fobj: + fobj.write(b'\x00' * (int(size_in_mbytes) << 20)) + os.mkdir(self.vfatmount) + + mkfs_args = ['mkfs.vfat', vfatfile] + mount_args = ['fusefat', '-o', 'rw+', '-f', vfatfile, self.vfatmount] + + try: + subprocess.check_call( + args=mkfs_args, stdout=self.dev_null, stderr=self.dev_null) + except CalledProcessError as e: + raise_from(IOError("mkfs.vfat failed"), e) + + try: + self.fusefat = subprocess.Popen( + args=mount_args, stdout=self.dev_null, stderr=self.dev_null) + except OSError as e: + raise_from(IOError("fusefat is not installed"), e) + + time.sleep(self.delay) + + if self.fusefat.poll() is not None: + raise IOError("fusefat terminated too soon") + + open(self.canary, 'wb').close() + + def __enter__(self): + return self.vfatmount + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.fusefat is not None: + self.fusefat.send_signal(signal.SIGINT) + + # Allow 1s to return without sending terminate + for count in range(10): + time.sleep(0.1) + if self.fusefat.poll() is not None: + break + else: + self.fusefat.terminate() + time.sleep(self.delay) + assert not os.path.exists(self.canary) + self.dev_null.close() + shutil.rmtree(self.tmpdir) + + +def save_toy_nii(ndarray, 
filename):
+    toy = nb.Nifti1Image(ndarray, np.eye(4))
+    nb.nifti1.save(toy, filename)
+    return filename
diff --git a/nipype/tests/__init__.py b/nipype/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nipype/tests/test_nipype.py b/nipype/tests/test_nipype.py
new file mode 100644
index 0000000000..01fd081bc9
--- /dev/null
+++ b/nipype/tests/test_nipype.py
@@ -0,0 +1,19 @@
+from .. import get_info
+from ..info import get_nipype_gitversion
+import pytest
+
+
+def test_nipype_info():
+    exception_not_raised = True
+    try:
+        get_info()
+    except Exception:
+        exception_not_raised = False
+    assert exception_not_raised
+
+
+@pytest.mark.skipif(not get_nipype_gitversion(),
+                    reason="not able to get version from get_nipype_gitversion")
+def test_git_hash():
+    # removing the first "g" from gitversion
+    assert get_nipype_gitversion()[1:] == get_info()['commit_hash']
diff --git a/nipype/utils/README.txt b/nipype/utils/README.txt
new file mode 100644
index 0000000000..a0f6544dd2
--- /dev/null
+++ b/nipype/utils/README.txt
@@ -0,0 +1,11 @@
+==================
+ Nipype Utilities
+==================
+
+This directory contains various utilities used in nipype. Some of
+them have been copied from nipy. Any changes to these should be done
+upstream.
+
+* From nipy:
+  * onetime.py
+  * tmpdirs.py
diff --git a/nipype/utils/__init__.py b/nipype/utils/__init__.py
new file mode 100644
index 0000000000..4a0741e48e
--- /dev/null
+++ b/nipype/utils/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from .config import NUMPY_MMAP
+from .onetime import OneTimeProperty, setattr_on_read
+from .tmpdirs import TemporaryDirectory, InTemporaryDirectory
diff --git a/nipype/utils/config.py b/nipype/utils/config.py
new file mode 100644
index 0000000000..e4e518960c
--- /dev/null
+++ b/nipype/utils/config.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+'''
+Created on 20 Apr 2010
+
+logging options : INFO, DEBUG
+hash_method : content, timestamp
+
+@author: Chris Filo Gorgolewski
+'''
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+import os
+import sys
+import errno
+import atexit
+from warnings import warn
+from distutils.version import LooseVersion
+import configparser
+import numpy as np
+
+from builtins import bytes, str, object, open
+from simplejson import load, dump
+from future import standard_library
+
+from .misc import str2bool
+from ..external import portalocker
+
+standard_library.install_aliases()
+
+CONFIG_DEPRECATIONS = {
+    'profile_runtime': ('monitoring.enabled', '1.0'),
+    'filemanip_level': ('logging.utils_level', '1.0'),
+}
+
+NUMPY_MMAP = LooseVersion(np.__version__) >= LooseVersion('1.12.0')
+
+DEFAULT_CONFIG_TPL = """\
+[logging]
+workflow_level = INFO
+utils_level = INFO
+interface_level = INFO
+log_to_file = false
+log_directory = {log_dir}
+log_size = 16384000
+log_rotate = 4
+
+[execution]
+create_report = true
+crashdump_dir = {crashdump_dir}
+hash_method = timestamp
+job_finished_timeout = 5
+keep_inputs = false
+local_hash_check = true
+matplotlib_backend = Agg
+plugin = Linear
+remove_node_directories = false
+remove_unnecessary_outputs = true
+try_hard_link_datasink = true
+single_thread_matlab = true
+crashfile_format = pklz
+stop_on_first_crash = false
+stop_on_first_rerun = false
+use_relative_paths = false
+stop_on_unknown_version = false
+write_provenance = false
+parameterize_dirs = true +poll_sleep_duration = 2 +xvfb_max_wait = 10 + +[monitoring] +enabled = false +sample_frequency = 1 +summary_append = true + +[check] +interval = 1209600 +""".format + + +def mkdir_p(path): + try: + os.makedirs(path) + except OSError as exc: + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + + +class NipypeConfig(object): + """Base nipype config class""" + + def __init__(self, *args, **kwargs): + self._config = configparser.ConfigParser() + self._cwd = None + + config_dir = os.path.expanduser('~/.nipype') + self.data_file = os.path.join(config_dir, 'nipype.json') + + self.set_default_config() + self._display = None + self._resource_monitor = None + + if os.path.exists(config_dir): + self._config.read( + [os.path.join(config_dir, 'nipype.cfg'), 'nipype.cfg']) + + for option in CONFIG_DEPRECATIONS: + for section in ['execution', 'logging', 'monitoring']: + if self.has_option(section, option): + new_section, new_option = CONFIG_DEPRECATIONS[option][ + 0].split('.') + if not self.has_option(new_section, new_option): + # Warn implicit in get + self.set(new_section, new_option, + self.get(section, option)) + + @property + def cwd(self): + """Cache current working directory ASAP""" + # Run getcwd only once, preventing multiproc to finish + # with error having changed to the wrong path + if self._cwd is None: + try: + self._cwd = os.getcwd() + except OSError: + warn('Trying to run Nipype from a nonexistent directory "{}".'. + format(os.getenv('PWD', 'unknown')), RuntimeWarning) + raise + return self._cwd + + def set_default_config(self): + """Read default settings template and set into config object""" + default_cfg = DEFAULT_CONFIG_TPL( + log_dir=os.path.expanduser( + '~'), # Get $HOME in a platform-agnostic way + crashdump_dir=self.cwd # Read cached cwd + ) + + try: + self._config.read_string(default_cfg) # Python >= 3.2 + except AttributeError: + from io import StringIO + self._config.readfp(StringIO(default_cfg)) + + def enable_debug_mode(self): + """Enables debug configuration""" + from .. import logging + self._config.set('execution', 'stop_on_first_crash', 'true') + self._config.set('execution', 'remove_unnecessary_outputs', 'false') + self._config.set('execution', 'keep_inputs', 'true') + self._config.set('logging', 'workflow_level', 'DEBUG') + self._config.set('logging', 'interface_level', 'DEBUG') + self._config.set('logging', 'utils_level', 'DEBUG') + logging.update_logging(self._config) + + def set_log_dir(self, log_dir): + """Sets logging directory + + This should be the first thing that is done before any nipype class + with logging is imported. + """ + self._config.set('logging', 'log_directory', log_dir) + + def get(self, section, option, default=None): + """Get an option""" + if option in CONFIG_DEPRECATIONS: + msg = ('Config option "%s" has been deprecated as of nipype %s. ' + 'Please use "%s" instead.') % ( + option, CONFIG_DEPRECATIONS[option][1], + CONFIG_DEPRECATIONS[option][0]) + warn(msg) + section, option = CONFIG_DEPRECATIONS[option][0].split('.') + + if self._config.has_option(section, option): + return self._config.get(section, option) + return default + + def set(self, section, option, value): + """Set new value on option""" + if isinstance(value, bool): + value = str(value) + + if option in CONFIG_DEPRECATIONS: + msg = ('Config option "%s" has been deprecated as of nipype %s. 
'
+                   'Please use "%s" instead.') % (
+                       option, CONFIG_DEPRECATIONS[option][1],
+                       CONFIG_DEPRECATIONS[option][0])
+            warn(msg)
+            section, option = CONFIG_DEPRECATIONS[option][0].split('.')
+
+        return self._config.set(section, option, value)
+
+    def getboolean(self, section, option):
+        """Get a boolean option from section"""
+        return self._config.getboolean(section, option)
+
+    def has_option(self, section, option):
+        """Check if option exists in section"""
+        return self._config.has_option(section, option)
+
+    @property
+    def _sections(self):
+        return self._config._sections
+
+    def get_data(self, key):
+        """Read options file"""
+        if not os.path.exists(self.data_file):
+            return None
+        with open(self.data_file, 'rt') as file:
+            portalocker.lock(file, portalocker.LOCK_EX)
+            datadict = load(file)
+        if key in datadict:
+            return datadict[key]
+        return None
+
+    def save_data(self, key, value):
+        """Store config file"""
+        datadict = {}
+        if os.path.exists(self.data_file):
+            with open(self.data_file, 'rt') as file:
+                portalocker.lock(file, portalocker.LOCK_EX)
+                datadict = load(file)
+        else:
+            dirname = os.path.dirname(self.data_file)
+            if not os.path.exists(dirname):
+                mkdir_p(dirname)
+        with open(self.data_file, 'wt') as file:
+            portalocker.lock(file, portalocker.LOCK_EX)
+            datadict[key] = value
+            dump(datadict, file)
+
+    def update_config(self, config_dict):
+        """Extend internal dictionary with config_dict"""
+        for section in ['execution', 'logging', 'check']:
+            if section in config_dict:
+                for key, val in list(config_dict[section].items()):
+                    if not key.startswith('__'):
+                        self._config.set(section, key, str(val))
+
+    def update_matplotlib(self):
+        """Set backend on matplotlib from options"""
+        import matplotlib
+        matplotlib.use(self.get('execution', 'matplotlib_backend'))
+
+    def enable_provenance(self):
+        """Sets provenance storing on"""
+        self._config.set('execution', 'write_provenance', 'true')
+        self._config.set('execution', 'hash_method', 'content')
+
+    @property
+    def resource_monitor(self):
+        """Check if resource_monitor is available"""
+        if self._resource_monitor is not None:
+            return self._resource_monitor
+
+        # Cache config from nipype config
+        self.resource_monitor = str2bool(
+            self._config.get('monitoring', 'enabled')) or False
+        return self._resource_monitor
+
+    @resource_monitor.setter
+    def resource_monitor(self, value):
+        # Accept string true/false values
+        if isinstance(value, (str, bytes)):
+            value = str2bool(value.lower())
+
+        if value is False:
+            self._resource_monitor = False
+        elif value is True:
+            if not self._resource_monitor:
+                # Before setting self._resource_monitor check psutil
+                # availability
+                self._resource_monitor = False
+                try:
+                    import psutil
+                    self._resource_monitor = LooseVersion(
+                        psutil.__version__) >= LooseVersion('5.0')
+                except ImportError:
+                    pass
+                finally:
+                    if not self._resource_monitor:
+                        warn('Could not enable the resource monitor: '
+                             'psutil>=5.0 could not be imported.')
+                self._config.set('monitoring', 'enabled',
+                                 ('%s' % self._resource_monitor).lower())
+
+    def enable_resource_monitor(self):
+        """Sets the resource monitor on"""
+        self.resource_monitor = True
+
+    def get_display(self):
+        """Returns the first display available"""
+
+        # Check if an Xorg server is listening
+        # import subprocess as sp
+        # if not hasattr(sp, 'DEVNULL'):
+        #     setattr(sp, 'DEVNULL', os.devnull)
+        # x_listening = bool(sp.call('ps au | grep -v grep | grep -i xorg',
+        #                    shell=True, stdout=sp.DEVNULL))
+
+        if self._display is not None:
+            return ':%d' %
self._display.new_display
+
+        sysdisplay = None
+        if self._config.has_option('execution', 'display_variable'):
+            sysdisplay = self._config.get('execution', 'display_variable')
+
+        sysdisplay = sysdisplay or os.getenv('DISPLAY')
+        if sysdisplay:
+            from collections import namedtuple
+
+            def _mock():
+                pass
+
+            # Store a fake Xvfb object. Format - <host>:<display>[.<screen>]
+            ndisp = sysdisplay.split(':')[-1].split('.')[0]
+            Xvfb = namedtuple('Xvfb', ['new_display', 'stop'])
+            self._display = Xvfb(int(ndisp), _mock)
+            return self.get_display()
+        else:
+            if 'darwin' in sys.platform:
+                raise RuntimeError(
+                    'Xvfb requires root permissions to run in OSX. Please '
+                    'make sure that an X server is listening and set the '
+                    'appropriate config on either $DISPLAY or nipype\'s '
+                    '"display_variable" config. Valid X servers include '
+                    'VNC, XQuartz, or manually started Xvfb.')
+
+            # If $DISPLAY is empty, it confuses Xvfb so unset
+            if sysdisplay == '':
+                del os.environ['DISPLAY']
+            try:
+                from xvfbwrapper import Xvfb
+            except ImportError:
+                raise RuntimeError(
+                    'A display server was required, but $DISPLAY is not '
+                    'defined and Xvfb could not be imported.')
+
+            self._display = Xvfb(nolisten='tcp')
+            self._display.start()
+
+            # Older versions of xvfbwrapper used vdisplay_num
+            if not hasattr(self._display, 'new_display'):
+                setattr(self._display, 'new_display',
+                        self._display.vdisplay_num)
+            return self.get_display()
+
+    def stop_display(self):
+        """Closes the display if started"""
+        if self._display is not None:
+            from .. import logging
+            self._display.stop()
+            logging.getLogger('nipype.interface').debug(
+                'Closing display (if virtual)')
+
+
+@atexit.register
+def free_display():
+    """Stop virtual display (if it is up)"""
+    from .. import config
+    config.stop_display()
diff --git a/nipype/utils/docparse.py b/nipype/utils/docparse.py
new file mode 100644
index 0000000000..1df779f2ce
--- /dev/null
+++ b/nipype/utils/docparse.py
@@ -0,0 +1,378 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Utilities to pull in documentation from command-line tools.
+
+Examples
+--------
+
+# Instantiate bet object
+from nipype.interfaces import fsl
+from nipype.utils import docparse
+better = fsl.Bet()
+docstring = docparse.get_doc(better.cmd, better.opt_map)
+
+"""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import str, open, bytes
+
+import subprocess
+from ..interfaces.base import CommandLine
+from .misc import is_container
+
+
+def grab_doc(cmd, trap_error=True):
+    """Run cmd without args and grab documentation.
+
+    Parameters
+    ----------
+    cmd : string
+        Command line string
+    trap_error : boolean
+        Ensure that returncode is 0
+
+    Returns
+    -------
+    doc : string
+        The command line documentation
+    """
+
+    proc = subprocess.Popen(
+        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
+    stdout, stderr = proc.communicate()
+
+    if trap_error and proc.returncode:
+        msg = 'Attempting to run %s. Returned Error: %s' % (cmd, stderr)
+        raise IOError(msg)
+
+    if stderr:
+        # A few programs, like fast and fnirt, send their help to
+        # stderr instead of stdout.
+        # XXX: Test for error vs. doc in stderr
+        return stderr
+    return stdout
+
+
+def reverse_opt_map(opt_map):
+    """Reverse the key/value pairs of the option map in the interface classes.
+
+    Parameters
+    ----------
+    opt_map : dict
+        Dictionary mapping the attribute name to a command line flag.
+        Each interface class defines these for the command it wraps.
+
+    Returns
+    -------
+    rev_opt_map : dict
+        Dictionary mapping the flags to the attribute name.
+    """
+
+    # For docs, we only care about the mapping from our attribute
+    # names to the command-line flags. The 'v.split()[0]' below
+    # strips off the string format characters.
+    # The 'flags' key is generic and a value of None cannot be parsed,
+    # so both are skipped in the loop below.
+    revdict = {}
+    for key, value in list(opt_map.items()):
+        if is_container(value):
+            # The value is a tuple where the first element is the
+            # format string and the second element is a docstring.
+            value = value[0]
+        if (key != 'flags' and value is not None):
+            revdict[value.split()[0]] = key
+    return revdict
+
+
+def format_params(paramlist, otherlist=None):
+    """Format the parameters according to the nipy style conventions.
+
+    Since the external programs do not conform to any conventions, the
+    resulting docstrings are not ideal. But at a minimum the
+    Parameters section is reasonably close.
+
+    Parameters
+    ----------
+    paramlist : list
+        List of strings where each list item matches exactly one
+        parameter and its description. These items will go into the
+        'Parameters' section of the docstring.
+    otherlist : list
+        List of strings, similar to paramlist above. These items will
+        go into the 'Other Parameters' section of the docstring.
+
+    Returns
+    -------
+    doc : string
+        The formatted docstring.
+    """
+
+    hdr = 'Parameters'
+    delim = '----------'
+    paramlist.insert(0, delim)
+    paramlist.insert(0, hdr)
+    params = '\n'.join(paramlist)
+    otherparams = []
+    doc = ''.join(params)
+    if otherlist:
+        hdr = 'Other Parameters'
+        delim = '----------------'
+        otherlist.insert(0, delim)
+        otherlist.insert(0, hdr)
+        otherlist.insert(0, '\n')
+        otherparams = '\n'.join(otherlist)
+        doc = ''.join([doc, otherparams])
+    return doc
+
+
+def insert_doc(doc, new_items):
+    """Insert ``new_items`` into the beginning of the ``doc``
+
+    Docstrings in ``new_items`` will be inserted right after the
+    *Parameters* header but before the existing docs.
+
+    Parameters
+    ----------
+    doc : str
+        The existing docstring we're inserting documentation into.
+    new_items : list
+        List of strings to be inserted in the ``doc``.
+
+    Examples
+    --------
+    >>> from nipype.utils.docparse import insert_doc
+    >>> doc = '''Parameters
+    ... ----------
+    ... outline :
+    ...     something about an outline'''
+
+    >>> new_items = ['infile : str', '    The name of the input file']
+    >>> new_items.extend(['outfile : str', '    The name of the output file'])
+    >>> newdoc = insert_doc(doc, new_items)
+    >>> print(newdoc)
+    Parameters
+    ----------
+    infile : str
+        The name of the input file
+    outfile : str
+        The name of the output file
+    outline :
+        something about an outline
+
+    """
+
+    # Insert new_items after the Parameters header
+    doclist = doc.split('\n')
+    tmpdoc = doclist[:2]
+    # Add new_items
+    tmpdoc.extend(new_items)
+    # Add rest of documents
+    tmpdoc.extend(doclist[2:])
+    # Insert newlines
+    newdoc = []
+    for line in tmpdoc:
+        newdoc.append(line)
+        newdoc.append('\n')
+    # We add one too many newlines, remove it.
+    newdoc.pop(-1)
+    return ''.join(newdoc)
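+
+
+# A sketch of the inversion reverse_opt_map performs and of how build_doc
+# (below) consumes it; the opt_map here is illustrative, not taken from a
+# real interface:
+#
+#     >>> opts = reverse_opt_map({'outline': '-o', 'mask': '-m %s'})
+#     >>> sorted(opts.items())
+#     [('-m', 'mask'), ('-o', 'outline')]
+#
+# Given such an opts mapping, a help line like "  -o  generate outline image"
+# is emitted by build_doc under the attribute name, roughly
+# "outline :\n    generate outline image".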
+
+
+def build_doc(doc, opts):
+    """Build docstring from doc and options
+
+    Parameters
+    ----------
+    doc : string
+        Documentation string
+    opts : dict
+        Dictionary of option attributes and keys. Use reverse_opt_map
+        to reverse flags and attrs from opt_map class attribute.
+
+    Returns
+    -------
+    newdoc : string
+        The docstring with flags replaced with attribute names and
+        formatted to match nipy standards (as best we can).
+
+    """
+
+    # Split doc into line elements. Generally, each line is an
+    # individual flag/option.
+    doclist = doc.split('\n')
+    newdoc = []
+    flags_doc = []
+    for line in doclist:
+        linelist = line.split()
+        if not linelist:
+            # Probably an empty line
+            continue
+        # For lines we care about, the first item is the flag
+        if ',' in linelist[0]:  # sometimes flags are only separated by a comma
+            flag = linelist[0].split(',')[0]
+        else:
+            flag = linelist[0]
+        attr = opts.get(flag)
+        if attr is not None:
+            # newline = line.replace(flag, attr)
+            # Replace the flag with our attribute name
+            linelist[0] = '%s :\n    ' % str(attr)
+            # Add some line formatting
+            newline = ' '.join(linelist)
+            newdoc.append(newline)
+        else:
+            if line[0].isspace():
+                # For all the docs I've looked at, the flags all have
+                # indentation (spaces) at the start of the line.
+                # Other parts of the docs, like 'usage' statements
+                # start with alpha-numeric characters. We only care
+                # about the flags.
+                flags_doc.append(line)
+    return format_params(newdoc, flags_doc)
+
+
+def get_doc(cmd, opt_map, help_flag=None, trap_error=True):
+    """Get the docstring from our command and options map.
+
+    Parameters
+    ----------
+    cmd : string
+        The command whose documentation we are fetching
+    opt_map : dict
+        Dictionary of flags and option attributes.
+    help_flag : string
+        Provide additional help flag. e.g., -h
+    trap_error : boolean
+        Override if underlying command returns a non-zero returncode
+
+    Returns
+    -------
+    doc : string
+        The formatted docstring
+
+    """
+    res = CommandLine(
+        'which %s' % cmd.split(' ')[0],
+        resource_monitor=False,
+        terminal_output='allatonce').run()
+    cmd_path = res.runtime.stdout.strip()
+    if cmd_path == '':
+        raise Exception('Command %s not found' % cmd.split(' ')[0])
+    if help_flag:
+        cmd = ' '.join((cmd, help_flag))
+    doc = grab_doc(cmd, trap_error)
+    opts = reverse_opt_map(opt_map)
+    return build_doc(doc, opts)
+
+
+def _parse_doc(doc, style=['--']):
+    """Parses a help doc for inputs
+
+    Parameters
+    ----------
+    doc : string
+        Documentation string
+    style : string default ['--']
+        The help command style (--, -)
+
+    Returns
+    -------
+    optmap : dict of input parameters
+    """
+
+    # Split doc into line elements. Generally, each line is an
+    # individual flag/option.
+    doclist = doc.split('\n')
+    optmap = {}
+    if isinstance(style, (str, bytes)):
+        style = [style]
+    for line in doclist:
+        linelist = line.split()
+        flag = [
+            item for i, item in enumerate(linelist)
+            if i < 2 and any([item.startswith(s)
+                              for s in style]) and len(item) > 1
+        ]
+        if flag:
+            if len(flag) == 1:
+                style_idx = [flag[0].startswith(s) for s in style].index(True)
+                flag = flag[0]
+            else:
+                style_idx = []
+                for f in flag:
+                    for i, s in enumerate(style):
+                        if f.startswith(s):
+                            style_idx.append(i)
+                            break
+                flag = flag[style_idx.index(min(style_idx))]
+                style_idx = min(style_idx)
+            optmap[flag.split(style[style_idx])[1]] = '%s %%s' % flag
+    return optmap
+
+
+def get_params_from_doc(cmd, style='--', help_flag=None, trap_error=True):
+    """Auto-generate option map from command line help
+
+    Parameters
+    ----------
+    cmd : string
+        The command whose documentation we are fetching
+    style : string default ['--']
+        The help command style (--, -). Multiple styles can be provided in a
+        list e.g. ['--','-'].
+    help_flag : string
+        Provide additional help flag.
e.g., -h + trap_error : boolean + Override if underlying command returns a non-zero returncode + + Returns + ------- + optmap : dict + Contains a mapping from input to command line variables + + """ + res = CommandLine( + 'which %s' % cmd.split(' ')[0], + resource_monitor=False, + terminal_output='allatonce').run() + cmd_path = res.runtime.stdout.strip() + if cmd_path == '': + raise Exception('Command %s not found' % cmd.split(' ')[0]) + if help_flag: + cmd = ' '.join((cmd, help_flag)) + doc = grab_doc(cmd, trap_error) + return _parse_doc(doc, style) + + +def replace_opts(rep_doc, opts): + """Replace flags with parameter names. + + This is a simple operation where we replace the command line flags + with the attribute names. + + Parameters + ---------- + rep_doc : string + Documentation string + opts : dict + Dictionary of option attributes and keys. Use reverse_opt_map + to reverse flags and attrs from opt_map class attribute. + + Returns + ------- + rep_doc : string + New docstring with flags replaces with attribute names. + + Examples + -------- + doc = grab_doc('bet') + opts = reverse_opt_map(fsl.Bet.opt_map) + rep_doc = replace_opts(doc, opts) + + """ + + # Replace flags with attribute names + for key, val in list(opts.items()): + rep_doc = rep_doc.replace(key, val) + return rep_doc diff --git a/nipype/utils/draw_gantt_chart.py b/nipype/utils/draw_gantt_chart.py new file mode 100644 index 0000000000..7a52205090 --- /dev/null +++ b/nipype/utils/draw_gantt_chart.py @@ -0,0 +1,565 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Module to draw an html gantt chart from logfile produced by +``nipype.utils.profiler.log_nodes_cb()`` +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +# Import packages +import sys +import random +import datetime +import simplejson as json +from builtins import str, range, open +# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict +from future import standard_library +standard_library.install_aliases() +from collections import OrderedDict + +# Pandas +try: + import pandas as pd +except ImportError: + print('Pandas not found; in order for full functionality of this module ' + 'install the pandas package') + pass + +PY3 = sys.version_info[0] > 2 + + +def create_event_dict(start_time, nodes_list): + ''' + Function to generate a dictionary of event (start/finish) nodes + from the nodes list + + Parameters + ---------- + start_time : datetime.datetime + a datetime object of the pipeline start time + nodes_list : list + a list of the node dictionaries that were run in the pipeline + + Returns + ------- + events : dictionary + a dictionary where the key is the timedelta from the start of + the pipeline execution to the value node it accompanies + ''' + + # Import packages + import copy + + events = {} + for node in nodes_list: + # Format node fields + estimated_threads = node.get('num_threads', 1) + estimated_memory_gb = node.get('estimated_memory_gb', 1.0) + runtime_threads = node.get('runtime_threads', 0) + runtime_memory_gb = node.get('runtime_memory_gb', 0.0) + + # Init and format event-based nodes + node['estimated_threads'] = estimated_threads + node['estimated_memory_gb'] = estimated_memory_gb + node['runtime_threads'] = runtime_threads + node['runtime_memory_gb'] = runtime_memory_gb + start_node = node + finish_node = copy.deepcopy(node) + start_node['event'] = 'start' + 
finish_node['event'] = 'finish'
+
+        # Get dictionary key
+        start_delta = (node['start'] - start_time).total_seconds()
+        finish_delta = (node['finish'] - start_time).total_seconds()
+
+        # Populate dictionary
+        if events.get(start_delta) or events.get(finish_delta):
+            err_msg = 'Event logged twice or events started at exact same time!'
+            raise KeyError(err_msg)
+        events[start_delta] = start_node
+        events[finish_delta] = finish_node
+
+    # Return events dictionary
+    return events
+
+
+def log_to_dict(logfile):
+    '''
+    Function to extract log node dictionaries into a list of python
+    dictionaries and return the list as well as the final node
+
+    Parameters
+    ----------
+    logfile : string
+        path to the json-formatted log file generated from a nipype
+        workflow execution
+
+    Returns
+    -------
+    nodes_list : list
+        a list of python dictionaries containing the runtime info
+        for each nipype node
+    '''
+
+    # Init variables
+    with open(logfile, 'r') as content:
+        # read file separating each line
+        lines = content.readlines()
+
+    nodes_list = [json.loads(l) for l in lines]
+
+    # Return list of nodes
+    return nodes_list
+
+
+def calculate_resource_timeseries(events, resource):
+    '''
+    Given an event dictionary, calculate the resources used
+    as a timeseries
+
+    Parameters
+    ----------
+    events : dictionary
+        a dictionary of event-based node dictionaries of the workflow
+        execution statistics
+    resource : string
+        the resource of interest to return the time-series of;
+        e.g. 'runtime_memory_gb', 'estimated_threads', etc
+
+    Returns
+    -------
+    time_series : pandas Series
+        a pandas Series object that contains timestamps as the indices
+        and the resource amount as values
+    '''
+
+    # Import packages
+    import pandas as pd
+
+    # Init variables
+    res = OrderedDict()
+    all_res = 0.0
+
+    # Iterate through the events
+    for _, event in sorted(events.items()):
+        if event['event'] == "start":
+            if resource in event and event[resource] != 'Unknown':
+                all_res += float(event[resource])
+            current_time = event['start']
+        elif event['event'] == "finish":
+            if resource in event and event[resource] != 'Unknown':
+                all_res -= float(event[resource])
+            current_time = event['finish']
+        res[current_time] = all_res
+
+    # Formulate the pandas timeseries
+    time_series = pd.Series(data=list(res.values()), index=list(res.keys()))
+    # Downsample where there is only value-diff
+    ts_diff = time_series.diff()
+    time_series = time_series[ts_diff != 0]
+
+    # Return the new time series
+    return time_series
+
+
+def draw_lines(start, total_duration, minute_scale, scale):
+    '''
+    Function to draw the minute line markers and timestamps
+
+    Parameters
+    ----------
+    start : datetime.datetime obj
+        start time for first minute line marker
+    total_duration : float
+        total duration of the workflow execution (in seconds)
+    minute_scale : integer
+        the scale, in minutes, at which to plot line markers for the
+        gantt chart; for example, minute_scale=10 means there are lines
+        drawn at every 10 minute interval from start to finish
+    scale : integer
+        scale factor in pixel spacing between minute line markers
+
+    Returns
+    -------
+    result : string
+        the html-formatted string for producing the minutes-based
+        time line markers
+    '''
+
+    # Init variables
+    result = ''
+    next_line = 220
+    next_time = start
+    num_lines = int(((total_duration // 60) // minute_scale) + 2)
+
+    # Iterate through the lines and create html line markers string
+    for line in range(num_lines):
+        # Line object
+        new_line = "[HTML markup elided: top=%dpx]" % next_line
+        result += new_line
+        # Time digits
+        time = "[HTML markup elided: top=%dpx] %02d:%02d [HTML markup elided]" % \
+            (next_line - 20, next_time.hour, next_time.minute)
+        result += time
+        # Increment line spacing and digits
+        next_line += minute_scale * scale
+        next_time += datetime.timedelta(minutes=minute_scale)
+
+    # Return html string for time line markers
+    return result
+
+
+def draw_nodes(start, nodes_list, cores, minute_scale, space_between_minutes,
+               colors):
+    '''
+    Function to return the html-string of the node drawings for the
+    gantt chart
+
+    Parameters
+    ----------
+    start : datetime.datetime obj
+        start time for first node
+    nodes_list : list
+        a list of the node dictionaries
+    cores : integer
+        the number of cores given to the workflow via the 'n_procs'
+        plugin arg
+    minute_scale : integer
+        the scale, in minutes, at which to plot line markers for the
+        gantt chart; for example, minute_scale=10 means there are lines
+        drawn at every 10 minute interval from start to finish
+    space_between_minutes : integer
+        scale factor in pixel spacing between minute line markers
+    colors : list
+        a list of colors to choose from when coloring the nodes in the
+        gantt chart
+
+    Returns
+    -------
+    result : string
+        the html-formatted string for producing the node drawings
+    '''
+
+    # Init variables
+    result = ''
+    scale = space_between_minutes / minute_scale
+    space_between_minutes = space_between_minutes / scale
+    end_times = [
+        datetime.datetime(start.year, start.month, start.day, start.hour,
+                          start.minute, start.second) for core in range(cores)
+    ]
+
+    # For each node in the pipeline
+    for node in nodes_list:
+        # Get start and finish times
+        node_start = node['start']
+        node_finish = node['finish']
+        # Calculate an offset and scale duration
+        offset = ((node_start - start).total_seconds() / 60) * scale * \
+            space_between_minutes + 220
+        # Scale duration
+        scale_duration = (
+            node['duration'] / 60) * scale * space_between_minutes
+        if scale_duration < 5:
+            scale_duration = 5
+        scale_duration -= 2
+        # Left
+        left = 60
+        for core in range(len(end_times)):
+            if end_times[core] < node_start:
+                left += core * 30
+                end_times[core] = datetime.datetime(
+                    node_finish.year, node_finish.month, node_finish.day,
+                    node_finish.hour, node_finish.minute, node_finish.second)
+                break
+
+        # Get color for node object
+        color = random.choice(colors)
+        if 'error' in node:
+            color = 'red'
+
+        # Setup dictionary for node html string insertion
+        node_dict = {
+            'left': left,
+            'offset': offset,
+            'scale_duration': scale_duration,
+            'color': color,
+            'node_name': node['name'],
+            'node_dur': node['duration'] / 60.0,
+            'node_start': node_start.strftime("%Y-%m-%d %H:%M:%S"),
+            'node_finish': node_finish.strftime("%Y-%m-%d %H:%M:%S")
+        }
+        # Create new node string
+        new_node = "[HTML markup elided]" % \
+            node_dict
+
+        # Append to output result
+        result += new_node
+
+    # Return html string for nodes
+    return result
+
+
+def draw_resource_bar(start_time, finish_time, time_series,
+                      space_between_minutes, minute_scale, color, left,
+                      resource):
+    '''
+    Function to draw a resource (memory or threads) usage bar over time
+
+    Parameters
+    ----------
+    start_time, finish_time : datetime.datetime obj
+        start and end of the drawn period
+    time_series : pandas Series
+        resource amounts indexed by timestamp
+    space_between_minutes : integer
+        scale factor in pixel spacing between minute line markers
+    minute_scale : integer
+        the scale, in minutes, between line markers
+    color : string
+        color to draw the resource bar in
+    left : integer
+        left position, in pixels, of the bar
+    resource : string
+        resource name (e.g. 'Memory' or 'Threads'); selects the bar label
+
+    Returns
+    -------
+    result : string
+        the html-formatted string of the resource bar
+    '''
+
+    # Memory header
+    result = "[HTML markup elided: left=%dpx]%s[HTML markup elided]" \
+             % (left, resource)
+    # Image scaling factors
+    scale = space_between_minutes / minute_scale
+    space_between_minutes = space_between_minutes / scale
+
+    # Iterate through time series
+    if PY3:
+        ts_items = time_series.items()
+    else:
+        ts_items = time_series.iteritems()
+
+    ts_len = len(time_series)
+    for idx, (ts_start, amount) in enumerate(ts_items):
+        if idx < ts_len - 1:
+            ts_end = time_series.index[idx + 1]
+        else:
+            ts_end = finish_time
+        # Calculate offset from start at top
+        offset = ((ts_start-start_time).total_seconds() / 60.0) * scale * \
+            space_between_minutes + 220
+        # Scale duration
+        duration_mins = (ts_end - ts_start).total_seconds() / 60.0
+        height = duration_mins * scale * space_between_minutes
+        if height < 5:
+            height = 5
+        height -= 2
+
+        # Bar width is proportional to resource amount
+        width = amount * 20
+
+        if resource.lower() == 'memory':
+            label = '%.3f GB' % amount
+        else:
+            label = '%d threads' % amount
+
+        # Setup dictionary for bar html string insertion
+        bar_dict = {
+            'color': color,
+            'height': height,
+            'width': width,
+            'offset': offset,
+            'left': left,
+            'label': label,
+            'duration': duration_mins,
+            'start': ts_start.strftime('%Y-%m-%d %H:%M:%S'),
+            'finish': ts_end.strftime('%Y-%m-%d %H:%M:%S')
+        }
+
+        bar_html = "[HTML markup elided]"
+        # Add another bar to html line
+        result += bar_html % bar_dict
+
+    # Return bar-formatted html string
+    return result
+
+
+def generate_gantt_chart(logfile,
+                         cores,
+                         minute_scale=10,
+                         space_between_minutes=50,
+                         colors=["#7070FF", "#4E4EB2", "#2D2D66", "#9B9BFF"]):
+    '''
+    Generates a gantt chart in html showing the workflow execution based on a
+    callback log file. This script was intended to be used with the
+    MultiprocPlugin. The Usage section below shows how to set up the workflow
+    so that the callback log file is generated.
+
+    Parameters
+    ----------
+    logfile : string
+        filepath to the callback log file to plot the gantt chart of
+    cores : integer
+        the number of cores given to the workflow via the 'n_procs'
+        plugin arg
+    minute_scale : integer (optional); default=10
+        the scale, in minutes, at which to plot line markers for the
+        gantt chart; for example, minute_scale=10 means there are lines
+        drawn at every 10 minute interval from start to finish
+    space_between_minutes : integer (optional); default=50
+        scale factor in pixel spacing between minute line markers
+    colors : list (optional)
+        a list of colors to choose from when coloring the nodes in the
+        gantt chart
+
+    Returns
+    -------
+    None
+        the function does not return any value but writes out an html
+        file in the same directory as the callback log path passed in
+
+    Usage
+    -----
+    # import logging
+    # import logging.handlers
+    # from nipype.utils.profiler import log_nodes_cb
+
+    # log_filename = 'callback.log'
+    # logger = logging.getLogger('callback')
+    # logger.setLevel(logging.DEBUG)
+    # handler = logging.FileHandler(log_filename)
+    # logger.addHandler(handler)
+
+    # #create workflow
+    # workflow = ...
+
+    # workflow.run(plugin='MultiProc',
+    #     plugin_args={'n_procs':8, 'memory':12, 'status_callback': log_nodes_cb})
+
+    # generate_gantt_chart('callback.log', 8)
+    '''
+
+    # add the html header
+    html_string = '''[HTML page header and CSS elided]
+    '''
+
+    close_header = '''
+    [HTML legend markup elided; legend labels: "Estimated Resource",
+    "Actual Resource", "Failed Node"]
+    '''
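+
+    # Worked example for the summary section below: a log whose first node
+    # starts at 11:00:00 and whose last node finishes at 11:03:30 gives
+    # duration == 210.0 seconds, rendered as "Duration: 3.50 minutes".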

Start: ' + start_node['start'].strftime( + "%Y-%m-%d %H:%M:%S") + '

' + html_string += '

Finish: ' + last_node['finish'].strftime( + "%Y-%m-%d %H:%M:%S") + '

' + html_string += '

Duration: ' + "{0:.2f}".format( + duration / 60) + ' minutes

' + html_string += '

Nodes: ' + str(len(nodes_list)) + '

' + html_string += '

Cores: ' + str(cores) + '

' + html_string += close_header + # Draw nipype nodes Gantt chart and runtimes + html_string += draw_lines(start_node['start'], duration, minute_scale, + space_between_minutes) + html_string += draw_nodes(start_node['start'], nodes_list, cores, + minute_scale, space_between_minutes, colors) + + # Get memory timeseries + estimated_mem_ts = calculate_resource_timeseries(events, + 'estimated_memory_gb') + runtime_mem_ts = calculate_resource_timeseries(events, 'runtime_memory_gb') + # Plot gantt chart + resource_offset = 120 + 30 * cores + html_string += draw_resource_bar(start_node['start'], last_node['finish'], + estimated_mem_ts, space_between_minutes, + minute_scale, '#90BBD7', + resource_offset * 2 + 120, 'Memory') + html_string += draw_resource_bar(start_node['start'], last_node['finish'], + runtime_mem_ts, space_between_minutes, + minute_scale, '#03969D', + resource_offset * 2 + 120, 'Memory') + + # Get threads timeseries + estimated_threads_ts = calculate_resource_timeseries( + events, 'estimated_threads') + runtime_threads_ts = calculate_resource_timeseries(events, + 'runtime_threads') + # Plot gantt chart + html_string += draw_resource_bar(start_node['start'], last_node['finish'], + estimated_threads_ts, + space_between_minutes, minute_scale, + '#90BBD7', resource_offset, 'Threads') + html_string += draw_resource_bar(start_node['start'], last_node['finish'], + runtime_threads_ts, space_between_minutes, + minute_scale, '#03969D', resource_offset, + 'Threads') + + # finish html + html_string += ''' +
+ ''' + + # save file + with open(logfile + '.html', 'w' if PY3 else 'wb') as html_file: + html_file.write(html_string) diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py new file mode 100644 index 0000000000..80fc262f03 --- /dev/null +++ b/nipype/utils/filemanip.py @@ -0,0 +1,981 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Miscellaneous file manipulation functions +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import sys +import pickle +import errno +import subprocess as sp +import gzip +import hashlib +import locale +from hashlib import md5 +import os +import os.path as op +import re +import shutil +import contextlib +import posixpath +import simplejson as json +import numpy as np + +from builtins import str, bytes, open + +from .. import logging, config +from .misc import is_container +from future import standard_library +standard_library.install_aliases() + +fmlogger = logging.getLogger('nipype.utils') + +related_filetype_sets = [ + ('.hdr', '.img', '.mat'), + ('.nii', '.mat'), + ('.BRIK', '.HEAD'), +] + +PY3 = sys.version_info[0] >= 3 + +class FileNotFoundError(Exception): + pass + + +def split_filename(fname): + """Split a filename into parts: path, base filename and extension. + + Parameters + ---------- + fname : str + file or path name + + Returns + ------- + pth : str + base path from fname + fname : str + filename from fname, without extension + ext : str + file extension from fname + + Examples + -------- + >>> from nipype.utils.filemanip import split_filename + >>> pth, fname, ext = split_filename('/home/data/subject.nii.gz') + >>> pth + '/home/data' + + >>> fname + 'subject' + + >>> ext + '.nii.gz' + + """ + + special_extensions = [".nii.gz", ".tar.gz", ".niml.dset"] + + pth = op.dirname(fname) + fname = op.basename(fname) + + ext = None + for special_ext in special_extensions: + ext_len = len(special_ext) + if (len(fname) > ext_len) and \ + (fname[-ext_len:].lower() == special_ext.lower()): + ext = fname[-ext_len:] + fname = fname[:-ext_len] + break + if not ext: + fname, ext = op.splitext(fname) + + return pth, fname, ext + + +def to_str(value): + """ + Manipulates ordered dicts before they are hashed (Py2/3 compat.) 
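+
+    For example:
+
+    >>> to_str({'a': 1})
+    "{'a': 1}"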
+ + """ + if sys.version_info[0] > 2: + retval = str(value) + else: + retval = to_str_py27(value) + return retval + + +def to_str_py27(value): + """ + Encode dictionary for python 2 + """ + + if isinstance(value, dict): + entry = '{}: {}'.format + retval = '{' + for key, val in list(value.items()): + if len(retval) > 1: + retval += ', ' + kenc = repr(key) + if kenc.startswith(("u'", 'u"')): + kenc = kenc[1:] + venc = to_str_py27(val) + if venc.startswith(("u'", 'u"')): + venc = venc[1:] + retval += entry(kenc, venc) + retval += '}' + return retval + + istuple = isinstance(value, tuple) + if isinstance(value, (tuple, list)): + retval = '(' if istuple else '[' + nels = len(value) + for i, v in enumerate(value): + venc = to_str_py27(v) + if venc.startswith(("u'", 'u"')): + venc = venc[1:] + retval += venc + + if i < nels - 1: + retval += ', ' + + if istuple and nels == 1: + retval += ',' + retval += ')' if istuple else ']' + return retval + + retval = repr(value).decode() + if retval.startswith(("u'", 'u"')): + retval = retval[1:] + return retval + + +def fname_presuffix(fname, prefix='', suffix='', newpath=None, use_ext=True): + """Manipulates path and name of input filename + + Parameters + ---------- + fname : string + A filename (may or may not include path) + prefix : string + Characters to prepend to the filename + suffix : string + Characters to append to the filename + newpath : string + Path to replace the path of the input fname + use_ext : boolean + If True (default), appends the extension of the original file + to the output name. + + Returns + ------- + Absolute path of the modified filename + + >>> from nipype.utils.filemanip import fname_presuffix + >>> fname = 'foo.nii.gz' + >>> fname_presuffix(fname,'pre','post','/tmp') + '/tmp/prefoopost.nii.gz' + + >>> from nipype.interfaces.base import Undefined + >>> fname_presuffix(fname, 'pre', 'post', Undefined) == \ + fname_presuffix(fname, 'pre', 'post') + True + + """ + pth, fname, ext = split_filename(fname) + if not use_ext: + ext = '' + + # No need for isdefined: bool(Undefined) evaluates to False + if newpath: + pth = op.abspath(newpath) + return op.join(pth, prefix + fname + suffix + ext) + + +def fnames_presuffix(fnames, prefix='', suffix='', newpath=None, use_ext=True): + """Calls fname_presuffix for a list of files. 
+ """ + f2 = [] + for fname in fnames: + f2.append(fname_presuffix(fname, prefix, suffix, newpath, use_ext)) + return f2 + + +def hash_rename(filename, hashvalue): + """renames a file given original filename and hash + and sets path to output_directory + """ + path, name, ext = split_filename(filename) + newfilename = ''.join((name, '_0x', hashvalue, ext)) + return op.join(path, newfilename) + + +def check_forhash(filename): + """checks if file has a hash in its filename""" + if isinstance(filename, list): + filename = filename[0] + path, name = op.split(filename) + if re.search('(_0x[a-z0-9]{32})', name): + hashvalue = re.findall('(_0x[a-z0-9]{32})', name) + return True, hashvalue + else: + return False, None + + +def hash_infile(afile, chunk_len=8192, crypto=hashlib.md5, + raise_notfound=False): + """ + Computes hash of a file using 'crypto' module + + >>> hash_infile('smri_ants_registration_settings.json') + 'f225785dfb0db9032aa5a0e4f2c730ad' + + >>> hash_infile('surf01.vtk') + 'fdf1cf359b4e346034372cdeb58f9a88' + + >>> hash_infile('spminfo') + '0dc55e3888c98a182dab179b976dfffc' + + >>> hash_infile('fsl_motion_outliers_fd.txt') + 'defd1812c22405b1ee4431aac5bbdd73' + + + """ + if not op.isfile(afile): + if raise_notfound: + raise RuntimeError('File "%s" not found.' % afile) + return None + + crypto_obj = crypto() + with open(afile, 'rb') as fp: + while True: + data = fp.read(chunk_len) + if not data: + break + crypto_obj.update(data) + return crypto_obj.hexdigest() + + +def hash_timestamp(afile): + """ Computes md5 hash of the timestamp of a file """ + md5hex = None + if op.isfile(afile): + md5obj = md5() + stat = os.stat(afile) + md5obj.update(str(stat.st_size).encode()) + md5obj.update(str(stat.st_mtime).encode()) + md5hex = md5obj.hexdigest() + return md5hex + + +def _parse_mount_table(exit_code, output): + """Parses the output of ``mount`` to produce (path, fs_type) pairs + + Separated from _generate_cifs_table to enable testing logic with real + outputs + """ + # Not POSIX + if exit_code != 0: + return [] + + # Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec) + # ^^^^ ^^^^^ + # OSX mount example: /dev/disk2 on / (hfs, local, journaled) + # ^ ^^^ + pattern = re.compile(r'.*? on (/.*?) (?:type |\()([^\s,\)]+)') + + # Keep line and match for error reporting (match == None on failure) + # Ignore empty lines + matches = [(l, pattern.match(l)) + for l in output.strip().splitlines() if l] + + # (path, fstype) tuples, sorted by path length (longest first) + mount_info = sorted((match.groups() for _, match in matches + if match is not None), + key=lambda x: len(x[0]), reverse=True) + cifs_paths = [path for path, fstype in mount_info + if fstype.lower() == 'cifs'] + + # Report failures as warnings + for line, match in matches: + if match is None: + fmlogger.debug("Cannot parse mount line: '%s'", line) + + return [ + mount for mount in mount_info + if any(mount[0].startswith(path) for path in cifs_paths) + ] + + +def _generate_cifs_table(): + """Construct a reverse-length-ordered list of mount points that + fall under a CIFS mount. + + This precomputation allows efficient checking for whether a given path + would be on a CIFS filesystem. + + On systems without a ``mount`` command, or with no CIFS mounts, returns an + empty list. 
+ """ + exit_code, output = sp.getstatusoutput("mount") + return _parse_mount_table(exit_code, output) + + +_cifs_table = _generate_cifs_table() + + +def on_cifs(fname): + """ Checks whether a file path is on a CIFS filesystem mounted in a POSIX + host (i.e., has the ``mount`` command). + + On Windows, Docker mounts host directories into containers through CIFS + shares, which has support for Minshall+French symlinks, or text files that + the CIFS driver exposes to the OS as symlinks. + We have found that under concurrent access to the filesystem, this feature + can result in failures to create or read recently-created symlinks, + leading to inconsistent behavior and ``FileNotFoundError``s. + + This check is written to support disabling symlinks on CIFS shares. + """ + # Only the first match (most recent parent) counts + for fspath, fstype in _cifs_table: + if fname.startswith(fspath): + return fstype == 'cifs' + return False + + +def copyfile(originalfile, + newfile, + copy=False, + create_new=False, + hashmethod=None, + use_hardlink=False, + copy_related_files=True): + """Copy or link ``originalfile`` to ``newfile``. + + If ``use_hardlink`` is True, and the file can be hard-linked, then a + link is created, instead of copying the file. + + If a hard link is not created and ``copy`` is False, then a symbolic + link is created. + + Parameters + ---------- + originalfile : str + full path to original file + newfile : str + full path to new file + copy : Bool + specifies whether to copy or symlink files + (default=False) but only for POSIX systems + use_hardlink : Bool + specifies whether to hard-link files, when able + (Default=False), taking precedence over copy + copy_related_files : Bool + specifies whether to also operate on related files, as defined in + ``related_filetype_sets`` + + Returns + ------- + None + + """ + newhash = None + orighash = None + fmlogger.debug(newfile) + + if create_new: + while op.exists(newfile): + base, fname, ext = split_filename(newfile) + s = re.search('_c[0-9]{4,4}$', fname) + i = 0 + if s: + i = int(s.group()[2:]) + 1 + fname = fname[:-6] + "_c%04d" % i + else: + fname += "_c%04d" % i + newfile = base + os.sep + fname + ext + + if hashmethod is None: + hashmethod = config.get('execution', 'hash_method').lower() + + # Don't try creating symlinks on CIFS + if copy is False and on_cifs(newfile): + copy = True + + # Existing file + # ------------- + # Options: + # symlink + # to regular file originalfile (keep if symlinking) + # to same dest as symlink originalfile (keep if symlinking) + # to other file (unlink) + # regular file + # hard link to originalfile (keep) + # copy of file (same hash) (keep) + # different file (diff hash) (unlink) + keep = False + if op.lexists(newfile): + if op.islink(newfile): + if all((os.readlink(newfile) == op.realpath(originalfile), + not use_hardlink, not copy)): + keep = True + elif posixpath.samefile(newfile, originalfile): + keep = True + else: + if hashmethod == 'timestamp': + hashfn = hash_timestamp + elif hashmethod == 'content': + hashfn = hash_infile + else: + raise AttributeError("Unknown hash method found:", hashmethod) + newhash = hashfn(newfile) + fmlogger.debug('File: %s already exists,%s, copy:%d', newfile, + newhash, copy) + orighash = hashfn(originalfile) + keep = newhash == orighash + if keep: + fmlogger.debug('File: %s already exists, not overwriting, copy:%d', + newfile, copy) + else: + os.unlink(newfile) + + # New file + # -------- + # use_hardlink & can_hardlink => hardlink + # ~hardlink & ~copy & 
can_symlink => symlink + # ~hardlink & ~symlink => copy + if not keep and use_hardlink: + try: + fmlogger.debug('Linking File: %s->%s', newfile, originalfile) + # Use realpath to avoid hardlinking symlinks + os.link(op.realpath(originalfile), newfile) + except OSError: + use_hardlink = False # Disable hardlink for associated files + else: + keep = True + + if not keep and not copy and os.name == 'posix': + try: + fmlogger.debug('Symlinking File: %s->%s', newfile, originalfile) + os.symlink(originalfile, newfile) + except OSError: + copy = True # Disable symlink for associated files + else: + keep = True + + if not keep: + try: + fmlogger.debug('Copying File: %s->%s', newfile, originalfile) + shutil.copyfile(originalfile, newfile) + except shutil.Error as e: + fmlogger.warn(e.message) + + # Associated files + if copy_related_files: + related_file_pairs = (get_related_files(f, include_this_file=False) + for f in (originalfile, newfile)) + for alt_ofile, alt_nfile in zip(*related_file_pairs): + if op.exists(alt_ofile): + copyfile( + alt_ofile, + alt_nfile, + copy, + hashmethod=hashmethod, + use_hardlink=use_hardlink, + copy_related_files=False) + + return newfile + + +def get_related_files(filename, include_this_file=True): + """Returns a list of related files, as defined in + ``related_filetype_sets``, for a filename. (e.g., Nifti-Pair, Analyze (SPM) + and AFNI files). + + Parameters + ---------- + filename : str + File name to find related filetypes of. + include_this_file : bool + If true, output includes the input filename. + """ + related_files = [] + path, name, this_type = split_filename(filename) + for type_set in related_filetype_sets: + if this_type in type_set: + for related_type in type_set: + if include_this_file or related_type != this_type: + related_files.append(op.join(path, name + related_type)) + if not len(related_files): + related_files = [filename] + return related_files + + +def copyfiles(filelist, dest, copy=False, create_new=False): + """Copy or symlink files in ``filelist`` to ``dest`` directory. + + Parameters + ---------- + filelist : list + List of files to copy. + dest : path/files + full path to destination. If it is a list of length greater + than 1, then it assumes that these are the names of the new + files. + copy : Bool + specifies whether to copy or symlink files + (default=False) but only for posix systems + + Returns + ------- + None + + """ + outfiles = ensure_list(dest) + newfiles = [] + for i, f in enumerate(ensure_list(filelist)): + if isinstance(f, list): + newfiles.insert(i, + copyfiles( + f, dest, copy=copy, create_new=create_new)) + else: + if len(outfiles) > 1: + destfile = outfiles[i] + else: + destfile = fname_presuffix(f, newpath=outfiles[0]) + destfile = copyfile(f, destfile, copy, create_new=create_new) + newfiles.insert(i, destfile) + return newfiles + + +def ensure_list(filename): + """Returns a list given either a string or a list + """ + if isinstance(filename, (str, bytes)): + return [filename] + elif isinstance(filename, list): + return filename + elif is_container(filename): + return [x for x in filename] + else: + return None + + +def simplify_list(filelist): + """Returns a list if filelist is a list of length greater than 1, + otherwise returns the first element + """ + if len(filelist) > 1: + return filelist + else: + return filelist[0] + + +filename_to_list = ensure_list +list_to_filename = simplify_list + + +def check_depends(targets, dependencies): + """Return true if all targets exist and are newer than all dependencies. 
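+
+    For example (hypothetical paths)::
+
+        check_depends(['out.nii'], ['in.nii', 'run.py'])
+        # True only if out.nii exists and is newer than both inputs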
+ + An OSError will be raised if there are missing dependencies. + """ + tgts = ensure_list(targets) + deps = ensure_list(dependencies) + return all(map(op.exists, tgts)) and \ + min(map(op.getmtime, tgts)) > \ + max(list(map(op.getmtime, deps)) + [0]) + + +def save_json(filename, data): + """Save data to a json file + + Parameters + ---------- + filename : str + Filename to save data in. + data : dict + Dictionary to save in json file. + + """ + mode = 'w' + if sys.version_info[0] < 3: + mode = 'wb' + with open(filename, mode) as fp: + json.dump(data, fp, sort_keys=True, indent=4) + + +def load_json(filename): + """Load data from a json file + + Parameters + ---------- + filename : str + Filename to load data from. + + Returns + ------- + data : dict + + """ + + with open(filename, 'r') as fp: + data = json.load(fp) + return data + + +def loadcrash(infile, *args): + if infile.endswith('pkl') or infile.endswith('pklz'): + return loadpkl(infile, versioning=True) + else: + raise ValueError('Only pickled crashfiles are supported') + + +def loadpkl(infile, versioning=False): + """Load a zipped or plain cPickled file + """ + fmlogger.debug('Loading pkl: %s', infile) + if infile.endswith('pklz'): + pkl_file = gzip.open(infile, 'rb') + else: + pkl_file = open(infile, 'rb') + + if versioning: + pkl_metadata = {} + + # Look if pkl file contains version file + try: + pkl_metadata_line = pkl_file.readline() + pkl_metadata = json.loads(pkl_metadata_line) + except: + # Could not get version info + pkl_file.seek(0) + + try: + try: + unpkl = pickle.load(pkl_file) + except UnicodeDecodeError: + unpkl = pickle.load(pkl_file, fix_imports=True, encoding='utf-8') + + return unpkl + + # Unpickling problems + except Exception as e: + if not versioning: + raise e + + from nipype import __version__ as version + + if 'version' in pkl_metadata: + if pkl_metadata['version'] != version: + fmlogger.error('Your Nipype version is: %s', + version) + fmlogger.error('Nipype version of the pkl is: %s', + pkl_metadata['version']) + else: + fmlogger.error('No metadata was found in the pkl file.') + fmlogger.error('Make sure that you are using the same Nipype' + 'version from the generated pkl.') + + raise e + + +def crash2txt(filename, record): + """ Write out plain text crash file """ + with open(filename, 'w') as fp: + if 'node' in record: + node = record['node'] + fp.write('Node: {}\n'.format(node.fullname)) + fp.write('Working directory: {}\n'.format(node.output_dir())) + fp.write('\n') + fp.write('Node inputs:\n{}\n'.format(node.inputs)) + fp.write(''.join(record['traceback'])) + + +def read_stream(stream, logger=None, encoding=None): + """ + Robustly reads a stream, sending a warning to a logger + if some decoding error was raised. 
+ + >>> read_stream(bytearray([65, 0xc7, 65, 10, 66])) # doctest: +ELLIPSIS + ['A...A', 'B'] + + + """ + default_encoding = encoding or locale.getdefaultlocale()[1] or 'UTF-8' + logger = logger or fmlogger + try: + out = stream.decode(default_encoding) + except UnicodeDecodeError as err: + out = stream.decode(default_encoding, errors='replace') + logger.warning('Error decoding string: %s', err) + return out.splitlines() + + +def savepkl(filename, record, versioning=False): + if filename.endswith('pklz'): + pkl_file = gzip.open(filename, 'wb') + else: + pkl_file = open(filename, 'wb') + + if versioning: + from nipype import __version__ as version + metadata = json.dumps({'version': version}) + + pkl_file.write(metadata.encode('utf-8')) + pkl_file.write('\n'.encode('utf-8')) + + pickle.dump(record, pkl_file) + pkl_file.close() + + +rst_levels = ['=', '-', '~', '+'] + + +def write_rst_header(header, level=0): + return '\n'.join( + (header, ''.join([rst_levels[level] for _ in header]))) + '\n\n' + + +def write_rst_list(items, prefix=''): + out = [] + for item in items: + out.append('{} {}'.format(prefix, str(item))) + return '\n'.join(out) + '\n\n' + + +def write_rst_dict(info, prefix=''): + out = [] + for key, value in sorted(info.items()): + out.append('{}* {} : {}'.format(prefix, key, str(value))) + return '\n'.join(out) + '\n\n' + + +def dist_is_editable(dist): + """Is distribution an editable install? + + Parameters + ---------- + dist : string + Package name + + # Borrowed from `pip`'s' API + """ + for path_item in sys.path: + egg_link = op.join(path_item, dist + '.egg-link') + if op.isfile(egg_link): + return True + return False + + +def makedirs(path, exist_ok=False): + """ + Create path, if it doesn't exist. + + Parameters + ---------- + path : output directory to create + + """ + if not exist_ok: # The old makedirs + os.makedirs(path) + return path + + # this odd approach deals with concurrent directory cureation + if not op.exists(op.abspath(path)): + fmlogger.debug("Creating directory %s", path) + try: + os.makedirs(path) + except OSError: + fmlogger.debug("Problem creating directory %s", path) + if not op.exists(path): + raise OSError('Could not create directory %s' % path) + return path + + +def emptydirs(path, noexist_ok=False): + """ + Empty an existing directory, without deleting it. Do not + raise error if the path does not exist and noexist_ok is True. + + Parameters + ---------- + path : directory that should be empty + + """ + fmlogger.debug("Removing contents of %s", path) + + if noexist_ok and not op.exists(path): + return True + + if op.isfile(path): + raise OSError('path "%s" should be a directory' % path) + + try: + shutil.rmtree(path) + except OSError as ex: + elcont = os.listdir(path) + if ex.errno == errno.ENOTEMPTY and not elcont: + fmlogger.warning( + 'An exception was raised trying to remove old %s, but the path' + ' seems empty. Is it an NFS mount?. Passing the exception.', + path) + elif ex.errno == errno.ENOTEMPTY and elcont: + fmlogger.debug('Folder %s contents (%d items).', path, len(elcont)) + raise ex + else: + raise ex + + makedirs(path) + + +def which(cmd, env=None, pathext=None): + """ + Return the path to an executable which would be run if the given + cmd was called. If no cmd would be called, return ``None``. 
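+
+    For instance (the result depends on the local environment):
+
+    >>> which('ls')  # doctest: +SKIP
+    '/bin/ls'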
+ + Code for Python < 3.3 is based on a code snippet from + http://orip.org/2009/08/python-checking-if-executable-exists-in.html + + """ + + if pathext is None: + pathext = os.getenv('PATHEXT', '').split(os.pathsep) + pathext.insert(0, '') + + path = os.getenv("PATH", os.defpath) + if env and 'PATH' in env: + path = env.get("PATH") + + if sys.version_info >= (3, 3): + for ext in pathext: + filename = shutil.which(cmd + ext, path=path) + if filename: + return filename + return None + + def isexec(path): + return os.path.isfile(path) and os.access(path, os.X_OK) + + for ext in pathext: + extcmd = cmd + ext + fpath, fname = os.path.split(extcmd) + if fpath: + if isexec(extcmd): + return extcmd + else: + for directory in path.split(os.pathsep): + filename = op.join(directory, extcmd) + if isexec(filename): + return filename + return None + + +def get_dependencies(name, environ): + """Return library dependencies of a dynamically linked executable + + Uses otool on darwin, ldd on linux. Currently doesn't support windows. + + """ + if sys.platform == 'darwin': + proc = sp.Popen( + 'otool -L `which %s`' % name, + stdout=sp.PIPE, + stderr=sp.PIPE, + shell=True, + env=environ) + elif 'linux' in sys.platform: + proc = sp.Popen( + 'ldd `which %s`' % name, + stdout=sp.PIPE, + stderr=sp.PIPE, + shell=True, + env=environ) + else: + return 'Platform %s not supported' % sys.platform + o, e = proc.communicate() + return o.rstrip() + + +def canonicalize_env(env): + """Windows requires that environment be dicts with bytes as keys and values + This function converts any unicode entries for Windows only, returning the + dictionary untouched in other environments. + + Parameters + ---------- + env : dict + environment dictionary with unicode or bytes keys and values + + Returns + ------- + env : dict + Windows: environment dictionary with bytes keys and values + Other: untouched input ``env`` + """ + if os.name != 'nt': + return env + + # convert unicode to string for python 2 + if not PY3: + from future.utils import bytes_to_native_str + out_env = {} + for key, val in env.items(): + if not isinstance(key, bytes): + key = key.encode('utf-8') + if not isinstance(val, bytes): + val = val.encode('utf-8') + if not PY3: + key = bytes_to_native_str(key) + val = bytes_to_native_str(val) + out_env[key] = val + return out_env + + +def relpath(path, start=None): + """Return a relative version of a path""" + + try: + return op.relpath(path, start) + except AttributeError: + pass + + if start is None: + start = os.curdir + if not path: + raise ValueError("no path specified") + start_list = op.abspath(start).split(op.sep) + path_list = op.abspath(path).split(op.sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = op.splitunc(path) + unc_start, rest = op.splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError(("Cannot mix UNC and non-UNC paths " + "(%s and %s)") % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" % + (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. 
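+    # e.g. for start='/home/a' and path='/home/b/c' the shared prefix is
+    # '/home', so rel_list becomes ['..', 'b', 'c'] -> '../b/c'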
+ for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [op.pardir] * (len(start_list) - i) + path_list[i:] + if not rel_list: + return os.curdir + return op.join(*rel_list) + + +@contextlib.contextmanager +def indirectory(path): + cwd = os.getcwd() + os.chdir(path) + try: + yield + finally: + os.chdir(cwd) diff --git a/nipype/utils/functions.py b/nipype/utils/functions.py new file mode 100644 index 0000000000..00b9412d5d --- /dev/null +++ b/nipype/utils/functions.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +""" +Handles custom functions used in Function interface. Future imports +are avoided to keep namespace as clear as possible. +""" +from builtins import next, str +from future.utils import raise_from +import inspect +from textwrap import dedent + + +def getsource(function): + """Returns the source code of a function""" + return dedent(inspect.getsource(function)) + + +def create_function_from_source(function_source, imports=None): + """Return a function object from a function source + + Parameters + ---------- + function_source : unicode string + unicode string defining a function + imports : list of strings + list of import statements in string form that allow the function + to be executed in an otherwise empty namespace + """ + ns = {} + import_keys = [] + + try: + if imports is not None: + for statement in imports: + exec(statement, ns) + import_keys = list(ns.keys()) + exec(function_source, ns) + + except Exception as e: + msg = 'Error executing function\n{}\n'.format(function_source) + msg += ("Functions in connection strings have to be standalone. " + "They cannot be declared either interactively or inside " + "another function or inline in the connect string. Any " + "imports should be done inside the function.") + raise_from(RuntimeError(msg), e) + ns_funcs = list(set(ns) - set(import_keys + ['__builtins__'])) + assert len(ns_funcs) == 1, "Function or inputs are ill-defined" + func = ns[ns_funcs[0]] + return func diff --git a/nipype/utils/logger.py b/nipype/utils/logger.py new file mode 100644 index 0000000000..2c8fe4607a --- /dev/null +++ b/nipype/utils/logger.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import object +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import logging +from warnings import warn +import os +import sys +from .misc import str2bool + +try: + from ..external.cloghandler import ConcurrentRotatingFileHandler as \ + RFHandler +except ImportError: + # Next 2 lines are optional: issue a warning to the user + warn("ConcurrentLogHandler not installed. 
Using builtin log handler") + from logging.handlers import RotatingFileHandler as RFHandler + + +class Logging(object): + """Nipype logging class + """ + fmt = ('%(asctime)s,%(msecs)d %(name)-2s ' + '%(levelname)-2s:\n\t %(message)s') + datefmt = '%y%m%d-%H:%M:%S' + + def __init__(self, config): + self._config = config + # scope our logger to not interfere with user + _nipype_logger = logging.getLogger('nipype') + _nipype_hdlr = logging.StreamHandler(stream=sys.stdout) + _nipype_hdlr.setFormatter(logging.Formatter(fmt=self.fmt, + datefmt=self.datefmt)) + # if StreamHandler was added, do not stack + if not len(_nipype_logger.handlers): + _nipype_logger.addHandler(_nipype_hdlr) + + self._logger = logging.getLogger('nipype.workflow') + self._utlogger = logging.getLogger('nipype.utils') + self._fmlogger = logging.getLogger('nipype.filemanip') + self._iflogger = logging.getLogger('nipype.interface') + + self.loggers = { + 'nipype.workflow': self._logger, + 'nipype.utils': self._utlogger, + 'nipype.filemanip': self._fmlogger, + 'nipype.interface': self._iflogger + } + self._hdlr = None + self.update_logging(self._config) + + def enable_file_logging(self): + config = self._config + LOG_FILENAME = os.path.join( + config.get('logging', 'log_directory'), 'pypeline.log') + hdlr = RFHandler( + LOG_FILENAME, + maxBytes=int(config.get('logging', 'log_size')), + backupCount=int(config.get('logging', 'log_rotate'))) + formatter = logging.Formatter(fmt=self.fmt, datefmt=self.datefmt) + hdlr.setFormatter(formatter) + self._logger.addHandler(hdlr) + self._utlogger.addHandler(hdlr) + self._iflogger.addHandler(hdlr) + self._fmlogger.addHandler(hdlr) + self._hdlr = hdlr + + def disable_file_logging(self): + if self._hdlr: + self._logger.removeHandler(self._hdlr) + self._utlogger.removeHandler(self._hdlr) + self._iflogger.removeHandler(self._hdlr) + self._fmlogger.removeHandler(self._hdlr) + self._hdlr = None + + def update_logging(self, config): + self._config = config + self.disable_file_logging() + self._logger.setLevel( + logging.getLevelName(config.get('logging', 'workflow_level'))) + self._utlogger.setLevel( + logging.getLevelName(config.get('logging', 'utils_level'))) + self._iflogger.setLevel( + logging.getLevelName(config.get('logging', 'interface_level'))) + if str2bool(config.get('logging', 'log_to_file')): + self.enable_file_logging() + + def getLogger(self, name): + if name == 'filemanip': + warn('The "filemanip" logger has been deprecated and replaced by ' + 'the "utils" logger as of nipype 1.0') + if name in self.loggers: + return self.loggers[name] + return None + + def getLevelName(self, name): + return logging.getLevelName(name) + + def logdebug_dict_differences(self, dold, dnew, prefix=""): + """Helper to log what actually changed from old to new values of + dictionaries. 
+ + typical use -- log difference for hashed_inputs + """ + from .misc import dict_diff + self._logger.warning( + "logdebug_dict_differences has been deprecated, please use " + "nipype.utils.misc.dict_diff.") + self._logger.debug(dict_diff(dold, dnew)) diff --git a/nipype/utils/matlabtools.py b/nipype/utils/matlabtools.py new file mode 100644 index 0000000000..bff9567ed5 --- /dev/null +++ b/nipype/utils/matlabtools.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" Useful Functions for working with matlab""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import range + +# Stdlib imports +import os +import re +import tempfile +import numpy as np + +# Functions, classes and other top-level code + + +def fltcols(vals): + ''' Trivial little function to make 1xN float vector ''' + return np.atleast_2d(np.array(vals, dtype=float)) + + +def mlab_tempfile(dir=None): + """Returns a temporary file-like object with valid matlab name. + + The file name is accessible as the .name attribute of the returned object. + The caller is responsible for closing the returned object, at which time + the underlying file gets deleted from the filesystem. + + Parameters + ---------- + + dir : str + A path to use as the starting directory. Note that this directory must + already exist, it is NOT created if it doesn't (in that case, OSError + is raised instead). + + Returns + ------- + f : A file-like object. + + Examples + -------- + + >>> fn = mlab_tempfile() + >>> import os + >>> filename = os.path.basename(fn.name) + >>> '-' not in filename + True + >>> fn.close() + + """ + + valid_name = re.compile(r'^\w+$') + + # Make temp files until we get one whose name is a valid matlab identifier, + # since matlab imposes that constraint. Since the temp file routines may + # return names that aren't valid matlab names, but we can't control that + # directly, we just keep trying until we get a valid name. To avoid an + # infinite loop for some strange reason, we only try 100 times. + for n in range(100): + f = tempfile.NamedTemporaryFile( + suffix='.m', prefix='tmp_matlab_', dir=dir) + # Check the file name for matlab compilance + fname = os.path.splitext(os.path.basename(f.name))[0] + if valid_name.match(fname): + break + # Close the temp file we just made if its name is not valid; the + # tempfile module then takes care of deleting the actual file on disk. 
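+        # If all 100 candidate names were invalid, the for-loop's else
+        # clause below raises ValueError.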
+ f.close() + else: + raise ValueError("Could not make temp file after 100 tries") + + return f diff --git a/nipype/utils/misc.py b/nipype/utils/misc.py new file mode 100644 index 0000000000..6b7629e32a --- /dev/null +++ b/nipype/utils/misc.py @@ -0,0 +1,326 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Miscellaneous utility functions +""" +from __future__ import (print_function, unicode_literals, division, + absolute_import) +from builtins import next, str + +import os +import sys +import re +from collections import Iterator +from warnings import warn + +from distutils.version import LooseVersion + +import numpy as np +from future.utils import raise_from +from future import standard_library +try: + from textwrap import indent as textwrap_indent +except ImportError: + + def textwrap_indent(text, prefix): + """ A textwrap.indent replacement for Python < 3.3 """ + if not prefix: + return text + splittext = text.splitlines(True) + return prefix + prefix.join(splittext) + + +standard_library.install_aliases() + + +def human_order_sorted(l): + """Sorts string in human order (i.e. 'stat10' will go after 'stat2')""" + + def atoi(text): + return int(text) if text.isdigit() else text + + def natural_keys(text): + if isinstance(text, tuple): + text = text[0] + return [atoi(c) for c in re.split('(\d+)', text)] + + return sorted(l, key=natural_keys) + + +def trim(docstring, marker=None): + if isinstance(docstring, bytes): + docstring = str(docstring, 'utf-8') + + if not docstring: + return '' + # Convert tabs to spaces (following the normal Python rules) + # and split into a list of lines: + lines = docstring.expandtabs().splitlines() + # Determine minimum indentation (first line doesn't count): + indent = sys.maxsize + for line in lines[1:]: + stripped = line.lstrip() + if stripped: + indent = min(indent, len(line) - len(stripped)) + # Remove indentation (first line is special): + trimmed = [lines[0].strip()] + if indent < sys.maxsize: + for line in lines[1:]: + # replace existing REST marker with doc level marker + stripped = line.lstrip().strip().rstrip() + if marker is not None and stripped and \ + all([s == stripped[0] for s in stripped]) and \ + stripped[0] not in [':']: + line = line.replace(stripped[0], marker) + trimmed.append(line[indent:].rstrip()) + # Strip off trailing and leading blank lines: + while trimmed and not trimmed[-1]: + trimmed.pop() + while trimmed and not trimmed[0]: + trimmed.pop(0) + # Return a single string: + return '\n'.join(trimmed) + + +def find_indices(condition): + "Return the indices where ravel(condition) is true" + res, = np.nonzero(np.ravel(condition)) + return res + + +def is_container(item): + """Checks if item is a container (list, tuple, dict, set) + + Parameters + ---------- + item : object + object to check for .__iter__ + + Returns + ------- + output : Boolean + True if container + False if not (eg string) + """ + if isinstance(item, str): + return False + elif hasattr(item, '__iter__'): + return True + else: + return False + + +def container_to_string(cont): + """Convert a container to a command line string. + + Elements of the container are joined with a space between them, + suitable for a command line parameter. + + If the container `cont` is only a sequence, like a string and not a + container, it is returned unmodified. + + Parameters + ---------- + cont : container + A container object like a list, tuple, dict, or a set. 
+ + Returns + ------- + cont_str : string + Container elements joined into a string. + + """ + if hasattr(cont, '__iter__') and not isinstance(cont, str): + cont = ' '.join(cont) + return str(cont) + + +# Dependency checks. Copied this from Nipy, with some modificiations +# (added app as a parameter). +def package_check(pkg_name, + version=None, + app=None, + checker=LooseVersion, + exc_failed_import=ImportError, + exc_failed_check=RuntimeError): + """Check that the minimal version of the required package is installed. + + Parameters + ---------- + pkg_name : string + Name of the required package. + version : string, optional + Minimal version number for required package. + app : string, optional + Application that is performing the check. For instance, the + name of the tutorial being executed that depends on specific + packages. Default is *Nipype*. + checker : object, optional + The class that will perform the version checking. Default is + distutils.version.LooseVersion. + exc_failed_import : Exception, optional + Class of the exception to be thrown if import failed. + exc_failed_check : Exception, optional + Class of the exception to be thrown if version check failed. + + Examples + -------- + package_check('numpy', '1.3') + package_check('scipy', '0.7', 'tutorial1') + + """ + + if app: + msg = '%s requires %s' % (app, pkg_name) + else: + msg = 'Nipype requires %s' % pkg_name + if version: + msg += ' with version >= %s' % (version, ) + try: + mod = __import__(pkg_name) + except ImportError as e: + raise_from(exc_failed_import(msg), e) + if not version: + return + try: + have_version = mod.__version__ + except AttributeError as e: + raise_from( + exc_failed_check('Cannot find version for %s' % pkg_name), e) + if checker(have_version) < checker(version): + raise exc_failed_check(msg) + + +def str2bool(v): + if isinstance(v, bool): + return v + lower = v.lower() + if lower in ("yes", "true", "t", "1"): + return True + elif lower in ("no", "false", "n", "f", "0"): + return False + else: + raise ValueError("%s cannot be converted to bool" % v) + + +def flatten(S): + if S == []: + return S + if isinstance(S[0], list): + return flatten(S[0]) + flatten(S[1:]) + return S[:1] + flatten(S[1:]) + + +def unflatten(in_list, prev_structure): + if not isinstance(in_list, Iterator): + in_list = iter(in_list) + + if not isinstance(prev_structure, list): + return next(in_list) + + out = [] + for item in prev_structure: + out.append(unflatten(in_list, item)) + return out + + +def normalize_mc_params(params, source): + """ + Normalize a single row of motion parameters to the SPM format. + + SPM saves motion parameters as: + x Right-Left (mm) + y Anterior-Posterior (mm) + z Superior-Inferior (mm) + rx Pitch (rad) + ry Yaw (rad) + rz Roll (rad) + """ + if source.upper() == 'FSL': + params = params[[3, 4, 5, 0, 1, 2]] + elif source.upper() in ('AFNI', 'FSFAST'): + params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)] + params[3:] = params[3:] * np.pi / 180. + elif source.upper() == 'NIPY': + from nipy.algorithms.registration import to_matrix44, aff2euler + matrix = to_matrix44(params) + params = np.zeros(6) + params[:3] = matrix[:3, 3] + params[-1:2:-1] = aff2euler(matrix) + + return params + + +def dict_diff(dold, dnew, indent=0): + """Helper to log what actually changed from old to new values of + dictionaries. + + typical use -- log difference for hashed_inputs + """ + # First check inputs, since they usually are lists of tuples + # and dicts are required. 
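+    # e.g. [('key', 'val')] -> {'key': 'val'}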
+ if isinstance(dnew, list): + dnew = dict(dnew) + if isinstance(dold, list): + dold = dict(dold) + + # Compare against hashed_inputs + # Keys: should rarely differ + new_keys = set(dnew.keys()) + old_keys = set(dold.keys()) + + diff = [] + if new_keys - old_keys: + diff += [" * keys not previously seen: %s" % (new_keys - old_keys)] + + if old_keys - new_keys: + diff += [" * keys not presently seen: %s" % (old_keys - new_keys)] + + # Add topical message + if diff: + diff.insert(0, "Dictionaries had differing keys:") + + diffkeys = len(diff) + + # Values in common keys would differ quite often, + # so we need to join the messages together + for k in new_keys.intersection(old_keys): + try: + new, old = dnew[k], dold[k] + same = new == old + if not same: + # Since JSON does not discriminate between lists and + # tuples, we might need to cast them into the same type + # as the last resort. And lets try to be more generic + same = old.__class__(new) == old + except Exception: + same = False + if not same: + diff += [" * %s: %r != %r" % (k, dnew[k], dold[k])] + + if len(diff) > diffkeys: + diff.insert(diffkeys, "Some dictionary entries had differing values:") + + return textwrap_indent('\n'.join(diff), ' ' * indent) + + +def rgetcwd(error=True): + """ + Robust replacement for getcwd when folders get removed + If error==True, this is just an alias for os.getcwd() + """ + if error: + return os.getcwd() + + try: + cwd = os.getcwd() + except OSError as exc: + # Changing back to cwd is probably not necessary + # but this makes sure there's somewhere to change to. + cwd = os.getenv('PWD') + if cwd is None: + raise OSError(( + exc.errno, 'Current directory does not exist anymore, ' + 'and nipype was not able to guess it from the environment')) + warn('Current folder does not exist, replacing with "%s" instead.' % cwd) + return cwd diff --git a/nipype/utils/nipype2boutiques.py b/nipype/utils/nipype2boutiques.py new file mode 100644 index 0000000000..21ecbc0eee --- /dev/null +++ b/nipype/utils/nipype2boutiques.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import str, open, bytes +# This tool exports a Nipype interface in the Boutiques (https://github.com/boutiques) JSON format. +# Boutiques tools can be imported in CBRAIN (https://github.com/aces/cbrain) among other platforms. +# +# Limitations: +# * List outputs are not supported. +# * Default values are not extracted from the documentation of the Nipype interface. +# * The following input types must be ignored for the output path template creation (see option -t): +# ** String restrictions, i.e. String inputs that accept only a restricted set of values. +# ** mutually exclusive inputs. +# * Path-templates are wrong when output files are not created in the execution directory (e.g. when a sub-directory is created). +# * Optional outputs, i.e. outputs that not always produced, may not be detected. + +import os +import argparse +import sys +import tempfile +import simplejson as json + +from ..scripts.instance import import_module + + +def generate_boutiques_descriptor( + module, interface_name, ignored_template_inputs, docker_image, + docker_index, verbose, ignore_template_numbers): + ''' + Returns a JSON string containing a JSON Boutiques description of a Nipype interface. + Arguments: + * module: module where the Nipype interface is declared. + * interface: Nipype interface. 
+ * ignored_template_inputs: a list of input names that should be ignored in the generation of output path templates. + * ignore_template_numbers: True if numbers must be ignored in output path creations. + ''' + + if not module: + raise Exception("Undefined module.") + + # Retrieves Nipype interface + if isinstance(module, (str, bytes)): + import_module(module) + module_name = str(module) + module = sys.modules[module] + else: + module_name = str(module.__name__) + + interface = getattr(module, interface_name)() + inputs = interface.input_spec() + outputs = interface.output_spec() + + # Tool description + tool_desc = {} + tool_desc['name'] = interface_name + tool_desc[ + 'command-line'] = "nipype_cmd " + module_name + " " + interface_name + " " + tool_desc[ + 'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")." + tool_desc['inputs'] = [] + tool_desc['outputs'] = [] + tool_desc['tool-version'] = interface.version + tool_desc['schema-version'] = '0.2-snapshot' + if docker_image: + tool_desc['docker-image'] = docker_image + if docker_index: + tool_desc['docker-index'] = docker_index + + # Generates tool inputs + for name, spec in sorted(interface.inputs.traits(transient=None).items()): + input = get_boutiques_input(inputs, interface, name, spec, + ignored_template_inputs, verbose, + ignore_template_numbers) + tool_desc['inputs'].append(input) + tool_desc['command-line'] += input['command-line-key'] + " " + if verbose: + print("-> Adding input " + input['name']) + + # Generates tool outputs + for name, spec in sorted(outputs.traits(transient=None).items()): + output = get_boutiques_output(name, interface, tool_desc['inputs'], + verbose) + if output['path-template'] != "": + tool_desc['outputs'].append(output) + if verbose: + print("-> Adding output " + output['name']) + elif verbose: + print("xx Skipping output " + output['name'] + + " with no path template.") + if tool_desc['outputs'] == []: + raise Exception("Tool has no output.") + + # Removes all temporary values from inputs (otherwise they will + # appear in the JSON output) + for input in tool_desc['inputs']: + del input['tempvalue'] + + return json.dumps(tool_desc, indent=4, separators=(',', ': ')) + + +def get_boutiques_input(inputs, interface, input_name, spec, + ignored_template_inputs, verbose, + ignore_template_numbers): + """ + Returns a dictionary containing the Boutiques input corresponding to a Nipype intput. + + Args: + * inputs: inputs of the Nipype interface. + * interface: Nipype interface. + * input_name: name of the Nipype input. + * spec: Nipype input spec. + * ignored_template_inputs: input names for which no temporary value must be generated. + * ignore_template_numbers: True if numbers must be ignored in output path creations. + + Assumes that: + * Input names are unique. + """ + if not spec.desc: + spec.desc = "No description provided." + spec_info = spec.full_info(inputs, input_name, None) + + input = {} + input['id'] = input_name + input['name'] = input_name.replace('_', ' ').capitalize() + input['type'] = get_type_from_spec_info(spec_info) + input['list'] = is_list(spec_info) + input['command-line-key'] = "[" + input_name.upper( + ) + "]" # assumes that input names are unique + input['command-line-flag'] = ("--%s" % input_name + " ").strip() + input['tempvalue'] = None + input['description'] = spec_info.capitalize( + ) + ". " + spec.desc.capitalize() + if not input['description'].endswith('.'): + input['description'] += '.' 
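+    # An input is optional unless its Nipype spec is flagged mandatory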
+ if not (hasattr(spec, "mandatory") and spec.mandatory): + input['optional'] = True + else: + input['optional'] = False + if spec.usedefault: + input['default-value'] = spec.default_value()[1] + + # Create unique, temporary value. + temp_value = must_generate_value(input_name, input['type'], + ignored_template_inputs, spec_info, spec, + ignore_template_numbers) + if temp_value: + tempvalue = get_unique_value(input['type'], input_name) + setattr(interface.inputs, input_name, tempvalue) + input['tempvalue'] = tempvalue + if verbose: + print("oo Path-template creation using " + input['id'] + "=" + + str(tempvalue)) + + # Now that temp values have been generated, set Boolean types to + # Number (there is no Boolean type in Boutiques) + if input['type'] == "Boolean": + input['type'] = "Number" + + return input + + +def get_boutiques_output(name, interface, tool_inputs, verbose=False): + """ + Returns a dictionary containing the Boutiques output corresponding to a Nipype output. + + Args: + * name: name of the Nipype output. + * interface: Nipype interface. + * tool_inputs: list of tool inputs (as produced by method get_boutiques_input). + + Assumes that: + * Output names are unique. + * Input values involved in the path template are defined. + * Output files are written in the current directory. + * There is a single output value (output lists are not supported). + """ + output = {} + output['name'] = name.replace('_', ' ').capitalize() + output['id'] = name + output['type'] = "File" + output['path-template'] = "" + output[ + 'optional'] = True # no real way to determine if an output is always produced, regardless of the input values. + + # Path template creation. + + output_value = interface._list_outputs()[name] + if output_value != "" and isinstance( + output_value, + str): # FIXME: this crashes when there are multiple output values. + # Go find from which input value it was built + for input in tool_inputs: + if not input['tempvalue']: + continue + input_value = input['tempvalue'] + if input['type'] == "File": + # Take the base name + input_value = os.path.splitext( + os.path.basename(input_value))[0] + if str(input_value) in output_value: + output_value = os.path.basename( + output_value.replace(input_value, + input['command-line-key']) + ) # FIXME: this only works if output is written in the current directory + output['path-template'] = os.path.basename(output_value) + return output + + +def get_type_from_spec_info(spec_info): + ''' + Returns an input type from the spec info. There must be a better + way to get an input type in Nipype than to parse the spec info. + ''' + if ("an existing file name" in spec_info) or ( + "input volumes" in spec_info): + return "File" + elif ("an integer" in spec_info or "a float" in spec_info): + return "Number" + elif "a boolean" in spec_info: + return "Boolean" + return "String" + + +def is_list(spec_info): + ''' + Returns True if the spec info looks like it describes a list + parameter. There must be a better way in Nipype to check if an input + is a list. + ''' + if "a list" in spec_info: + return True + return False + + +def get_unique_value(type, id): + ''' + Returns a unique value of type 'type', for input with id 'id', + assuming id is unique. + ''' + return { + "File": os.path.abspath(create_tempfile()), + "Boolean": True, + "Number": abs(hash(id)), # abs in case input param must be positive... + "String": id + }[type] + + +def create_tempfile(): + ''' + Creates a temp file and returns its name. 
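+
+    For example:
+
+    >>> import os
+    >>> fname = create_tempfile()
+    >>> os.path.isfile(fname)
+    True
+    >>> os.remove(fname)  # clean up the illustration's file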
+ ''' + fileTemp = tempfile.NamedTemporaryFile(delete=False) + fileTemp.write(b"hello") + fileTemp.close() + return fileTemp.name + + +def must_generate_value(name, type, ignored_template_inputs, spec_info, spec, + ignore_template_numbers): + ''' + Return True if a temporary value must be generated for this input. + Arguments: + * name: input name. + * type: input_type. + * ignored_template_inputs: a list of inputs names for which no value must be generated. + * spec_info: spec info of the Nipype input + * ignore_template_numbers: True if numbers must be ignored. + ''' + # Return false when type is number and numbers must be ignored. + if ignore_template_numbers and type == "Number": + return False + # Only generate value for the first element of mutually exclusive inputs. + if spec.xor and spec.xor[0] != name: + return False + # Directory types are not supported + if "an existing directory name" in spec_info: + return False + # Don't know how to generate a list. + if "a list" in spec_info or "a tuple" in spec_info: + return False + # Don't know how to generate a dictionary. + if "a dictionary" in spec_info: + return False + # Best guess to detect string restrictions... + if "' or '" in spec_info: + return False + if spec.default or spec.default_value(): + return False + if not ignored_template_inputs: + return True + return not (name in ignored_template_inputs) diff --git a/nipype/utils/nipype_cmd.py b/nipype/utils/nipype_cmd.py new file mode 100644 index 0000000000..b31795aa92 --- /dev/null +++ b/nipype/utils/nipype_cmd.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str +import os +import argparse +import inspect +import sys + +from ..interfaces.base import Interface, InputMultiPath, traits +from .misc import str2bool + + +def listClasses(module=None): + if module: + __import__(module) + pkg = sys.modules[module] + print("Available Interfaces:") + for k, v in sorted(list(pkg.__dict__.items())): + if inspect.isclass(v) and issubclass(v, Interface): + print("\t%s" % k) + + +def add_options(parser=None, module=None, function=None): + interface = None + if parser and module and function: + __import__(module) + interface = getattr(sys.modules[module], function)() + + inputs = interface.input_spec() + for name, spec in sorted( + interface.inputs.traits(transient=None).items()): + desc = "\n".join(interface._get_trait_desc(inputs, name, + spec))[len(name) + 2:] + args = {} + + if spec.is_trait_type(traits.Bool): + args["action"] = 'store_true' + + if hasattr(spec, "mandatory") and spec.mandatory: + if spec.is_trait_type(InputMultiPath): + args["nargs"] = "+" + parser.add_argument(name, help=desc, **args) + else: + if spec.is_trait_type(InputMultiPath): + args["nargs"] = "*" + parser.add_argument( + "--%s" % name, dest=name, help=desc, **args) + return parser, interface + + +def run_instance(interface, options): + print("setting function inputs") + + for input_name, _ in list(interface.inputs.items()): + if getattr(options, input_name) is not None: + value = getattr(options, input_name) + try: + setattr(interface.inputs, input_name, value) + except ValueError as e: + print("Error when setting the value of %s: '%s'" % (input_name, + str(e))) + + print(interface.inputs) + res = interface.run() + print(res.outputs) + + +def main(argv): + + if len(argv) == 2 and not argv[1].startswith("-"): + listClasses(argv[1]) + sys.exit(0) + + parser = argparse.ArgumentParser( + description='Nipype 
interface runner', prog=argv[0]) + parser.add_argument("module", type=str, help="Module name") + parser.add_argument("interface", type=str, help="Interface name") + parsed = parser.parse_args(args=argv[1:3]) + + _, prog = os.path.split(argv[0]) + interface_parser = argparse.ArgumentParser( + description="Run %s" % parsed.interface, + prog=" ".join([prog] + argv[1:3])) + interface_parser, interface = add_options(interface_parser, parsed.module, + parsed.interface) + args = interface_parser.parse_args(args=argv[3:]) + run_instance(interface, args) diff --git a/nipype/utils/onetime.py b/nipype/utils/onetime.py new file mode 100644 index 0000000000..6983bc5c0f --- /dev/null +++ b/nipype/utils/onetime.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +"""Descriptor support for NIPY. + +Utilities to support special Python descriptors [1,2], in particular the use of +a useful pattern for properties we call 'one time properties'. These are +object attributes which are declared as properties, but become regular +attributes once they've been read the first time. They can thus be evaluated +later in the object's life cycle, but once evaluated they become normal, static +attributes with no function call overhead on access or any other constraints. + +References +---------- +[1] How-To Guide for Descriptors, Raymond +Hettinger. http://users.rcn.com/python/download/Descriptor.htm + +[2] Python data model, http://docs.python.org/reference/datamodel.html +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import object + + +class OneTimeProperty(object): + """A descriptor to make special properties that become normal attributes. + """ + + def __init__(self, func): + """Create a OneTimeProperty instance. + + Parameters + ---------- + func : method + + The method that will be called the first time to compute a value. + Afterwards, the method's name will be a standard attribute holding + the value of this computation. + """ + self.getter = func + self.name = func.__name__ + + def __get__(self, obj, type=None): + """ Called on attribute access on the class or instance. """ + if obj is None: + # Being called on the class, return the original function. + # This way, introspection works on the class. + return self.getter + + val = self.getter(obj) + # print "** setattr_on_read - loading '%s'" % self.name # dbg + setattr(obj, self.name, val) + return val + + +def setattr_on_read(func): + # XXX - beetter names for this? + # - cor_property (copy on read property) + # - sor_property (set on read property) + # - prop2attr_on_read + # ... ? + """Decorator to create OneTimeProperty attributes. + + Parameters + ---------- + func : method + The method that will be called the first time to compute a value. + Afterwards, the method's name will be a standard attribute holding the + value of this computation. + + Examples + -------- + >>> class MagicProp(object): + ... @setattr_on_read + ... def a(self): + ... return 99 + ... 
+ >>> x = MagicProp() + >>> 'a' in x.__dict__ + False + >>> x.a + 99 + >>> 'a' in x.__dict__ + True + """ + return OneTimeProperty(func) diff --git a/nipype/utils/profiler.py b/nipype/utils/profiler.py new file mode 100644 index 0000000000..d8ec32ffe3 --- /dev/null +++ b/nipype/utils/profiler.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Utilities to keep track of performance +""" +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +import threading +from time import time +try: + import psutil +except ImportError as exc: + psutil = None + +from builtins import open, range +from .. import config, logging + +proflogger = logging.getLogger('nipype.utils') +resource_monitor = config.resource_monitor + +# Init variables +_MB = 1024.0**2 + + +class ResourceMonitor(threading.Thread): + """ + A ``Thread`` to monitor a specific PID with a certain frequence + to a file + """ + + def __init__(self, pid, freq=5, fname=None, python=True): + # Make sure psutil is imported + import psutil + + if freq < 0.2: + raise RuntimeError( + 'Frequency (%0.2fs) cannot be lower than 0.2s' % freq) + + if fname is None: + fname = '.proc-%d_time-%s_freq-%0.2f' % (pid, time(), freq) + self._fname = fname + self._logfile = open(self._fname, 'w') + self._freq = freq + self._python = python + + # Leave process initialized and make first sample + self._process = psutil.Process(pid) + self._sample(cpu_interval=0.2) + + # Start thread + threading.Thread.__init__(self) + self._event = threading.Event() + + @property + def fname(self): + """Get/set the internal filename""" + return self._fname + + def stop(self): + """Stop monitoring""" + if not self._event.is_set(): + self._event.set() + self.join() + self._sample() + self._logfile.flush() + self._logfile.close() + + def _sample(self, cpu_interval=None): + cpu = 0.0 + rss = 0.0 + vms = 0.0 + try: + with self._process.oneshot(): + cpu += self._process.cpu_percent(interval=cpu_interval) + mem_info = self._process.memory_info() + rss += mem_info.rss + vms += mem_info.vms + except psutil.NoSuchProcess: + pass + + # Iterate through child processes and get number of their threads + try: + children = self._process.children(recursive=True) + except psutil.NoSuchProcess: + children = [] + + for child in children: + try: + with child.oneshot(): + cpu += child.cpu_percent() + mem_info = child.memory_info() + rss += mem_info.rss + vms += mem_info.vms + except psutil.NoSuchProcess: + pass + + print( + '%f,%f,%f,%f' % (time(), cpu, rss / _MB, vms / _MB), + file=self._logfile) + self._logfile.flush() + + def run(self): + """Core monitoring function, called by start()""" + start_time = time() + wait_til = start_time + while not self._event.is_set(): + self._sample() + wait_til += self._freq + self._event.wait(max(0, wait_til - time())) + + +# Log node stats function +def log_nodes_cb(node, status): + """Function to record node run statistics to a log file as json + dictionaries + + Parameters + ---------- + node : nipype.pipeline.engine.Node + the node being logged + status : string + acceptable values are 'start', 'end'; otherwise it is + considered and error + + Returns + ------- + None + this function does not return any values, it logs the node + status info to the callback logger + """ + + if status != 'end': + return + + # Import packages + import logging + import json + + status_dict = { + 'name': node.name, + 'id': node._id, + 
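+        # Editor's note: the fields below are read off node.result.runtime;
+        # cpu_percent/mem_peak_gb exist only when the resource monitor ran,
+        # hence the 'N/A' defaults, while startTime/endTime are assumed to
+        # always be recorded (two-argument getattr would raise otherwise).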
'start': getattr(node.result.runtime, 'startTime'), + 'finish': getattr(node.result.runtime, 'endTime'), + 'duration': getattr(node.result.runtime, 'duration'), + 'runtime_threads': getattr(node.result.runtime, 'cpu_percent', 'N/A'), + 'runtime_memory_gb': getattr(node.result.runtime, 'mem_peak_gb', + 'N/A'), + 'estimated_memory_gb': node.mem_gb, + 'num_threads': node.n_procs, + } + + if status_dict['start'] is None or status_dict['finish'] is None: + status_dict['error'] = True + + # Dump string to log + logging.getLogger('callback').debug(json.dumps(status_dict)) + + +# Get total system RAM +def get_system_total_memory_gb(): + """ + Function to get the total RAM of the running system in GB + """ + + # Import packages + import os + import sys + + # Get memory + if 'linux' in sys.platform: + with open('/proc/meminfo', 'r') as f_in: + meminfo_lines = f_in.readlines() + mem_total_line = [ + line for line in meminfo_lines if 'MemTotal' in line + ][0] + mem_total = float(mem_total_line.split()[1]) + memory_gb = mem_total / (1024.0**2) + elif 'darwin' in sys.platform: + mem_str = os.popen('sysctl hw.memsize').read().strip().split(' ')[-1] + memory_gb = float(mem_str) / (1024.0**3) + else: + err_msg = 'System platform: %s is not supported' + raise Exception(err_msg) + + # Return memory + return memory_gb + + +# Get max resources used for process +def get_max_resources_used(pid, mem_mb, num_threads, pyfunc=False): + """ + Function to get the RAM and threads utilized by a given process + + Parameters + ---------- + pid : integer + the process ID of process to profile + mem_mb : float + the high memory watermark so far during process execution (in MB) + num_threads: int + the high thread watermark so far during process execution + + Returns + ------- + mem_mb : float + the new high memory watermark of process (MB) + num_threads : float + the new high thread watermark of process + """ + + if not resource_monitor: + raise RuntimeError('Attempted to measure resources with option ' + '"monitoring.enabled" set off.') + + try: + mem_mb = max(mem_mb, _get_ram_mb(pid, pyfunc=pyfunc)) + num_threads = max(num_threads, _get_num_threads(pid)) + except Exception as exc: + proflogger.info('Could not get resources used by process.\n%s', exc) + + return mem_mb, num_threads + + +# Get number of threads for process +def _get_num_threads(pid): + """ + Function to get the number of threads a process is using + + Parameters + ---------- + pid : integer + the process ID of process to profile + + Returns + ------- + num_threads : int + the number of threads that the process is using + + """ + + try: + proc = psutil.Process(pid) + # If process is running + if proc.status() == psutil.STATUS_RUNNING: + num_threads = proc.num_threads() + elif proc.num_threads() > 1: + tprocs = [psutil.Process(thr.id) for thr in proc.threads()] + alive_tprocs = [ + tproc for tproc in tprocs + if tproc.status() == psutil.STATUS_RUNNING + ] + num_threads = len(alive_tprocs) + else: + num_threads = 1 + + child_threads = 0 + # Iterate through child processes and get number of their threads + for child in proc.children(recursive=True): + # Leaf process + if len(child.children()) == 0: + # If process is running, get its number of threads + if child.status() == psutil.STATUS_RUNNING: + child_thr = child.num_threads() + # If its not necessarily running, but still multi-threaded + elif child.num_threads() > 1: + # Cast each thread as a process and check for only running + tprocs = [ + psutil.Process(thr.id) for thr in child.threads() + ] + 
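+                    # Editor's note: thread ids are looked up as PIDs here
+                    # (psutil.Process(thr.id)); that only holds on platforms
+                    # such as Linux where task ids share the PID namespace,
+                    # and only RUNNING threads are counted below.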
alive_tprocs = [ + tproc for tproc in tprocs + if tproc.status() == psutil.STATUS_RUNNING + ] + child_thr = len(alive_tprocs) + # Otherwise, no threads are running + else: + child_thr = 0 + # Increment child threads + child_threads += child_thr + except psutil.NoSuchProcess: + return None + + # Number of threads is max between found active children and parent + num_threads = max(child_threads, num_threads) + + # Return number of threads found + return num_threads + + +# Get ram usage of process +def _get_ram_mb(pid, pyfunc=False): + """ + Function to get the RAM usage of a process and its children + Reference: http://ftp.dev411.com/t/python/python-list/095thexx8g/\ +multiprocessing-forking-memory-usage + + Parameters + ---------- + pid : integer + the PID of the process to get RAM usage of + pyfunc : boolean (optional); default=False + a flag to indicate if the process is a python function; + when Pythons are multithreaded via multiprocess or threading, + children functions include their own memory + parents. if this + is set, the parent memory will removed from children memories + + + Returns + ------- + mem_mb : float + the memory RAM in MB utilized by the process PID + + """ + try: + # Init parent + parent = psutil.Process(pid) + # Get memory of parent + parent_mem = parent.memory_info().rss + mem_mb = parent_mem / _MB + # Iterate through child processes + for child in parent.children(recursive=True): + child_mem = child.memory_info().rss + if pyfunc: + child_mem -= parent_mem + mem_mb += child_mem / _MB + except psutil.NoSuchProcess: + return None + + # Return memory + return mem_mb + + +def _use_cpu(x): + ctr = 0 + while ctr < 1e7: + ctr += 1 + x * x + + +# Spin multiple threads +def _use_resources(n_procs, mem_gb): + """ + Function to execute multiple use_gb_ram functions in parallel + """ + import os + import sys + import psutil + from multiprocessing import Pool + from nipype import logging + from nipype.utils.profiler import _use_cpu + + iflogger = logging.getLogger('nipype.interface') + + # Getsize of one character string + BSIZE = sys.getsizeof(' ') - sys.getsizeof(' ') + BOFFSET = sys.getsizeof('') + _GB = 1024.0**3 + + def _use_gb_ram(mem_gb): + """A test function to consume mem_gb GB of RAM""" + num_bytes = int(mem_gb * _GB) + # Eat mem_gb GB of memory for 1 second + gb_str = ' ' * ((num_bytes - BOFFSET) // BSIZE) + assert sys.getsizeof(gb_str) == num_bytes + return gb_str + + # Measure the amount of memory this process already holds + p = psutil.Process(os.getpid()) + mem_offset = p.memory_info().rss / _GB + big_str = _use_gb_ram(mem_gb - mem_offset) + _use_cpu(5) + mem_total = p.memory_info().rss / _GB + del big_str + iflogger.info('[%d] Memory offset %0.2fGB, total %0.2fGB', os.getpid(), + mem_offset, mem_total) + + if n_procs > 1: + pool = Pool(n_procs) + pool.map(_use_cpu, range(n_procs)) + return True diff --git a/nipype/utils/provenance.py b/nipype/utils/provenance.py new file mode 100644 index 0000000000..fb2306ee4b --- /dev/null +++ b/nipype/utils/provenance.py @@ -0,0 +1,481 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open, object, str, bytes + +# Py2 compat: http://python-future.org/compatible_idioms.html#collections-counter-and-ordereddict +from future import standard_library +standard_library.install_aliases() +from collections import OrderedDict + +from copy import deepcopy +from pickle import dumps +import os +import getpass +import platform +from uuid import uuid1 
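+# Editor's note: provenance support assumes the third-party 'prov' and
+# 'simplejson' packages are importable; there is no soft-import fallback here.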
+import simplejson as json + +import numpy as np +import prov.model as pm + +from .. import get_info, logging, __version__ +from .filemanip import (md5, hashlib, hash_infile) + +logger = logging.getLogger('nipype.utils') +foaf = pm.Namespace("foaf", "http://xmlns.com/foaf/0.1/") +dcterms = pm.Namespace("dcterms", "http://purl.org/dc/terms/") +nipype_ns = pm.Namespace("nipype", "http://nipy.org/nipype/terms/") +niiri = pm.Namespace("niiri", "http://iri.nidash.org/") +crypto = pm.Namespace("crypto", ("http://id.loc.gov/vocabulary/preservation/" + "cryptographicHashFunctions/")) +get_id = lambda: niiri[uuid1().hex] + +PROV_ENVVARS = [ + 'PATH', 'FSLDIR', 'FREESURFER_HOME', 'ANTSPATH', 'CAMINOPATH', 'CLASSPATH', + 'LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'FIX_VERTEX_AREA', + 'FSF_OUTPUT_FORMAT', 'FSLCONFDIR', 'FSLOUTPUTTYPE', 'LOGNAME', 'USER', + 'MKL_NUM_THREADS', 'OMP_NUM_THREADS' +] + + +def get_attr_id(attr, skip=None): + dictwithhash, hashval = get_hashval(attr, skip=skip) + return niiri[hashval] + + +max_text_len = 1024000 + + +def get_hashval(inputdict, skip=None): + """Return a dictionary of our items with hashes for each file. + + Searches through dictionary items and if an item is a file, it + calculates the md5 hash of the file contents and stores the + file name and hash value as the new key value. + + However, the overall bunch hash is calculated only on the hash + value of a file. The path and name of the file are not used in + the overall hash calculation. + + Returns + ------- + dict_withhash : dict + Copy of our dictionary with the new file hashes included + with each file. + hashvalue : str + The md5 hash value of the traited spec + + """ + + dict_withhash = {} + dict_nofilename = OrderedDict() + keys = {} + for key in inputdict: + if skip is not None and key in skip: + continue + keys[key.uri] = key + for key in sorted(keys): + val = inputdict[keys[key]] + outname = key + try: + if isinstance(val, pm.URIRef): + val = val.decode() + except AttributeError: + pass + if isinstance(val, pm.QualifiedName): + val = val.uri + if isinstance(val, pm.Literal): + val = val.value + dict_nofilename[outname] = _get_sorteddict(val) + dict_withhash[outname] = _get_sorteddict(val, True) + sorted_dict = str(sorted(dict_nofilename.items())) + return (dict_withhash, md5(sorted_dict.encode()).hexdigest()) + + +def _get_sorteddict(object, dictwithhash=False): + if isinstance(object, dict): + out = OrderedDict() + for key, val in sorted(object.items()): + if val: + out[key] = _get_sorteddict(val, dictwithhash) + elif isinstance(object, (list, tuple)): + out = [] + for val in object: + if val: + out.append(_get_sorteddict(val, dictwithhash)) + if isinstance(object, tuple): + out = tuple(out) + else: + if isinstance(object, str) and os.path.isfile(object): + hash = hash_infile(object) + if dictwithhash: + out = (object, hash) + else: + out = hash + elif isinstance(object, float): + out = '%.10f'.format(object) + else: + out = object + return out + + +def safe_encode(x, as_literal=True): + """ + Encodes a python value for prov + """ + if x is None: + value = "Unknown" + if as_literal: + return pm.Literal(value, pm.XSD['string']) + else: + return value + + if isinstance(x, (str, bytes)): + if isinstance(x, bytes): + x = str(x, 'utf-8') + if os.path.exists(x): + if x[0] != os.pathsep: + x = os.path.abspath(x) + value = 'file://{}{}'.format(platform.node().lower(), x) + if not as_literal: + return value + try: + return pm.URIRef(value) + except AttributeError: + return pm.Literal(value, pm.XSD['anyURI']) 
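+        # Editor's note: existing paths become host-qualified file:// URIs so
+        # provenance records stay resolvable off-machine; the
+        # "x[0] != os.pathsep" guard looks like it intends os.sep, since
+        # os.pathsep is the $PATH separator (':'), not '/'. Non-path strings
+        # fall through to the clipped-literal branch below.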
+ else: + value = x + if len(x) > max_text_len: + cliptxt = '...Clipped...' + value = x[:max_text_len - len(cliptxt)] + cliptxt + + if not as_literal: + return value + + return pm.Literal(value, pm.XSD['string']) + if isinstance(x, int): + if not as_literal: + return x + return pm.Literal(int(x), pm.XSD['integer']) + if isinstance(x, float): + if not as_literal: + return x + return pm.Literal(x, pm.XSD['float']) + if isinstance(x, dict): + outdict = {} + for key, value in list(x.items()): + encoded_value = safe_encode(value, as_literal=False) + if isinstance(encoded_value, pm.Literal): + outdict[key] = encoded_value.json_representation() + else: + outdict[key] = encoded_value + + try: + jsonstr = json.dumps(outdict) + except UnicodeDecodeError as excp: + jsonstr = "Could not encode dictionary. {}".format(excp) + logger.warning('Prov: %s', jsonstr) + + if not as_literal: + return jsonstr + return pm.Literal(jsonstr, pm.XSD['string']) + if isinstance(x, (list, tuple)): + x = list(x) + is_object = False + try: + nptype = np.array(x).dtype + is_object = nptype == np.dtype(object) + except ValueError: + is_object = True + + # If the array contains an heterogeneous mixture of data types + # they should be encoded sequentially + if is_object: + outlist = [] + for value in x: + encoded_value = safe_encode(value, as_literal=False) + if isinstance(encoded_value, pm.Literal): + outlist.append(encoded_value.json_representation()) + else: + outlist.append(encoded_value) + x = outlist + + try: + jsonstr = json.dumps(x) + except UnicodeDecodeError as excp: + jsonstr = "Could not encode list/tuple. {}".format(excp) + logger.warning('Prov: %s', jsonstr) + + if not as_literal: + return jsonstr + return pm.Literal(jsonstr, pm.XSD['string']) + + # If is a literal, and as_literal do nothing. + # else bring back to json. + if isinstance(x, pm.Literal): + if as_literal: + return x + return dumps(x.json_representation()) + + jsonstr = None + ltype = pm.XSD['string'] + try: + jsonstr = json.dumps(x.__dict__) + except AttributeError: + pass + + if jsonstr is None: + try: + jsonstr = dumps(x) + ltype = nipype_ns['pickle'] + except TypeError as excp: + jsonstr = 'Could not encode object. 
{}'.format(excp) + + if not as_literal: + return jsonstr + return pm.Literal(jsonstr, ltype) + + +def prov_encode(graph, value, create_container=True): + if isinstance(value, (list, tuple)) and create_container: + value = list(value) + if len(value) == 0: + encoded_literal = safe_encode(value) + attr = {pm.PROV['value']: encoded_literal} + eid = get_attr_id(attr) + return graph.entity(eid, attr) + + if len(value) == 1: + return prov_encode(graph, value[0]) + + entities = [] + for item in value: + item_entity = prov_encode(graph, item) + entities.append(item_entity) + if isinstance(item, (list, tuple)): + continue + + item_entity_val = list(item_entity.value)[0] + is_str = isinstance(item_entity_val, str) + if not is_str or (is_str and 'file://' not in item_entity_val): + return prov_encode(graph, value, create_container=False) + + eid = get_id() + entity = graph.collection(identifier=eid) + for item_entity in entities: + graph.hadMember(eid, item_entity) + + return entity + else: + encoded_literal = safe_encode(value) + attr = {pm.PROV['value']: encoded_literal} + if isinstance(value, str) and os.path.exists(value): + attr.update({pm.PROV['location']: encoded_literal}) + if not os.path.isdir(value): + sha512 = hash_infile(value, crypto=hashlib.sha512) + attr.update({ + crypto['sha512']: + pm.Literal(sha512, pm.XSD['string']) + }) + eid = get_attr_id( + attr, skip=[pm.PROV['location'], pm.PROV['value']]) + else: + eid = get_attr_id(attr, skip=[pm.PROV['location']]) + else: + eid = get_attr_id(attr) + entity = graph.entity(eid, attr) + return entity + + +def write_provenance(results, filename='provenance', format='all'): + prov = None + try: + ps = ProvStore() + ps.add_results(results) + prov = ps.write_provenance(filename=filename, format=format) + except Exception as e: + import traceback + err_msg = traceback.format_exc() + if getattr(e, 'args'): + err_msg += '\n\nException arguments:\n' + ', '.join( + ['"%s"' % arg for arg in e.args]) + logger.warning('Writing provenance failed - Exception details:\n%s', + err_msg) + + return prov + + +class ProvStore(object): + def __init__(self): + self.g = pm.ProvDocument() + self.g.add_namespace(foaf) + self.g.add_namespace(dcterms) + self.g.add_namespace(nipype_ns) + self.g.add_namespace(niiri) + + def add_results(self, results, keep_provenance=False): + if keep_provenance and results.provenance: + self.g = deepcopy(results.provenance) + return self.g + runtime = results.runtime + interface = results.interface + inputs = results.inputs + outputs = results.outputs + classname = interface.__name__ + modulepath = "{0}.{1}".format(interface.__module__, interface.__name__) + activitytype = ''.join([i.capitalize() for i in modulepath.split('.')]) + + a0_attrs = { + nipype_ns['module']: interface.__module__, + nipype_ns["interface"]: classname, + pm.PROV["type"]: nipype_ns[activitytype], + pm.PROV["label"]: classname, + nipype_ns['duration']: safe_encode(runtime.duration), + nipype_ns['workingDirectory']: safe_encode(runtime.cwd), + nipype_ns['returnCode']: safe_encode(runtime.returncode), + nipype_ns['platform']: safe_encode(runtime.platform), + nipype_ns['version']: safe_encode(runtime.version), + } + a0_attrs[foaf["host"]] = pm.Literal(runtime.hostname, pm.XSD['anyURI']) + + try: + a0_attrs.update({ + nipype_ns['command']: safe_encode(runtime.cmdline) + }) + a0_attrs.update({ + nipype_ns['commandPath']: + safe_encode(runtime.command_path) + }) + a0_attrs.update({ + nipype_ns['dependencies']: + safe_encode(runtime.dependencies) + }) + except 
AttributeError: + pass + a0 = self.g.activity(get_id(), runtime.startTime, runtime.endTime, + a0_attrs) + # environment + id = get_id() + env_collection = self.g.collection(id) + env_collection.add_attributes({ + pm.PROV['type']: + nipype_ns['Environment'], + pm.PROV['label']: + "Environment" + }) + self.g.used(a0, id) + # write environment entities + for idx, (key, val) in enumerate(sorted(runtime.environ.items())): + if key not in PROV_ENVVARS: + continue + in_attr = { + pm.PROV["label"]: key, + nipype_ns["environmentVariable"]: key, + pm.PROV["value"]: safe_encode(val) + } + id = get_attr_id(in_attr) + self.g.entity(id, in_attr) + self.g.hadMember(env_collection, id) + # write input entities + if inputs: + id = get_id() + input_collection = self.g.collection(id) + input_collection.add_attributes({ + pm.PROV['type']: + nipype_ns['Inputs'], + pm.PROV['label']: + "Inputs" + }) + # write input entities + for idx, (key, val) in enumerate(sorted(inputs.items())): + in_entity = prov_encode(self.g, val).identifier + self.g.hadMember(input_collection, in_entity) + used_attr = {pm.PROV["label"]: key, nipype_ns["inPort"]: key} + self.g.used( + activity=a0, entity=in_entity, other_attributes=used_attr) + # write output entities + if outputs: + id = get_id() + output_collection = self.g.collection(id) + if not isinstance(outputs, dict): + outputs = outputs.get_traitsfree() + output_collection.add_attributes({ + pm.PROV['type']: + nipype_ns['Outputs'], + pm.PROV['label']: + "Outputs" + }) + self.g.wasGeneratedBy(output_collection, a0) + # write output entities + for idx, (key, val) in enumerate(sorted(outputs.items())): + out_entity = prov_encode(self.g, val).identifier + self.g.hadMember(output_collection, out_entity) + gen_attr = {pm.PROV["label"]: key, nipype_ns["outPort"]: key} + self.g.generation( + out_entity, activity=a0, other_attributes=gen_attr) + # write runtime entities + id = get_id() + runtime_collection = self.g.collection(id) + runtime_collection.add_attributes({ + pm.PROV['type']: + nipype_ns['Runtime'], + pm.PROV['label']: + "RuntimeInfo" + }) + self.g.wasGeneratedBy(runtime_collection, a0) + for key, value in sorted(runtime.items()): + if not value: + continue + if key not in ['stdout', 'stderr', 'merged']: + continue + attr = {pm.PROV["label"]: key, nipype_ns[key]: safe_encode(value)} + id = get_id() + self.g.entity(get_id(), attr) + self.g.hadMember(runtime_collection, id) + + # create agents + user_attr = { + pm.PROV["type"]: pm.PROV["Person"], + pm.PROV["label"]: getpass.getuser(), + foaf["name"]: safe_encode(getpass.getuser()) + } + user_agent = self.g.agent(get_attr_id(user_attr), user_attr) + agent_attr = { + pm.PROV["type"]: pm.PROV["SoftwareAgent"], + pm.PROV["label"]: "Nipype", + foaf["name"]: safe_encode("Nipype"), + nipype_ns["version"]: __version__ + } + for key, value in list(get_info().items()): + agent_attr.update({nipype_ns[key]: safe_encode(value)}) + software_agent = self.g.agent(get_attr_id(agent_attr), agent_attr) + self.g.wasAssociatedWith(a0, user_agent, None, None, { + pm.PROV["hadRole"]: nipype_ns["LoggedInUser"] + }) + self.g.wasAssociatedWith(a0, software_agent) + return self.g + + def write_provenance(self, filename='provenance', format='all'): + if format in ['provn', 'all']: + with open(filename + '.provn', 'wt') as fp: + fp.writelines(self.g.get_provn()) + try: + if format in ['rdf', 'all']: + if len(self.g.bundles) == 0: + rdf_format = 'turtle' + ext = '.ttl' + else: + rdf_format = 'trig' + ext = '.trig' + self.g.serialize( + filename + ext, 
format='rdf', rdf_format=rdf_format)
+            if format in ['jsonld']:
+                self.g.serialize(
+                    filename + '.jsonld',
+                    format='rdf',
+                    rdf_format='json-ld',
+                    indent=4)
+        except pm.serializers.DoNotExist:
+            pass
+        return self.g
diff --git a/nipype/utils/spm_docs.py b/nipype/utils/spm_docs.py
new file mode 100644
index 0000000000..4c71c05523
--- /dev/null
+++ b/nipype/utils/spm_docs.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+"""Grab documentation from SPM."""
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from future.utils import raise_from
+
+import os
+
+from ..interfaces import matlab
+
+
+def grab_doc(task_name):
+    """Grab the SPM documentation for the given SPM task named `task_name`
+
+    Parameters
+    ----------
+    task_name : string
+        Task name for which we are grabbing documentation. Example
+        task names are ``Realign: Estimate & Reslice``, ``Normalise:
+        Estimate & Write``.
+
+    See Also
+    --------
+    spm_flat_config.m : This function can print out all the possible
+        task names.
+
+    """
+
+    cmd = matlab.MatlabCommand(resource_monitor=False)
+    # We need to tell Matlab where to find our spm_get_doc.m file.
+    cwd = os.path.dirname(__file__)
+    # Build matlab command
+    mcmd = "addpath('%s');spm_get_doc('%s')" % (cwd, task_name)
+    cmd.inputs.script_lines = mcmd
+    # Run the command and get the documentation out of the result.
+    out = cmd.run()
+    return _strip_header(out.runtime.stdout)
+
+
+def _strip_header(doc):
+    """Strip Matlab header and splash info off doc.
+
+    Searches for the tag 'NIPYPE' in the doc and returns everything after that.
+
+    """
+    hdr = 'NIPYPE'
+    # There's some weird cruft at the end of the docstring, almost looks like
+    # the hex for the escape character 0x1b.
+    cruft = '\x1b'
+    try:
+        index = doc.index(hdr)
+        index += len(hdr)
+        index += 1
+        doc = doc[index:]
+        try:
+            index = doc.index(cruft)
+        except ValueError:
+            index = len(doc)
+        return doc[:index]
+    except ValueError as e:
+        raise_from(IOError('This docstring was not generated by Nipype!\n'), e)
diff --git a/nipype/utils/spm_flat_config.m b/nipype/utils/spm_flat_config.m
new file mode 100644
index 0000000000..6e489251b2
--- /dev/null
+++ b/nipype/utils/spm_flat_config.m
@@ -0,0 +1,39 @@
+function cfgstruct = spm_flat_config(print_names)
+% Get a flat spm_config structure, with option to print out names
+%
+% This calls spm_config() to get the nested configuration
+% structure from spm. We use this to fetch documentation; the
+% flattened structure is much easier to search through. If
+% print_names is true (value of 1) it will print out the configuration
+% names. If print_names is false (value of 0), it will only return
+% the flattened structure.
+if strcmp(spm('ver'),'SPM5')
+    cfg = spm_config();
+else
+    cfgstruct = [];
+    return;
+end
+cfgstruct = spm_cfg_list(cfg, {});
+if print_names
+    [rows, cols] = size(cfgstruct);
+    for i = 1:cols
+        fprintf(1, '%d : %s\n', i, cfgstruct{i}.name)
+    end
+end
+end
+
+
+function objlist = spm_cfg_list(astruct, objlist)
+% Flatten the nested structure in 'astruct'.
+% Returns a cell array.
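+% Recurses depth-first: any node that has a .values field is expanded,
+% everything else is appended to objlist as a leaf.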
+% Usage: objlist = spm_cfg_list(astruct, {}) + +if isfield(astruct, 'values') + [rows, cols] = size(astruct.values); + for i = 1:cols + objlist = spm_cfg_list(astruct.values{i}, objlist); + end +else + objlist = {objlist{:} astruct}; +end +end diff --git a/nipype/utils/spm_get_doc.m b/nipype/utils/spm_get_doc.m new file mode 100644 index 0000000000..3fbff782be --- /dev/null +++ b/nipype/utils/spm_get_doc.m @@ -0,0 +1,21 @@ +function doc = spm_get_doc(docname) +% Get the documentation from SPM for the functionality named +% docname. +% +% This will search through the spm_config() object and grab the +% documentation whose name matches docname. +cfgstruct = spm_flat_config(0); +[rows, cols] = size(cfgstruct); +docstruct.help={'None'}; +% Loop over cell array and search for the docname +for i = 1:cols + if strcmp(cfgstruct{i}.name, docname) + docstruct = cfgstruct{i}; + break + end +end +% Add a tag so we can strip off the Matlab header information and +% only print out the SPM documentation. +tag = 'NIPYPE\n'; +doc = strcat(tag, docstruct.help{:}); +end diff --git a/nipype/utils/tests/__init__.py b/nipype/utils/tests/__init__.py new file mode 100644 index 0000000000..939910d6b6 --- /dev/null +++ b/nipype/utils/tests/__init__.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Please write tests for all code submitted to the repository. The code will be +used by many people, and will in due course be used in live analyses, so we +need to make sure that we have the best possible defenses against bugs. It also +helps us think about code interfaces, and gives examples of code use that can +be useful for others using the code. + +Python's unit testing framework (the +U{unittest} module) is used to +implement project tests. We use the convention that each package contains a +subpackage called tests which contains modules defining test cases (subclasses +of U{unittest.TestCase}) +for that package. The nipy.utils.tests package contains an example test case +called L{test_template.TemplateTest} to get you started writing your tests. +Please try to include working test cases for all functions and classes that +you contribute. Often, writing tests for your code before the code is written +helps to frame your thoughts about what the code should look like. 
+""" diff --git a/nipype/utils/tests/test_cmd.py b/nipype/utils/tests/test_cmd.py new file mode 100644 index 0000000000..0e16e0aad8 --- /dev/null +++ b/nipype/utils/tests/test_cmd.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from future import standard_library +standard_library.install_aliases() + +import pytest +import sys +from contextlib import contextmanager + +from io import StringIO +from ...utils import nipype_cmd + +PY2 = sys.version_info[0] < 3 + + +@contextmanager +def capture_sys_output(): + caputure_out, capture_err = StringIO(), StringIO() + current_out, current_err = sys.stdout, sys.stderr + try: + sys.stdout, sys.stderr = caputure_out, capture_err + yield caputure_out, capture_err + finally: + sys.stdout, sys.stderr = current_out, current_err + + +class TestNipypeCMD(): + maxDiff = None + + def test_main_returns_2_on_empty(self): + with pytest.raises(SystemExit) as cm: + with capture_sys_output() as (stdout, stderr): + nipype_cmd.main(['nipype_cmd']) + + exit_exception = cm.value + assert exit_exception.code == 2 + + msg = """usage: nipype_cmd [-h] module interface +nipype_cmd: error: the following arguments are required: module, interface +""" + + if PY2: + msg = """usage: nipype_cmd [-h] module interface +nipype_cmd: error: too few arguments +""" + assert stderr.getvalue() == msg + assert stdout.getvalue() == '' + + def test_main_returns_0_on_help(self): + with pytest.raises(SystemExit) as cm: + with capture_sys_output() as (stdout, stderr): + nipype_cmd.main(['nipype_cmd', '-h']) + + exit_exception = cm.value + assert exit_exception.code == 0 + + assert stderr.getvalue() == '' + assert stdout.getvalue() == \ + """usage: nipype_cmd [-h] module interface + +Nipype interface runner + +positional arguments: + module Module name + interface Interface name + +optional arguments: + -h, --help show this help message and exit +""" + + def test_list_nipy_interfacesp(self): + with pytest.raises(SystemExit) as cm: + with capture_sys_output() as (stdout, stderr): + nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy']) + + # repeat twice in case nipy raises warnings + with pytest.raises(SystemExit) as cm: + with capture_sys_output() as (stdout, stderr): + nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy']) + exit_exception = cm.value + assert exit_exception.code == 0 + + assert stderr.getvalue() == '' + assert stdout.getvalue() == \ + """Available Interfaces: +\tComputeMask +\tEstimateContrast +\tFitGLM +\tSimilarity +\tSpaceTimeRealigner +""" diff --git a/nipype/utils/tests/test_config.py b/nipype/utils/tests/test_config.py new file mode 100644 index 0000000000..a3c0480b29 --- /dev/null +++ b/nipype/utils/tests/test_config.py @@ -0,0 +1,292 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +import sys +import pytest +from nipype import config +from mock import MagicMock +from builtins import object + +try: + import xvfbwrapper + has_Xvfb = True +except ImportError: + has_Xvfb = False + +# Define mocks for xvfbwrapper. Do not forget the spec to ensure that +# hasattr() checks return False with missing attributes. 
+xvfbpatch = MagicMock(spec=['Xvfb']) +xvfbpatch.Xvfb.return_value = MagicMock( + spec=['new_display', 'start', 'stop'], new_display=2010) + +# Mock the legacy xvfbwrapper.Xvfb class (changed display attribute name) +xvfbpatch_old = MagicMock(spec=['Xvfb']) +xvfbpatch_old.Xvfb.return_value = MagicMock( + spec=['vdisplay_num', 'start', 'stop'], vdisplay_num=2010) + + +@pytest.mark.parametrize('dispvar', [':12', 'localhost:12', 'localhost:12.1']) +def test_display_parse(monkeypatch, dispvar): + """Check that when $DISPLAY is defined, the display is correctly parsed""" + config._display = None + config._config.remove_option('execution', 'display_variable') + monkeypatch.setenv('DISPLAY', dispvar) + assert config.get_display() == ':12' + # Test that it was correctly cached + assert config.get_display() == ':12' + + +@pytest.mark.parametrize('dispnum', range(5)) +def test_display_config(monkeypatch, dispnum): + """Check that the display_variable option is used ($DISPLAY not set)""" + config._display = None + dispstr = ':%d' % dispnum + config.set('execution', 'display_variable', dispstr) + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + assert config.get_display() == config.get('execution', 'display_variable') + # Test that it was correctly cached + assert config.get_display() == config.get('execution', 'display_variable') + + +@pytest.mark.parametrize('dispnum', range(5)) +def test_display_system(monkeypatch, dispnum): + """Check that when only a $DISPLAY is defined, it is used""" + config._display = None + config._config.remove_option('execution', 'display_variable') + dispstr = ':%d' % dispnum + monkeypatch.setenv('DISPLAY', dispstr) + assert config.get_display() == dispstr + # Test that it was correctly cached + assert config.get_display() == dispstr + + +def test_display_config_and_system(monkeypatch): + """Check that when only both config and $DISPLAY are defined, the config + takes precedence""" + config._display = None + dispstr = ':10' + config.set('execution', 'display_variable', dispstr) + monkeypatch.setenv('DISPLAY', ':0') + assert config.get_display() == dispstr + # Test that it was correctly cached + assert config.get_display() == dispstr + + +def test_display_noconfig_nosystem_patched(monkeypatch): + """Check that when no $DISPLAY nor option are specified, a virtual Xvfb is + used""" + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch) + monkeypatch.setattr(sys, 'platform', value='linux') + assert config.get_display() == ":2010" + # Test that it was correctly cached + assert config.get_display() == ':2010' + + # Check that raises in Mac + config._display = None + monkeypatch.setattr(sys, 'platform', value='darwin') + with pytest.raises(RuntimeError): + config.get_display() + + +def test_display_empty_patched(monkeypatch): + """ + Check that when $DISPLAY is empty string and no option is specified, + a virtual Xvfb is used + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setenv('DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch) + monkeypatch.setattr(sys, 'platform', value='linux') + assert config.get_display() == ':2010' + # Test that it was correctly cached + assert config.get_display() == ':2010' + + # Check that 
raises in Mac + config._display = None + monkeypatch.setattr(sys, 'platform', value='darwin') + with pytest.raises(RuntimeError): + config.get_display() + +def test_display_noconfig_nosystem_patched_oldxvfbwrapper(monkeypatch): + """ + Check that when no $DISPLAY nor option are specified, + a virtual Xvfb is used (with a legacy version of xvfbwrapper). + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delitem(os.environ, 'DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old) + monkeypatch.setattr(sys, 'platform', value='linux') + assert config.get_display() == ":2010" + # Test that it was correctly cached + assert config.get_display() == ':2010' + + # Check that raises in Mac + config._display = None + monkeypatch.setattr(sys, 'platform', value='darwin') + with pytest.raises(RuntimeError): + config.get_display() + +def test_display_empty_patched_oldxvfbwrapper(monkeypatch): + """ + Check that when $DISPLAY is empty string and no option is specified, + a virtual Xvfb is used (with a legacy version of xvfbwrapper). + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setenv('DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', xvfbpatch_old) + monkeypatch.setattr(sys, 'platform', value='linux') + assert config.get_display() == ':2010' + # Test that it was correctly cached + assert config.get_display() == ':2010' + + # Check that raises in Mac + config._display = None + monkeypatch.setattr(sys, 'platform', value='darwin') + with pytest.raises(RuntimeError): + config.get_display() + +def test_display_noconfig_nosystem_notinstalled(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is not installed + but necessary (no config and $DISPLAY unset) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delenv('DISPLAY', raising=False) + monkeypatch.setitem(sys.modules, 'xvfbwrapper', None) + with pytest.raises(RuntimeError): + config.get_display() + + +def test_display_empty_notinstalled(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is not installed + but necessary (no config and $DISPLAY empty) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setenv('DISPLAY', '') + monkeypatch.setitem(sys.modules, 'xvfbwrapper', None) + with pytest.raises(RuntimeError): + config.get_display() + + +@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed') +@pytest.mark.skipif('darwin' in sys.platform, reason='macosx requires root for Xvfb') +def test_display_noconfig_nosystem_installed(monkeypatch): + """ + Check that actually uses xvfbwrapper when installed (not mocked) + and necessary (no config and $DISPLAY unset) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delenv('DISPLAY', raising=False) + newdisp = config.get_display() + assert int(newdisp.split(':')[-1]) > 1000 + # Test that it was correctly cached + assert config.get_display() == newdisp + + +@pytest.mark.skipif(not has_Xvfb, reason='xvfbwrapper not installed') 
+@pytest.mark.skipif('darwin' in sys.platform, reason='macosx requires root for Xvfb') +def test_display_empty_installed(monkeypatch): + """ + Check that actually uses xvfbwrapper when installed (not mocked) + and necessary (no config and $DISPLAY empty) + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.setenv('DISPLAY', '') + newdisp = config.get_display() + assert int(newdisp.split(':')[-1]) > 1000 + # Test that it was correctly cached + assert config.get_display() == newdisp + + +def test_display_empty_macosx(monkeypatch): + """ + Check that an exception is raised if xvfbwrapper is necessary + (no config and $DISPLAY unset) but platform is OSX. See + https://github.com/nipy/nipype/issues/1400 + """ + config._display = None + if config.has_option('execution', 'display_variable'): + config._config.remove_option('execution', 'display_variable') + monkeypatch.delenv('DISPLAY', '') + + monkeypatch.setattr(sys, 'platform', 'darwin') + with pytest.raises(RuntimeError): + config.get_display() + + +def test_cwd_cached(tmpdir): + """Check that changing dirs does not change nipype's cwd""" + oldcwd = config.cwd + tmpdir.chdir() + assert config.cwd == oldcwd + + +def test_debug_mode(): + from ... import logging + + sofc_config = config.get('execution', 'stop_on_first_crash') + ruo_config = config.get('execution', 'remove_unnecessary_outputs') + ki_config = config.get('execution', 'keep_inputs') + wf_config = config.get('logging', 'workflow_level') + if_config = config.get('logging', 'interface_level') + ut_config = config.get('logging', 'utils_level') + + wf_level = logging.getLogger('nipype.workflow').level + if_level = logging.getLogger('nipype.interface').level + ut_level = logging.getLogger('nipype.utils').level + + config.enable_debug_mode() + + # Check config is updated and logging levels, too + assert config.get('execution', 'stop_on_first_crash') == 'true' + assert config.get('execution', 'remove_unnecessary_outputs') == 'false' + assert config.get('execution', 'keep_inputs') == 'true' + assert config.get('logging', 'workflow_level') == 'DEBUG' + assert config.get('logging', 'interface_level') == 'DEBUG' + assert config.get('logging', 'utils_level') == 'DEBUG' + + assert logging.getLogger('nipype.workflow').level == 10 + assert logging.getLogger('nipype.interface').level == 10 + assert logging.getLogger('nipype.utils').level == 10 + + # Restore config and levels + config.set('execution', 'stop_on_first_crash', sofc_config) + config.set('execution', 'remove_unnecessary_outputs', ruo_config) + config.set('execution', 'keep_inputs', ki_config) + config.set('logging', 'workflow_level', wf_config) + config.set('logging', 'interface_level', if_config) + config.set('logging', 'utils_level', ut_config) + logging.update_logging(config) + + assert config.get('execution', 'stop_on_first_crash') == sofc_config + assert config.get('execution', 'remove_unnecessary_outputs') == ruo_config + assert config.get('execution', 'keep_inputs') == ki_config + assert config.get('logging', 'workflow_level') == wf_config + assert config.get('logging', 'interface_level') == if_config + assert config.get('logging', 'utils_level') == ut_config + + assert logging.getLogger('nipype.workflow').level == wf_level + assert logging.getLogger('nipype.interface').level == if_level + assert logging.getLogger('nipype.utils').level == ut_level diff --git a/nipype/utils/tests/test_docparse.py 
b/nipype/utils/tests/test_docparse.py new file mode 100644 index 0000000000..c8aabed768 --- /dev/null +++ b/nipype/utils/tests/test_docparse.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from nipype.utils.docparse import reverse_opt_map, build_doc, insert_doc + +foo_opt_map = {'outline': '-o', 'fun': '-f %.2f', 'flags': '%s'} + +foo_doc = """Usage: foo infile outfile [opts] + +Bunch of options: + + -o something about an outline + -f intensity of fun factor + +Other stuff: + -v verbose + +""" + +fmtd_doc = """Parameters +---------- +outline : + something about an outline +fun : + intensity of fun factor + +Others Parameters +----------------- + -v verbose""" + + +def test_rev_opt_map(): + map = {'-f': 'fun', '-o': 'outline'} + rev_map = reverse_opt_map(foo_opt_map) + assert rev_map == map + + +def test_build_doc(): + opts = reverse_opt_map(foo_opt_map) + doc = build_doc(foo_doc, opts) + assert doc == fmtd_doc + + +inserted_doc = """Parameters +---------- +infile : str + The name of the input file +outfile : str + The name of the output file +outline : + something about an outline +fun : + intensity of fun factor + +Others Parameters +----------------- + -v verbose""" + + +def test_insert_doc(): + new_items = ['infile : str', ' The name of the input file'] + new_items.extend(['outfile : str', ' The name of the output file']) + newdoc = insert_doc(fmtd_doc, new_items) + assert newdoc == inserted_doc diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py new file mode 100644 index 0000000000..ae5316c7d7 --- /dev/null +++ b/nipype/utils/tests/test_filemanip.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import unicode_literals +from builtins import open + +import os +import time +import warnings + +import mock +import pytest +from ...testing import TempFATFS +from ...utils.filemanip import ( + save_json, load_json, fname_presuffix, fnames_presuffix, hash_rename, + check_forhash, _parse_mount_table, _cifs_table, on_cifs, copyfile, + copyfiles, ensure_list, simplify_list, check_depends, + split_filename, get_related_files, indirectory, + loadpkl, loadcrash, savepkl) + + +def _ignore_atime(stat): + return stat[:7] + stat[8:] + + +@pytest.mark.parametrize( + "filename, split", + [('foo.nii', ('', 'foo', '.nii')), ('foo.nii.gz', ('', 'foo', '.nii.gz')), + ('foo.niml.dset', ('', 'foo', '.niml.dset')), + ('/usr/local/foo.nii.gz', + ('/usr/local', 'foo', '.nii.gz')), ('../usr/local/foo.nii', + ('../usr/local', 'foo', '.nii')), + ('/usr/local/foo.a.b.c.d', + ('/usr/local', 'foo.a.b.c', '.d')), ('/usr/local/', + ('/usr/local', '', ''))]) +def test_split_filename(filename, split): + res = split_filename(filename) + assert res == split + + +def test_fname_presuffix(): + fname = 'foo.nii' + pth = fname_presuffix(fname, 'pre_', '_post', '/tmp') + assert pth == '/tmp/pre_foo_post.nii' + fname += '.gz' + pth = fname_presuffix(fname, 'pre_', '_post', '/tmp') + assert pth == '/tmp/pre_foo_post.nii.gz' + pth = fname_presuffix(fname, 'pre_', '_post', '/tmp', use_ext=False) + assert pth == '/tmp/pre_foo_post' + + +def test_fnames_presuffix(): + fnames = ['foo.nii', 'bar.nii'] + pths = fnames_presuffix(fnames, 'pre_', '_post', '/tmp') + assert pths == ['/tmp/pre_foo_post.nii', '/tmp/pre_bar_post.nii'] + + 
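+# Editor's sketch (not part of the original patch): the tests above pin down
+# fname_presuffix's contract; a typical derived-filename call looks like the
+# helper below (the input filename is a made-up example).
+def _presuffix_example():
+    """Illustrative only: build a derived output path for an input image."""
+    from nipype.utils.filemanip import fname_presuffix
+    # prefix/suffix are applied inside the (multi-part) extension and
+    # newpath relocates the result:
+    # -> '/tmp/smoothed_sub-01_bold_v1.nii.gz'
+    return fname_presuffix('sub-01_bold.nii.gz', 'smoothed_', '_v1',
+                           newpath='/tmp')
+
+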
+@pytest.mark.parametrize("filename, newname", + [('foobar.nii', 'foobar_0xabc123.nii'), + ('foobar.nii.gz', 'foobar_0xabc123.nii.gz')]) +def test_hash_rename(filename, newname): + new_name = hash_rename(filename, 'abc123') + assert new_name == newname + + +def test_check_forhash(): + fname = 'foobar' + orig_hash = '_0x4323dbcefdc51906decd8edcb3327943' + hashed_name = ''.join((fname, orig_hash, '.nii')) + result, hash = check_forhash(hashed_name) + assert result + assert hash == [orig_hash] + result, hash = check_forhash('foobar.nii') + assert not result + assert hash is None + + +@pytest.fixture() +def _temp_analyze_files(tmpdir): + """Generate temporary analyze file pair.""" + orig_img = tmpdir.join("orig.img") + orig_hdr = tmpdir.join("orig.hdr") + orig_img.open('w+').close() + orig_hdr.open('w+').close() + return str(orig_img), str(orig_hdr) + + +@pytest.fixture() +def _temp_analyze_files_prime(tmpdir): + """Generate temporary analyze file pair.""" + orig_img = tmpdir.join("orig_prime.img") + orig_hdr = tmpdir.join("orig_prime.hdr") + orig_img.open('w+').close() + orig_hdr.open('w+').close() + return orig_img.strpath, orig_hdr.strpath + + +def test_copyfile(_temp_analyze_files): + orig_img, orig_hdr = _temp_analyze_files + pth, fname = os.path.split(orig_img) + new_img = os.path.join(pth, 'newfile.img') + new_hdr = os.path.join(pth, 'newfile.hdr') + copyfile(orig_img, new_img) + assert os.path.exists(new_img) + assert os.path.exists(new_hdr) + + +def test_copyfile_true(_temp_analyze_files): + orig_img, orig_hdr = _temp_analyze_files + pth, fname = os.path.split(orig_img) + new_img = os.path.join(pth, 'newfile.img') + new_hdr = os.path.join(pth, 'newfile.hdr') + # Test with copy=True + copyfile(orig_img, new_img, copy=True) + assert os.path.exists(new_img) + assert os.path.exists(new_hdr) + + +def test_copyfiles(_temp_analyze_files, _temp_analyze_files_prime): + orig_img1, orig_hdr1 = _temp_analyze_files + orig_img2, orig_hdr2 = _temp_analyze_files_prime + pth, fname = os.path.split(orig_img1) + new_img1 = os.path.join(pth, 'newfile.img') + new_hdr1 = os.path.join(pth, 'newfile.hdr') + pth, fname = os.path.split(orig_img2) + new_img2 = os.path.join(pth, 'secondfile.img') + new_hdr2 = os.path.join(pth, 'secondfile.hdr') + copyfiles([orig_img1, orig_img2], [new_img1, new_img2]) + assert os.path.exists(new_img1) + assert os.path.exists(new_hdr1) + assert os.path.exists(new_img2) + assert os.path.exists(new_hdr2) + + +def test_linkchain(_temp_analyze_files): + if os.name is not 'posix': + return + orig_img, orig_hdr = _temp_analyze_files + pth, fname = os.path.split(orig_img) + new_img1 = os.path.join(pth, 'newfile1.img') + new_hdr1 = os.path.join(pth, 'newfile1.hdr') + new_img2 = os.path.join(pth, 'newfile2.img') + new_hdr2 = os.path.join(pth, 'newfile2.hdr') + new_img3 = os.path.join(pth, 'newfile3.img') + new_hdr3 = os.path.join(pth, 'newfile3.hdr') + copyfile(orig_img, new_img1) + assert os.path.islink(new_img1) + assert os.path.islink(new_hdr1) + copyfile(new_img1, new_img2, copy=True) + assert not os.path.islink(new_img2) + assert not os.path.islink(new_hdr2) + assert not os.path.samefile(orig_img, new_img2) + assert not os.path.samefile(orig_hdr, new_hdr2) + copyfile(new_img1, new_img3, copy=True, use_hardlink=True) + assert not os.path.islink(new_img3) + assert not os.path.islink(new_hdr3) + assert os.path.samefile(orig_img, new_img3) + assert os.path.samefile(orig_hdr, new_hdr3) + + +def test_recopy(_temp_analyze_files): + # Re-copying with the same parameters on an unchanged file 
should be + # idempotent + # + # Test for copying from regular files and symlinks + orig_img, orig_hdr = _temp_analyze_files + pth, fname = os.path.split(orig_img) + img_link = os.path.join(pth, 'imglink.img') + new_img = os.path.join(pth, 'newfile.img') + new_hdr = os.path.join(pth, 'newfile.hdr') + copyfile(orig_img, img_link) + for copy in (True, False): + for use_hardlink in (True, False): + for hashmethod in ('timestamp', 'content'): + kwargs = { + 'copy': copy, + 'use_hardlink': use_hardlink, + 'hashmethod': hashmethod + } + # Copying does not preserve the original file's timestamp, so + # we may delete and re-copy, if the test is slower than a clock + # tick + if copy and not use_hardlink and hashmethod == 'timestamp': + continue + + copyfile(orig_img, new_img, **kwargs) + img_stat = _ignore_atime(os.stat(new_img)) + hdr_stat = _ignore_atime(os.stat(new_hdr)) + copyfile(orig_img, new_img, **kwargs) + err_msg = "Regular - OS: {}; Copy: {}; Hardlink: {}".format( + os.name, copy, use_hardlink) + assert img_stat == _ignore_atime(os.stat(new_img)), err_msg + assert hdr_stat == _ignore_atime(os.stat(new_hdr)), err_msg + os.unlink(new_img) + os.unlink(new_hdr) + + copyfile(img_link, new_img, **kwargs) + img_stat = _ignore_atime(os.stat(new_img)) + hdr_stat = _ignore_atime(os.stat(new_hdr)) + copyfile(img_link, new_img, **kwargs) + err_msg = "Symlink - OS: {}; Copy: {}; Hardlink: {}".format( + os.name, copy, use_hardlink) + assert img_stat == _ignore_atime(os.stat(new_img)), err_msg + assert hdr_stat == _ignore_atime(os.stat(new_hdr)), err_msg + os.unlink(new_img) + os.unlink(new_hdr) + + +def test_copyfallback(_temp_analyze_files): + if os.name is not 'posix': + return + orig_img, orig_hdr = _temp_analyze_files + pth, imgname = os.path.split(orig_img) + pth, hdrname = os.path.split(orig_hdr) + try: + fatfs = TempFATFS() + except (IOError, OSError): + warnings.warn('Fuse mount failed. 
copyfile fallback tests skipped.') + else: + with fatfs as fatdir: + tgt_img = os.path.join(fatdir, imgname) + tgt_hdr = os.path.join(fatdir, hdrname) + for copy in (True, False): + for use_hardlink in (True, False): + copyfile( + orig_img, + tgt_img, + copy=copy, + use_hardlink=use_hardlink) + assert os.path.exists(tgt_img) + assert os.path.exists(tgt_hdr) + assert not os.path.islink(tgt_img) + assert not os.path.islink(tgt_hdr) + assert not os.path.samefile(orig_img, tgt_img) + assert not os.path.samefile(orig_hdr, tgt_hdr) + os.unlink(tgt_img) + os.unlink(tgt_hdr) + + +def test_get_related_files(_temp_analyze_files): + orig_img, orig_hdr = _temp_analyze_files + + related_files = get_related_files(orig_img) + assert orig_img in related_files + assert orig_hdr in related_files + + related_files = get_related_files(orig_hdr) + assert orig_img in related_files + assert orig_hdr in related_files + + +def test_get_related_files_noninclusive(_temp_analyze_files): + orig_img, orig_hdr = _temp_analyze_files + + related_files = get_related_files(orig_img, include_this_file=False) + assert orig_img not in related_files + assert orig_hdr in related_files + + related_files = get_related_files(orig_hdr, include_this_file=False) + assert orig_img in related_files + assert orig_hdr not in related_files + + +@pytest.mark.parametrize("filename, expected", + [('foo.nii', ['foo.nii']), (['foo.nii'], ['foo.nii']), + (('foo', 'bar'), ['foo', 'bar']), (12.34, None)]) +def test_ensure_list(filename, expected): + x = ensure_list(filename) + assert x == expected + + +@pytest.mark.parametrize("list, expected", [ + (['foo.nii'], 'foo.nii'), + (['foo', 'bar'], ['foo', 'bar']), +]) +def test_simplify_list(list, expected): + x = simplify_list(list) + assert x == expected + + +def test_check_depends(tmpdir): + def touch(fname): + with open(fname, 'a'): + os.utime(fname, None) + + dependencies = [tmpdir.join(str(i)).strpath for i in range(3)] + targets = [tmpdir.join(str(i)).strpath for i in range(3, 6)] + + # Targets newer than dependencies + for dep in dependencies: + touch(dep) + time.sleep(1) + for tgt in targets: + touch(tgt) + assert check_depends(targets, dependencies) + + # Targets older than newest dependency + time.sleep(1) + touch(dependencies[0]) + assert not check_depends(targets, dependencies) + + # Missing dependency + os.unlink(dependencies[0]) + try: + check_depends(targets, dependencies) + except OSError: + pass + else: + assert False, "Should raise OSError on missing dependency" + + +def test_json(tmpdir): + # Simple roundtrip test of json files, just a sanity check. 
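+    # (Items are compared sorted below because dict ordering is not
+    # guaranteed across the dump/load round trip on all supported Pythons.)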
+ adict = dict(a='one', c='three', b='two') + name = tmpdir.join('test.json').strpath + save_json(name, adict) # save_json closes the file + new_dict = load_json(name) + os.unlink(name) + assert sorted(adict.items()) == sorted(new_dict.items()) + + +@pytest.mark.parametrize( + "file, length, expected_files", + [('/path/test.img', 3, + ['/path/test.hdr', '/path/test.img', '/path/test.mat']), + ('/path/test.hdr', 3, + ['/path/test.hdr', '/path/test.img', '/path/test.mat']), + ('/path/test.BRIK', 2, ['/path/test.BRIK', '/path/test.HEAD']), + ('/path/test.HEAD', 2, ['/path/test.BRIK', '/path/test.HEAD']), + ('/path/foo.nii', 2, ['/path/foo.nii', '/path/foo.mat'])]) +def test_related_files(file, length, expected_files): + related_files = get_related_files(file) + + assert len(related_files) == length + + for ef in expected_files: + assert ef in related_files + + +MOUNT_OUTPUTS = ( +# Linux, no CIFS +(r'''sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime) +proc on /proc type proc (rw,nosuid,nodev,noexec,relatime) +udev on /dev type devtmpfs (rw,nosuid,relatime,size=8121732k,nr_inodes=2030433,mode=755) +devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000) +tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime,size=1628440k,mode=755) +/dev/nvme0n1p2 on / type ext4 (rw,relatime,errors=remount-ro,data=ordered) +securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime) +tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev) +tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755) +cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd) +pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime) +efivarfs on /sys/firmware/efi/efivars type efivarfs (rw,nosuid,nodev,noexec,relatime) +cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) +cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) +cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) +cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) +systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct) +hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime) +debugfs on /sys/kernel/debug type debugfs (rw,relatime) +mqueue on /dev/mqueue type mqueue (rw,relatime) +fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime) +/dev/nvme0n1p1 on /boot/efi type vfat (rw,relatime,fmask=0077,dmask=0077,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro) +/dev/nvme0n1p2 on /var/lib/docker/aufs type ext4 (rw,relatime,errors=remount-ro,data=ordered) +gvfsd-fuse on /run/user/1002/gvfs type fuse.gvfsd-fuse (rw,nosuid,nodev,relatime,user_id=1002,group_id=1002) +''', 0, []), +# OS X, no CIFS +(r'''/dev/disk2 on / (hfs, local, journaled) +devfs on /dev (devfs, local, nobrowse) +map -hosts on /net (autofs, nosuid, automounted, nobrowse) +map auto_home on /home (autofs, automounted, nobrowse) +map -fstab on /Network/Servers (autofs, automounted, nobrowse) +/dev/disk3s2 on /Volumes/MyBookData (hfs, local, nodev, nosuid, journaled) +afni:/elrond0 on /Volumes/afni (nfs) +afni:/var/www/INCOMING on /Volumes/INCOMING (nfs) +afni:/fraid on /Volumes/afni (nfs, asynchronous) +boromir:/raid.bot on /Volumes/raid.bot (nfs) +elros:/volume2/AFNI_SHARE on /Volumes/AFNI_SHARE (nfs) +map -static on 
/Volumes/safni (autofs, automounted, nobrowse) +map -static on /Volumes/raid.top (autofs, automounted, nobrowse) +/dev/disk1s3 on /Volumes/Boot OS X (hfs, local, journaled, nobrowse) +''', 0, []), +# Non-zero exit code +('', 1, []), +# Variant of Linux example with CIFS added manually +(r'''sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime) +proc on /proc type proc (rw,nosuid,nodev,noexec,relatime) +udev on /dev type devtmpfs (rw,nosuid,relatime,size=8121732k,nr_inodes=2030433,mode=755) +devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000) +tmpfs on /run type tmpfs (rw,nosuid,noexec,relatime,size=1628440k,mode=755) +/dev/nvme0n1p2 on / type ext4 (rw,relatime,errors=remount-ro,data=ordered) +securityfs on /sys/kernel/security type securityfs (rw,nosuid,nodev,noexec,relatime) +tmpfs on /dev/shm type tmpfs (rw,nosuid,nodev) +tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,mode=755) +cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd) +pstore on /sys/fs/pstore type pstore (rw,nosuid,nodev,noexec,relatime) +efivarfs on /sys/firmware/efi/efivars type efivarfs (rw,nosuid,nodev,noexec,relatime) +cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) +cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) +cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) +cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) +systemd-1 on /proc/sys/fs/binfmt_misc type autofs (rw,relatime,fd=26,pgrp=1,timeout=0,minproto=5,maxproto=5,direct) +hugetlbfs on /dev/hugepages type hugetlbfs (rw,relatime) +debugfs on /sys/kernel/debug type debugfs (rw,relatime) +mqueue on /dev/mqueue type mqueue (rw,relatime) +fusectl on /sys/fs/fuse/connections type fusectl (rw,relatime) +/dev/nvme0n1p1 on /boot/efi type vfat (rw,relatime,fmask=0077,dmask=0077,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro) +/dev/nvme0n1p2 on /var/lib/docker/aufs type ext4 (rw,relatime,errors=remount-ro,data=ordered) +gvfsd-fuse on /run/user/1002/gvfs type fuse.gvfsd-fuse (rw,nosuid,nodev,relatime,user_id=1002,group_id=1002) +''', 0, []), +# Variant of OS X example with CIFS added manually +(r'''/dev/disk2 on / (hfs, local, journaled) +devfs on /dev (devfs, local, nobrowse) +afni:/elrond0 on /Volumes/afni (cifs) +afni:/var/www/INCOMING on /Volumes/INCOMING (nfs) +afni:/fraid on /Volumes/afni/fraid (nfs, asynchronous) +boromir:/raid.bot on /Volumes/raid.bot (nfs) +elros:/volume2/AFNI_SHARE on /Volumes/AFNI_SHARE (nfs) +''', 0, [('/Volumes/afni/fraid', 'nfs'), ('/Volumes/afni', 'cifs')]), +# From Windows: docker run --rm -it -v C:\:/data busybox mount +(r'''overlay on / type overlay (rw,relatime,lowerdir=/var/lib/docker/overlay2/l/26UTYITLF24YE7KEGTMHUNHPPG:/var/lib/docker/overlay2/l/SWGNP3T2EEB4CNBJFN3SDZLXHP,upperdir=/var/lib/docker/overlay2/a4c54ab1aa031bb5a14a424abd655510521e183ee4fa4158672e8376c89df394/diff,workdir=/var/lib/docker/overlay2/a4c54ab1aa031bb5a14a424abd655510521e183ee4fa4158672e8376c89df394/work) +proc on /proc type proc (rw,nosuid,nodev,noexec,relatime) +tmpfs on /dev type tmpfs (rw,nosuid,size=65536k,mode=755) +devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666) +sysfs on /sys type sysfs (ro,nosuid,nodev,noexec,relatime) +tmpfs on /sys/fs/cgroup type tmpfs (ro,nosuid,nodev,noexec,relatime,mode=755) +cpuset 
on /sys/fs/cgroup/cpuset type cgroup (ro,nosuid,nodev,noexec,relatime,cpuset) +cpu on /sys/fs/cgroup/cpu type cgroup (ro,nosuid,nodev,noexec,relatime,cpu) +cpuacct on /sys/fs/cgroup/cpuacct type cgroup (ro,nosuid,nodev,noexec,relatime,cpuacct) +blkio on /sys/fs/cgroup/blkio type cgroup (ro,nosuid,nodev,noexec,relatime,blkio) +memory on /sys/fs/cgroup/memory type cgroup (ro,nosuid,nodev,noexec,relatime,memory) +devices on /sys/fs/cgroup/devices type cgroup (ro,nosuid,nodev,noexec,relatime,devices) +freezer on /sys/fs/cgroup/freezer type cgroup (ro,nosuid,nodev,noexec,relatime,freezer) +net_cls on /sys/fs/cgroup/net_cls type cgroup (ro,nosuid,nodev,noexec,relatime,net_cls) +perf_event on /sys/fs/cgroup/perf_event type cgroup (ro,nosuid,nodev,noexec,relatime,perf_event) +net_prio on /sys/fs/cgroup/net_prio type cgroup (ro,nosuid,nodev,noexec,relatime,net_prio) +hugetlb on /sys/fs/cgroup/hugetlb type cgroup (ro,nosuid,nodev,noexec,relatime,hugetlb) +pids on /sys/fs/cgroup/pids type cgroup (ro,nosuid,nodev,noexec,relatime,pids) +cgroup on /sys/fs/cgroup/systemd type cgroup (ro,nosuid,nodev,noexec,relatime,name=systemd) +mqueue on /dev/mqueue type mqueue (rw,nosuid,nodev,noexec,relatime) +//10.0.75.1/C on /data type cifs (rw,relatime,vers=3.02,sec=ntlmsspi,cache=strict,username=filo,domain=MSI,uid=0,noforceuid,gid=0,noforcegid,addr=10.0.75.1,file_mode=0755,dir_mode=0755,iocharset=utf8,nounix,serverino,mapposix,nobrl,mfsymlinks,noperm,rsize=1048576,wsize=1048576,echo_interval=60,actimeo=1) +/dev/sda1 on /etc/resolv.conf type ext4 (rw,relatime,data=ordered) +/dev/sda1 on /etc/hostname type ext4 (rw,relatime,data=ordered) +/dev/sda1 on /etc/hosts type ext4 (rw,relatime,data=ordered) +shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime,size=65536k) +devpts on /dev/console type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666) +proc on /proc/bus type proc (ro,relatime) +proc on /proc/fs type proc (ro,relatime) +proc on /proc/irq type proc (ro,relatime) +proc on /proc/sys type proc (ro,relatime) +proc on /proc/sysrq-trigger type proc (ro,relatime) +tmpfs on /proc/kcore type tmpfs (rw,nosuid,size=65536k,mode=755) +tmpfs on /proc/timer_list type tmpfs (rw,nosuid,size=65536k,mode=755) +tmpfs on /proc/sched_debug type tmpfs (rw,nosuid,size=65536k,mode=755) +tmpfs on /proc/scsi type tmpfs (ro,relatime) +tmpfs on /sys/firmware type tmpfs (ro,relatime) +''', 0, [('/data', 'cifs')]), +# From @yarikoptic - added blank lines to test for resilience +(r'''/proc on /proc type proc (rw,relatime) +sysfs on /sys type sysfs (rw,nosuid,nodev,noexec,relatime) +tmpfs on /dev/shm type tmpfs (rw,relatime) +devpts on /dev/pts type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666) + +devpts on /dev/ptmx type devpts (rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=666) + +''', 0, []), +) + + +@pytest.mark.parametrize("output, exit_code, expected", MOUNT_OUTPUTS) +def test_parse_mount_table(output, exit_code, expected): + assert _parse_mount_table(exit_code, output) == expected + + +def test_cifs_check(): + assert isinstance(_cifs_table, list) + assert isinstance(on_cifs('/'), bool) + fake_table = [('/scratch/tmp', 'ext4'), ('/scratch', 'cifs')] + cifs_targets = [('/scratch/tmp/x/y', False), ('/scratch/tmp/x', False), + ('/scratch/x/y', True), ('/scratch/x', True), + ('/x/y', False), ('/x', False), ('/', False)] + + orig_table = _cifs_table[:] + _cifs_table[:] = [] + + for target, _ in cifs_targets: + assert on_cifs(target) is False + + _cifs_table.extend(fake_table) + for target, expected 
in cifs_targets: + assert on_cifs(target) is expected + + _cifs_table[:] = [] + _cifs_table.extend(orig_table) + + +def test_indirectory(tmpdir): + tmpdir.chdir() + + os.makedirs('subdir1/subdir2') + sd1 = os.path.abspath('subdir1') + sd2 = os.path.abspath('subdir1/subdir2') + + assert os.getcwd() == tmpdir.strpath + with indirectory('/'): + assert os.getcwd() == '/' + assert os.getcwd() == tmpdir.strpath + with indirectory('subdir1'): + assert os.getcwd() == sd1 + with indirectory('subdir2'): + assert os.getcwd() == sd2 + with indirectory('..'): + assert os.getcwd() == sd1 + with indirectory('/'): + assert os.getcwd() == '/' + assert os.getcwd() == sd1 + assert os.getcwd() == sd2 + assert os.getcwd() == sd1 + assert os.getcwd() == tmpdir.strpath + try: + with indirectory('subdir1'): + raise ValueError("Erroring out of context") + except ValueError: + pass + assert os.getcwd() == tmpdir.strpath + + +def test_pklization(tmpdir): + tmpdir.chdir() + + exc = Exception("There is something wrong here") + savepkl('./except.pkz', exc) + newexc = loadpkl('./except.pkz') + + assert exc.args == newexc.args + assert os.getcwd() == tmpdir.strpath + + +class Pickled: + + def __getstate__(self): + return self.__dict__ + + +class PickledBreaker: + + def __setstate__(self, d): + raise Exception() + + +def test_versioned_pklization(tmpdir): + tmpdir.chdir() + + obj = Pickled() + savepkl('./pickled.pkz', obj, versioning=True) + + with pytest.raises(Exception): + with mock.patch('nipype.utils.tests.test_filemanip.Pickled', PickledBreaker), \ + mock.patch('nipype.__version__', '0.0.0'): + + loadpkl('./pickled.pkz', versioning=True) + + +def test_unversioned_pklization(tmpdir): + tmpdir.chdir() + + obj = Pickled() + savepkl('./pickled.pkz', obj) + + with pytest.raises(Exception): + with mock.patch('nipype.utils.tests.test_filemanip.Pickled', PickledBreaker): + loadpkl('./pickled.pkz', versioning=True) diff --git a/nipype/utils/tests/test_functions.py b/nipype/utils/tests/test_functions.py new file mode 100644 index 0000000000..377bfe338f --- /dev/null +++ b/nipype/utils/tests/test_functions.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +import sys +import pytest +from nipype.utils.functions import (getsource, create_function_from_source) + + +def _func1(x): + return x**3 + + +def test_func_to_str(): + def func1(x): + return x**2 + + # Should be ok with both functions! 
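+    # (Both the module-level _func1 and the locally defined func1 should
+    # survive the getsource() -> create_function_from_source() round trip.)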
+ for f in _func1, func1: + f_src = getsource(f) + f_recreated = create_function_from_source(f_src) + assert f(2.3) == f_recreated(2.3) + + +def test_func_to_str_err(): + bad_src = "obbledygobbledygook" + with pytest.raises(RuntimeError): + create_function_from_source(bad_src) + + +def _print_statement(): + try: + exec('print ""') + return True + except SyntaxError: + return False + + +def test_func_string(): + def is_string(): + return isinstance('string', str) + + wrapped_func = create_function_from_source(getsource(is_string)) + assert is_string() == wrapped_func() + + +@pytest.mark.skipif(sys.version_info[0] > 2, reason="breaks python 3") +def test_func_print_py2(): + wrapped_func = create_function_from_source(getsource(_print_statement)) + assert wrapped_func() diff --git a/nipype/utils/tests/test_misc.py b/nipype/utils/tests/test_misc.py new file mode 100644 index 0000000000..8896039763 --- /dev/null +++ b/nipype/utils/tests/test_misc.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from future import standard_library +standard_library.install_aliases() + +import os +from shutil import rmtree +from builtins import next + +import pytest + +from nipype.utils.misc import (container_to_string, str2bool, flatten, + unflatten) + + +def test_cont_to_str(): + # list + x = ['a', 'b'] + assert container_to_string(x) == 'a b' + # tuple + x = tuple(x) + assert container_to_string(x) == 'a b' + # set + x = set(x) + y = container_to_string(x) + assert (y == 'a b') or (y == 'b a') + # dict + x = dict(a='a', b='b') + y = container_to_string(x) + assert (y == 'a b') or (y == 'b a') + # string + assert container_to_string('foobar') == 'foobar' + # int. Integers are not the main intent of this function, but see + # no reason why they shouldn't work. 
+ assert (container_to_string(123) == '123') + + +@pytest.mark.parametrize("string, expected", + [("yes", True), ("true", True), ("t", True), + ("1", True), ("no", False), ("false", False), + ("n", False), ("f", False), ("0", False)]) +def test_str2bool(string, expected): + assert str2bool(string) == expected + + +def test_flatten(): + in_list = [[1, 2, 3], [4], [[5, 6], 7], 8] + + flat = flatten(in_list) + assert flat == [1, 2, 3, 4, 5, 6, 7, 8] + + back = unflatten(flat, in_list) + assert in_list == back + + new_list = [2, 3, 4, 5, 6, 7, 8, 9] + back = unflatten(new_list, in_list) + assert back == [[2, 3, 4], [5], [[6, 7], 8], 9] + + flat = flatten([]) + assert flat == [] + + back = unflatten([], []) + assert back == [] + + +def test_rgetcwd(monkeypatch, tmpdir): + from ..misc import rgetcwd + oldpath = tmpdir.strpath + tmpdir.mkdir("sub").chdir() + newpath = os.getcwd() + + # Path still there + assert rgetcwd() == newpath + + # Remove path + rmtree(newpath, ignore_errors=True) + with pytest.raises(OSError): + os.getcwd() + + monkeypatch.setenv('PWD', oldpath) + assert rgetcwd(error=False) == oldpath + + # Test when error should be raised + with pytest.raises(OSError): + rgetcwd() + + # Deleted env variable + monkeypatch.delenv('PWD') + with pytest.raises(OSError): + rgetcwd(error=False) diff --git a/nipype/utils/tests/test_nipype2boutiques.py b/nipype/utils/tests/test_nipype2boutiques.py new file mode 100644 index 0000000000..f1d0c46eed --- /dev/null +++ b/nipype/utils/tests/test_nipype2boutiques.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from future import standard_library +standard_library.install_aliases() + +from ..nipype2boutiques import generate_boutiques_descriptor + + +def test_generate(): + generate_boutiques_descriptor(module='nipype.interfaces.ants.registration', + interface_name='ANTS', + ignored_template_inputs=(), + docker_image=None, + docker_index=None, + verbose=False, + ignore_template_numbers=False) diff --git a/nipype/utils/tests/test_provenance.py b/nipype/utils/tests/test_provenance.py new file mode 100644 index 0000000000..1d7233907a --- /dev/null +++ b/nipype/utils/tests/test_provenance.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import unicode_literals +from builtins import str, bytes +from future import standard_library +standard_library.install_aliases() + +import os + +from nipype.utils.provenance import ProvStore, safe_encode + + +def test_provenance(tmpdir): + from nipype.interfaces.base import CommandLine + tmpdir.chdir() + ps = ProvStore() + results = CommandLine('echo hello').run() + ps.add_results(results) + provn = ps.g.get_provn() + assert 'echo hello' in provn + + +def test_provenance_exists(tmpdir): + tmpdir.chdir() + from nipype import config + from nipype.interfaces.base import CommandLine + provenance_state = config.get('execution', 'write_provenance') + hash_state = config.get('execution', 'hash_method') + config.enable_provenance() + CommandLine('echo hello').run() + config.set('execution', 'write_provenance', provenance_state) + config.set('execution', 'hash_method', hash_state) + assert tmpdir.join('provenance.provn').check() + + +def test_safe_encode(): + a = '\xc3\xa9lg' + out = safe_encode(a) + assert out.value == a diff --git a/nipype/utils/tests/use_resources 
b/nipype/utils/tests/use_resources
new file mode 100755
index 0000000000..3054a17b31
--- /dev/null
+++ b/nipype/utils/tests/use_resources
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+#
+# use_resources
+'''
+Python script to use a certain amount of RAM and a number of
+threads
+
+Usage:
+    use_resources -g <num_gb> -p <num_threads>
+'''
+
+# Make main executable
+if __name__ == '__main__':
+
+    # Import packages
+    import argparse
+    from nipype.utils.profiler import _use_resources
+
+    # Init argparser
+    parser = argparse.ArgumentParser(description=__doc__)
+
+    # Add arguments
+    parser.add_argument(
+        '-g',
+        '--num_gb',
+        required=True,
+        type=float,
+        help='Number of GB RAM to use, can be float or int')
+    parser.add_argument(
+        '-p',
+        '--num_threads',
+        required=True,
+        type=int,
+        help='Number of threads to run in parallel')
+
+    # Parse args
+    args = parser.parse_args()
+    _use_resources(args.num_threads, args.num_gb)
diff --git a/nipype/utils/tmpdirs.py b/nipype/utils/tmpdirs.py
new file mode 100644
index 0000000000..73f0c4ecc4
--- /dev/null
+++ b/nipype/utils/tmpdirs.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+from builtins import object
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+import os
+import shutil
+from tempfile import template, mkdtemp
+
+
+class TemporaryDirectory(object):
+    """Create and return a temporary directory. This has the same
+    behavior as mkdtemp but can be used as a context manager. For
+    example:
+
+        with TemporaryDirectory() as tmpdir:
+            ...
+
+    Upon exiting the context, the directory and everything contained
+    in it are removed.
+    """
+
+    def __init__(self, suffix="", prefix=template, dir=None):
+        self.name = mkdtemp(suffix, prefix, dir)
+        self._closed = False
+
+    def __enter__(self):
+        return self.name
+
+    def cleanup(self):
+        if not self._closed:
+            shutil.rmtree(self.name)
+            self._closed = True
+
+    def __exit__(self, exc, value, tb):
+        self.cleanup()
+        return False
+
+
+class InTemporaryDirectory(TemporaryDirectory):
+    def __enter__(self):
+        self._pwd = os.getcwd()
+        os.chdir(self.name)
+        return super(InTemporaryDirectory, self).__enter__()
+
+    def __exit__(self, exc, value, tb):
+        os.chdir(self._pwd)
+        return super(InTemporaryDirectory, self).__exit__(exc, value, tb)
diff --git a/nipype/workflows/__init__.py b/nipype/workflows/__init__.py
new file mode 100644
index 0000000000..99fb243f19
--- /dev/null
+++ b/nipype/workflows/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
diff --git a/nipype/workflows/data/__init__.py b/nipype/workflows/data/__init__.py
new file mode 100644
index 0000000000..85fcd2dee0
--- /dev/null
+++ b/nipype/workflows/data/__init__.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+
+import os.path as op
+
+
+def get_flirt_schedule(name):
+    if name == 'ecc':
+        return op.abspath(op.join(op.dirname(__file__), 'ecc.sch'))
+    elif name == 'hmc':
+        return op.abspath(op.join(op.dirname(__file__), 'hmc.sch'))
+    else:
+        raise RuntimeError('Requested file does not exist.')
diff --git a/nipype/workflows/data/ecc.sch b/nipype/workflows/data/ecc.sch
new file mode 100644
index
0000000000..b9e8d8c3c3 --- /dev/null +++ b/nipype/workflows/data/ecc.sch @@ -0,0 +1,67 @@ +# 4mm scale +setscale 4 +setoption smoothing 6 +setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 +clear U +clear UA +clear UB +clear US +clear UP +# try the identity transform as a starting point at this resolution +clear UQ +setrow UQ 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 7 UQ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 +sort U +copy U UA +# select best 4 optimised solutions and try perturbations of these +clear U +copy UA:1-4 U +optimise 7 UA:1-4 1.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 +optimise 7 UA:1-4 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 1.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 1.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.1 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.1 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.2 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.2 abs 4 +sort U +copy U UB +# 2mm scale +setscale 2 +setoption smoothing 4 +setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 +clear U +clear UC +clear UD +clear UE +clear UF +# remeasure costs at this scale +measurecost 7 UB 0 0 0 0 0 0 rel +sort U +copy U UC +clear U +optimise 7 UC:1-3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 +copy U UD +sort U +copy U UF +# also try the identity transform as a starting point at this resolution +sort U +clear U UG +clear U +setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 +sort U +copy U UG +# 1mm scale +setscale 1 +setoption smoothing 2 +setoption boundguess 1 +setoption paramsubset 1 0 0 0 0 0 0 1 1 1 1 1 1 +clear U +#also try the identity transform as a starting point at this resolution +setrow UK 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 12 UK:1-2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 1 +sort U + diff --git a/nipype/workflows/data/hmc.sch b/nipype/workflows/data/hmc.sch new file mode 100644 index 0000000000..aeabcae29a --- /dev/null +++ b/nipype/workflows/data/hmc.sch @@ -0,0 +1,64 @@ +# 4mm scale +setscale 4 +setoption smoothing 6 +clear U +clear UA +clear UB +clear US +clear UP +# try the identity transform as a starting point at this resolution +clear UQ +setrow UQ 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 7 UQ 0.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 +sort U +copy U UA +# select best 4 optimised solutions and try perturbations of these +clear U +copy UA:1-4 U +optimise 7 UA:1-4 1.0 0.0 0.0 0.0 0.0 0.0 0.0 rel 4 +optimise 7 UA:1-4 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 1.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 1.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.1 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.1 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 0.2 abs 4 +optimise 7 UA:1-4 0.0 0.0 0.0 0.0 0.0 0.0 -0.2 abs 4 +sort U +copy U UB +# 2mm scale +setscale 2 +setoption smoothing 4 +clear U +clear UC +clear UD +clear UE +clear UF +# remeasure costs at this scale +measurecost 7 UB 0 0 0 0 0 0 rel +sort U +copy U UC +clear U +optimise 7 UC:1-3 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 +copy U UD +sort U +copy U UF +# also try the identity transform as a starting point at this resolution +sort U +clear U UG +clear U +setrow UG 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 7 UG 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 2 +sort U +copy U UG +# 1mm scale 
+setscale 1 +setoption smoothing 2 +setoption boundguess 1 +clear U +#also try the identity transform as a starting point at this resolution +setrow UK 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 +optimise 12 UK:1-2 0.0 0.0 0.0 0.0 0.0 0.0 0.0 abs 1 +sort U + diff --git a/nipype/workflows/dmri/__init__.py b/nipype/workflows/dmri/__init__.py new file mode 100644 index 0000000000..628b6c2bc1 --- /dev/null +++ b/nipype/workflows/dmri/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from . import camino, mrtrix, fsl, dipy diff --git a/nipype/workflows/dmri/camino/__init__.py b/nipype/workflows/dmri/camino/__init__.py new file mode 100644 index 0000000000..07ba37fc52 --- /dev/null +++ b/nipype/workflows/dmri/camino/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import create_camino_dti_pipeline +from .connectivity_mapping import create_connectivity_pipeline +from .group_connectivity import create_group_connectivity_pipeline diff --git a/nipype/workflows/dmri/camino/connectivity_mapping.py b/nipype/workflows/dmri/camino/connectivity_mapping.py new file mode 100644 index 0000000000..3283b5f4e1 --- /dev/null +++ b/nipype/workflows/dmri/camino/connectivity_mapping.py @@ -0,0 +1,534 @@ +# -*- coding: utf-8 -*- +import inspect +import os.path as op + +from ....interfaces import io as nio # Data i/o +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine +from ....interfaces import camino as camino +from ....interfaces import fsl as fsl +from ....interfaces import camino2trackvis as cam2trk +from ....interfaces import freesurfer as fs # freesurfer +from ....interfaces import cmtk as cmtk +from ....algorithms import misc as misc +from ...misc.utils import (get_affine, get_data_dims, get_vox_dims, + select_aparc, select_aparc_annot) + + +def create_connectivity_pipeline(name="connectivity"): + """Creates a pipeline that does the same connectivity processing as in the + :ref:`example_dmri_connectivity` example script. Given a subject id (and completed Freesurfer reconstruction) + diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome + as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). + + Example + ------- + + >>> from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline + >>> conmapper = create_connectivity_pipeline("nipype_conmap") + >>> conmapper.inputs.inputnode.subjects_dir = '.' 
+ >>> conmapper.inputs.inputnode.subject_id = 'subj1' + >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' + >>> conmapper.inputs.inputnode.bvecs = 'bvecs' + >>> conmapper.inputs.inputnode.bvals = 'bvals' + >>> conmapper.run() # doctest: +SKIP + + Inputs:: + + inputnode.subject_id + inputnode.subjects_dir + inputnode.dwi + inputnode.bvecs + inputnode.bvals + inputnode.resolution_network_file + + Outputs:: + + outputnode.connectome + outputnode.cmatrix + outputnode.gpickled_network + outputnode.fa + outputnode.struct + outputnode.trace + outputnode.tracts + outputnode.tensors + + """ + + inputnode_within = pe.Node( + interface=util.IdentityInterface(fields=[ + "subject_id", + "dwi", + "bvecs", + "bvals", + "subjects_dir", + "resolution_network_file", + ]), + name="inputnode_within") + + FreeSurferSource = pe.Node( + interface=nio.FreeSurferSource(), name='fssource') + + FreeSurferSourceLH = pe.Node( + interface=nio.FreeSurferSource(), name='fssourceLH') + FreeSurferSourceLH.inputs.hemi = 'lh' + + FreeSurferSourceRH = pe.Node( + interface=nio.FreeSurferSource(), name='fssourceRH') + FreeSurferSourceRH.inputs.hemi = 'rh' + """ + Since the b values and b vectors come from the FSL course, we must convert it to a scheme file + for use in Camino. + """ + + fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme") + fsl2scheme.inputs.usegradmod = True + """ + FSL's Brain Extraction tool is used to create a mask from the b0 image + """ + + b0Strip = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') + """ + FSL's FLIRT function is used to coregister the b0 mask and the structural image. + A convert_xfm node is then used to obtain the inverse of the transformation matrix. + FLIRT is used once again to apply the inverse transformation to the parcellated brain image. + """ + + coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') + coregister.inputs.cost = ('normmi') + + convertxfm = pe.Node(interface=fsl.ConvertXFM(), name='convertxfm') + convertxfm.inputs.invert_xfm = True + + inverse = pe.Node(interface=fsl.FLIRT(), name='inverse') + inverse.inputs.interp = ('nearestneighbour') + + inverse_AparcAseg = pe.Node( + interface=fsl.FLIRT(), name='inverse_AparcAseg') + inverse_AparcAseg.inputs.interp = ('nearestneighbour') + """ + A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. 
+    Nodes are used to convert the following:
+        * Original structural image to NIFTI
+        * Parcellated white matter image to NIFTI
+        * Parcellated whole-brain image to NIFTI
+        * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres
+          are converted to GIFTI for visualization in ConnectomeViewer
+        * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI
+    """
+
+    mri_convert_Brain = pe.Node(
+        interface=fs.MRIConvert(), name='mri_convert_Brain')
+    mri_convert_Brain.inputs.out_type = 'nii'
+
+    mri_convert_AparcAseg = mri_convert_Brain.clone('mri_convert_AparcAseg')
+
+    mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH')
+    mris_convertLH.inputs.out_datatype = 'gii'
+    mris_convertRH = mris_convertLH.clone('mris_convertRH')
+    mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite')
+    mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite')
+    mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated')
+    mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated')
+    mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere')
+    mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere')
+    mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels')
+    mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels')
+    """
+    In this section we create the nodes necessary for diffusion analysis.
+    First, the diffusion image is converted to voxel order, since this is the format in which Camino does
+    its processing.
+    """
+
+    image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
+    """
+    Second, diffusion tensors are fit to the voxel-order data.
+    If desired, these tensors can be converted to a Nifti tensor image using the DT2NIfTI interface.
+    """
+
+    dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')
+    """
+    Next, a lookup table is generated from the schemefile and the
+    signal-to-noise ratio (SNR) of the unweighted (q=0) data.
+    """
+
+    dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
+    dtlutgen.inputs.snr = 16.0
+    dtlutgen.inputs.inversion = 1
+    """
+    In this tutorial we implement probabilistic tractography using the PICo algorithm.
+    PICo tractography requires an estimate of the fibre direction and a model of its uncertainty in each voxel;
+    this probability distribution map is produced using the following node.
+    """
+
+    picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
+    picopdfs.inputs.inputmodel = 'dt'
+    """
+    Finally, tractography is performed. In this tutorial, we will use only one iteration for time-saving purposes.
+    It is important to note that we use the TrackPICo interface here. This interface now expects the files required
+    for PICo tracking (i.e. the output from picopdfs). Similar interfaces exist for alternative types of tracking,
+    such as Bayesian tracking with Dirac priors (TrackBayesDirac).
+    """
+
+    track = pe.Node(interface=camino.TrackPICo(), name="track")
+    track.inputs.iterations = 1
+    """
+    Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
+    convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform
+    the reverse.
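+
+    (As an aside, the same conversion can be run outside of a workflow; a
+    minimal sketch, with file names and image dimensions that are purely
+    illustrative rather than taken from this pipeline)::
+
+        from nipype.interfaces.camino2trackvis import Camino2Trackvis
+        c2t = Camino2Trackvis(in_file='tracts.Bfloat', min_length=30,
+                              voxel_order='LAS', voxel_dims=[2., 2., 2.],
+                              data_dims=[128, 128, 60])
+        res = c2t.run()  # doctest: +SKIP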
+ """ + + camino2trackvis = pe.Node( + interface=cam2trk.Camino2Trackvis(), name="camino2trackvis") + camino2trackvis.inputs.min_length = 30 + camino2trackvis.inputs.voxel_order = 'LAS' + trk2camino = pe.Node( + interface=cam2trk.Trackvis2Camino(), name="trk2camino") + """ + Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview, + using the following two nodes. + """ + + vtkstreamlines = pe.Node( + interface=camino.VtkStreamlines(), name="vtkstreamlines") + procstreamlines = pe.Node( + interface=camino.ProcStreamlines(), name="procstreamlines") + """ + We can easily produce a variety of scalar values from our fitted tensors. The following nodes generate the + fractional anisotropy and diffusivity trace maps and their associated headers, and then merge them back + into a single .nii file. + """ + + fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa') + trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace') + dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig') + + analyzeheader_fa = pe.Node( + interface=camino.AnalyzeHeader(), name='analyzeheader_fa') + analyzeheader_fa.inputs.datatype = 'double' + analyzeheader_trace = pe.Node( + interface=camino.AnalyzeHeader(), name='analyzeheader_trace') + analyzeheader_trace.inputs.datatype = 'double' + + fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii') + trace2nii = fa2nii.clone("trace2nii") + """ + This section adds the Connectome Mapping Toolkit (CMTK) nodes. + These interfaces are fairly experimental and may not function properly. + In order to perform connectivity mapping using CMTK, the parcellated structural data is rewritten + using the indices and parcellation scheme from the connectome mapper (CMP). This process has been + written into the ROIGen interface, which will output a remapped aparc+aseg image as well as a + dictionary of label information (i.e. name, display colours) pertaining to the original and remapped regions. + These label values are input from a user-input lookup table, if specified, and otherwise the default + Freesurfer LUT (/freesurfer/FreeSurferColorLUT.txt). + """ + + roigen = pe.Node(interface=cmtk.ROIGen(), name="ROIGen") + roigen_structspace = roigen.clone("ROIGen_structspace") + """ + The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts + and outputs a number of different files. The most important of which is the connectivity network itself, which is stored + as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). Also outputted are various + NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and + standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the + specific tracts that connect between user-selected regions. + """ + + createnodes = pe.Node(interface=cmtk.CreateNodes(), name="CreateNodes") + creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") + creatematrix.inputs.count_region_intersections = True + """ + Here we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use + the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. 
+ """ + + CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") + + giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") + giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") + niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") + fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") + """ + Since we have now created all our nodes, we can define our workflow and start making connections. + """ + + mapping = pe.Workflow(name='mapping') + """ + First, we connect the input node to the early conversion functions. + FreeSurfer input nodes: + """ + + mapping.connect([(inputnode_within, FreeSurferSource, [("subjects_dir", + "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSource, [("subject_id", + "subject_id")])]) + + mapping.connect([(inputnode_within, FreeSurferSourceLH, + [("subjects_dir", "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSourceLH, [("subject_id", + "subject_id")])]) + + mapping.connect([(inputnode_within, FreeSurferSourceRH, + [("subjects_dir", "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSourceRH, [("subject_id", + "subject_id")])]) + """ + Required conversions for processing in Camino: + """ + + mapping.connect([(inputnode_within, image2voxel, + [("dwi", "in_file")]), (inputnode_within, fsl2scheme, + [("bvecs", "bvec_file"), + ("bvals", "bval_file")]), + (image2voxel, dtifit, [['voxel_order', 'in_file']]), + (fsl2scheme, dtifit, [['scheme', 'scheme_file']])]) + """ + Nifti conversions for the subject's stripped brain image from Freesurfer: + """ + + mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', + 'in_file')])]) + """ + Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) + """ + + mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', + 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', + 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', + 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', + 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, + [('inflated', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, + [('inflated', 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, + [('sphere', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, + [('sphere', 'in_file')])]) + """ + The annotation files are converted using the pial surface as a map via the MRIsConvert interface. + One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files + specifically (rather than i.e. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. + """ + + mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, + [('pial', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, + [('pial', 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, + [(('annot', select_aparc_annot), 'annot_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, + [(('annot', select_aparc_annot), 'annot_file')])]) + """ + This section coregisters the diffusion-weighted and parcellated white-matter / whole brain images. + At present the conmap node connection is left commented, as there have been recent changes in Camino + code that have presented some users with errors. 
+ """ + + mapping.connect([(inputnode_within, b0Strip, [('dwi', 'in_file')])]) + mapping.connect([(inputnode_within, b0Strip, [('dwi', 't2_guided')]) + ]) # Added to improve damaged brain extraction + mapping.connect([(b0Strip, coregister, [('out_file', 'in_file')])]) + mapping.connect([(mri_convert_Brain, coregister, [('out_file', + 'reference')])]) + mapping.connect([(coregister, convertxfm, [('out_matrix_file', + 'in_file')])]) + mapping.connect([(b0Strip, inverse, [('out_file', 'reference')])]) + mapping.connect([(convertxfm, inverse, [('out_file', 'in_matrix_file')])]) + mapping.connect([(mri_convert_Brain, inverse, [('out_file', 'in_file')])]) + """ + The tractography pipeline consists of the following nodes. Further information about the tractography + can be found in nipype/examples/dmri_camino_dti.py. + """ + + mapping.connect([(b0Strip, track, [("mask_file", "seed_file")])]) + mapping.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])]) + mapping.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])]) + mapping.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])]) + mapping.connect([(picopdfs, track, [("pdfs", "in_file")])]) + """ + Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the + tensor fitting. This is also where our voxel- and data-grabbing functions come in. We pass these functions, + along with the original DWI image from the input node, to the header-generating nodes. This ensures that the + files will be correct and readable. + """ + + mapping.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) + mapping.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) + mapping.connect([(inputnode_within, analyzeheader_fa, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + mapping.connect([(fa, fa2nii, [('fa', 'data_file')])]) + mapping.connect([(inputnode_within, fa2nii, [(('dwi', get_affine), + 'affine')])]) + mapping.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])]) + + mapping.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) + mapping.connect([(trace, analyzeheader_trace, [("trace", "in_file")])]) + mapping.connect([(inputnode_within, analyzeheader_trace, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + mapping.connect([(trace, trace2nii, [('trace', 'data_file')])]) + mapping.connect([(inputnode_within, trace2nii, [(('dwi', get_affine), + 'affine')])]) + mapping.connect([(analyzeheader_trace, trace2nii, [('header', + 'header_file')])]) + + mapping.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) + """ + The output tracts are converted to Trackvis format (and back). Here we also use the voxel- and data-grabbing + functions defined at the beginning of the pipeline. + """ + + mapping.connect([(track, camino2trackvis, [('tracked', 'in_file')]), + (track, vtkstreamlines, [['tracked', 'in_file']]), + (camino2trackvis, trk2camino, [['trackvis', 'in_file']])]) + mapping.connect([(inputnode_within, camino2trackvis, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + """ + Here the CMTK connectivity mapping nodes are connected. + The original aparc+aseg image is converted to NIFTI, then registered to + the diffusion image and delivered to the ROIGen node. The remapped parcellation, + original tracts, and label file are then given to CreateMatrix. 
+ """ + + mapping.connect(inputnode_within, 'resolution_network_file', createnodes, + 'resolution_network_file') + mapping.connect(createnodes, 'node_network', creatematrix, + 'resolution_network_file') + mapping.connect([(FreeSurferSource, mri_convert_AparcAseg, + [(('aparc_aseg', select_aparc), 'in_file')])]) + + mapping.connect([(b0Strip, inverse_AparcAseg, [('out_file', + 'reference')])]) + mapping.connect([(convertxfm, inverse_AparcAseg, [('out_file', + 'in_matrix_file')])]) + mapping.connect([(mri_convert_AparcAseg, inverse_AparcAseg, + [('out_file', 'in_file')])]) + mapping.connect([(mri_convert_AparcAseg, roigen_structspace, + [('out_file', 'aparc_aseg_file')])]) + mapping.connect([(roigen_structspace, createnodes, [("roi_file", + "roi_file")])]) + + mapping.connect([(inverse_AparcAseg, roigen, [("out_file", + "aparc_aseg_file")])]) + mapping.connect([(roigen, creatematrix, [("roi_file", "roi_file")])]) + mapping.connect([(camino2trackvis, creatematrix, [("trackvis", + "tract_file")])]) + mapping.connect([(inputnode_within, creatematrix, [("subject_id", + "out_matrix_file")])]) + mapping.connect([(inputnode_within, creatematrix, + [("subject_id", "out_matrix_mat_file")])]) + """ + The merge nodes defined earlier are used here to create lists of the files which are + destined for the CFFConverter. + """ + + mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) + mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) + mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", + "in3")])]) + mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", + "in4")])]) + mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", + "in5")])]) + mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", + "in6")])]) + mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", + "in7")])]) + mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", + "in8")])]) + + mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", + "in1")])]) + mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", + "in2")])]) + + mapping.connect([(roigen, niftiVolumes, [("roi_file", "in1")])]) + mapping.connect([(inputnode_within, niftiVolumes, [("dwi", "in2")])]) + mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) + + mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", + "in1")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", + "in2")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", + "in3")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", + "in4")])]) + """ + This block actually connects the merged lists to the CFF converter. We pass the surfaces + and volumes that are to be included, as well as the tracts and the network itself. The currently + running pipeline (dmri_connectivity.py) is also scraped and included in the CFF file. This + makes it easy for the user to examine the entire processing pathway used to generate the end + product. 
+ """ + + CFFConverter.inputs.script_files = op.abspath( + inspect.getfile(inspect.currentframe())) + mapping.connect([(giftiSurfaces, CFFConverter, [("out", + "gifti_surfaces")])]) + mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) + mapping.connect([(creatematrix, CFFConverter, [("matrix_files", + "gpickled_networks")])]) + + mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) + mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) + mapping.connect([(camino2trackvis, CFFConverter, [("trackvis", + "tract_files")])]) + mapping.connect([(inputnode_within, CFFConverter, [("subject_id", + "title")])]) + """ + Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes + declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding + their names to the subject list and their data to the proper folders. + """ + + inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + "subject_id", "dwi", "bvecs", "bvals", "subjects_dir", + "resolution_network_file" + ]), + name="inputnode") + + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + "fa", "struct", "trace", "tracts", "connectome", "cmatrix", + "networks", "rois", "mean_fiber_length", "fiber_length_std", + "tensors" + ]), + name="outputnode") + + connectivity = pe.Workflow(name="connectivity") + connectivity.base_output_dir = name + + connectivity.connect([ + (inputnode, mapping, + [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), + ("bvecs", "inputnode_within.bvecs"), ("subject_id", + "inputnode_within.subject_id"), + ("subjects_dir", "inputnode_within.subjects_dir"), + ("resolution_network_file", + "inputnode_within.resolution_network_file")]) + ]) + + connectivity.connect( + [(mapping, outputnode, + [("camino2trackvis.trackvis", + "tracts"), ("CFFConverter.connectome_file", "connectome"), + ("CreateMatrix.matrix_mat_file", + "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", + "mean_fiber_length"), + ("CreateMatrix.fiber_length_std_matrix_mat_file", + "fiber_length_std"), ("fa2nii.nifti_file", + "fa"), ("CreateMatrix.matrix_files", + "networks"), ("ROIGen.roi_file", + "rois"), + ("mri_convert_Brain.out_file", + "struct"), ("trace2nii.nifti_file", + "trace"), ("dtifit.tensor_fitted", "tensors")])]) + + return connectivity diff --git a/nipype/workflows/dmri/camino/diffusion.py b/nipype/workflows/dmri/camino/diffusion.py new file mode 100644 index 0000000000..708ddb8bc4 --- /dev/null +++ b/nipype/workflows/dmri/camino/diffusion.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine +from ....interfaces import camino as camino +from ....interfaces import fsl as fsl +from ....interfaces import camino2trackvis as cam2trk +from ....algorithms import misc as misc +from ...misc.utils import get_affine, get_data_dims, get_vox_dims + + +def create_camino_dti_pipeline(name="dtiproc"): + """Creates a pipeline that does the same diffusion processing as in the + :doc:`../../users/examples/dmri_camino_dti` example script. Given a diffusion-weighted image, + b-values, and b-vectors, the workflow will return the tractography + computed from diffusion tensors and from PICo probabilistic tractography. 
+
+    Example
+    -------
+
+    >>> import os
+    >>> nipype_camino_dti = create_camino_dti_pipeline("nipype_camino_dti")
+    >>> nipype_camino_dti.inputs.inputnode.dwi = os.path.abspath('dwi.nii')
+    >>> nipype_camino_dti.inputs.inputnode.bvecs = os.path.abspath('bvecs')
+    >>> nipype_camino_dti.inputs.inputnode.bvals = os.path.abspath('bvals')
+    >>> nipype_camino_dti.run()  # doctest: +SKIP
+
+    Inputs::
+
+        inputnode.dwi
+        inputnode.bvecs
+        inputnode.bvals
+
+    Outputs::
+
+        outputnode.fa
+        outputnode.trace
+        outputnode.tracts_pico
+        outputnode.tracts_dt
+        outputnode.tensors
+
+    """
+
+    inputnode1 = pe.Node(
+        interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]),
+        name="inputnode1")
+    """
+    Setup for Diffusion Tensor Computation
+    --------------------------------------
+    In this section we create the nodes necessary for diffusion analysis.
+    First, the diffusion image is converted to voxel order.
+    """
+
+    image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
+    fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
+    fsl2scheme.inputs.usegradmod = True
+    """
+    Second, diffusion tensors are fit to the voxel-order data.
+    """
+
+    dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')
+    """
+    Next, a lookup table is generated from the schemefile and the
+    signal-to-noise ratio (SNR) of the unweighted (q=0) data.
+    """
+
+    dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
+    dtlutgen.inputs.snr = 16.0
+    dtlutgen.inputs.inversion = 1
+    """
+    In this tutorial we implement probabilistic tractography using the PICo algorithm.
+    PICo tractography requires an estimate of the fibre direction and a model of its
+    uncertainty in each voxel; this is produced using the following node.
+    """
+
+    picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
+    picopdfs.inputs.inputmodel = 'dt'
+    """
+    An FSL BET node creates a brain mask from the diffusion image, which is used to seed the PICo tractography.
+    """
+
+    bet = pe.Node(interface=fsl.BET(), name="bet")
+    bet.inputs.mask = True
+    """
+    Finally, tractography is performed.
+    First, DT streamline tractography.
+    """
+
+    trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")
+    """
+    Now Camino's Probabilistic Index of Connectivity (PICo) algorithm.
+    In this tutorial, we will use only 1 iteration for time-saving purposes.
+    """
+
+    trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
+    trackpico.inputs.iterations = 1
+    """
+    Currently, the best program for visualizing tracts is TrackVis. For this reason, a node is included to
+    convert the raw tract data to .trk format. Solely for testing purposes, another node is added to perform
+    the reverse.
+    """
+
+    cam2trk_dt = pe.Node(
+        interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
+    cam2trk_dt.inputs.min_length = 30
+    cam2trk_dt.inputs.voxel_order = 'LAS'
+
+    cam2trk_pico = pe.Node(
+        interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
+    cam2trk_pico.inputs.min_length = 30
+    cam2trk_pico.inputs.voxel_order = 'LAS'
+    """
+    Tracts can also be converted to VTK and OOGL formats, for use in programs such as GeomView and Paraview,
+    using the following two nodes.
+    """
+
+    # vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(), name="vtkstreamlines")
+    # procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
+    # procstreamlines.inputs.outputtracts = 'oogl'
+    """
+    We can also produce a variety of scalar values from our fitted tensors.
+    The following nodes generate the fractional anisotropy and diffusivity
+    trace maps and their associated headers.
+    """
+
+    fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa')
+    # md = pe.Node(interface=camino.MD(), name='md')
+    trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
+    dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')
+
+    analyzeheader_fa = pe.Node(
+        interface=camino.AnalyzeHeader(), name="analyzeheader_fa")
+    analyzeheader_fa.inputs.datatype = "double"
+    analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')
+
+    # analyzeheader_md = pe.Node(interface=camino.AnalyzeHeader(), name="analyzeheader_md")
+    # analyzeheader_md.inputs.datatype = "double"
+    # analyzeheader_trace = analyzeheader_md.clone('analyzeheader_trace')
+
+    fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
+    trace2nii = fa2nii.clone("trace2nii")
+    """
+    Since we have now created all our nodes, we can define our workflow and start making connections.
+    """
+
+    tractography = pe.Workflow(name='tractography')
+
+    tractography.connect([(inputnode1, bet, [("dwi", "in_file")])])
+    """
+    File format conversion
+    """
+
+    tractography.connect([(inputnode1, image2voxel, [("dwi", "in_file")]),
+                          (inputnode1, fsl2scheme, [("bvecs", "bvec_file"),
+                                                    ("bvals", "bval_file")])])
+    """
+    Tensor fitting
+    """
+
+    tractography.connect([(image2voxel, dtifit, [['voxel_order', 'in_file']]),
+                          (fsl2scheme, dtifit, [['scheme', 'scheme_file']])])
+    """
+    Workflow for applying DT streamline tractography
+    """
+
+    tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])])
+    tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])])
+    """
+    Workflow for applying PICo
+    """
+
+    tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])])
+    tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])])
+    tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])])
+    tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])])
+    tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])])
+
+    # Mean diffusivity still appears broken
+    # tractography.connect([(dtifit, md, [("tensor_fitted", "in_file")])])
+    # tractography.connect([(md, analyzeheader_md, [("md", "in_file")])])
+    # tractography.connect([(inputnode, analyzeheader_md, [(('dwi', get_vox_dims), 'voxel_dims'),
+    #                                                      (('dwi', get_data_dims), 'data_dims')])])
+    # This line is commented out because the ProcStreamlines node keeps throwing memory errors
+    # tractography.connect([(track, procstreamlines, [("tracked", "in_file")])])
+    """
+    Connecting the Fractional Anisotropy and Trace nodes is simple, as they obtain their input from the
+    tensor fitting.
+
+    This is also where our voxel- and data-grabbing functions come in. We pass these functions, along
+    with the original DWI image from the input node, to the header-generating nodes. This ensures that
+    the files will be correct and readable.
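+
+    (These helpers are thin nibabel wrappers; a minimal sketch of what a
+    ``get_vox_dims``-style function does, assuming a NIfTI or Analyze input)::
+
+        import nibabel as nb
+
+        def get_vox_dims(volume):
+            if isinstance(volume, list):  # allow single-file lists
+                volume = volume[0]
+            hdr = nb.load(volume).header
+            return [float(z) for z in hdr.get_zooms()[:3]]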
+ """ + + tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])]) + tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])]) + tractography.connect([(inputnode1, analyzeheader_fa, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + tractography.connect([(fa, fa2nii, [('fa', 'data_file')])]) + tractography.connect([(inputnode1, fa2nii, [(('dwi', get_affine), + 'affine')])]) + tractography.connect([(analyzeheader_fa, fa2nii, [('header', + 'header_file')])]) + + tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])]) + tractography.connect([(trace, analyzeheader_trace, [("trace", + "in_file")])]) + tractography.connect([(inputnode1, analyzeheader_trace, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + tractography.connect([(trace, trace2nii, [('trace', 'data_file')])]) + tractography.connect([(inputnode1, trace2nii, [(('dwi', get_affine), + 'affine')])]) + tractography.connect([(analyzeheader_trace, trace2nii, [('header', + 'header_file')])]) + + tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])]) + + tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])]) + tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])]) + tractography.connect([(inputnode1, cam2trk_pico, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + + tractography.connect([(inputnode1, cam2trk_dt, + [(('dwi', get_vox_dims), 'voxel_dims'), + (('dwi', get_data_dims), 'data_dims')])]) + + inputnode = pe.Node( + interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), + name="inputnode") + + outputnode = pe.Node( + interface=util.IdentityInterface( + fields=["fa", "trace", "tracts_pico", "tracts_dt", "tensors"]), + name="outputnode") + + workflow = pe.Workflow(name=name) + workflow.base_output_dir = name + + workflow.connect([(inputnode, tractography, + [("dwi", "inputnode1.dwi"), + ("bvals", "inputnode1.bvals"), ("bvecs", + "inputnode1.bvecs")])]) + + workflow.connect([(tractography, outputnode, + [("cam2trk_dt.trackvis", "tracts_dt"), + ("cam2trk_pico.trackvis", + "tracts_pico"), ("fa2nii.nifti_file", "fa"), + ("trace2nii.nifti_file", + "trace"), ("dtifit.tensor_fitted", "tensors")])]) + + return workflow diff --git a/nipype/workflows/dmri/camino/group_connectivity.py b/nipype/workflows/dmri/camino/group_connectivity.py new file mode 100644 index 0000000000..1307f8c4b6 --- /dev/null +++ b/nipype/workflows/dmri/camino/group_connectivity.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- +import os.path as op # system functions + +from .connectivity_mapping import create_connectivity_pipeline +from ....interfaces import io as nio # Data i/o +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine + + +def create_group_connectivity_pipeline(group_list, + group_id, + data_dir, + subjects_dir, + output_dir, + template_args_dict=0): + """Creates a pipeline that performs basic Camino structural connectivity processing + on groups of subjects. Given a diffusion-weighted image, and text files containing + the associated b-values and b-vectors, the workflow will return each subjects' connectomes + in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> import nipype.workflows.dmri.camino.group_connectivity as groupwork + >>> subjects_dir = '.' + >>> data_dir = '.' 
+ >>> output_dir = '.' + >>> fs.FSCommand.set_default_subjects_dir(subjects_dir) + >>> group_list = {} + >>> group_list['group1'] = ['subj1', 'subj2'] + >>> group_list['group2'] = ['subj3', 'subj4'] + >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) + >>> group_id = 'group1' + >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) + >>> l1pipeline.run() # doctest: +SKIP + + Inputs:: + + group_list: Dictionary of subject lists, keyed by group name + group_id: String containing the group name + data_dir: Path to the data directory + subjects_dir: Path to the Freesurfer 'subjects' directory + output_dir: Path for the output files + template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource + e.g. info = dict(dwi=[['subject_id', 'dwi']], + bvecs=[['subject_id','bvecs']], + bvals=[['subject_id','bvals']]) + """ + group_infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name="group_infosource") + group_infosource.inputs.group_id = group_id + subject_list = group_list[group_id] + subj_infosource = pe.Node( + interface=util.IdentityInterface(fields=['subject_id']), + name="subj_infosource") + subj_infosource.iterables = ('subject_id', subject_list) + + if template_args_dict == 0: + info = dict( + dwi=[['subject_id', 'dwi']], + bvecs=[['subject_id', 'bvecs']], + bvals=[['subject_id', 'bvals']]) + else: + info = template_args_dict + + datasource = pe.Node( + interface=nio.DataGrabber( + infields=['subject_id'], outfields=list(info.keys())), + name='datasource') + + datasource.inputs.template = "%s/%s" + datasource.inputs.base_directory = data_dir + datasource.inputs.field_template = dict(dwi='%s/%s.nii') + datasource.inputs.template_args = info + datasource.inputs.sort_filelist = True + """ + Create a connectivity mapping workflow + """ + conmapper = create_connectivity_pipeline("nipype_conmap") + conmapper.inputs.inputnode.subjects_dir = subjects_dir + conmapper.base_dir = op.abspath('conmapper') + + datasink = pe.Node(interface=nio.DataSink(), name="datasink") + datasink.inputs.base_directory = output_dir + datasink.inputs.container = group_id + + l1pipeline = pe.Workflow(name="l1pipeline_" + group_id) + l1pipeline.base_dir = output_dir + l1pipeline.base_output_dir = group_id + l1pipeline.connect([(subj_infosource, datasource, [('subject_id', + 'subject_id')])]) + l1pipeline.connect([(subj_infosource, conmapper, + [('subject_id', 'inputnode.subject_id')])]) + l1pipeline.connect([(datasource, conmapper, [ + ("dwi", "inputnode.dwi"), + ("bvals", "inputnode.bvals"), + ("bvecs", "inputnode.bvecs"), + ])]) + l1pipeline.connect([(conmapper, datasink, [ + ("outputnode.connectome", "@l1output.cff"), + ("outputnode.fa", "@l1output.fa"), + ("outputnode.tracts", "@l1output.tracts"), + ("outputnode.trace", "@l1output.trace"), + ("outputnode.cmatrix", "@l1output.cmatrix"), + ("outputnode.rois", "@l1output.rois"), + ("outputnode.struct", "@l1output.struct"), + ("outputnode.networks", "@l1output.networks"), + ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), + ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), + ])]) + l1pipeline.connect([(group_infosource, datasink, [('group_id', + '@group_id')])]) + return l1pipeline diff --git a/nipype/workflows/dmri/connectivity/__init__.py b/nipype/workflows/dmri/connectivity/__init__.py new file mode 100644 index 
0000000000..b34ca0dacb
--- /dev/null
+++ b/nipype/workflows/dmri/connectivity/__init__.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from .nx import (create_networkx_pipeline, create_cmats_to_csv_pipeline)
+from .group_connectivity import (
+    create_merge_networks_by_group_workflow,
+    create_merge_network_results_by_group_workflow,
+    create_merge_group_networks_workflow,
+    create_merge_group_network_results_workflow,
+    create_average_networks_by_group_workflow)
diff --git a/nipype/workflows/dmri/connectivity/group_connectivity.py b/nipype/workflows/dmri/connectivity/group_connectivity.py
new file mode 100644
index 0000000000..a918104bd1
--- /dev/null
+++ b/nipype/workflows/dmri/connectivity/group_connectivity.py
@@ -0,0 +1,631 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from builtins import open
+
+from future.utils import raise_from
+
+import os.path as op
+
+from ....interfaces import io as nio  # Data i/o
+from ....interfaces import utility as util  # utility
+from ....interfaces import cmtk as cmtk
+from ....algorithms import misc as misc
+from ....pipeline import engine as pe  # pypeline engine
+from ....interfaces.utility import Function
+from ....utils.misc import package_check
+
+have_cmp = True
+try:
+    package_check('cmp')
+except Exception as e:
+    have_cmp = False
+else:
+    import cmp
+
+
+def pullnodeIDs(in_network, name_key='dn_name'):
+    """ Return the value of a given key for each node in a network.
+    By default it returns the node names.
+    """
+    import networkx as nx
+    import numpy as np
+    from nipype.interfaces.base import isdefined
+    if not isdefined(in_network):
+        raise ValueError('in_network is not defined')
+    try:
+        ntwk = nx.read_graphml(in_network)
+    except Exception:  # fall back to gpickle if the file is not GraphML
+        ntwk = nx.read_gpickle(in_network)
+    nodedata = ntwk.node
+    ids = []
+    integer_nodelist = []
+    for node in list(nodedata.keys()):
+        integer_nodelist.append(int(node))
+    for node in np.sort(integer_nodelist):
+        try:
+            nodeid = nodedata[node][name_key]
+        except KeyError:
+            nodeid = nodedata[str(node)][name_key]
+        ids.append(nodeid)
+    return ids
+
+
+def concatcsv(in_files):
+    """ Concatenate several "comma-separated value" text files into one,
+    keeping the first row (usually column headers) of the first file only.
+    """
+    import os.path as op
+
+    if not isinstance(in_files, list):
+        return in_files
+    if isinstance(in_files[0], list):
+        in_files = in_files[0]
+    out_name = op.abspath('concat.csv')
+    with open(out_name, 'w') as out_file:
+        with open(in_files[0], 'r') as first:
+            out_file.write(first.readline())
+        for in_file in in_files:
+            with open(in_file, 'r') as file_to_read:
+                file_to_read.readline()  # scrap first line
+                for line in file_to_read:
+                    out_file.write(line)
+    return out_name
+
+
+def create_merge_networks_by_group_workflow(group_list, group_id, data_dir,
+                                            subjects_dir, output_dir):
+    """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level
+    MRtrix structural connectivity processing pipeline into a single CFF file for each group.
+
+    Example
+    -------
+
+    >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork
+    >>> from nipype.testing import example_data
+    >>> subjects_dir = '.'
+    >>> data_dir = '.'
+    >>> output_dir = '.'
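+    >>> # group_list maps each group name to the subject IDs in that group: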
+ >>> group_list = {} + >>> group_list['group1'] = ['subj1', 'subj2'] + >>> group_list['group2'] = ['subj3', 'subj4'] + >>> group_id = 'group1' + >>> l2pipeline = groupwork.create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) + >>> l2pipeline.run() # doctest: +SKIP + + Inputs:: + + group_list: Dictionary of subject lists, keyed by group name + group_id: String containing the group name + data_dir: Path to the data directory + subjects_dir: Path to the Freesurfer 'subjects' directory + output_dir: Path for the output files + """ + group_infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name="group_infosource") + group_infosource.inputs.group_id = group_id + + l2infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name='l2infosource') + + l2source = pe.Node( + nio.DataGrabber(infields=['group_id'], outfields=['CFFfiles']), + name='l2source') + l2source.inputs.template_args = dict(CFFfiles=[['group_id']]) + l2source.inputs.template = op.join(output_dir, '%s/cff/*/connectome.cff') + l2source.inputs.base_directory = data_dir + l2source.inputs.sort_filelist = True + + l2inputnode = pe.Node( + interface=util.IdentityInterface(fields=['CFFfiles']), + name='l2inputnode') + MergeCNetworks = pe.Node( + interface=cmtk.MergeCNetworks(), name="MergeCNetworks") + + l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") + l2datasink.inputs.base_directory = output_dir + l2datasink.inputs.container = group_id + + l2pipeline = pe.Workflow(name="l2output_" + group_id) + l2pipeline.base_dir = op.join(output_dir, 'l2output') + l2pipeline.connect([(group_infosource, l2infosource, [('group_id', + 'group_id')])]) + + l2pipeline.connect([ + (l2infosource, l2source, [('group_id', 'group_id')]), + (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), + ]) + + l2pipeline.connect([(l2inputnode, MergeCNetworks, [('CFFfiles', + 'in_files')])]) + l2pipeline.connect([(group_infosource, MergeCNetworks, [('group_id', + 'out_file')])]) + l2pipeline.connect([(MergeCNetworks, l2datasink, [('connectome_file', + '@l2output')])]) + l2pipeline.connect([(group_infosource, l2datasink, [('group_id', + '@group_id')])]) + return l2pipeline + + +def create_merge_network_results_by_group_workflow( + group_list, group_id, data_dir, subjects_dir, output_dir): + """Creates a second-level pipeline to merge the Connectome File Format (CFF) outputs from the group-level + MRtrix structural connectivity processing pipeline into a single CFF file for each group. + + Example + ------- + + >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork + >>> from nipype.testing import example_data + >>> subjects_dir = '.' + >>> data_dir = '.' + >>> output_dir = '.' 
+ >>> group_list = {} + >>> group_list['group1'] = ['subj1', 'subj2'] + >>> group_list['group2'] = ['subj3', 'subj4'] + >>> group_id = 'group1' + >>> l2pipeline = groupwork.create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir) + >>> l2pipeline.run() # doctest: +SKIP + + Inputs:: + + group_list: Dictionary of subject lists, keyed by group name + group_id: String containing the group name + data_dir: Path to the data directory + subjects_dir: Path to the Freesurfer 'subjects' directory + output_dir: Path for the output files + """ + group_infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name="group_infosource") + group_infosource.inputs.group_id = group_id + + l2infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id', 'merged']), + name='l2infosource') + + l2source = pe.Node( + nio.DataGrabber( + infields=['group_id'], + outfields=[ + 'CFFfiles', 'CSVmatrices', 'CSVfibers', 'CSVnodal', 'CSVglobal' + ]), + name='l2source') + + l2source.inputs.template_args = dict( + CFFfiles=[['group_id']], + CSVmatrices=[['group_id']], + CSVnodal=[['group_id']], + CSVglobal=[['group_id']], + CSVfibers=[['group_id']]) + l2source.inputs.base_directory = data_dir + l2source.inputs.template = '%s/%s' + l2source.inputs.field_template = dict( + CFFfiles=op.join(output_dir, '%s/cff/*/connectome.cff'), + CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv'), + CSVnodal=op.join(output_dir, '%s/nxcsv/*/*nodal*.csv'), + CSVglobal=op.join(output_dir, '%s/nxcsv/*/*global*.csv'), + CSVfibers=op.join(output_dir, '%s/fiber_csv/*/*fibers*.csv')) + l2source.inputs.sort_filelist = True + + l2inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'CFFfiles', 'CSVfibers', 'CSVmatrices', 'CSVnodal', 'CSVglobal', + 'network_file' + ]), + name='l2inputnode') + + MergeCNetworks = pe.Node( + interface=cmtk.MergeCNetworks(), name="MergeCNetworks") + + l2datasink = pe.Node(interface=nio.DataSink(), name="l2datasink") + l2datasink.inputs.base_directory = output_dir + l2datasink.inputs.container = group_id + + l2pipeline = pe.Workflow(name="l2output_" + group_id) + l2pipeline.base_dir = op.join(output_dir, 'l2output') + l2pipeline.connect([(group_infosource, l2infosource, [('group_id', + 'group_id')])]) + + l2pipeline.connect([ + (l2infosource, l2source, [('group_id', 'group_id')]), + (l2source, l2inputnode, [('CFFfiles', 'CFFfiles')]), + (l2source, l2inputnode, [('CSVmatrices', 'CSVmatrices')]), + (l2source, l2inputnode, [('CSVnodal', 'CSVnodal')]), + (l2source, l2inputnode, [('CSVglobal', 'CSVglobal')]), + (l2source, l2inputnode, [('CSVfibers', 'CSVfibers')]), + ]) + + l2pipeline.connect([(l2inputnode, MergeCNetworks, [('CFFfiles', + 'in_files')])]) + + l2pipeline.connect([(group_infosource, MergeCNetworks, [('group_id', + 'out_file')])]) + l2pipeline.connect([(MergeCNetworks, l2datasink, [('connectome_file', + '@l2output')])]) + + AddCSVColumn_node = pe.Node( + interface=misc.AddCSVColumn(), name="AddCSVColumn_node") + AddCSVColumn_node.inputs.extra_column_heading = 'group' + AddCSVColumn_global = AddCSVColumn_node.clone(name="AddCSVColumn_global") + AddCSVColumn_matrices = AddCSVColumn_node.clone( + name="AddCSVColumn_matrices") + AddCSVColumn_fibers = AddCSVColumn_node.clone(name="AddCSVColumn_fibers") + + concat_csv_interface = Function( + input_names=["in_files"], + output_names=["out_name"], + function=concatcsv) + + concat_node_csvs = pe.Node( + interface=concat_csv_interface, name='concat_node_csvs') + 
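+    # One concatenation node per CSV family: the nodal and global NetworkX
+    # measures, the connectivity matrices, and the fiber statistics are each
+    # collapsed into a single CSV before the group column is added below.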
concat_global_csvs = pe.Node(
+        interface=concat_csv_interface, name='concat_global_csvs')
+    concat_matrix_csvs = pe.Node(
+        interface=concat_csv_interface, name='concat_matrix_csvs')
+    concat_fiber_csvs = pe.Node(
+        interface=concat_csv_interface, name='concat_fiber_csvs')
+
+    l2pipeline.connect([(l2inputnode, concat_node_csvs, [('CSVnodal',
+                                                          'in_files')])])
+    l2pipeline.connect([(concat_node_csvs, AddCSVColumn_node, [('out_name',
+                                                                'in_file')])])
+    l2pipeline.connect([(group_infosource, AddCSVColumn_node,
+                         [('group_id', 'extra_field')])])
+    l2pipeline.connect([(AddCSVColumn_node, l2datasink,
+                         [('csv_file', '@l2output.node_csv')])])
+    l2pipeline.connect([(group_infosource, l2datasink, [('group_id',
+                                                         '@group_id')])])
+
+    l2pipeline.connect([(l2inputnode, concat_global_csvs, [('CSVglobal',
+                                                            'in_files')])])
+    l2pipeline.connect([(concat_global_csvs, AddCSVColumn_global,
+                         [('out_name', 'in_file')])])
+    l2pipeline.connect([(group_infosource, AddCSVColumn_global,
+                         [('group_id', 'extra_field')])])
+    l2pipeline.connect([(AddCSVColumn_global, l2datasink,
+                         [('csv_file', '@l2output.global_csv')])])
+
+    l2pipeline.connect([(l2inputnode, concat_matrix_csvs, [('CSVmatrices',
+                                                            'in_files')])])
+    l2pipeline.connect([(concat_matrix_csvs, AddCSVColumn_matrices,
+                         [('out_name', 'in_file')])])
+    l2pipeline.connect([(group_infosource, AddCSVColumn_matrices,
+                         [('group_id', 'extra_field')])])
+    l2pipeline.connect([(AddCSVColumn_matrices, l2datasink,
+                         [('csv_file', '@l2output.cmatrices_csv')])])
+
+    l2pipeline.connect([(l2inputnode, concat_fiber_csvs, [('CSVfibers',
+                                                           'in_files')])])
+    l2pipeline.connect([(concat_fiber_csvs, AddCSVColumn_fibers,
+                         [('out_name', 'in_file')])])
+    l2pipeline.connect([(group_infosource, AddCSVColumn_fibers,
+                         [('group_id', 'extra_field')])])
+    l2pipeline.connect([(AddCSVColumn_fibers, l2datasink,
+                         [('csv_file', '@l2output.fibers_csv')])])
+    return l2pipeline
+
+
+def create_merge_group_networks_workflow(group_list,
+                                         data_dir,
+                                         subjects_dir,
+                                         output_dir,
+                                         title='group'):
+    """Creates a third-level pipeline that merges the Connectome File Format (CFF)
+    outputs from each group into a single CFF file.
+
+    Example
+    -------
+
+    >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork
+    >>> from nipype.testing import example_data
+    >>> subjects_dir = '.'
+    >>> data_dir = '.'
+    >>> output_dir = '.'
+    >>> group_list = {}
+    >>> group_list['group1'] = ['subj1', 'subj2']
+    >>> group_list['group2'] = ['subj3', 'subj4']
+    >>> l3pipeline = groupwork.create_merge_group_networks_workflow(group_list, data_dir, subjects_dir, output_dir)
+    >>> l3pipeline.run() # doctest: +SKIP
+
+    Inputs::
+
+        group_list: Dictionary of subject lists, keyed by group name
+        data_dir: Path to the data directory
+        subjects_dir: Path to the Freesurfer 'subjects' directory
+        output_dir: Path for the output files
+        title: String to use as a title for the output merged CFF file (default 'group')
+    """
+    l3infosource = pe.Node(
+        interface=util.IdentityInterface(fields=['group_id']),
+        name='l3infosource')
+    l3infosource.inputs.group_id = list(group_list.keys())
+
+    l3source = pe.Node(
+        nio.DataGrabber(infields=['group_id'], outfields=['CFFfiles']),
+        name='l3source')
+    l3source.inputs.template_args = dict(CFFfiles=[['group_id', 'group_id']])
+    l3source.inputs.template = op.join(output_dir, '%s/%s.cff')
+    l3source.inputs.sort_filelist = True
+
+    l3inputnode = pe.Node(
+        interface=util.IdentityInterface(fields=['Group_CFFs']),
+        name='l3inputnode')
+
+    MergeCNetworks_grp = pe.Node(
+        interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp")
+    MergeCNetworks_grp.inputs.out_file = title
+
+    l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink")
+    l3datasink.inputs.base_directory = output_dir
+
+    l3pipeline = pe.Workflow(name="l3output")
+    l3pipeline.base_dir = output_dir
+    l3pipeline.connect([
+        (l3infosource, l3source, [('group_id', 'group_id')]),
+        (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]),
+    ])
+
+    l3pipeline.connect([(l3inputnode, MergeCNetworks_grp, [('Group_CFFs',
+                                                            'in_files')])])
+    l3pipeline.connect([(MergeCNetworks_grp, l3datasink, [('connectome_file',
+                                                           '@l3output')])])
+    return l3pipeline
+
+
+def create_merge_group_network_results_workflow(group_list,
+                                                data_dir,
+                                                subjects_dir,
+                                                output_dir,
+                                                title='group'):
+    """Creates a third-level pipeline that merges the Connectome File Format (CFF)
+    outputs from each group into a single CFF file. This version of the third-level
+    pipeline also concatenates the comma-separated value files for the NetworkX
+    metrics and the connectivity matrices into single files.
+
+    Example
+    -------
+
+    >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork
+    >>> from nipype.testing import example_data
+    >>> subjects_dir = '.'
+    >>> data_dir = '.'
+    >>> output_dir = '.'
+ >>> group_list = {} + >>> group_list['group1'] = ['subj1', 'subj2'] + >>> group_list['group2'] = ['subj3', 'subj4'] + >>> l3pipeline = groupwork.create_merge_group_network_results_workflow(group_list, data_dir, subjects_dir, output_dir) + >>> l3pipeline.run() # doctest: +SKIP + + Inputs:: + + group_list: Dictionary of subject lists, keyed by group name + data_dir: Path to the data directory + subjects_dir: Path to the Freesurfer 'subjects' directory + output_dir: Path for the output files + title: String to use as a title for the output merged CFF file (default 'group') + """ + l3infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name='l3infosource') + l3infosource.inputs.group_id = list(group_list.keys()) + + l3source = pe.Node( + nio.DataGrabber( + infields=['group_id'], + outfields=[ + 'CFFfiles', 'CSVnodemetrics', 'CSVglobalmetrics', 'CSVmatrices' + ]), + name='l3source') + l3source.inputs.template_args = dict( + CFFfiles=[['group_id']], + CSVnodemetrics=[['group_id']], + CSVglobalmetrics=[['group_id']], + CSVmatrices=[['group_id']]) + l3source.inputs.template = op.join(output_dir, '%s/%s') + l3source.inputs.sort_filelist = True + + l3source.inputs.field_template = dict( + CFFfiles=op.join(output_dir, '%s/*.cff'), + CSVnodemetrics=op.join(output_dir, '%s/node_csv/*.csv'), + CSVglobalmetrics=op.join(output_dir, '%s/global_csv/*.csv'), + CSVmatrices=op.join(output_dir, '%s/cmatrices_csv/*/*.csv')) + + l3inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'Group_CFFs', 'Group_CSVnodemetrics', 'Group_CSVglobalmetrics', + 'Group_CSVmatrices' + ]), + name='l3inputnode') + + MergeCNetworks_grp = pe.Node( + interface=cmtk.MergeCNetworks(), name="MergeCNetworks_grp") + MergeCNetworks_grp.inputs.out_file = title + + l3datasink = pe.Node(interface=nio.DataSink(), name="l3datasink") + l3datasink.inputs.base_directory = output_dir + + l3pipeline = pe.Workflow(name="l3output") + l3pipeline.base_dir = output_dir + l3pipeline.connect([ + (l3infosource, l3source, [('group_id', 'group_id')]), + (l3source, l3inputnode, [('CFFfiles', 'Group_CFFs')]), + (l3source, l3inputnode, [('CSVnodemetrics', 'Group_CSVnodemetrics')]), + (l3source, l3inputnode, [('CSVglobalmetrics', + 'Group_CSVglobalmetrics')]), + (l3source, l3inputnode, [('CSVmatrices', 'Group_CSVmatrices')]), + ]) + + l3pipeline.connect([(l3inputnode, MergeCNetworks_grp, [('Group_CFFs', + 'in_files')])]) + l3pipeline.connect([(MergeCNetworks_grp, l3datasink, [('connectome_file', + '@l3output')])]) + + concat_csv_interface = Function( + input_names=["in_files"], + output_names=["out_name"], + function=concatcsv) + + concat_node_csvs = pe.Node( + interface=concat_csv_interface, name='concat_node_csvs') + concat_global_csvs = pe.Node( + interface=concat_csv_interface, name='concat_global_csvs') + concat_matrix_csvs = pe.Node( + interface=concat_csv_interface, name='concat_matrix_csvs') + + l3pipeline.connect([(l3inputnode, concat_node_csvs, + [('Group_CSVnodemetrics', 'in_files')])]) + l3pipeline.connect([(concat_node_csvs, l3datasink, + [('out_name', '@l3output.nodal_csv')])]) + + l3pipeline.connect([(l3inputnode, concat_global_csvs, + [('Group_CSVglobalmetrics', 'in_files')])]) + l3pipeline.connect([(concat_global_csvs, l3datasink, + [('out_name', '@l3output.global_csv')])]) + + l3pipeline.connect([(l3inputnode, concat_matrix_csvs, + [('Group_CSVmatrices', 'in_files')])]) + l3pipeline.connect([(concat_matrix_csvs, l3datasink, + [('out_name', '@l3output.csvmatrices')])]) + return l3pipeline + + +def 
create_average_networks_by_group_workflow(group_list,
+                                              data_dir,
+                                              subjects_dir,
+                                              output_dir,
+                                              title='group_average'):
+    """Creates a fourth-level pipeline to average the networks for two groups and merge them into a single
+    CFF file. This pipeline will also output the average networks in .gexf format, for visualization in other
+    graph viewers, such as Gephi.
+
+    Example
+    -------
+
+    >>> import nipype.workflows.dmri.connectivity.group_connectivity as groupwork
+    >>> from nipype.testing import example_data
+    >>> subjects_dir = '.'
+    >>> data_dir = '.'
+    >>> output_dir = '.'
+    >>> group_list = {}
+    >>> group_list['group1'] = ['subj1', 'subj2']
+    >>> group_list['group2'] = ['subj3', 'subj4']
+    >>> l4pipeline = groupwork.create_average_networks_by_group_workflow(group_list, data_dir, subjects_dir, output_dir)
+    >>> l4pipeline.run() # doctest: +SKIP
+
+    Inputs::
+
+        group_list: Dictionary of subject lists, keyed by group name
+        data_dir: Path to the data directory
+        subjects_dir: Path to the Freesurfer 'subjects' directory
+        output_dir: Path for the output files
+        title: String to use as a title for the output merged CFF file (default 'group_average')
+    """
+    l4infosource = pe.Node(
+        interface=util.IdentityInterface(fields=['group_id1', 'group_id2']),
+        name='l4infosource')
+    try:
+        l4infosource.inputs.group_id1 = list(group_list.keys())[0]
+        l4infosource.inputs.group_id2 = list(group_list.keys())[1]
+    except IndexError as e:
+        raise_from(
+            Exception(
+                'The create_average_networks_by_group_workflow requires 2 groups'
+            ), e)
+
+    l4info = dict(
+        networks=[['group_id', '']],
+        CMatrices=[['group_id', '']],
+        fibmean=[['group_id', 'mean_fiber_length']],
+        fibdev=[['group_id', 'fiber_length_std']])
+
+    l4source_grp1 = pe.Node(
+        nio.DataGrabber(infields=['group_id'], outfields=list(l4info.keys())),
+        name='l4source_grp1')
+    l4source_grp1.inputs.template = '%s/%s'
+    l4source_grp1.inputs.field_template = dict(
+        networks=op.join(output_dir, '%s/networks/*/*%s*intersections*.pck'),
+        CMatrices=op.join(output_dir, '%s/cmatrix/*/*%s*.mat'),
+        fibmean=op.join(output_dir, '%s/mean_fiber_length/*/*%s*.mat'),
+        fibdev=op.join(output_dir, '%s/fiber_length_std/*/*%s*.mat'))
+    l4source_grp1.inputs.base_directory = output_dir
+    l4source_grp1.inputs.template_args = l4info
+    l4source_grp1.inputs.sort_filelist = True
+
+    l4source_grp2 = l4source_grp1.clone(name='l4source_grp2')
+
+    l4inputnode = pe.Node(
+        interface=util.IdentityInterface(fields=[
+            'networks_grp1', 'networks_grp2', 'CMatrices_grp1',
+            'CMatrices_grp2', 'fibmean_grp1', 'fibmean_grp2', 'fibdev_grp1',
+            'fibdev_grp2'
+        ]),
+        name='l4inputnode')
+
+    average_networks_grp1 = pe.Node(
+        interface=cmtk.AverageNetworks(), name='average_networks_grp1')
+    average_networks_grp2 = average_networks_grp1.clone(
+        'average_networks_grp2')
+
+    averagecff = pe.Node(interface=cmtk.CFFConverter(), name="averagecff")
+    averagecff.inputs.out_file = title
+
+    merge_gpickled_averages = pe.Node(
+        interface=util.Merge(2), name='merge_gpickled_averages')
+    merge_gexf_averages = merge_gpickled_averages.clone('merge_gexf_averages')
+
+    l4datasink = pe.Node(interface=nio.DataSink(), name="l4datasink")
+    l4datasink.inputs.base_directory = output_dir
+
+    l4pipeline = pe.Workflow(name="l4output")
+    l4pipeline.base_dir = output_dir
+    l4pipeline.connect([
+        (l4infosource, l4source_grp1, [('group_id1', 'group_id')]),
+        (l4infosource, l4source_grp2, [('group_id2', 'group_id')]),
+        (l4source_grp1, l4inputnode, [('CMatrices', 'CMatrices_grp1')]),
+        (l4source_grp2, l4inputnode,
[('CMatrices', 'CMatrices_grp2')]), + (l4source_grp1, l4inputnode, [('networks', 'networks_grp1')]), + (l4source_grp2, l4inputnode, [('networks', 'networks_grp2')]), + (l4source_grp1, l4inputnode, [('fibmean', 'fibmean_grp1')]), + (l4source_grp2, l4inputnode, [('fibmean', 'fibmean_grp2')]), + (l4source_grp1, l4inputnode, [('fibdev', 'fibdev_grp1')]), + (l4source_grp2, l4inputnode, [('fibdev', 'fibdev_grp2')]), + ]) + + l4pipeline.connect([(l4inputnode, average_networks_grp1, [('networks_grp1', + 'in_files')])]) + l4pipeline.connect([(l4infosource, average_networks_grp1, [('group_id1', + 'group_id')])]) + + l4pipeline.connect([(l4inputnode, average_networks_grp2, [('networks_grp2', + 'in_files')])]) + l4pipeline.connect([(l4infosource, average_networks_grp2, [('group_id2', + 'group_id')])]) + + l4pipeline.connect([(average_networks_grp1, merge_gpickled_averages, + [('gpickled_groupavg', 'in1')])]) + l4pipeline.connect([(average_networks_grp2, merge_gpickled_averages, + [('gpickled_groupavg', 'in2')])]) + + l4pipeline.connect([(average_networks_grp1, merge_gexf_averages, + [('gexf_groupavg', 'in1')])]) + l4pipeline.connect([(average_networks_grp2, merge_gexf_averages, + [('gexf_groupavg', 'in2')])]) + + l4pipeline.connect([(merge_gpickled_averages, l4datasink, + [('out', '@l4output.gpickled')])]) + l4pipeline.connect([(merge_gpickled_averages, averagecff, + [('out', 'gpickled_networks')])]) + l4pipeline.connect([(averagecff, l4datasink, [('connectome_file', + '@l4output.averagecff')])]) + + l4pipeline.connect([(merge_gexf_averages, l4datasink, + [('out', '@l4output.gexf')])]) + return l4pipeline diff --git a/nipype/workflows/dmri/connectivity/nx.py b/nipype/workflows/dmri/connectivity/nx.py new file mode 100644 index 0000000000..95159dae8f --- /dev/null +++ b/nipype/workflows/dmri/connectivity/nx.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- +from ....pipeline import engine as pe +from ....interfaces import utility as util +from ....interfaces import cmtk as cmtk +from ....algorithms import misc as misc +from ....algorithms.misc import remove_identical_paths +from .group_connectivity import pullnodeIDs + + +def add_global_to_filename(in_file): + from nipype.utils.filemanip import split_filename + path, name, ext = split_filename(in_file) + return name + '_global' + ext + + +def add_nodal_to_filename(in_file): + from nipype.utils.filemanip import split_filename + path, name, ext = split_filename(in_file) + return name + '_nodal' + ext + + +def create_networkx_pipeline(name="networkx", extra_column_heading="subject"): + """Creates a workflow to calculate various graph measures (via NetworkX) on + an input network. The output measures are then converted to comma-separated value + text files, and an extra column / field is also added. Typically, the user would + connect the subject name to this field. 
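+
+    For instance, with ``extra_column_heading="subject"`` and
+    ``inputnode.extra_field = 'subj1'``, each row of the merged nodal CSV
+    gains a subject column (an illustrative sketch of the layout, not
+    verified output)::
+
+        subject,degree,clustering,...
+        subj1,12,0.31,...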
+ + Example + ------- + + >>> from nipype.workflows.dmri.connectivity.nx import create_networkx_pipeline + >>> nx = create_networkx_pipeline("networkx", "subject_id") + >>> nx.inputs.inputnode.extra_field = 'subj1' + >>> nx.inputs.inputnode.network_file = 'subj1.pck' + >>> nx.run() # doctest: +SKIP + + Inputs:: + + inputnode.extra_field + inputnode.network_file + + Outputs:: + + outputnode.network_files + outputnode.csv_files + outputnode.matlab_files + + """ + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=["extra_field", "network_file"]), + name="inputnode") + + pipeline = pe.Workflow(name=name) + + ntwkMetrics = pe.Node( + interface=cmtk.NetworkXMetrics(), name="NetworkXMetrics") + Matlab2CSV_node = pe.Node( + interface=misc.Matlab2CSV(), name="Matlab2CSV_node") + MergeCSVFiles_node = pe.Node( + interface=misc.MergeCSVFiles(), name="MergeCSVFiles_node") + MergeCSVFiles_node.inputs.extra_column_heading = extra_column_heading + + Matlab2CSV_global = Matlab2CSV_node.clone(name="Matlab2CSV_global") + MergeCSVFiles_global = MergeCSVFiles_node.clone( + name="MergeCSVFiles_global") + MergeCSVFiles_global.inputs.extra_column_heading = extra_column_heading + + mergeNetworks = pe.Node(interface=util.Merge(2), name="mergeNetworks") + mergeCSVs = mergeNetworks.clone("mergeCSVs") + + pipeline.connect([(inputnode, ntwkMetrics, [("network_file", "in_file")])]) + pipeline.connect([(ntwkMetrics, Matlab2CSV_node, [("node_measures_matlab", + "in_file")])]) + pipeline.connect([(ntwkMetrics, Matlab2CSV_global, + [("global_measures_matlab", "in_file")])]) + + pipeline.connect([(Matlab2CSV_node, MergeCSVFiles_node, [("csv_files", + "in_files")])]) + pipeline.connect([(inputnode, MergeCSVFiles_node, + [(("extra_field", add_nodal_to_filename), + "out_file")])]) + pipeline.connect([(inputnode, MergeCSVFiles_node, [("extra_field", + "extra_field")])]) + pipeline.connect([(inputnode, MergeCSVFiles_node, + [(("network_file", pullnodeIDs), "row_headings")])]) + + pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, + [("csv_files", "in_files")])]) + pipeline.connect([(Matlab2CSV_global, MergeCSVFiles_global, + [(("csv_files", remove_identical_paths), + "column_headings")])]) + # MergeCSVFiles_global.inputs.row_heading_title = 'metric' + # MergeCSVFiles_global.inputs.column_headings = ['average'] + + pipeline.connect([(inputnode, MergeCSVFiles_global, + [(("extra_field", add_global_to_filename), + "out_file")])]) + pipeline.connect([(inputnode, MergeCSVFiles_global, [("extra_field", + "extra_field")])]) + + pipeline.connect([(inputnode, mergeNetworks, [("network_file", "in1")])]) + pipeline.connect([(ntwkMetrics, mergeNetworks, [("gpickled_network_files", + "in2")])]) + + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + "network_files", "csv_files", "matlab_files", "node_csv", + "global_csv" + ]), + name="outputnode") + + pipeline.connect([(MergeCSVFiles_node, outputnode, [("csv_file", + "node_csv")])]) + pipeline.connect([(MergeCSVFiles_global, outputnode, [("csv_file", + "global_csv")])]) + + pipeline.connect([(MergeCSVFiles_node, mergeCSVs, [("csv_file", "in1")])]) + pipeline.connect([(MergeCSVFiles_global, mergeCSVs, [("csv_file", + "in2")])]) + pipeline.connect([(mergeNetworks, outputnode, [("out", "network_files")])]) + pipeline.connect([(mergeCSVs, outputnode, [("out", "csv_files")])]) + pipeline.connect([(ntwkMetrics, outputnode, [("matlab_matrix_files", + "matlab_files")])]) + return pipeline + + +def create_cmats_to_csv_pipeline(name="cmats_to_csv", + 
extra_column_heading="subject"): + """Creates a workflow to convert the outputs from CreateMatrix into a single + comma-separated value text file. An extra column / field is also added to the + text file. Typically, the user would connect the subject name to this field. + + Example + ------- + + >>> from nipype.workflows.dmri.connectivity.nx import create_cmats_to_csv_pipeline + >>> csv = create_cmats_to_csv_pipeline("cmats_to_csv", "subject_id") + >>> csv.inputs.inputnode.extra_field = 'subj1' + >>> csv.inputs.inputnode.matlab_matrix_files = ['subj1_cmatrix.mat', 'subj1_mean_fiber_length.mat', 'subj1_median_fiber_length.mat', 'subj1_fiber_length_std.mat'] + >>> csv.run() # doctest: +SKIP + + Inputs:: + + inputnode.extra_field + inputnode.matlab_matrix_files + + Outputs:: + + outputnode.csv_file + + """ + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=["extra_field", "matlab_matrix_files"]), + name="inputnode") + + pipeline = pe.Workflow(name=name) + + Matlab2CSV = pe.MapNode( + interface=misc.Matlab2CSV(), name="Matlab2CSV", iterfield=["in_file"]) + MergeCSVFiles = pe.Node( + interface=misc.MergeCSVFiles(), name="MergeCSVFiles") + MergeCSVFiles.inputs.extra_column_heading = extra_column_heading + + pipeline.connect([(inputnode, Matlab2CSV, [("matlab_matrix_files", + "in_file")])]) + pipeline.connect([(Matlab2CSV, MergeCSVFiles, [("csv_files", + "in_files")])]) + pipeline.connect([(inputnode, MergeCSVFiles, [("extra_field", + "extra_field")])]) + + outputnode = pe.Node( + interface=util.IdentityInterface(fields=["csv_file"]), + name="outputnode") + + pipeline.connect([(MergeCSVFiles, outputnode, [("csv_file", "csv_file")])]) + return pipeline diff --git a/nipype/workflows/dmri/dipy/__init__.py b/nipype/workflows/dmri/dipy/__init__.py new file mode 100644 index 0000000000..354ba7a7e6 --- /dev/null +++ b/nipype/workflows/dmri/dipy/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from __future__ import absolute_import +from .denoise import nlmeans_pipeline diff --git a/nipype/workflows/dmri/dipy/denoise.py b/nipype/workflows/dmri/dipy/denoise.py new file mode 100644 index 0000000000..a45f507b3c --- /dev/null +++ b/nipype/workflows/dmri/dipy/denoise.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from builtins import range +from ....pipeline import engine as pe +from ....interfaces import utility as niu +from ....interfaces import dipy + + +def nlmeans_pipeline(name='Denoise', + params={ + 'patch_radius': 1, + 'block_radius': 5 + }): + """ + Workflow that performs nlmeans denoising + + Example + ------- + + >>> from nipype.workflows.dmri.dipy.denoise import nlmeans_pipeline + >>> denoise = nlmeans_pipeline() + >>> denoise.inputs.inputnode.in_file = 'diffusion.nii' + >>> denoise.inputs.inputnode.in_mask = 'mask.nii' + >>> denoise.run() # doctest: +SKIP + + + """ + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_file', 'in_mask']), name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file']), name='outputnode') + + nmask = pe.Node( + niu.Function( + input_names=['in_file', 'in_mask'], + output_names=['out_file'], + function=bg_mask), + name='NoiseMsk') + nlmeans = pe.Node(dipy.Denoise(**params), name='NLMeans') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, 
nmask, [
+        ('in_file', 'in_file'), ('in_mask', 'in_mask')
+    ]), (inputnode, nlmeans, [('in_file', 'in_file'), ('in_mask', 'in_mask')]),
+                (nmask, nlmeans, [('out_file', 'noise_mask')]),
+                (nlmeans, outputnode, [('out_file', 'out_file')])])
+    return wf
+
+
+def csf_mask(in_file, in_mask, out_file=None):
+    """
+    Artisanal (hand-crafted) mask of CSF in T2w-like images
+    """
+    import nibabel as nb
+    import numpy as np
+    from scipy.ndimage import binary_erosion, binary_opening, label
+    import scipy.ndimage as nd
+    import os.path as op
+    from nipype.utils import NUMPY_MMAP
+
+    if out_file is None:
+        fname, ext = op.splitext(op.basename(in_file))
+        if ext == ".gz":
+            fname, ext2 = op.splitext(fname)
+            ext = ext2 + ext
+        out_file = op.abspath("%s_csfmask%s" % (fname, ext))
+
+    im = nb.load(in_file, mmap=NUMPY_MMAP)
+    hdr = im.header.copy()
+    hdr.set_data_dtype(np.uint8)
+    hdr.set_xyzt_units('mm')
+    imdata = im.get_data()
+    msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data()
+    msk = binary_erosion(msk, structure=np.ones((15, 15, 10))).astype(np.uint8)
+    thres = np.percentile(imdata[msk > 0].reshape(-1), 90.0)
+    imdata[imdata < thres] = 0
+    imdata = imdata * msk
+    imdata[imdata > 0] = 1
+    imdata = binary_opening(
+        imdata, structure=np.ones((2, 2, 2))).astype(np.uint8)
+
+    label_im, nb_labels = label(imdata)
+    sizes = nd.sum(imdata, label_im, list(range(nb_labels + 1)))
+    mask_size = sizes != sizes.max()
+    remove_pixel = mask_size[label_im]
+    label_im[remove_pixel] = 0
+    label_im[label_im > 0] = 1
+    nb.Nifti1Image(label_im.astype(np.uint8), im.affine,
+                   hdr).to_filename(out_file)
+    return out_file
+
+
+def bg_mask(in_file, in_mask, out_file=None):
+    """
+    Rough mask of background from brain masks
+    """
+    import nibabel as nb
+    import numpy as np
+    from scipy.ndimage import binary_dilation
+    import scipy.ndimage as nd
+    import os.path as op
+    from nipype.utils import NUMPY_MMAP
+
+    if out_file is None:
+        fname, ext = op.splitext(op.basename(in_file))
+        if ext == ".gz":
+            fname, ext2 = op.splitext(fname)
+            ext = ext2 + ext
+        out_file = op.abspath("%s_bgmask%s" % (fname, ext))
+
+    im = nb.load(in_file, mmap=NUMPY_MMAP)
+    hdr = im.header.copy()
+    hdr.set_data_dtype(np.uint8)
+    hdr.set_xyzt_units('mm')
+    msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data()
+    msk = 1 - binary_dilation(msk, structure=np.ones((20, 20, 20)))
+    nb.Nifti1Image(msk.astype(np.uint8), im.affine, hdr).to_filename(out_file)
+    return out_file
diff --git a/nipype/workflows/dmri/dtitk/__init__.py b/nipype/workflows/dmri/dtitk/__init__.py
new file mode 100644
index 0000000000..02dbf25549
--- /dev/null
+++ b/nipype/workflows/dmri/dtitk/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+from __future__ import absolute_import
+from .tensor_registration import (affine_tensor_pipeline,
+                                  diffeomorphic_tensor_pipeline)
diff --git a/nipype/workflows/dmri/dtitk/tensor_registration.py b/nipype/workflows/dmri/dtitk/tensor_registration.py
new file mode 100644
index 0000000000..faae608a44
--- /dev/null
+++ b/nipype/workflows/dmri/dtitk/tensor_registration.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+# coding: utf-8
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+from ....pipeline import engine as pe
+from ....interfaces import utility as niu
+from ....interfaces import dtitk
+
+
+def affine_tensor_pipeline(name='AffTen'):
+
+    """
+    Workflow that performs a
linear registration
+    (Rigid followed by Affine)
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.dtitk.tensor_registration import affine_tensor_pipeline
+    >>> affine = affine_tensor_pipeline()
+    >>> affine.inputs.inputnode.fixed_file = 'im1.nii'
+    >>> affine.inputs.inputnode.moving_file = 'im2.nii'
+    >>> affine.run() # doctest: +SKIP
+
+
+    """
+    inputnode = pe.Node(niu.IdentityInterface(
+                        fields=['fixed_file', 'moving_file']),
+                        name='inputnode')
+    outputnode = pe.Node(niu.IdentityInterface(
+                         fields=['out_file', 'out_file_xfm']),
+                         name='outputnode')
+
+    rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
+    affine_node = pe.Node(dtitk.Affine(), name='affine_node')
+
+    wf = pe.Workflow(name=name)
+
+    wf.connect(inputnode, 'fixed_file', rigid_node, 'fixed_file')
+    wf.connect(inputnode, 'moving_file', rigid_node, 'moving_file')
+    wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm')
+    wf.connect(inputnode, 'fixed_file', affine_node, 'fixed_file')
+    wf.connect(inputnode, 'moving_file', affine_node, 'moving_file')
+    wf.connect(affine_node, 'out_file', outputnode, 'out_file')
+    wf.connect(affine_node, 'out_file_xfm', outputnode, 'out_file_xfm')
+
+    return wf
+
+
+def diffeomorphic_tensor_pipeline(name='DiffeoTen',
+                                  params={'array_size': (128, 128, 64)}):
+    """
+    Workflow that performs a diffeomorphic registration
+    (Rigid and Affine followed by Diffeomorphic)
+    Note: the requirements for a diffeomorphic registration specify that
+    dimension 0 be a power of 2, so images are resliced prior to
+    registration. Remember to move the origin and reslice before applying
+    the transform to another file!
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.dtitk.tensor_registration import diffeomorphic_tensor_pipeline
+    >>> diffeo = diffeomorphic_tensor_pipeline()
+    >>> diffeo.inputs.inputnode.fixed_file = 'im1.nii'
+    >>> diffeo.inputs.inputnode.moving_file = 'im2.nii'
+    >>> diffeo.run() # doctest: +SKIP
+
+
+    """
+    inputnode = pe.Node(niu.IdentityInterface(
+                        fields=['fixed_file', 'moving_file']),
+                        name='inputnode')
+    outputnode = pe.Node(niu.IdentityInterface(
+                         fields=['out_file', 'out_file_xfm',
+                                 'fixed_resliced', 'moving_resliced']),
+                         name='outputnode')
+    origin_node_fixed = pe.Node(dtitk.TVAdjustVoxSp(origin=(0, 0, 0)),
+                                name='origin_node_fixed')
+    origin_node_moving = origin_node_fixed.clone(name='origin_node_moving')
+    reslice_node_pow2 = pe.Node(dtitk.TVResample(
+                                origin=(0, 0, 0),
+                                array_size=params['array_size']),
+                                name='reslice_node_pow2')
+    reslice_node_moving = pe.Node(dtitk.TVResample(),
+                                  name='reslice_node_moving')
+    mask_node = pe.Node(dtitk.BinThresh(lower_bound=0.01, upper_bound=100,
+                                        inside_value=1, outside_value=0),
+                        name='mask_node')
+    rigid_node = pe.Node(dtitk.Rigid(), name='rigid_node')
+    affine_node = pe.Node(dtitk.Affine(), name='affine_node')
+    diffeo_node = pe.Node(dtitk.Diffeo(n_iters=6, ftol=0.002),
+                          name='diffeo_node')
+    compose_xfm_node = pe.Node(dtitk.ComposeXfm(), name='compose_xfm_node')
+    apply_xfm_node = pe.Node(dtitk.DiffeoSymTensor3DVol(),
+                             name='apply_xfm_node')
+    adjust_vs_node_to_input = pe.Node(dtitk.TVAdjustVoxSp(),
+                                      name='adjust_vs_node_to_input')
+    reslice_node_to_input = pe.Node(dtitk.TVResample(),
+                                    name='reslice_node_to_input')
+    input_fa = pe.Node(dtitk.TVtool(in_flag='fa'), name='input_fa')
+
+    wf = pe.Workflow(name=name)
+
+    # calculate input FA image for origin reference
+    wf.connect(inputnode, 'fixed_file', input_fa, 'in_file')
+    # Reslice input images
+    wf.connect(inputnode, 'fixed_file', origin_node_fixed,
'in_file') + wf.connect(origin_node_fixed, 'out_file', reslice_node_pow2, 'in_file') + wf.connect(reslice_node_pow2, 'out_file', + reslice_node_moving, 'target_file') + wf.connect(inputnode, 'moving_file', origin_node_moving, 'in_file') + wf.connect(origin_node_moving, 'out_file', reslice_node_moving, 'in_file') + # Rigid registration + wf.connect(reslice_node_pow2, 'out_file', rigid_node, 'fixed_file') + wf.connect(reslice_node_moving, 'out_file', rigid_node, 'moving_file') + # Affine registration + wf.connect(rigid_node, 'out_file_xfm', affine_node, 'initialize_xfm') + wf.connect(reslice_node_pow2, 'out_file', affine_node, 'fixed_file') + wf.connect(reslice_node_moving, 'out_file', affine_node, 'moving_file') + # Diffeo registration + wf.connect(reslice_node_pow2, 'out_file', mask_node, 'in_file') + wf.connect(reslice_node_pow2, 'out_file', diffeo_node, 'fixed_file') + wf.connect(affine_node, 'out_file', diffeo_node, 'moving_file') + wf.connect(mask_node, 'out_file', diffeo_node, 'mask_file') + # Compose transform + wf.connect(diffeo_node, 'out_file_xfm', compose_xfm_node, 'in_df') + wf.connect(affine_node, 'out_file_xfm', compose_xfm_node, 'in_aff') + # Apply transform + wf.connect(reslice_node_moving, 'out_file', apply_xfm_node, 'in_file') + wf.connect(compose_xfm_node, 'out_file', apply_xfm_node, 'transform') + # Move origin and reslice to match original fixed input image + wf.connect(apply_xfm_node, 'out_file', adjust_vs_node_to_input, 'in_file') + wf.connect(input_fa, 'out_file', adjust_vs_node_to_input, 'target_file') + wf.connect(adjust_vs_node_to_input, 'out_file', reslice_node_to_input, 'in_file') + wf.connect(input_fa, 'out_file', reslice_node_to_input, 'target_file') + # Send to output + wf.connect(reslice_node_to_input, 'out_file', outputnode, 'out_file') + wf.connect(compose_xfm_node, 'out_file', outputnode, 'out_file_xfm') + wf.connect(reslice_node_pow2, 'out_file', outputnode, 'fixed_resliced') + wf.connect(reslice_node_moving, 'out_file', outputnode, 'moving_resliced') + + return wf diff --git a/nipype/workflows/dmri/fsl/__init__.py b/nipype/workflows/dmri/fsl/__init__.py new file mode 100644 index 0000000000..66be352b84 --- /dev/null +++ b/nipype/workflows/dmri/fsl/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .dti import create_bedpostx_pipeline, bedpostx_parallel + +from .artifacts import (all_fmb_pipeline, all_peb_pipeline, all_fsl_pipeline, + hmc_pipeline, ecc_pipeline, sdc_fmb, sdc_peb, + remove_bias) + +from .epi import (fieldmap_correction, topup_correction, + create_eddy_correct_pipeline, create_epidewarp_pipeline, + create_dmri_preprocessing) + +from .tbss import (create_tbss_1_preproc, create_tbss_2_reg, + create_tbss_3_postreg, create_tbss_4_prestats, + create_tbss_all, create_tbss_non_FA) diff --git a/nipype/workflows/dmri/fsl/artifacts.py b/nipype/workflows/dmri/fsl/artifacts.py new file mode 100644 index 0000000000..3b29c5a07c --- /dev/null +++ b/nipype/workflows/dmri/fsl/artifacts.py @@ -0,0 +1,1061 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from ....interfaces.io import JSONFileGrabber +from ....interfaces import utility as niu +from ....interfaces import ants +from ....interfaces import fsl +from ....pipeline import engine as pe +from ...data import get_flirt_schedule + +from .utils import ( + 
b0_indices,
+    time_avg,
+    apply_all_corrections,
+    b0_average,
+    hmc_split,
+    dwi_flirt,
+    eddy_rotate_bvecs,
+    rotate_bvecs,
+    insert_mat,
+    extract_bval,
+    recompose_dwi,
+    recompose_xfm,
+    siemens2rads,
+    rads2radsec,
+    demean_image,
+    cleanup_edge_pipeline,
+    add_empty_vol,
+    vsm2warp,
+    compute_readout,
+)
+
+
+def all_fmb_pipeline(name='hmc_sdc_ecc', fugue_params=dict(smooth3d=2.0)):
+    """
+    Builds a pipeline including three artifact corrections: head-motion
+    correction (HMC), susceptibility-derived distortion correction (SDC),
+    and Eddy currents-derived distortion correction (ECC).
+
+    The displacement fields from each kind of distortion are combined. Thus,
+    only one interpolation occurs between input data and result.
+
+    .. warning:: this workflow rotates the gradients table (*b*-vectors)
+      [Leemans09]_.
+
+
+    Examples
+    --------
+
+    >>> from nipype.workflows.dmri.fsl.artifacts import all_fmb_pipeline
+    >>> allcorr = all_fmb_pipeline()
+    >>> allcorr.inputs.inputnode.in_file = 'epi.nii'
+    >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval'
+    >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec'
+    >>> allcorr.inputs.inputnode.bmap_mag = 'magnitude.nii'
+    >>> allcorr.inputs.inputnode.bmap_pha = 'phase.nii'
+    >>> allcorr.inputs.inputnode.epi_param = 'epi_param.txt'
+    >>> allcorr.run() # doctest: +SKIP
+
+    """
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=[
+            'in_file', 'in_bvec', 'in_bval', 'bmap_pha', 'bmap_mag',
+            'epi_param'
+        ]),
+        name='inputnode')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']),
+        name='outputnode')
+
+    list_b0 = pe.Node(
+        niu.Function(
+            input_names=['in_bval'],
+            output_names=['out_idx'],
+            function=b0_indices),
+        name='B0indices')
+
+    avg_b0_0 = pe.Node(
+        niu.Function(
+            input_names=['in_file', 'index'],
+            output_names=['out_file'],
+            function=time_avg),
+        name='b0_avg_pre')
+    avg_b0_1 = pe.Node(
+        niu.Function(
+            input_names=['in_file', 'index'],
+            output_names=['out_file'],
+            function=time_avg),
+        name='b0_avg_post')
+
+    bet_dwi0 = pe.Node(
+        fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre')
+    bet_dwi1 = pe.Node(
+        fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post')
+
+    hmc = hmc_pipeline()
+    sdc = sdc_fmb(fugue_params=fugue_params)
+    ecc = ecc_pipeline()
+    unwarp = apply_all_corrections()
+
+    wf = pe.Workflow(name=name)
+    wf.connect(
+        [(inputnode, hmc,
+          [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'),
+           ('in_bval', 'inputnode.in_bval')]), (inputnode, list_b0,
+                                                [('in_bval', 'in_bval')]),
+         (inputnode, avg_b0_0, [('in_file', 'in_file')]), (list_b0, avg_b0_0,
+                                                           [('out_idx',
+                                                             'index')]),
+         (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [
+             ('mask_file', 'inputnode.in_mask')
+         ]), (hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]),
+         (bet_dwi0, sdc,
+          [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [
+              ('bmap_pha', 'inputnode.bmap_pha'),
+              ('bmap_mag', 'inputnode.bmap_mag'), ('epi_param',
+                                                   'inputnode.settings')
+          ]), (list_b0, sdc, [('out_idx', 'inputnode.in_ref')]), (hmc, ecc, [
+              ('outputnode.out_xfms', 'inputnode.in_xfms')
+          ]), (inputnode, ecc,
+               [('in_file', 'inputnode.in_file'),
+                ('in_bval', 'inputnode.in_bval')]), (bet_dwi0, ecc, [
+                    ('mask_file', 'inputnode.in_mask')
+                ]), (ecc, avg_b0_1, [('outputnode.out_file',
+                                      'in_file')]), (list_b0, avg_b0_1,
+                                                     [('out_idx', 'index')]),
+         (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [
+             ('in_file', 'inputnode.in_dwi')
+         ]), (hmc, unwarp,
[('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [ + ('outputnode.out_xfms', 'inputnode.in_ecc') + ]), (sdc, unwarp, [('outputnode.out_warp', + 'inputnode.in_sdc')]), (hmc, outputnode, [ + ('outputnode.out_bvec', 'out_bvec') + ]), (unwarp, outputnode, + [('outputnode.out_file', + 'out_file')]), (bet_dwi1, outputnode, + [('mask_file', + 'out_mask')])]) + return wf + + +def all_peb_pipeline(name='hmc_sdc_ecc', + epi_params=dict( + echospacing=0.77e-3, + acc_factor=3, + enc_dir='y-', + epi_factor=1), + altepi_params=dict( + echospacing=0.77e-3, + acc_factor=3, + enc_dir='y', + epi_factor=1)): + """ + Builds a pipeline including three artifact corrections: head-motion + correction (HMC), susceptibility-derived distortion correction (SDC), + and Eddy currents-derived distortion correction (ECC). + + .. warning:: this workflow rotates the gradients table (*b*-vectors) + [Leemans09]_. + + + Examples + -------- + + >>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline + >>> allcorr = all_peb_pipeline() + >>> allcorr.inputs.inputnode.in_file = 'epi.nii' + >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' + >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' + >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' + >>> allcorr.run() # doctest: +SKIP + + """ + inputnode = pe.Node( + niu.IdentityInterface( + fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), + name='inputnode') + + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']), + name='outputnode') + + avg_b0_0 = pe.Node( + niu.Function( + input_names=['in_dwi', 'in_bval'], + output_names=['out_file'], + function=b0_average), + name='b0_avg_pre') + avg_b0_1 = pe.Node( + niu.Function( + input_names=['in_dwi', 'in_bval'], + output_names=['out_file'], + function=b0_average), + name='b0_avg_post') + bet_dwi0 = pe.Node( + fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') + bet_dwi1 = pe.Node( + fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') + + hmc = hmc_pipeline() + sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) + ecc = ecc_pipeline() + + unwarp = apply_all_corrections() + + wf = pe.Workflow(name=name) + wf.connect( + [(inputnode, hmc, + [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), + ('in_bval', 'inputnode.in_bval')]), (inputnode, avg_b0_0, + [('in_file', 'in_dwi'), + ('in_bval', 'in_bval')]), + (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [ + ('mask_file', 'inputnode.in_mask') + ]), (hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]), + (bet_dwi0, sdc, + [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [ + ('in_bval', 'inputnode.in_bval'), ('alt_file', + 'inputnode.alt_file') + ]), (inputnode, ecc, [('in_file', 'inputnode.in_file'), + ('in_bval', 'inputnode.in_bval')]), + (bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]), (hmc, ecc, [ + ('outputnode.out_xfms', 'inputnode.in_xfms') + ]), (ecc, avg_b0_1, [('outputnode.out_file', + 'in_dwi')]), (inputnode, avg_b0_1, + [('in_bval', 'in_bval')]), + (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [ + ('in_file', 'inputnode.in_dwi') + ]), (hmc, unwarp, + [('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [ + ('outputnode.out_xfms', 'inputnode.in_ecc') + ]), (sdc, unwarp, [('outputnode.out_warp', + 'inputnode.in_sdc')]), (hmc, outputnode, [ + ('outputnode.out_bvec', 'out_bvec') + ]), (unwarp, outputnode, + [('outputnode.out_file', + 'out_file')]), (bet_dwi1, outputnode, + [('mask_file', 
+ 'out_mask')])]) + return wf + + +def all_fsl_pipeline(name='fsl_all_correct', + epi_params=dict( + echospacing=0.77e-3, acc_factor=3, enc_dir='y-'), + altepi_params=dict( + echospacing=0.77e-3, acc_factor=3, enc_dir='y')): + """ + Workflow that integrates FSL ``topup`` and ``eddy``. + + + .. warning:: this workflow rotates the gradients table (*b*-vectors) + [Leemans09]_. + + + .. warning:: this workflow does not perform jacobian modulation of each + *DWI* [Jones10]_. + + + Examples + -------- + + >>> from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline + >>> allcorr = all_fsl_pipeline() + >>> allcorr.inputs.inputnode.in_file = 'epi.nii' + >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' + >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' + >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' + >>> allcorr.run() # doctest: +SKIP + + """ + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), + name='inputnode') + + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_mask', 'out_bvec']), + name='outputnode') + + def gen_index(in_file): + import numpy as np + import nibabel as nb + import os + from nipype.utils import NUMPY_MMAP + out_file = os.path.abspath('index.txt') + vols = nb.load(in_file, mmap=NUMPY_MMAP).get_data().shape[-1] + np.savetxt(out_file, np.ones((vols, )).T) + return out_file + + gen_idx = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=gen_index), + name='gen_index') + avg_b0_0 = pe.Node( + niu.Function( + input_names=['in_dwi', 'in_bval'], + output_names=['out_file'], + function=b0_average), + name='b0_avg_pre') + bet_dwi0 = pe.Node( + fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') + + sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) + ecc = pe.Node(fsl.Eddy(method='jac'), name='fsl_eddy') + rot_bvec = pe.Node( + niu.Function( + input_names=['in_bvec', 'eddy_params'], + output_names=['out_file'], + function=eddy_rotate_bvecs), + name='Rotate_Bvec') + avg_b0_1 = pe.Node( + niu.Function( + input_names=['in_dwi', 'in_bval'], + output_names=['out_file'], + function=b0_average), + name='b0_avg_post') + bet_dwi1 = pe.Node( + fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') + + wf = pe.Workflow(name=name) + wf.connect( + [(inputnode, avg_b0_0, [('in_file', 'in_dwi'), ('in_bval', + 'in_bval')]), + (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, sdc, [ + ('mask_file', 'inputnode.in_mask') + ]), (inputnode, sdc, [('in_file', 'inputnode.in_file'), + ('alt_file', 'inputnode.alt_file'), + ('in_bval', 'inputnode.in_bval')]), + (sdc, ecc, [('topup.out_enc_file', 'in_acqp'), + ('topup.out_fieldcoef', 'in_topup_fieldcoef'), + ('topup.out_movpar', + 'in_topup_movpar')]), (bet_dwi0, ecc, [('mask_file', + 'in_mask')]), + (inputnode, gen_idx, [('in_file', 'in_file')]), (inputnode, ecc, [ + ('in_file', 'in_file'), ('in_bval', 'in_bval'), ('in_bvec', + 'in_bvec') + ]), (gen_idx, ecc, + [('out_file', 'in_index')]), (inputnode, rot_bvec, [ + ('in_bvec', 'in_bvec') + ]), (ecc, rot_bvec, + [('out_parameter', 'eddy_params')]), (ecc, avg_b0_1, [ + ('out_corrected', 'in_dwi') + ]), (inputnode, avg_b0_1, [('in_bval', 'in_bval')]), + (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (ecc, outputnode, [ + ('out_corrected', 'out_file') + ]), (rot_bvec, outputnode, + [('out_file', 'out_bvec')]), (bet_dwi1, outputnode, + [('mask_file', 'out_mask')])]) + return wf + + +def hmc_pipeline(name='motion_correct'): + 
""" + HMC stands for head-motion correction. + + Creates a pipeline that corrects for head motion artifacts in dMRI + sequences. + It takes a series of diffusion weighted images and rigidly co-registers + them to one reference image. Finally, the `b`-matrix is rotated accordingly + [Leemans09]_ making use of the rotation matrix obtained by FLIRT. + + Search angles have been limited to 4 degrees, based on results in + [Yendiki13]_. + + A list of rigid transformation matrices is provided, so that transforms + can be chained. + This is useful to correct for artifacts with only one interpolation process + (as previously discussed `here + `_), + and also to compute nuisance regressors as proposed by [Yendiki13]_. + + .. warning:: This workflow rotates the `b`-vectors, so please be advised + that not all the dicom converters ensure the consistency between the + resulting nifti orientation and the gradients table (e.g. dcm2nii + checks it). + + .. admonition:: References + + .. [Leemans09] Leemans A, and Jones DK, `The B-matrix must be rotated + when correcting for subject motion in DTI data + `_, + Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890. + + .. [Yendiki13] Yendiki A et al., `Spurious group differences due to head + motion in a diffusion MRI study + `_. + Neuroimage. 21(88C):79-90. 2013. doi: 10.1016/j.neuroimage.2013.11.027 + + Example + ------- + + >>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline + >>> hmc = hmc_pipeline() + >>> hmc.inputs.inputnode.in_file = 'diffusion.nii' + >>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec' + >>> hmc.inputs.inputnode.in_bval = 'diffusion.bval' + >>> hmc.inputs.inputnode.in_mask = 'mask.nii' + >>> hmc.run() # doctest: +SKIP + + Inputs:: + + inputnode.in_file - input dwi file + inputnode.in_mask - weights mask of reference image (a file with data \ +range in [0.0, 1.0], indicating the weight of each voxel when computing the \ +metric. 
+ inputnode.in_bval - b-values file + inputnode.in_bvec - gradients file (b-vectors) + inputnode.ref_num (optional, default=0) index of the b0 volume that \ +should be taken as reference + + Outputs:: + + outputnode.out_file - corrected dwi file + outputnode.out_bvec - rotated gradient vectors table + outputnode.out_xfms - list of transformation matrices + + """ + params = dict( + dof=6, + bgvalue=0, + save_log=True, + no_search=True, + # cost='mutualinfo', cost_func='mutualinfo', bins=64, + schedule=get_flirt_schedule('hmc')) + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['in_file', 'ref_num', 'in_bvec', 'in_bval', 'in_mask']), + name='inputnode') + split = pe.Node( + niu.Function( + output_names=['out_ref', 'out_mov', 'out_bval', 'volid'], + input_names=['in_file', 'in_bval', 'ref_num'], + function=hmc_split), + name='SplitDWI') + flirt = dwi_flirt(flirt_param=params) + insmat = pe.Node( + niu.Function( + input_names=['inlist', 'volid'], + output_names=['out'], + function=insert_mat), + name='InsertRefmat') + rot_bvec = pe.Node( + niu.Function( + function=rotate_bvecs, + input_names=['in_bvec', 'in_matrix'], + output_names=['out_file']), + name='Rotate_Bvec') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_bvec', 'out_xfms']), + name='outputnode') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, split, + [('in_file', 'in_file'), ('in_bval', 'in_bval'), + ('ref_num', 'ref_num')]), (inputnode, flirt, [ + ('in_mask', 'inputnode.ref_mask') + ]), (split, flirt, [('out_ref', 'inputnode.reference'), + ('out_mov', 'inputnode.in_file'), + ('out_bval', 'inputnode.in_bval')]), + (flirt, insmat, [('outputnode.out_xfms', 'inlist')]), + (split, insmat, [('volid', 'volid')]), (inputnode, rot_bvec, [ + ('in_bvec', 'in_bvec') + ]), (insmat, rot_bvec, + [('out', 'in_matrix')]), (rot_bvec, outputnode, + [('out_file', 'out_bvec')]), + (flirt, outputnode, [('outputnode.out_file', + 'out_file')]), (insmat, outputnode, + [('out', 'out_xfms')])]) + return wf + + +def ecc_pipeline(name='eddy_correct'): + """ + ECC stands for Eddy currents correction. + + Creates a pipeline that corrects for artifacts induced by Eddy currents in + dMRI sequences. + It takes a series of diffusion weighted images and linearly co-registers + them to one reference image (the average of all b0s in the dataset). + + DWIs are also modulated by the determinant of the Jacobian as indicated by + [Jones10]_ and [Rohde04]_. + + A list of rigid transformation matrices can be provided, sourcing from a + :func:`.hmc_pipeline` workflow, to initialize registrations in a *motion + free* framework. + + A list of affine transformation matrices is available as output, so that + transforms can be chained (discussion + `here `_). + + .. admonition:: References + + .. [Jones10] Jones DK, `The signal intensity must be modulated by the + determinant of the Jacobian when correcting for eddy currents in + diffusion MRI + `_, + Proc. ISMRM 18th Annual Meeting, (2010). + + .. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of + Motion and Distortion in Diffusion-Weighted MRI + `_, MRM + 51:103-114 (2004). 
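+
+    .. note:: The Jacobian modulation implemented below multiplies each
+      corrected DWI volume by the absolute value of the determinant of the
+      affine estimated for it (see the ``_xfm_jacobian`` helper in this
+      module).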
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline
+    >>> ecc = ecc_pipeline()
+    >>> ecc.inputs.inputnode.in_file = 'diffusion.nii'
+    >>> ecc.inputs.inputnode.in_bval = 'diffusion.bval'
+    >>> ecc.inputs.inputnode.in_mask = 'mask.nii'
+    >>> ecc.run() # doctest: +SKIP
+
+    Inputs::
+
+        inputnode.in_file - input dwi file
+        inputnode.in_mask - weights mask of reference image (a file with data \
+range in [0.0, 1.0], indicating the weight of each voxel when computing the \
+metric).
+        inputnode.in_bval - b-values table
+        inputnode.in_xfms - list of matrices to initialize registration (from \
+head-motion correction)
+
+    Outputs::
+
+        outputnode.out_file - corrected dwi file
+        outputnode.out_xfms - list of transformation matrices
+    """
+
+    params = dict(
+        dof=12,
+        no_search=True,
+        interp='spline',
+        bgvalue=0,
+        schedule=get_flirt_schedule('ecc'))
+    # cost='normmi', cost_func='normmi', bins=64,
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(
+            fields=['in_file', 'in_bval', 'in_mask', 'in_xfms']),
+        name='inputnode')
+    avg_b0 = pe.Node(
+        niu.Function(
+            input_names=['in_dwi', 'in_bval'],
+            output_names=['out_file'],
+            function=b0_average),
+        name='b0_avg')
+    pick_dws = pe.Node(
+        niu.Function(
+            input_names=['in_dwi', 'in_bval', 'b'],
+            output_names=['out_file'],
+            function=extract_bval),
+        name='ExtractDWI')
+    pick_dws.inputs.b = 'diff'
+
+    flirt = dwi_flirt(flirt_param=params, excl_nodiff=True)
+
+    mult = pe.MapNode(
+        fsl.BinaryMaths(operation='mul'),
+        name='ModulateDWIs',
+        iterfield=['in_file', 'operand_value'])
+    thres = pe.MapNode(
+        fsl.Threshold(thresh=0.0),
+        iterfield=['in_file'],
+        name='RemoveNegative')
+
+    split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
+    get_mat = pe.Node(
+        niu.Function(
+            input_names=['in_bval', 'in_xfms'],
+            output_names=['out_files'],
+            function=recompose_xfm),
+        name='GatherMatrices')
+    merge = pe.Node(
+        niu.Function(
+            input_names=['in_dwi', 'in_bval', 'in_corrected'],
+            output_names=['out_file'],
+            function=recompose_dwi),
+        name='MergeDWIs')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['out_file', 'out_xfms']),
+        name='outputnode')
+
+    wf = pe.Workflow(name=name)
+    wf.connect([
+        (inputnode, avg_b0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]),
+        (inputnode, pick_dws, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]),
+        (inputnode, merge,
+         [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (inputnode, flirt, [
+             ('in_mask', 'inputnode.ref_mask'),
+             ('in_xfms', 'inputnode.in_xfms'), ('in_bval', 'inputnode.in_bval')
+         ]), (inputnode, get_mat, [('in_bval', 'in_bval')]), (avg_b0, flirt, [
+             ('out_file', 'inputnode.reference')
+         ]), (pick_dws, flirt, [('out_file', 'inputnode.in_file')]),
+        (flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]), (flirt, mult, [
+            (('outputnode.out_xfms', _xfm_jacobian), 'operand_value')
+        ]), (flirt, split,
+             [('outputnode.out_file', 'in_file')]), (split, mult, [
+                 ('out_files', 'in_file')
+             ]), (mult, thres, [('out_file', 'in_file')]), (thres, merge, [
+                 ('out_file', 'in_corrected')
+             ]), (get_mat, outputnode,
+                  [('out_files', 'out_xfms')]), (merge, outputnode,
+                                                 [('out_file', 'out_file')])
+    ])
+    return wf
+
+
+def sdc_fmb(name='fmb_correction',
+            interp='Linear',
+            fugue_params=dict(smooth3d=2.0)):
+    """
+    SDC stands for susceptibility distortion correction. FMB stands for
+    fieldmap-based.
+
+    The fieldmap based (FMB) method implements SDC by using a mapping of the
+    B0 field as proposed by [Jezzard95]_.
This workflow uses the implementation + of FSL (`FUGUE `_). Phase + unwrapping is performed using `PRELUDE + `_ + [Jenkinson03]_. Preparation of the fieldmap is performed reproducing the + script in FSL `fsl_prepare_fieldmap + `_. + + + + Example + ------- + + >>> from nipype.workflows.dmri.fsl.artifacts import sdc_fmb + >>> fmb = sdc_fmb() + >>> fmb.inputs.inputnode.in_file = 'diffusion.nii' + >>> fmb.inputs.inputnode.in_ref = list(range(0, 30, 6)) + >>> fmb.inputs.inputnode.in_mask = 'mask.nii' + >>> fmb.inputs.inputnode.bmap_mag = 'magnitude.nii' + >>> fmb.inputs.inputnode.bmap_pha = 'phase.nii' + >>> fmb.inputs.inputnode.settings = 'epi_param.txt' + >>> fmb.run() # doctest: +SKIP + + .. warning:: Only SIEMENS format fieldmaps are supported. + + .. admonition:: References + + .. [Jezzard95] Jezzard P, and Balaban RS, `Correction for geometric + distortion in echo planar images from B0 field variations + `_, + MRM 34(1):65-73. (1995). doi: 10.1002/mrm.1910340111. + + .. [Jenkinson03] Jenkinson M., `Fast, automated, N-dimensional + phase-unwrapping algorithm `_, + MRM 49(1):193-197, 2003, doi: 10.1002/mrm.10354. + + """ + + epi_defaults = { + 'delta_te': 2.46e-3, + 'echospacing': 0.77e-3, + 'acc_factor': 2, + 'enc_dir': u'AP' + } + + inputnode = pe.Node( + niu.IdentityInterface(fields=[ + 'in_file', 'in_ref', 'in_mask', 'bmap_pha', 'bmap_mag', 'settings' + ]), + name='inputnode') + + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_vsm', 'out_warp']), + name='outputnode') + + r_params = pe.Node( + JSONFileGrabber(defaults=epi_defaults), name='SettingsGrabber') + eff_echo = pe.Node( + niu.Function( + function=_eff_t_echo, + input_names=['echospacing', 'acc_factor'], + output_names=['eff_echo']), + name='EffEcho') + + firstmag = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='GetFirst') + n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') + bet = pe.Node(fsl.BET(frac=0.4, mask=True), name='BrainExtraction') + dilate = pe.Node( + fsl.maths.MathsCommand(nan2zeros=True, args='-kernel sphere 5 -dilM'), + name='MskDilate') + pha2rads = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=siemens2rads), + name='PreparePhase') + prelude = pe.Node(fsl.PRELUDE(process3d=True), name='PhaseUnwrap') + rad2rsec = pe.Node( + niu.Function( + input_names=['in_file', 'delta_te'], + output_names=['out_file'], + function=rads2radsec), + name='ToRadSec') + + baseline = pe.Node( + niu.Function( + input_names=['in_file', 'index'], + output_names=['out_file'], + function=time_avg), + name='Baseline') + + fmm2b0 = pe.Node( + ants.Registration(output_warped_image=True), name="FMm_to_B0") + fmm2b0.inputs.transforms = ['Rigid'] * 2 + fmm2b0.inputs.transform_parameters = [(1.0, )] * 2 + fmm2b0.inputs.number_of_iterations = [[50], [20]] + fmm2b0.inputs.dimension = 3 + fmm2b0.inputs.metric = ['Mattes', 'Mattes'] + fmm2b0.inputs.metric_weight = [1.0] * 2 + fmm2b0.inputs.radius_or_number_of_bins = [64, 64] + fmm2b0.inputs.sampling_strategy = ['Regular', 'Random'] + fmm2b0.inputs.sampling_percentage = [None, 0.2] + fmm2b0.inputs.convergence_threshold = [1.e-5, 1.e-8] + fmm2b0.inputs.convergence_window_size = [20, 10] + fmm2b0.inputs.smoothing_sigmas = [[6.0], [2.0]] + fmm2b0.inputs.sigma_units = ['vox'] * 2 + fmm2b0.inputs.shrink_factors = [[6], [1]] # ,[1] ] + fmm2b0.inputs.use_estimate_learning_rate_once = [True] * 2 + fmm2b0.inputs.use_histogram_matching = [True] * 2 + fmm2b0.inputs.initial_moving_transform_com = 0 + 
fmm2b0.inputs.collapse_output_transforms = True + fmm2b0.inputs.winsorize_upper_quantile = 0.995 + + applyxfm = pe.Node( + ants.ApplyTransforms(dimension=3, interpolation=interp), + name='FMp_to_B0') + + pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='PreliminaryFugue') + demean = pe.Node( + niu.Function( + input_names=['in_file', 'in_mask'], + output_names=['out_file'], + function=demean_image), + name='DemeanFmap') + + cleanup = cleanup_edge_pipeline() + + addvol = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=add_empty_vol), + name='AddEmptyVol') + + vsm = pe.Node( + fsl.FUGUE(save_shift=True, **fugue_params), name="ComputeVSM") + + split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') + merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') + unwarp = pe.MapNode( + fsl.FUGUE(icorr=True, forward_warping=False), + iterfield=['in_file'], + name='UnwarpDWIs') + thres = pe.MapNode( + fsl.Threshold(thresh=0.0), + iterfield=['in_file'], + name='RemoveNegative') + vsm2dfm = vsm2warp() + vsm2dfm.inputs.inputnode.scaling = 1.0 + + wf = pe.Workflow(name=name) + wf.connect([ + (inputnode, r_params, + [('settings', 'in_file')]), (r_params, eff_echo, [ + ('echospacing', 'echospacing'), ('acc_factor', 'acc_factor') + ]), (inputnode, pha2rads, + [('bmap_pha', 'in_file')]), (inputnode, firstmag, + [('bmap_mag', 'in_file')]), + (inputnode, baseline, + [('in_file', 'in_file'), ('in_ref', 'index')]), (firstmag, n4, [ + ('roi_file', 'input_image') + ]), (n4, bet, [('output_image', 'in_file')]), (bet, dilate, [ + ('mask_file', 'in_file') + ]), (pha2rads, prelude, [('out_file', 'phase_file')]), (n4, prelude, [ + ('output_image', 'magnitude_file') + ]), (dilate, prelude, [('out_file', 'mask_file')]), + (r_params, rad2rsec, [('delta_te', 'delta_te')]), (prelude, rad2rsec, [ + ('unwrapped_phase_file', 'in_file') + ]), (baseline, fmm2b0, [('out_file', 'fixed_image')]), (n4, fmm2b0, [ + ('output_image', 'moving_image') + ]), (inputnode, fmm2b0, + [('in_mask', 'fixed_image_mask')]), (dilate, fmm2b0, [ + ('out_file', 'moving_image_mask') + ]), (baseline, applyxfm, [('out_file', 'reference_image')]), + (rad2rsec, applyxfm, + [('out_file', 'input_image')]), (fmm2b0, applyxfm, [ + ('forward_transforms', 'transforms'), ('forward_invert_flags', + 'invert_transform_flags') + ]), (applyxfm, pre_fugue, + [('output_image', 'fmap_in_file')]), (inputnode, pre_fugue, [ + ('in_mask', 'mask_file') + ]), (pre_fugue, demean, + [('fmap_out_file', 'in_file')]), (inputnode, demean, [ + ('in_mask', 'in_mask') + ]), (demean, cleanup, [('out_file', 'inputnode.in_file')]), + (inputnode, cleanup, + [('in_mask', 'inputnode.in_mask')]), (cleanup, addvol, [ + ('outputnode.out_file', 'in_file') + ]), (inputnode, vsm, [('in_mask', 'mask_file')]), (addvol, vsm, [ + ('out_file', 'fmap_in_file') + ]), (r_params, vsm, [('delta_te', 'asym_se_time')]), (eff_echo, vsm, [ + ('eff_echo', 'dwell_time') + ]), (inputnode, split, [('in_file', 'in_file')]), (split, unwarp, [ + ('out_files', 'in_file') + ]), (vsm, unwarp, + [('shift_out_file', 'shift_in_file')]), (r_params, unwarp, [ + (('enc_dir', _fix_enc_dir), 'unwarp_direction') + ]), (unwarp, thres, [('unwarped_file', 'in_file')]), + (thres, merge, [('out_file', 'in_files')]), (r_params, vsm2dfm, [ + (('enc_dir', _fix_enc_dir), 'inputnode.enc_dir') + ]), (merge, vsm2dfm, + [('merged_file', 'inputnode.in_ref')]), (vsm, vsm2dfm, [ + ('shift_out_file', 'inputnode.in_vsm') + ]), (merge, outputnode, + [('merged_file', 'out_file')]), (vsm, 
outputnode, [
+            ('shift_out_file', 'out_vsm')
+        ]), (vsm2dfm, outputnode, [('outputnode.out_warp',
+                                    'out_warp')])
+    ])
+    return wf
+
+
+def sdc_peb(name='peb_correction',
+            epi_params=dict(
+                echospacing=0.77e-3, acc_factor=3, enc_dir='y-', epi_factor=1),
+            altepi_params=dict(
+                echospacing=0.77e-3, acc_factor=3, enc_dir='y', epi_factor=1)):
+    """
+    SDC stands for susceptibility distortion correction. PEB stands for
+    phase-encoding-based.
+
+    The phase-encoding-based (PEB) method implements SDC by acquiring
+    diffusion images with two different encoding directions [Andersson2003]_.
+    The most typical case is acquiring with opposed phase-gradient blips
+    (e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*)
+    as in [Chiou2000]_, but it is also possible to use orthogonal
+    configurations [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*,
+    or equivalently *-y* and *x*).
+    This workflow uses the implementation of FSL
+    (`TOPUP `_).
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb
+    >>> peb = sdc_peb()
+    >>> peb.inputs.inputnode.in_file = 'epi.nii'
+    >>> peb.inputs.inputnode.alt_file = 'epi_rev.nii'
+    >>> peb.inputs.inputnode.in_bval = 'diffusion.bval'
+    >>> peb.inputs.inputnode.in_mask = 'mask.nii'
+    >>> peb.run() # doctest: +SKIP
+
+    .. admonition:: References
+
+      .. [Andersson2003] Andersson JL et al., `How to correct susceptibility
+        distortions in spin-echo echo-planar images: application to diffusion
+        tensor imaging `_.
+        Neuroimage. 2003 Oct;20(2):870-88. doi: 10.1016/S1053-8119(03)00336-7
+
+      .. [Cordes2000] Cordes D et al., Geometric distortion correction in EPI
+        using two images with orthogonal phase-encoding directions, in Proc.
+        ISMRM (8), p.1712, Denver, US, 2000.
+
+      .. [Chiou2000] Chiou JY, and Nalcioglu O, A simple method to correct
+        off-resonance related distortion in echo planar imaging, in Proc.
+        ISMRM (8), p.1712, Denver, US, 2000.
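+
+    .. note:: ``epi_params`` and ``altepi_params`` should be adapted to the
+      actual acquisition; the defaults assume opposed blips along *y*, an
+      echo spacing of 0.77 ms and an in-plane acceleration factor of 3,
+      i.e. the equivalent of::
+
+          peb = sdc_peb(epi_params=dict(echospacing=0.77e-3, acc_factor=3,
+                                        enc_dir='y-', epi_factor=1))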
+ + """ + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['in_file', 'in_bval', 'in_mask', 'alt_file', 'ref_num']), + name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_vsm', 'out_warp']), + name='outputnode') + + b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref') + b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt') + b0_comb = pe.Node(niu.Merge(2), name='b0_list') + b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged') + + topup = pe.Node(fsl.TOPUP(), name='topup') + topup.inputs.encoding_direction = [ + epi_params['enc_dir'], altepi_params['enc_dir'] + ] + + readout = compute_readout(epi_params) + topup.inputs.readout_times = [readout, compute_readout(altepi_params)] + + unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'), name='unwarp') + + # scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'], + # output_names=['factor'], function=_get_zoom), + # name='GetZoom') + # scaling.inputs.enc_dir = epi_params['enc_dir'] + vsm2dfm = vsm2warp() + vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir'] + vsm2dfm.inputs.inputnode.scaling = readout + + wf = pe.Workflow(name=name) + wf.connect([ + (inputnode, b0_ref, [('in_file', 'in_file'), (('ref_num', _checkrnum), + 't_min')]), + (inputnode, b0_alt, [('alt_file', 'in_file'), (('ref_num', _checkrnum), + 't_min')]), + (b0_ref, b0_comb, [('roi_file', 'in1')]), + (b0_alt, b0_comb, [('roi_file', 'in2')]), + (b0_comb, b0_merge, [('out', 'in_files')]), + (b0_merge, topup, [('merged_file', 'in_file')]), + (topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'), + ('out_movpar', 'in_topup_movpar'), + ('out_enc_file', 'encoding_file')]), + (inputnode, unwarp, [('in_file', 'in_files')]), + (unwarp, outputnode, [('out_corrected', 'out_file')]), + # (b0_ref, scaling, [('roi_file', 'in_file')]), + # (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]), + (b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]), + (topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]), + (topup, outputnode, [('out_field', 'out_vsm')]), + (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')]) + ]) + return wf + + +def remove_bias(name='bias_correct'): + """ + This workflow estimates a single multiplicative bias field from the + averaged *b0* image, as suggested in [Jeurissen2014]_. + + .. admonition:: References + + .. [Jeurissen2014] Jeurissen B. et al., `Multi-tissue constrained + spherical deconvolution for improved analysis of multi-shell diffusion + MRI data `_. + NeuroImage (2014). 
doi: 10.1016/j.neuroimage.2014.07.061 + + + Example + ------- + + >>> from nipype.workflows.dmri.fsl.artifacts import remove_bias + >>> bias = remove_bias() + >>> bias.inputs.inputnode.in_file = 'epi.nii' + >>> bias.inputs.inputnode.in_bval = 'diffusion.bval' + >>> bias.inputs.inputnode.in_mask = 'mask.nii' + >>> bias.run() # doctest: +SKIP + + """ + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_file', 'in_bval', 'in_mask']), + name='inputnode') + + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file']), name='outputnode') + + avg_b0 = pe.Node( + niu.Function( + input_names=['in_dwi', 'in_bval'], + output_names=['out_file'], + function=b0_average), + name='b0_avg') + n4 = pe.Node( + ants.N4BiasFieldCorrection( + dimension=3, save_bias=True, bspline_fitting_distance=600), + name='Bias_b0') + split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') + mult = pe.MapNode( + fsl.MultiImageMaths(op_string='-div %s'), + iterfield=['in_file'], + name='RemoveBiasOfDWIs') + thres = pe.MapNode( + fsl.Threshold(thresh=0.0), + iterfield=['in_file'], + name='RemoveNegative') + merge = pe.Node(fsl.utils.Merge(dimension='t'), name='MergeDWIs') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, avg_b0, [ + ('in_file', 'in_dwi'), ('in_bval', 'in_bval') + ]), (avg_b0, n4, [('out_file', 'input_image')]), (inputnode, n4, [ + ('in_mask', 'mask_image') + ]), (inputnode, split, [('in_file', 'in_file')]), (n4, mult, [ + ('bias_image', 'operand_files') + ]), (split, mult, [('out_files', 'in_file')]), (mult, thres, + [('out_file', 'in_file')]), + (thres, merge, [('out_file', 'in_files')]), + (merge, outputnode, [('merged_file', 'out_file')])]) + return wf + + +def _eff_t_echo(echospacing, acc_factor): + eff_echo = echospacing / (1.0 * acc_factor) + return eff_echo + + +def _fix_enc_dir(enc_dir): + enc_dir = enc_dir.lower() + if enc_dir == 'lr': + return 'x-' + if enc_dir == 'rl': + return 'x' + if enc_dir == 'ap': + return 'y-' + if enc_dir == 'pa': + return 'y' + return enc_dir + + +def _checkrnum(ref_num): + from nipype.interfaces.base import isdefined + if (ref_num is None) or not isdefined(ref_num): + return 0 + return ref_num + + +def _nonb0(in_bval): + import numpy as np + bvals = np.loadtxt(in_bval) + return np.where(bvals != 0)[0].tolist() + + +def _xfm_jacobian(in_xfm): + import numpy as np + from math import fabs + return [fabs(np.linalg.det(np.loadtxt(xfm))) for xfm in in_xfm] + + +def _get_zoom(in_file, enc_dir): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + + zooms = nb.load(in_file, mmap=NUMPY_MMAP).header.get_zooms() + + if 'y' in enc_dir: + return zooms[1] + elif 'x' in enc_dir: + return zooms[0] + elif 'z' in enc_dir: + return zooms[2] + else: + raise ValueError('Wrong encoding direction string') diff --git a/nipype/workflows/dmri/fsl/dti.py b/nipype/workflows/dmri/fsl/dti.py new file mode 100644 index 0000000000..ee7e48dd5a --- /dev/null +++ b/nipype/workflows/dmri/fsl/dti.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 + +from __future__ import absolute_import + +from ....pipeline import engine as pe +from ....interfaces import utility as niu +from ....interfaces import fsl +from ....algorithms import misc + +# backwards compatibility +from .epi import create_eddy_correct_pipeline + + +def transpose(samples_over_fibres): + import numpy as np + a = np.array(samples_over_fibres) + return np.squeeze(a.T).tolist() + + +def create_bedpostx_pipeline( + name='bedpostx', + params={ + 'n_fibres': 2, + 'fudge': 1, + 'burn_in': 1000, + 
'n_jumps': 1250, + 'sample_every': 25, + 'model': 2, + 'cnlinear': True + }): + """ + Creates a pipeline that does the same as bedpostx script from FSL - + calculates diffusion model parameters (distributions not MLE) voxelwise for + the whole volume (by splitting it slicewise). + + Example + ------- + + >>> from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline + >>> params = dict(n_fibres = 2, fudge = 1, burn_in = 1000, + ... n_jumps = 1250, sample_every = 25) + >>> bpwf = create_bedpostx_pipeline('nipype_bedpostx', params) + >>> bpwf.inputs.inputnode.dwi = 'diffusion.nii' + >>> bpwf.inputs.inputnode.mask = 'mask.nii' + >>> bpwf.inputs.inputnode.bvecs = 'bvecs' + >>> bpwf.inputs.inputnode.bvals = 'bvals' + >>> bpwf.run() # doctest: +SKIP + + Inputs:: + + inputnode.dwi + inputnode.mask + inputnode.bvecs + inputnode.bvals + + Outputs:: + + outputnode wraps all XFibres outputs + + """ + + inputnode = pe.Node( + niu.IdentityInterface(fields=['dwi', 'mask', 'bvecs', 'bvals']), + name='inputnode') + + slice_dwi = pe.Node(fsl.Split(dimension='z'), name='slice_dwi') + slice_msk = pe.Node(fsl.Split(dimension='z'), name='slice_msk') + mask_dwi = pe.MapNode( + fsl.ImageMaths(op_string='-mas'), + iterfield=['in_file', 'in_file2'], + name='mask_dwi') + + xfib_if = fsl.XFibres(**params) + xfibres = pe.MapNode(xfib_if, name='xfibres', iterfield=['dwi', 'mask']) + + make_dyads = pe.MapNode( + fsl.MakeDyadicVectors(), + name="make_dyads", + iterfield=['theta_vol', 'phi_vol']) + out_fields = [ + 'dyads', 'dyads_disp', 'thsamples', 'phsamples', 'fsamples', + 'mean_thsamples', 'mean_phsamples', 'mean_fsamples' + ] + + outputnode = pe.Node( + niu.IdentityInterface(fields=out_fields), name='outputnode') + + wf = pe.Workflow(name=name) + wf.connect( + [(inputnode, slice_dwi, [('dwi', 'in_file')]), (inputnode, slice_msk, + [('mask', 'in_file')]), + (slice_dwi, mask_dwi, + [('out_files', 'in_file')]), (slice_msk, mask_dwi, [('out_files', + 'in_file2')]), + (slice_dwi, xfibres, + [('out_files', 'dwi')]), (mask_dwi, xfibres, [('out_file', 'mask')]), + (inputnode, xfibres, [('bvecs', 'bvecs'), + ('bvals', 'bvals')]), (inputnode, make_dyads, + [('mask', 'mask')])]) + + mms = {} + for k in ['thsamples', 'phsamples', 'fsamples']: + mms[k] = merge_and_mean(k) + wf.connect([(xfibres, mms[k], [(k, 'inputnode.in_files')]), + (mms[k], outputnode, [('outputnode.merged', k), + ('outputnode.mean', + 'mean_%s' % k)])]) + + # m_mdsamples = pe.Node(fsl.Merge(dimension="z"), + # name="merge_mean_dsamples") + wf.connect([ + (mms['thsamples'], make_dyads, [('outputnode.merged', 'theta_vol')]), + (mms['phsamples'], make_dyads, [('outputnode.merged', 'phi_vol')]), + # (xfibres, m_mdsamples, [('mean_dsamples', 'in_files')]), + (make_dyads, outputnode, [('dyads', 'dyads'), ('dispersion', + 'dyads_disp')]) + ]) + return wf + + +def merge_and_mean(name='mm'): + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_files']), name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['merged', 'mean']), name='outputnode') + merge = pe.MapNode( + fsl.Merge(dimension='z'), name='Merge', iterfield=['in_files']) + mean = pe.MapNode( + fsl.ImageMaths(op_string='-Tmean'), name='Mean', iterfield=['in_file']) + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, merge, [(('in_files', transpose), 'in_files')]), + (merge, mean, [('merged_file', 'in_file')]), + (merge, outputnode, + [('merged_file', 'merged')]), (mean, outputnode, [('out_file', + 'mean')])]) + return wf + + +def bedpostx_parallel( + 
name='bedpostx_parallel',
+        compute_all_outputs=True,
+        params={
+            'n_fibres': 2,
+            'fudge': 1,
+            'burn_in': 1000,
+            'n_jumps': 1250,
+            'sample_every': 25,
+            'model': 1,
+            'cnlinear': True
+        }):
+    """
+    Does the same as :func:`.create_bedpostx_pipeline` by splitting
+    the input dMRI into small ROIs that are better suited for parallel
+    processing.
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.fsl.dti import bedpostx_parallel
+    >>> params = dict(n_fibres = 2, fudge = 1, burn_in = 1000,
+    ...               n_jumps = 1250, sample_every = 25)
+    >>> bpwf = bedpostx_parallel('nipype_bedpostx_parallel', params=params)
+    >>> bpwf.inputs.inputnode.dwi = 'diffusion.nii'
+    >>> bpwf.inputs.inputnode.mask = 'mask.nii'
+    >>> bpwf.inputs.inputnode.bvecs = 'bvecs'
+    >>> bpwf.inputs.inputnode.bvals = 'bvals'
+    >>> bpwf.run(plugin='CondorDAGMan') # doctest: +SKIP
+
+    Inputs::
+
+        inputnode.dwi
+        inputnode.mask
+        inputnode.bvecs
+        inputnode.bvals
+
+    Outputs::
+
+        outputnode wraps all XFibres outputs
+
+    """
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=['dwi', 'mask', 'bvecs', 'bvals']),
+        name='inputnode')
+    slice_dwi = pe.Node(misc.SplitROIs(roi_size=(5, 5, 1)), name='slice_dwi')
+    if params is not None:
+        xfib_if = fsl.XFibres5(**params)
+    else:
+        xfib_if = fsl.XFibres5()
+    xfibres = pe.MapNode(xfib_if, name='xfibres', iterfield=['dwi', 'mask'])
+
+    mrg_dyads = pe.MapNode(
+        misc.MergeROIs(), name='Merge_dyads', iterfield=['in_files'])
+    mrg_fsamp = pe.MapNode(
+        misc.MergeROIs(), name='Merge_mean_fsamples', iterfield=['in_files'])
+    out_fields = ['dyads', 'fsamples']
+
+    if compute_all_outputs:
+        out_fields += [
+            'dyads_disp', 'thsamples', 'phsamples', 'mean_fsamples',
+            'mean_thsamples', 'mean_phsamples', 'merged_fsamples',
+            'merged_thsamples', 'merged_phsamples'
+        ]
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=out_fields), name='outputnode')
+
+    wf = pe.Workflow(name=name)
+    wf.connect(
+        [(inputnode, slice_dwi, [('dwi', 'in_file'), ('mask', 'in_mask')]),
+         (slice_dwi, xfibres, [('out_files', 'dwi'), ('out_masks', 'mask')]),
+         (inputnode, xfibres,
+          [('bvecs', 'bvecs'), ('bvals', 'bvals')]), (inputnode, mrg_dyads, [
+              ('mask', 'in_reference')
+          ]), (xfibres, mrg_dyads,
+               [(('dyads', transpose), 'in_files')]), (slice_dwi, mrg_dyads, [
+                   ('out_index', 'in_index')
+               ]), (inputnode, mrg_fsamp,
+                    [('mask', 'in_reference')]), (xfibres, mrg_fsamp, [
+                        (('mean_fsamples', transpose), 'in_files')
+                    ]), (slice_dwi, mrg_fsamp, [('out_index', 'in_index')]),
+         (mrg_dyads, outputnode,
+          [('merged_file', 'dyads')]), (mrg_fsamp, outputnode,
+                                        [('merged_file', 'fsamples')])])
+
+    if compute_all_outputs:
+        make_dyads = pe.MapNode(
+            fsl.MakeDyadicVectors(),
+            name="Make_dyads",
+            iterfield=['theta_vol', 'phi_vol'])
+
+        wf.connect([(inputnode, make_dyads, [('mask', 'mask')])])
+        mms = {}
+        for k in ['thsamples', 'phsamples', 'fsamples']:
+            mms[k] = merge_and_mean_parallel(k)
+            wf.connect(
+                [(slice_dwi, mms[k], [('out_index', 'inputnode.in_index')]),
+                 (inputnode, mms[k], [('mask', 'inputnode.in_reference')]),
+                 (xfibres, mms[k], [(k, 'inputnode.in_files')]),
+                 (mms[k], outputnode, [('outputnode.merged', 'merged_%s' % k),
+                                       ('outputnode.mean', 'mean_%s' % k)])])
+
+        # m_mdsamples = pe.Node(fsl.Merge(dimension="z"),
+        #                       name="merge_mean_dsamples")
+        wf.connect([
+            (mms['thsamples'], make_dyads, [('outputnode.merged',
+                                             'theta_vol')]),
+            (mms['phsamples'], make_dyads, [('outputnode.merged', 'phi_vol')]),
+            # (xfibres, m_mdsamples, [('mean_dsamples', 'in_files')]),
+            (make_dyads, outputnode,
[('dispersion', 'dyads_disp')]) + ]) + + return wf + + +def merge_and_mean_parallel(name='mm'): + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_files', 'in_reference', 'in_index']), + name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['merged', 'mean']), name='outputnode') + merge = pe.MapNode(misc.MergeROIs(), name='Merge', iterfield=['in_files']) + mean = pe.MapNode( + fsl.ImageMaths(op_string='-Tmean'), name='Mean', iterfield=['in_file']) + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, merge, + [(('in_files', transpose), 'in_files'), + ('in_reference', 'in_reference'), ('in_index', 'in_index')]), + (merge, mean, [('merged_file', 'in_file')]), + (merge, outputnode, + [('merged_file', 'merged')]), (mean, outputnode, [('out_file', + 'mean')])]) + return wf diff --git a/nipype/workflows/dmri/fsl/epi.py b/nipype/workflows/dmri/fsl/epi.py new file mode 100644 index 0000000000..3bd88a99b7 --- /dev/null +++ b/nipype/workflows/dmri/fsl/epi.py @@ -0,0 +1,885 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import open, str + +import warnings + +from ....pipeline import engine as pe +from ....interfaces import utility as niu +from ....interfaces import fsl as fsl + + +def create_dmri_preprocessing(name='dMRI_preprocessing', + use_fieldmap=True, + fieldmap_registration=False): + """ + Creates a workflow that chains the necessary pipelines to + correct for motion, eddy currents, and, if selected, susceptibility + artifacts in EPI dMRI sequences. + + .. deprecated:: 0.9.3 + Use :func:`nipype.workflows.dmri.preprocess.epi.all_fmb_pipeline` or + :func:`nipype.workflows.dmri.preprocess.epi.all_peb_pipeline` instead. + + + .. warning:: This workflow rotates the b-vectors, so please be + advised that not all the dicom converters ensure the consistency between the resulting + nifti orientation and the b matrix table (e.g. dcm2nii checks it). + + + Example + ------- + + >>> nipype_dmri_preprocess = create_dmri_preprocessing('nipype_dmri_prep') + >>> nipype_dmri_preprocess.inputs.inputnode.in_file = 'diffusion.nii' + >>> nipype_dmri_preprocess.inputs.inputnode.in_bvec = 'diffusion.bvec' + >>> nipype_dmri_preprocess.inputs.inputnode.ref_num = 0 + >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_mag = 'magnitude.nii' + >>> nipype_dmri_preprocess.inputs.inputnode.fieldmap_pha = 'phase.nii' + >>> nipype_dmri_preprocess.inputs.inputnode.te_diff = 2.46 + >>> nipype_dmri_preprocess.inputs.inputnode.epi_echospacing = 0.77 + >>> nipype_dmri_preprocess.inputs.inputnode.epi_rev_encoding = False + >>> nipype_dmri_preprocess.inputs.inputnode.pi_accel_factor = True + >>> nipype_dmri_preprocess.run() # doctest: +SKIP + + + Inputs:: + + inputnode.in_file - The diffusion data + inputnode.in_bvec - The b-matrix file, in FSL format and consistent with the in_file orientation + inputnode.ref_num - The reference volume (a b=0 volume in dMRI) + inputnode.fieldmap_mag - The magnitude of the fieldmap + inputnode.fieldmap_pha - The phase difference of the fieldmap + inputnode.te_diff - TE increment used (in msec.) on the fieldmap acquisition (generally 2.46ms for 3T scanners) + inputnode.epi_echospacing - The EPI EchoSpacing parameter (in msec.) + inputnode.epi_rev_encoding - True if reverse encoding was used (generally False) + inputnode.pi_accel_factor - Parallel imaging factor (aka GRAPPA acceleration factor) + inputnode.vsm_sigma - Sigma (in mm.) 
of the Gaussian kernel used for in-slice smoothing of the deformation field (voxel shift map, vsm)
+
+
+    Outputs::
+
+        outputnode.dmri_corrected
+        outputnode.bvec_rotated
+
+
+    Optional arguments::
+
+        use_fieldmap - True if there are fieldmap files that should be used (default True)
+        fieldmap_registration - True if registration to fieldmap should be performed (default False)
+
+
+    """
+
+    warnings.warn(
+        ('This workflow is deprecated from v.1.0.0, use one of the available '
+         'nipype.workflows.dmri.preprocess.epi.all_* workflows instead'),
+        DeprecationWarning)
+
+    pipeline = pe.Workflow(name=name)
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=[
+            'in_file', 'in_bvec', 'ref_num', 'fieldmap_mag', 'fieldmap_pha',
+            'te_diff', 'epi_echospacing', 'epi_rev_encoding',
+            'pi_accel_factor', 'vsm_sigma'
+        ]),
+        name='inputnode')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['dmri_corrected', 'bvec_rotated']),
+        name='outputnode')
+
+    motion = create_motion_correct_pipeline()
+    eddy = create_eddy_correct_pipeline()
+
+    if use_fieldmap:  # we have a fieldmap, so let's use it (yay!)
+        susceptibility = create_epidewarp_pipeline(
+            fieldmap_registration=fieldmap_registration)
+
+        pipeline.connect(
+            [(inputnode, motion, [('in_file', 'inputnode.in_file'),
+                                  ('in_bvec', 'inputnode.in_bvec'),
+                                  ('ref_num', 'inputnode.ref_num')]),
+             (inputnode, eddy,
+              [('ref_num', 'inputnode.ref_num')]), (motion, eddy, [
+                  ('outputnode.motion_corrected', 'inputnode.in_file')
+              ]), (eddy, susceptibility,
+                   [('outputnode.eddy_corrected', 'inputnode.in_file')]),
+             (inputnode, susceptibility,
+              [('ref_num', 'inputnode.ref_num'), ('fieldmap_mag',
+                                                  'inputnode.fieldmap_mag'),
+               ('fieldmap_pha', 'inputnode.fieldmap_pha'),
+               ('te_diff', 'inputnode.te_diff'), ('epi_echospacing',
+                                                  'inputnode.epi_echospacing'),
+               ('epi_rev_encoding',
+                'inputnode.epi_rev_encoding'), ('pi_accel_factor',
+                                                'inputnode.pi_accel_factor'),
+               ('vsm_sigma', 'inputnode.vsm_sigma')]), (motion, outputnode, [
+                   ('outputnode.out_bvec', 'bvec_rotated')
+               ]), (susceptibility, outputnode, [('outputnode.epi_corrected',
+                                                  'dmri_corrected')])])
+    else:  # we don't have a fieldmap, so we just carry on without it :(
+        pipeline.connect([(inputnode, motion, [
+            ('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'),
+            ('ref_num', 'inputnode.ref_num')
+        ]), (inputnode, eddy, [('ref_num', 'inputnode.ref_num')]),
+                          (motion, eddy, [('outputnode.motion_corrected',
+                                           'inputnode.in_file')]),
+                          (motion, outputnode,
+                           [('outputnode.out_bvec',
+                             'bvec_rotated')]), (eddy, outputnode,
+                                                 [('outputnode.eddy_corrected',
+                                                   'dmri_corrected')])])
+
+    return pipeline
+
+
+def create_motion_correct_pipeline(name='motion_correct'):
+    """Creates a pipeline that corrects for motion artifact in dMRI sequences.
+    It takes a series of diffusion weighted images and rigidly co-registers
+    them to one reference image. Finally, the b-matrix is rotated accordingly
+    (Leemans et al. 2009 - http://www.ncbi.nlm.nih.gov/pubmed/19319973),
+    making use of the rotation matrix obtained by FLIRT.
+
+
+    .. deprecated:: 0.9.3
+      Use :func:`nipype.workflows.dmri.preprocess.epi.hmc_pipeline` instead.
+
+
+    .. warning:: This workflow rotates the b-vectors, so please be advised
+      that not all DICOM converters ensure consistency between the resulting
+      NIfTI orientation and the b matrix table (e.g. dcm2nii checks it).
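+
+    .. note:: The rotation applies the upper-left 3x3 submatrix of each
+      FLIRT transform to the corresponding column of the b-vector table
+      (see ``_rotate_bvecs`` at the bottom of this module).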
+ + + Example + ------- + + >>> nipype_motioncorrect = create_motion_correct_pipeline('nipype_motioncorrect') + >>> nipype_motioncorrect.inputs.inputnode.in_file = 'diffusion.nii' + >>> nipype_motioncorrect.inputs.inputnode.in_bvec = 'diffusion.bvec' + >>> nipype_motioncorrect.inputs.inputnode.ref_num = 0 + >>> nipype_motioncorrect.run() # doctest: +SKIP + + Inputs:: + + inputnode.in_file + inputnode.ref_num + inputnode.in_bvec + + Outputs:: + + outputnode.motion_corrected + outputnode.out_bvec + + """ + + warnings.warn( + ('This workflow is deprecated from v.1.0.0, use ' + 'nipype.workflows.dmri.preprocess.epi.hmc_pipeline instead'), + DeprecationWarning) + + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_file', 'ref_num', 'in_bvec']), + name='inputnode') + + pipeline = pe.Workflow(name=name) + + split = pe.Node(fsl.Split(dimension='t'), name='split') + pick_ref = pe.Node(niu.Select(), name='pick_ref') + coregistration = pe.MapNode( + fsl.FLIRT(no_search=True, interp='spline', padding_size=1, dof=6), + name='coregistration', + iterfield=['in_file']) + rotate_bvecs = pe.Node( + niu.Function( + input_names=['in_bvec', 'in_matrix'], + output_names=['out_file'], + function=_rotate_bvecs), + name='rotate_b_matrix') + merge = pe.Node(fsl.Merge(dimension='t'), name='merge') + outputnode = pe.Node( + niu.IdentityInterface(fields=['motion_corrected', 'out_bvec']), + name='outputnode') + + pipeline.connect( + [(inputnode, split, [('in_file', 'in_file')]), + (split, pick_ref, [('out_files', 'inlist')]), (inputnode, pick_ref, [ + ('ref_num', 'index') + ]), (split, coregistration, + [('out_files', 'in_file')]), (inputnode, rotate_bvecs, + [('in_bvec', 'in_bvec')]), + (coregistration, rotate_bvecs, + [('out_matrix_file', 'in_matrix')]), (pick_ref, coregistration, + [('out', 'reference')]), + (coregistration, merge, + [('out_file', 'in_files')]), (merge, outputnode, [ + ('merged_file', 'motion_corrected') + ]), (rotate_bvecs, outputnode, [('out_file', 'out_bvec')])]) + + return pipeline + + +def create_eddy_correct_pipeline(name='eddy_correct'): + """ + + .. deprecated:: 0.9.3 + Use :func:`nipype.workflows.dmri.preprocess.epi.ecc_pipeline` instead. + + + Creates a pipeline that replaces eddy_correct script in FSL. It takes a + series of diffusion weighted images and linearly co-registers them to one + reference image. No rotation of the B-matrix is performed, so this pipeline + should be executed after the motion correction pipeline. 
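+
+    .. note:: FSL itself has since replaced the ``eddy_correct`` script with
+      ``eddy`` (wrapped by :class:`nipype.interfaces.fsl.Eddy` and used in
+      the ``artifacts`` workflows of this package); prefer those for new
+      code.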
+
+    Example
+    -------
+
+    >>> nipype_eddycorrect = create_eddy_correct_pipeline('nipype_eddycorrect')
+    >>> nipype_eddycorrect.inputs.inputnode.in_file = 'diffusion.nii'
+    >>> nipype_eddycorrect.inputs.inputnode.ref_num = 0
+    >>> nipype_eddycorrect.run() # doctest: +SKIP
+
+    Inputs::
+
+        inputnode.in_file
+        inputnode.ref_num
+
+    Outputs::
+
+        outputnode.eddy_corrected
+    """
+
+    warnings.warn(
+        ('This workflow is deprecated from v.1.0.0, use '
+         'nipype.workflows.dmri.preprocess.epi.ecc_pipeline instead'),
+        DeprecationWarning)
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=['in_file', 'ref_num']), name='inputnode')
+
+    pipeline = pe.Workflow(name=name)
+
+    split = pe.Node(fsl.Split(dimension='t'), name='split')
+    pick_ref = pe.Node(niu.Select(), name='pick_ref')
+    coregistration = pe.MapNode(
+        fsl.FLIRT(no_search=True, padding_size=1, interp='trilinear'),
+        name='coregistration',
+        iterfield=['in_file'])
+    merge = pe.Node(fsl.Merge(dimension='t'), name='merge')
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['eddy_corrected']), name='outputnode')
+
+    pipeline.connect([(inputnode, split, [('in_file', 'in_file')]),
+                      (split, pick_ref,
+                       [('out_files', 'inlist')]), (inputnode, pick_ref,
+                                                    [('ref_num', 'index')]),
+                      (split, coregistration,
+                       [('out_files', 'in_file')]), (pick_ref, coregistration,
+                                                     [('out', 'reference')]),
+                      (coregistration, merge,
+                       [('out_file', 'in_files')]), (merge, outputnode,
+                                                     [('merged_file',
+                                                       'eddy_corrected')])])
+    return pipeline
+
+
+def fieldmap_correction(name='fieldmap_correction', nocheck=False):
+    """
+
+    .. deprecated:: 0.9.3
+      Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
+
+
+    Fieldmap-based retrospective correction of EPI images for the susceptibility distortion
+    artifact (Jezzard et al., 1995). Fieldmap images are assumed to be already registered
+    to EPI data, and a brain mask is required.
+
+    Replaces the former workflow, still available as create_epidewarp_pipeline(). The difference
+    with respect to the epidewarp pipeline is that this workflow uses the fsl_prepare_fieldmap
+    tool available as of FSL 5.0.
+
+
+    Example
+    -------
+
+    >>> nipype_epicorrect = fieldmap_correction('nipype_epidewarp')
+    >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii'
+    >>> nipype_epicorrect.inputs.inputnode.in_mask = 'brainmask.nii'
+    >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii'
+    >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii'
+    >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46
+    >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77
+    >>> nipype_epicorrect.inputs.inputnode.encoding_direction = 'y'
+    >>> nipype_epicorrect.run() # doctest: +SKIP
+
+    Inputs::
+
+        inputnode.in_file - The volume acquired with EPI sequence
+        inputnode.in_mask - A brain mask
+        inputnode.fieldmap_pha - The phase difference map from the fieldmapping, registered to in_file
+        inputnode.fieldmap_mag - The magnitude maps (usually 4D, one magnitude per GRE scan)
+                             from the fieldmapping, registered to in_file
+        inputnode.te_diff - Time difference (in msec.) between the two echo times of the
+                             fieldmapping sequence (usually a GRE sequence)
+        inputnode.epi_echospacing - The effective echo spacing (aka dwell time) in msec. of the EPI sequence. If
+                             EPI was acquired with parallel imaging, then the effective echo spacing is
+                             eff_es = es / acc_factor (e.g. 0.77 / 3 ~ 0.26 msec. for a GRAPPA factor of 3).
+        inputnode.encoding_direction - The phase encoding direction in EPI acquisition (default y)
+        inputnode.vsm_sigma - Sigma value of the Gaussian smoothing filter applied to the vsm (voxel shift map)
+
+
+    Outputs::
+
+        outputnode.epi_corrected
+        outputnode.out_vsm
+
+    """
+
+    warnings.warn(('This workflow is deprecated from v.1.0.0, use '
+                   'nipype.workflows.dmri.preprocess.epi.sdc_fmb instead'),
+                  DeprecationWarning)
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=[
+            'in_file', 'in_mask', 'fieldmap_pha', 'fieldmap_mag', 'te_diff',
+            'epi_echospacing', 'vsm_sigma', 'encoding_direction'
+        ]),
+        name='inputnode')
+
+    pipeline = pe.Workflow(name=name)
+
+    # Keep first frame from magnitude
+    select_mag = pe.Node(
+        fsl.utils.ExtractROI(t_size=1, t_min=0), name='select_magnitude')
+
+    # Mask magnitude (it is required by PrepareFieldmap)
+    mask_mag = pe.Node(fsl.maths.ApplyMask(), name='mask_magnitude')
+
+    # Run fsl_prepare_fieldmap
+    fslprep = pe.Node(fsl.PrepareFieldmap(), name='prepare_fieldmap')
+
+    if nocheck:
+        fslprep.inputs.nocheck = True
+
+    # Use FUGUE to generate the voxel shift map (vsm)
+    vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm')
+
+    # VSM demeaning is no longer present in the epi_reg script
+    # vsm_mean = pe.Node(niu.Function(input_names=['in_file', 'mask_file', 'in_unwarped'], output_names=[
+    #     'out_file'], function=_vsm_remove_mean), name='vsm_mean_shift')
+
+    # fugue_epi
+    dwi_split = pe.Node(
+        niu.Function(
+            input_names=['in_file'],
+            output_names=['out_files'],
+            function=_split_dwi),
+        name='dwi_split')
+
+    # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name )
+    dwi_applyxfm = pe.MapNode(
+        fsl.FUGUE(icorr=True, save_shift=False),
+        iterfield=['in_file'],
+        name='dwi_fugue')
+    # Merge back all volumes
+    dwi_merge = pe.Node(fsl.utils.Merge(dimension='t'), name='dwi_merge')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['epi_corrected', 'out_vsm']),
+        name='outputnode')
+
+    pipeline.connect(
+        [(inputnode, select_mag,
+          [('fieldmap_mag', 'in_file')]), (inputnode, fslprep, [
+              ('fieldmap_pha', 'in_phase'), ('te_diff', 'delta_TE')
+          ]), (inputnode, mask_mag,
+               [('in_mask', 'mask_file')]), (select_mag, mask_mag,
+                                             [('roi_file', 'in_file')]),
+         (mask_mag, fslprep, [('out_file', 'in_magnitude')]), (fslprep, vsm, [
+             ('out_fieldmap', 'phasemap_in_file')
+         ]), (inputnode,
+              vsm, [('fieldmap_mag',
+                     'in_file'), ('encoding_direction', 'unwarp_direction'),
+                    (('te_diff', _ms2sec), 'asym_se_time'),
+                    ('vsm_sigma', 'smooth2d'), (('epi_echospacing', _ms2sec),
+                                                'dwell_time')]),
+         (mask_mag, vsm, [('out_file', 'mask_file')]), (inputnode, dwi_split, [
+             ('in_file', 'in_file')
+         ]), (dwi_split, dwi_applyxfm,
+              [('out_files', 'in_file')]), (mask_mag, dwi_applyxfm,
+                                            [('out_file', 'mask_file')]),
+         (vsm, dwi_applyxfm,
+          [('shift_out_file', 'shift_in_file')]), (inputnode, dwi_applyxfm, [
+              ('encoding_direction', 'unwarp_direction')
+          ]), (dwi_applyxfm, dwi_merge,
+               [('unwarped_file', 'in_files')]), (dwi_merge, outputnode, [
+                   ('merged_file', 'epi_corrected')
+               ]), (vsm, outputnode, [('shift_out_file', 'out_vsm')])])
+
+    return pipeline
+
+
+def topup_correction(name='topup_correction'):
+    """
+
+    .. deprecated:: 0.9.3
+      Use :func:`nipype.workflows.dmri.preprocess.epi.sdc_peb` instead.
+
+
+    Corrects for susceptibility distortion of EPI images when one reverse
+    encoding dataset has been acquired.
+
+
+    Example
+    -------
+
+    >>> nipype_epicorrect = topup_correction('nipype_topup')
+    >>> nipype_epicorrect.inputs.inputnode.in_file_dir = 'epi.nii'
+    >>> nipype_epicorrect.inputs.inputnode.in_file_rev = 'epi_rev.nii'
+    >>> nipype_epicorrect.inputs.inputnode.encoding_direction = ['y', 'y-']
+    >>> nipype_epicorrect.inputs.inputnode.ref_num = 0
+    >>> nipype_epicorrect.run() # doctest: +SKIP
+
+
+    Inputs::
+
+        inputnode.in_file_dir - EPI volume acquired in 'forward' phase encoding
+        inputnode.in_file_rev - EPI volume acquired in 'reversed' phase encoding
+        inputnode.encoding_direction - Phase-encoding direction of in_file_dir
+        inputnode.readout_times - Total readout times of the two acquisitions, forwarded to topup
+        inputnode.ref_num - Identifier of the reference volumes (usually B0 volume)
+
+
+    Outputs::
+
+        outputnode.epi_corrected
+
+
+    """
+
+    warnings.warn(('This workflow is deprecated from v.1.0.0, use '
+                   'nipype.workflows.dmri.preprocess.epi.sdc_peb instead'),
+                  DeprecationWarning)
+
+    pipeline = pe.Workflow(name=name)
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=[
+            'in_file_dir', 'in_file_rev', 'encoding_direction',
+            'readout_times', 'ref_num'
+        ]),
+        name='inputnode')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=[
+            'out_fieldcoef', 'out_movpar', 'out_enc_file', 'epi_corrected'
+        ]),
+        name='outputnode')
+
+    b0_dir = pe.Node(fsl.ExtractROI(t_size=1), name='b0_1')
+    b0_rev = pe.Node(fsl.ExtractROI(t_size=1), name='b0_2')
+    combin = pe.Node(niu.Merge(2), name='merge')
+    combin2 = pe.Node(niu.Merge(2), name='merge2')
+    merged = pe.Node(fsl.Merge(dimension='t'), name='b0_comb')
+
+    topup = pe.Node(fsl.TOPUP(), name='topup')
+    applytopup = pe.Node(fsl.ApplyTOPUP(in_index=[1, 2]), name='applytopup')
+
+    pipeline.connect(
+        [(inputnode, b0_dir, [('in_file_dir', 'in_file'), ('ref_num',
+                                                           't_min')]),
+         (inputnode, b0_rev,
+          [('in_file_rev',
+            'in_file'), ('ref_num', 't_min')]), (inputnode, combin2, [
+                ('in_file_dir', 'in1'), ('in_file_rev', 'in2')
+            ]), (b0_dir, combin, [('roi_file', 'in1')]), (b0_rev, combin, [
+                ('roi_file', 'in2')
+            ]), (combin, merged, [('out', 'in_files')]),
+         (merged, topup, [('merged_file', 'in_file')]), (inputnode, topup, [
+             ('encoding_direction', 'encoding_direction'), ('readout_times',
+                                                            'readout_times')
+         ]), (topup, applytopup, [('out_fieldcoef', 'in_topup_fieldcoef'),
+                                  ('out_movpar', 'in_topup_movpar'),
+                                  ('out_enc_file', 'encoding_file')]),
+         (combin2, applytopup, [('out', 'in_files')]), (topup, outputnode, [
+             ('out_fieldcoef', 'out_fieldcoef'), ('out_movpar', 'out_movpar'),
+             ('out_enc_file', 'out_enc_file')
+         ]), (applytopup, outputnode, [('out_corrected', 'epi_corrected')])])
+
+    return pipeline
+
+
+def create_epidewarp_pipeline(name='epidewarp', fieldmap_registration=False):
+    """
+    Replaces the epidewarp.fsl script (http://www.nmr.mgh.harvard.edu/~greve/fbirn/b0/epidewarp.fsl)
+    for susceptibility distortion correction of dMRI & fMRI acquired with EPI sequences and the fieldmap
+    information (Jezzard et al., 1995) using FSL's FUGUE. The registration to the (warped) fieldmap
+    (strictly following the original script) is available using fieldmap_registration=True.
+
+
+    .. warning:: This workflow makes use of ``epidewarp.fsl``, an FSL script
+      that was deprecated long ago. The use of this workflow is not
+      recommended; use
+      :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead.
+ + + Example + ------- + + >>> nipype_epicorrect = create_epidewarp_pipeline('nipype_epidewarp', fieldmap_registration=False) + >>> nipype_epicorrect.inputs.inputnode.in_file = 'diffusion.nii' + >>> nipype_epicorrect.inputs.inputnode.fieldmap_mag = 'magnitude.nii' + >>> nipype_epicorrect.inputs.inputnode.fieldmap_pha = 'phase.nii' + >>> nipype_epicorrect.inputs.inputnode.te_diff = 2.46 + >>> nipype_epicorrect.inputs.inputnode.epi_echospacing = 0.77 + >>> nipype_epicorrect.inputs.inputnode.epi_rev_encoding = False + >>> nipype_epicorrect.inputs.inputnode.ref_num = 0 + >>> nipype_epicorrect.inputs.inputnode.pi_accel_factor = 1.0 + >>> nipype_epicorrect.run() # doctest: +SKIP + + Inputs:: + + inputnode.in_file - The volume acquired with EPI sequence + inputnode.fieldmap_mag - The magnitude of the fieldmap + inputnode.fieldmap_pha - The phase difference of the fieldmap + inputnode.te_diff - Time difference between TE in ms. + inputnode.epi_echospacing - The echo spacing (aka dwell time) in the EPI sequence + inputnode.epi_ph_encoding_dir - The phase encoding direction in EPI acquisition (default y) + inputnode.epi_rev_encoding - True if it is acquired with reverse encoding + inputnode.pi_accel_factor - Acceleration factor used for EPI parallel imaging (GRAPPA) + inputnode.vsm_sigma - Sigma value of the gaussian smoothing filter applied to the vsm (voxel shift map) + inputnode.ref_num - The reference volume (B=0 in dMRI or a central frame in fMRI) + + + Outputs:: + + outputnode.epi_corrected + + + Optional arguments:: + + fieldmap_registration - True if registration to fieldmap should be done (default False) + + """ + + warnings.warn(('This workflow reproduces a deprecated FSL script.'), + DeprecationWarning) + + inputnode = pe.Node( + niu.IdentityInterface(fields=[ + 'in_file', 'fieldmap_mag', 'fieldmap_pha', 'te_diff', + 'epi_echospacing', 'epi_ph_encoding_dir', 'epi_rev_encoding', + 'pi_accel_factor', 'vsm_sigma', 'ref_num', 'unwarp_direction' + ]), + name='inputnode') + + pipeline = pe.Workflow(name=name) + + # Keep first frame from magnitude + select_mag = pe.Node( + fsl.utils.ExtractROI(t_size=1, t_min=0), name='select_magnitude') + + # mask_brain + mask_mag = pe.Node(fsl.BET(mask=True), name='mask_magnitude') + mask_mag_dil = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=_dilate_mask), + name='mask_dilate') + + # Compute dwell time + dwell_time = pe.Node( + niu.Function( + input_names=['dwell_time', 'pi_factor', 'is_reverse_encoding'], + output_names=['dwell_time'], + function=_compute_dwelltime), + name='dwell_time') + + # Normalize phase diff to be [-pi, pi) + norm_pha = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=_prepare_phasediff), + name='normalize_phasediff') + # Execute FSL PRELUDE: prelude -p %s -a %s -o %s -f -v -m %s + prelude = pe.Node(fsl.PRELUDE(process3d=True), name='phase_unwrap') + fill_phase = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_file'], + function=_fill_phase), + name='fill_phasediff') + + # to assure that vsm is same dimension as mag. The input only affects the output dimension. + # The content of the input has no effect on the vsm. 
The de-warped mag volume is + # meaningless and will be thrown away + # fugue -i %s -u %s -p %s --dwell=%s --asym=%s --mask=%s --saveshift=%s % + # ( mag_name, magdw_name, ph_name, esp, tediff, mask_name, vsmmag_name) + vsm = pe.Node(fsl.FUGUE(save_shift=True), name='generate_vsm') + vsm_mean = pe.Node( + niu.Function( + input_names=['in_file', 'mask_file', 'in_unwarped'], + output_names=['out_file'], + function=_vsm_remove_mean), + name='vsm_mean_shift') + + # fugue_epi + dwi_split = pe.Node( + niu.Function( + input_names=['in_file'], + output_names=['out_files'], + function=_split_dwi), + name='dwi_split') + # 'fugue -i %s -u %s --loadshift=%s --mask=%s' % ( vol_name, out_vol_name, vsm_name, mask_name ) + dwi_applyxfm = pe.MapNode( + fsl.FUGUE(icorr=True, save_shift=False), + iterfield=['in_file'], + name='dwi_fugue') + # Merge back all volumes + dwi_merge = pe.Node(fsl.utils.Merge(dimension='t'), name='dwi_merge') + + outputnode = pe.Node( + niu.IdentityInterface(fields=['epi_corrected']), name='outputnode') + + pipeline.connect( + [(inputnode, dwell_time, + [('epi_echospacing', 'dwell_time'), ('pi_accel_factor', 'pi_factor'), + ('epi_rev_encoding', + 'is_reverse_encoding')]), (inputnode, select_mag, [('fieldmap_mag', + 'in_file')]), + (inputnode, norm_pha, [('fieldmap_pha', + 'in_file')]), (select_mag, mask_mag, + [('roi_file', 'in_file')]), + (mask_mag, mask_mag_dil, + [('mask_file', 'in_file')]), (select_mag, prelude, [ + ('roi_file', 'magnitude_file') + ]), (norm_pha, prelude, + [('out_file', 'phase_file')]), (mask_mag_dil, prelude, [ + ('out_file', 'mask_file') + ]), (prelude, fill_phase, + [('unwrapped_phase_file', 'in_file')]), (inputnode, vsm, [ + ('fieldmap_mag', 'in_file') + ]), (fill_phase, vsm, [('out_file', 'phasemap_in_file')]), + (inputnode, vsm, [(('te_diff', _ms2sec), 'asym_se_time'), + ('vsm_sigma', 'smooth2d')]), (dwell_time, vsm, [ + (('dwell_time', _ms2sec), 'dwell_time') + ]), (mask_mag_dil, vsm, [('out_file', + 'mask_file')]), + (mask_mag_dil, vsm_mean, + [('out_file', 'mask_file')]), (vsm, vsm_mean, [ + ('unwarped_file', 'in_unwarped'), ('shift_out_file', 'in_file') + ]), (inputnode, dwi_split, + [('in_file', 'in_file')]), (dwi_split, dwi_applyxfm, [ + ('out_files', 'in_file') + ]), (dwi_applyxfm, dwi_merge, + [('unwarped_file', 'in_files')]), (dwi_merge, outputnode, + [('merged_file', + 'epi_corrected')])]) + + if fieldmap_registration: + """ Register magfw to example epi. There are some parameters here that may need to be tweaked. Should probably strip the mag + Pre-condition: forward warp the mag in order to reg with func. What does mask do here? 
+ """ + # Select reference volume from EPI (B0 in dMRI and a middle frame in + # fMRI) + select_epi = pe.Node(fsl.utils.ExtractROI(t_size=1), name='select_epi') + + # fugue -i %s -w %s --loadshift=%s --mask=%s % ( mag_name, magfw_name, + # vsmmag_name, mask_name ), log ) # Forward Map + vsm_fwd = pe.Node(fsl.FUGUE(forward_warping=True), name='vsm_fwd') + vsm_reg = pe.Node( + fsl.FLIRT( + bins=256, + cost='corratio', + dof=6, + interp='spline', + searchr_x=[-10, 10], + searchr_y=[-10, 10], + searchr_z=[-10, 10]), + name='vsm_registration') + # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( vsmmag_name, ref_epi, vsmmag_name, magfw_mat_out ) + vsm_applyxfm = pe.Node( + fsl.ApplyXfm(interp='spline'), name='vsm_apply_xfm') + # 'flirt -in %s -ref %s -out %s -init %s -applyxfm' % ( mask_name, ref_epi, mask_name, magfw_mat_out ) + msk_applyxfm = pe.Node( + fsl.ApplyXfm(interp='nearestneighbour'), name='msk_apply_xfm') + + pipeline.connect( + [(inputnode, select_epi, + [('in_file', 'in_file'), + ('ref_num', 't_min')]), (select_epi, vsm_reg, [('roi_file', + 'reference')]), + (vsm, vsm_fwd, [('shift_out_file', 'shift_in_file')]), + (mask_mag_dil, vsm_fwd, + [('out_file', 'mask_file')]), (inputnode, vsm_fwd, [ + ('fieldmap_mag', 'in_file') + ]), (vsm_fwd, vsm_reg, + [('warped_file', 'in_file')]), (vsm_reg, msk_applyxfm, [ + ('out_matrix_file', 'in_matrix_file') + ]), (select_epi, msk_applyxfm, [('roi_file', 'reference')]), + (mask_mag_dil, msk_applyxfm, + [('out_file', 'in_file')]), (vsm_reg, vsm_applyxfm, [ + ('out_matrix_file', 'in_matrix_file') + ]), (select_epi, vsm_applyxfm, + [('roi_file', 'reference')]), (vsm_mean, vsm_applyxfm, + [('out_file', 'in_file')]), + (msk_applyxfm, dwi_applyxfm, + [('out_file', 'mask_file')]), (vsm_applyxfm, dwi_applyxfm, + [('out_file', 'shift_in_file')])]) + else: + pipeline.connect( + [(mask_mag_dil, dwi_applyxfm, [('out_file', 'mask_file')]), + (vsm_mean, dwi_applyxfm, [('out_file', 'shift_in_file')])]) + + return pipeline + + +def _rotate_bvecs(in_bvec, in_matrix): + import os + import numpy as np + + name, fext = os.path.splitext(os.path.basename(in_bvec)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_rotated.bvec' % name) + bvecs = np.loadtxt(in_bvec) + new_bvecs = np.zeros( + shape=bvecs.T.shape) # pre-initialise array, 3 col format + + for i, vol_matrix in enumerate(in_matrix[0::]): # start index at 0 + bvec = np.matrix(bvecs[:, i]) + rot = np.matrix(np.loadtxt(vol_matrix)[0:3, 0:3]) + new_bvecs[i] = (np.array( + rot * bvec.T).T)[0] # fill each volume with x,y,z as we go along + np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') + return out_file + + +def _cat_logs(in_files): + import shutil + import os + + name, fext = os.path.splitext(os.path.basename(in_files[0])) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_ecclog.log' % name) + with open(out_file, 'wb') as totallog: + for i, fname in enumerate(in_files): + totallog.write('\n\npreprocessing %d\n' % i) + with open(fname) as inlog: + for line in inlog: + totallog.write(line) + return out_file + + +def _compute_dwelltime(dwell_time=0.68, + pi_factor=1.0, + is_reverse_encoding=False): + dwell_time *= (1.0 / pi_factor) + + if is_reverse_encoding: + dwell_time *= -1.0 + + return dwell_time + + +def _effective_echospacing(dwell_time, pi_factor=1.0): + dwelltime = 1.0e-3 * dwell_time * (1.0 / pi_factor) + return dwelltime + + +def _prepare_phasediff(in_file): + import nibabel as nb + import os + import numpy as 
np + from nipype.utils import NUMPY_MMAP + img = nb.load(in_file, mmap=NUMPY_MMAP) + max_diff = np.max(img.get_data().reshape(-1)) + min_diff = np.min(img.get_data().reshape(-1)) + A = (2.0 * np.pi) / (max_diff - min_diff) + B = np.pi - (A * max_diff) + diff_norm = img.get_data() * A + B + + name, fext = os.path.splitext(os.path.basename(in_file)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_2pi.nii.gz' % name) + nb.save(nb.Nifti1Image(diff_norm, img.affine, img.header), out_file) + return out_file + + +def _dilate_mask(in_file, iterations=4): + import nibabel as nb + import scipy.ndimage as ndimage + import os + from nipype.utils import NUMPY_MMAP + img = nb.load(in_file, mmap=NUMPY_MMAP) + dilated_img = img.__class__( + ndimage.binary_dilation(img.get_data(), iterations=iterations), + img.affine, img.header) + + name, fext = os.path.splitext(os.path.basename(in_file)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_dil.nii.gz' % name) + nb.save(dilated_img, out_file) + return out_file + + +def _fill_phase(in_file): + import nibabel as nb + import os + import numpy as np + from nipype.utils import NUMPY_MMAP + img = nb.load(in_file, mmap=NUMPY_MMAP) + dumb_img = nb.Nifti1Image(np.zeros(img.shape), img.affine, img.header) + out_nii = nb.funcs.concat_images((img, dumb_img)) + name, fext = os.path.splitext(os.path.basename(in_file)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_fill.nii.gz' % name) + nb.save(out_nii, out_file) + return out_file + + +def _vsm_remove_mean(in_file, mask_file, in_unwarped): + import nibabel as nb + import os + import numpy as np + import numpy.ma as ma + from nipype.utils import NUMPY_MMAP + img = nb.load(in_file, mmap=NUMPY_MMAP) + msk = nb.load(mask_file, mmap=NUMPY_MMAP).get_data() + img_data = img.get_data() + img_data[msk == 0] = 0 + vsmmag_masked = ma.masked_values(img_data.reshape(-1), 0.0) + vsmmag_masked = vsmmag_masked - vsmmag_masked.mean() + masked_img = img.__class__( + vsmmag_masked.reshape(img.shape), img.affine, img.header) + name, fext = os.path.splitext(os.path.basename(in_file)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('./%s_demeaned.nii.gz' % name) + nb.save(masked_img, out_file) + return out_file + + +def _ms2sec(val): + return val * 1e-3 + + +def _split_dwi(in_file): + import nibabel as nb + import os + from nipype.utils import NUMPY_MMAP + out_files = [] + frames = nb.funcs.four_to_three(nb.load(in_file, mmap=NUMPY_MMAP)) + name, fext = os.path.splitext(os.path.basename(in_file)) + if fext == '.gz': + name, _ = os.path.splitext(name) + for i, frame in enumerate(frames): + out_file = os.path.abspath('./%s_%03d.nii.gz' % (name, i)) + nb.save(frame, out_file) + out_files.append(out_file) + return out_files diff --git a/nipype/workflows/dmri/fsl/tbss.py b/nipype/workflows/dmri/fsl/tbss.py new file mode 100644 index 0000000000..3aef3e734a --- /dev/null +++ b/nipype/workflows/dmri/fsl/tbss.py @@ -0,0 +1,590 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os +from warnings import warn + +from ....pipeline import engine as pe +from ....interfaces import utility as util +from ....interfaces import fsl as fsl + + +def tbss1_op_string(in_files): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + op_strings = [] + for infile in in_files: + img = nb.load(infile, 
mmap=NUMPY_MMAP) + dimtup = tuple(d - 2 for d in img.shape) + dimtup = dimtup[0:3] + op_str = '-min 1 -ero -roi 1 %d 1 %d 1 %d 0 1' % dimtup + op_strings.append(op_str) + return op_strings + + +def create_tbss_1_preproc(name='tbss_1_preproc'): + """Preprocess FA data for TBSS: erodes a little and zero end slicers and + creates masks(for use in FLIRT & FNIRT from FSL). + A pipeline that does the same as tbss_1_preproc script in FSL + + Example + ------- + + >>> from nipype.workflows.dmri.fsl import tbss + >>> tbss1 = tbss.create_tbss_1_preproc() + >>> tbss1.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] + + Inputs:: + + inputnode.fa_list + + Outputs:: + + outputnode.fa_list + outputnode.mask_list + outputnode.slices + + """ + + # Define the inputnode + inputnode = pe.Node( + interface=util.IdentityInterface(fields=["fa_list"]), name="inputnode") + + # Prep the FA images + prepfa = pe.MapNode( + fsl.ImageMaths(suffix="_prep"), + name="prepfa", + iterfield=['in_file', 'op_string']) + + # Slicer + slicer = pe.MapNode( + fsl.Slicer(all_axial=True, image_width=1280), + name='slicer', + iterfield=['in_file']) + + # Create a mask + getmask1 = pe.MapNode( + fsl.ImageMaths(op_string="-bin", suffix="_mask"), + name="getmask1", + iterfield=['in_file']) + getmask2 = pe.MapNode( + fsl.MultiImageMaths(op_string="-dilD -dilD -sub 1 -abs -add %s"), + name="getmask2", + iterfield=['in_file', 'operand_files']) + + # $FSLDIR/bin/fslmaths FA/${f}_FA_mask -dilD -dilD -sub 1 -abs -add FA/${f}_FA_mask FA/${f}_FA_mask -odt char + # Define the tbss1 workflow + tbss1 = pe.Workflow(name=name) + tbss1.connect([ + (inputnode, prepfa, [("fa_list", "in_file")]), + (inputnode, prepfa, [(("fa_list", tbss1_op_string), "op_string")]), + (prepfa, getmask1, [("out_file", "in_file")]), + (getmask1, getmask2, [("out_file", "in_file"), ("out_file", + "operand_files")]), + (prepfa, slicer, [('out_file', 'in_file')]), + ]) + + # Define the outputnode + outputnode = pe.Node( + interface=util.IdentityInterface( + fields=["fa_list", "mask_list", "slices"]), + name="outputnode") + tbss1.connect([(prepfa, outputnode, [("out_file", "fa_list")]), + (getmask2, outputnode, [("out_file", "mask_list")]), + (slicer, outputnode, [('out_file', 'slices')])]) + return tbss1 + + +def create_tbss_2_reg(name="tbss_2_reg"): + """TBSS nonlinear registration: + A pipeline that does the same as 'tbss_2_reg -t' script in FSL. '-n' option + is not supported at the moment. 
+ + Example + ------- + + >>> from nipype.workflows.dmri.fsl import tbss + >>> tbss2 = create_tbss_2_reg(name="tbss2") + >>> tbss2.inputs.inputnode.target = fsl.Info.standard_image("FMRIB58_FA_1mm.nii.gz") # doctest: +SKIP + >>> tbss2.inputs.inputnode.fa_list = ['s1_FA.nii', 's2_FA.nii', 's3_FA.nii'] + >>> tbss2.inputs.inputnode.mask_list = ['s1_mask.nii', 's2_mask.nii', 's3_mask.nii'] + + Inputs:: + + inputnode.fa_list + inputnode.mask_list + inputnode.target + + Outputs:: + + outputnode.field_list + + """ + + # Define the inputnode + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=["fa_list", "mask_list", "target"]), + name="inputnode") + + # Flirt the FA image to the target + flirt = pe.MapNode( + interface=fsl.FLIRT(dof=12), + iterfield=['in_file', 'in_weight'], + name="flirt") + + fnirt = pe.MapNode( + interface=fsl.FNIRT(fieldcoeff_file=True), + iterfield=['in_file', 'inmask_file', 'affine_file'], + name="fnirt") + # Fnirt the FA image to the target + if fsl.no_fsl(): + warn('NO FSL found') + else: + config_file = os.path.join(os.environ["FSLDIR"], + "etc/flirtsch/FA_2_FMRIB58_1mm.cnf") + fnirt.inputs.config_file = config_file + + # Define the registration workflow + tbss2 = pe.Workflow(name=name) + + # Connect up the registration workflow + tbss2.connect([ + (inputnode, flirt, [("fa_list", "in_file"), ("target", "reference"), + ("mask_list", "in_weight")]), + (inputnode, fnirt, [("fa_list", "in_file"), + ("mask_list", "inmask_file"), ("target", + "ref_file")]), + (flirt, fnirt, [("out_matrix_file", "affine_file")]), + ]) + + # Define the outputnode + outputnode = pe.Node( + interface=util.IdentityInterface(fields=['field_list']), + name="outputnode") + + tbss2.connect([(fnirt, outputnode, [('fieldcoeff_file', 'field_list')])]) + return tbss2 + + +def create_tbss_3_postreg(name='tbss_3_postreg', estimate_skeleton=True): + """Post-registration processing: derive mean_FA and mean_FA_skeleton from + mean of all subjects in study. Target is assumed to be FMRIB58_FA_1mm. + A pipeline that does the same as 'tbss_3_postreg -S' script from FSL + Setting 'estimate_skeleton to False will use precomputed FMRIB58_FA-skeleton_1mm + skeleton (same as 'tbss_3_postreg -T'). 
+ + Example + ------- + + >>> from nipype.workflows.dmri.fsl import tbss + >>> tbss3 = tbss.create_tbss_3_postreg() + >>> tbss3.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii'] + + Inputs:: + + inputnode.field_list + inputnode.fa_list + + Outputs:: + + outputnode.groupmask + outputnode.skeleton_file + outputnode.meanfa_file + outputnode.mergefa_file + + """ + + # Create the inputnode + inputnode = pe.Node( + interface=util.IdentityInterface(fields=['field_list', 'fa_list']), + name='inputnode') + + # Apply the warpfield to the masked FA image + applywarp = pe.MapNode( + interface=fsl.ApplyWarp(), + iterfield=['in_file', 'field_file'], + name="applywarp") + if fsl.no_fsl(): + warn('NO FSL found') + else: + applywarp.inputs.ref_file = fsl.Info.standard_image( + "FMRIB58_FA_1mm.nii.gz") + + # Merge the FA files into a 4D file + mergefa = pe.Node(fsl.Merge(dimension="t"), name="mergefa") + + # Get a group mask + groupmask = pe.Node( + fsl.ImageMaths( + op_string="-max 0 -Tmin -bin", + out_data_type="char", + suffix="_mask"), + name="groupmask") + + maskgroup = pe.Node( + fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") + + tbss3 = pe.Workflow(name=name) + tbss3.connect([ + (inputnode, applywarp, [("fa_list", "in_file"), ("field_list", + "field_file")]), + (applywarp, mergefa, [("out_file", "in_files")]), + (mergefa, groupmask, [("merged_file", "in_file")]), + (mergefa, maskgroup, [("merged_file", "in_file")]), + (groupmask, maskgroup, [("out_file", "in_file2")]), + ]) + + # Create outputnode + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file' + ]), + name='outputnode') + + if estimate_skeleton: + # Take the mean over the fourth dimension + meanfa = pe.Node( + fsl.ImageMaths(op_string="-Tmean", suffix="_mean"), name="meanfa") + + # Use the mean FA volume to generate a tract skeleton + makeskeleton = pe.Node( + fsl.TractSkeleton(skeleton_file=True), name="makeskeleton") + tbss3.connect( + [(maskgroup, meanfa, [("out_file", "in_file")]), + (meanfa, makeskeleton, + [("out_file", "in_file")]), (groupmask, outputnode, + [('out_file', 'groupmask')]), + (makeskeleton, outputnode, + [('skeleton_file', 'skeleton_file')]), (meanfa, outputnode, [ + ('out_file', 'meanfa_file') + ]), (maskgroup, outputnode, [('out_file', 'mergefa_file')])]) + else: + # $FSLDIR/bin/fslmaths $FSLDIR/data/standard/FMRIB58_FA_1mm -mas mean_FA_mask mean_FA + maskstd = pe.Node( + fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskstd") + if fsl.no_fsl(): + warn('NO FSL found') + else: + maskstd.inputs.in_file = fsl.Info.standard_image( + "FMRIB58_FA_1mm.nii.gz") + + # $FSLDIR/bin/fslmaths mean_FA -bin mean_FA_mask + binmaskstd = pe.Node( + fsl.ImageMaths(op_string="-bin"), name="binmaskstd") + + # $FSLDIR/bin/fslmaths all_FA -mas mean_FA_mask all_FA + maskgroup2 = pe.Node( + fsl.ImageMaths(op_string="-mas", suffix="_masked"), + name="maskgroup2") + + tbss3.connect([(groupmask, maskstd, [("out_file", "in_file2")]), + (maskstd, binmaskstd, [("out_file", "in_file")]), + (maskgroup, maskgroup2, [("out_file", "in_file")]), + (binmaskstd, maskgroup2, [("out_file", "in_file2")])]) + + if fsl.no_fsl(): + warn('NO FSL found') + else: + outputnode.inputs.skeleton_file = fsl.Info.standard_image( + "FMRIB58_FA-skeleton_1mm.nii.gz") + tbss3.connect([(binmaskstd, outputnode, [('out_file', 'groupmask')]), + (maskstd, outputnode, [('out_file', 'meanfa_file')]), + (maskgroup2, outputnode, 
[('out_file',
+                                           'mergefa_file')])])
+    return tbss3
+
+
+def tbss4_op_string(skeleton_thresh):
+    op_string = "-thr %.1f -bin" % skeleton_thresh
+    return op_string
+
+
+def create_tbss_4_prestats(name='tbss_4_prestats'):
+    """Post-registration processing: create a skeleton mask using a
+    threshold, and project all FA data onto the skeleton.
+    A pipeline that does the same as the tbss_4_prestats script from FSL.
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.fsl import tbss
+    >>> tbss4 = tbss.create_tbss_4_prestats(name='tbss4')
+    >>> tbss4.inputs.inputnode.skeleton_thresh = 0.2
+
+    Inputs::
+
+        inputnode.skeleton_thresh
+        inputnode.groupmask
+        inputnode.skeleton_file
+        inputnode.meanfa_file
+        inputnode.mergefa_file
+
+    Outputs::
+
+        outputnode.projectedfa_file
+        outputnode.skeleton_mask
+        outputnode.distance_map
+        outputnode.skeleton_file
+
+    """
+    # Create inputnode
+    inputnode = pe.Node(
+        interface=util.IdentityInterface(fields=[
+            'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file',
+            'skeleton_thresh'
+        ]),
+        name='inputnode')
+
+    # Mask the skeleton at the threshold
+    skeletonmask = pe.Node(fsl.ImageMaths(suffix="_mask"), name="skeletonmask")
+
+    # Invert the brainmask then add in the tract skeleton
+    invertmask = pe.Node(
+        fsl.ImageMaths(suffix="_inv", op_string="-mul -1 -add 1 -add"),
+        name="invertmask")
+
+    # Generate a distance map with the tract skeleton
+    distancemap = pe.Node(fsl.DistanceMap(), name="distancemap")
+
+    # Project the FA values onto the skeleton
+    projectfa = pe.Node(
+        fsl.TractSkeleton(
+            project_data=True, skeleton_file=True, use_cingulum_mask=True),
+        name="projectfa")
+
+    # Create tbss4 workflow
+    tbss4 = pe.Workflow(name=name)
+    tbss4.connect([
+        (inputnode, invertmask, [("groupmask", "in_file")]),
+        (inputnode, skeletonmask, [("skeleton_file", "in_file"),
+                                   (('skeleton_thresh', tbss4_op_string),
+                                    'op_string')]),
+        (inputnode, projectfa, [('skeleton_thresh', 'threshold'),
+                                ("meanfa_file", "in_file"),
+                                ("mergefa_file", "data_file")]),
+        (skeletonmask, invertmask, [("out_file", "in_file2")]),
+        (invertmask, distancemap, [("out_file", "in_file")]),
+        (distancemap, projectfa, [("distance_map", "distance_map")]),
+    ])
+
+    # Create the outputnode
+    outputnode = pe.Node(
+        interface=util.IdentityInterface(fields=[
+            'projectedfa_file', 'skeleton_mask', 'distance_map',
+            'skeleton_file'
+        ]),
+        name='outputnode')
+
+    tbss4.connect(
+        [(projectfa, outputnode, [('projected_data', 'projectedfa_file'),
+                                  ('skeleton_file', 'skeleton_file')]),
+         (distancemap, outputnode, [('distance_map', 'distance_map')]),
+         (skeletonmask, outputnode, [('out_file', 'skeleton_mask')])])
+
+    return tbss4
+
+
+def create_tbss_all(name='tbss_all', estimate_skeleton=True):
+    """Create a pipeline that combines create_tbss_* pipelines
+
+    Example
+    -------
+
+    >>> from nipype.workflows.dmri.fsl import tbss
+    >>> tbss_wf = tbss.create_tbss_all('tbss', estimate_skeleton=True)
+    >>> tbss_wf.inputs.inputnode.skeleton_thresh = 0.2
+    >>> tbss_wf.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii']
+
+    >>> tbss_wf = tbss.create_tbss_all('tbss', estimate_skeleton=False)
+    >>> tbss_wf.inputs.inputnode.skeleton_thresh = 0.2
+    >>> tbss_wf.inputs.inputnode.fa_list = ['s1_wrapped_FA.nii', 's2_wrapped_FA.nii', 's3_wrapped_FA.nii']
+
+
+    Inputs::
+
+        inputnode.fa_list
+        inputnode.skeleton_thresh
+
+    Outputs::
+
+        outputnode.meanfa_file
+        outputnode.projectedfa_file
+        outputnode.skeleton_file
+        outputnode.skeleton_mask
+
+    """
+
+    # Define the
inputnode + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=['fa_list', 'skeleton_thresh']), + name='inputnode') + + tbss1 = create_tbss_1_preproc(name='tbss1') + tbss2 = create_tbss_2_reg(name='tbss2') + if fsl.no_fsl(): + warn('NO FSL found') + else: + tbss2.inputs.inputnode.target = fsl.Info.standard_image( + "FMRIB58_FA_1mm.nii.gz") + tbss3 = create_tbss_3_postreg( + name='tbss3', estimate_skeleton=estimate_skeleton) + tbss4 = create_tbss_4_prestats(name='tbss4') + + tbss_all = pe.Workflow(name=name) + tbss_all.connect( + [(inputnode, tbss1, [('fa_list', 'inputnode.fa_list')]), + (inputnode, tbss4, + [('skeleton_thresh', 'inputnode.skeleton_thresh')]), (tbss1, tbss2, [ + ('outputnode.fa_list', 'inputnode.fa_list'), + ('outputnode.mask_list', 'inputnode.mask_list') + ]), (tbss1, tbss3, [('outputnode.fa_list', 'inputnode.fa_list')]), + (tbss2, tbss3, [('outputnode.field_list', 'inputnode.field_list')]), + (tbss3, tbss4, [('outputnode.groupmask', + 'inputnode.groupmask'), ('outputnode.skeleton_file', + 'inputnode.skeleton_file'), + ('outputnode.meanfa_file', 'inputnode.meanfa_file'), + ('outputnode.mergefa_file', + 'inputnode.mergefa_file')])]) + + # Define the outputnode + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'groupmask', 'skeleton_file3', 'meanfa_file', 'mergefa_file', + 'projectedfa_file', 'skeleton_file4', 'skeleton_mask', + 'distance_map' + ]), + name='outputnode') + outputall_node = pe.Node( + interface=util.IdentityInterface(fields=[ + 'fa_list1', 'mask_list1', 'field_list2', 'groupmask3', + 'skeleton_file3', 'meanfa_file3', 'mergefa_file3', + 'projectedfa_file4', 'skeleton_mask4', 'distance_map4' + ]), + name='outputall_node') + + tbss_all.connect([ + (tbss3, outputnode, [ + ('outputnode.meanfa_file', 'meanfa_file'), + ('outputnode.mergefa_file', 'mergefa_file'), + ('outputnode.groupmask', 'groupmask'), + ('outputnode.skeleton_file', 'skeleton_file3'), + ]), + (tbss4, outputnode, [ + ('outputnode.projectedfa_file', 'projectedfa_file'), + ('outputnode.skeleton_file', 'skeleton_file4'), + ('outputnode.skeleton_mask', 'skeleton_mask'), + ('outputnode.distance_map', 'distance_map'), + ]), + (tbss1, outputall_node, [ + ('outputnode.fa_list', 'fa_list1'), + ('outputnode.mask_list', 'mask_list1'), + ]), + (tbss2, outputall_node, [ + ('outputnode.field_list', 'field_list2'), + ]), + (tbss3, outputall_node, [ + ('outputnode.meanfa_file', 'meanfa_file3'), + ('outputnode.mergefa_file', 'mergefa_file3'), + ('outputnode.groupmask', 'groupmask3'), + ('outputnode.skeleton_file', 'skeleton_file3'), + ]), + (tbss4, outputall_node, [ + ('outputnode.projectedfa_file', 'projectedfa_file4'), + ('outputnode.skeleton_mask', 'skeleton_mask4'), + ('outputnode.distance_map', 'distance_map4'), + ]), + ]) + return tbss_all + + +def create_tbss_non_FA(name='tbss_non_FA'): + """ + A pipeline that implement tbss_non_FA in FSL + + Example + ------- + + >>> from nipype.workflows.dmri.fsl import tbss + >>> tbss_MD = tbss.create_tbss_non_FA() + >>> tbss_MD.inputs.inputnode.file_list = [] + >>> tbss_MD.inputs.inputnode.field_list = [] + >>> tbss_MD.inputs.inputnode.skeleton_thresh = 0.2 + >>> tbss_MD.inputs.inputnode.groupmask = './xxx' + >>> tbss_MD.inputs.inputnode.meanfa_file = './xxx' + >>> tbss_MD.inputs.inputnode.distance_map = [] + >>> tbss_MD.inputs.inputnode.all_FA_file = './xxx' + + Inputs:: + + inputnode.file_list + inputnode.field_list + inputnode.skeleton_thresh + inputnode.groupmask + inputnode.meanfa_file + inputnode.distance_map + inputnode.all_FA_file 
+ + Outputs:: + + outputnode.projected_nonFA_file + + """ + + # Define the inputnode + inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'file_list', 'field_list', 'skeleton_thresh', 'groupmask', + 'meanfa_file', 'distance_map', 'all_FA_file' + ]), + name='inputnode') + + # Apply the warpfield to the non FA image + applywarp = pe.MapNode( + interface=fsl.ApplyWarp(), + iterfield=['in_file', 'field_file'], + name="applywarp") + if fsl.no_fsl(): + warn('NO FSL found') + else: + applywarp.inputs.ref_file = fsl.Info.standard_image( + "FMRIB58_FA_1mm.nii.gz") + # Merge the non FA files into a 4D file + merge = pe.Node(fsl.Merge(dimension="t"), name="merge") + # merged_file="all_FA.nii.gz" + maskgroup = pe.Node( + fsl.ImageMaths(op_string="-mas", suffix="_masked"), name="maskgroup") + projectfa = pe.Node( + fsl.TractSkeleton( + project_data=True, + # projected_data = 'test.nii.gz', + use_cingulum_mask=True), + name="projectfa") + + tbss_non_FA = pe.Workflow(name=name) + tbss_non_FA.connect([ + (inputnode, applywarp, [ + ('file_list', 'in_file'), + ('field_list', 'field_file'), + ]), + (applywarp, merge, [("out_file", "in_files")]), + (merge, maskgroup, [("merged_file", "in_file")]), + (inputnode, maskgroup, [('groupmask', 'in_file2')]), + (maskgroup, projectfa, [('out_file', 'alt_data_file')]), + (inputnode, projectfa, + [('skeleton_thresh', 'threshold'), ("meanfa_file", "in_file"), + ("distance_map", "distance_map"), ("all_FA_file", 'data_file')]), + ]) + + # Define the outputnode + outputnode = pe.Node( + interface=util.IdentityInterface(fields=['projected_nonFA_file']), + name='outputnode') + tbss_non_FA.connect([ + (projectfa, outputnode, [ + ('projected_data', 'projected_nonFA_file'), + ]), + ]) + return tbss_non_FA diff --git a/nipype/workflows/dmri/fsl/tests/__init__.py b/nipype/workflows/dmri/fsl/tests/__init__.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/workflows/dmri/fsl/tests/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/workflows/dmri/fsl/tests/test_dti.py b/nipype/workflows/dmri/fsl/tests/test_dti.py new file mode 100644 index 0000000000..23cd8f37d8 --- /dev/null +++ b/nipype/workflows/dmri/fsl/tests/test_dti.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals, print_function, absolute_import +import os + +import pytest +import nipype.interfaces.fsl as fsl +import nipype.interfaces.utility as util +from nipype.interfaces.fsl import no_fsl, no_fsl_course_data + +import nipype.pipeline.engine as pe +import warnings +from nipype.workflows.dmri.fsl.dti import create_bedpostx_pipeline +from nipype.utils.filemanip import simplify_list + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") +def test_create_bedpostx_pipeline(tmpdir): + fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) + + mask_file = os.path.join(fsl_course_dir, + "fdt2/subj1.bedpostX/nodif_brain_mask.nii.gz") + bvecs_file = os.path.join(fsl_course_dir, "fdt2/subj1/bvecs") + bvals_file = os.path.join(fsl_course_dir, "fdt2/subj1/bvals") + dwi_file = os.path.join(fsl_course_dir, "fdt2/subj1/data.nii.gz") + z_min = 62 + z_size = 2 + + slice_mask = pe.Node( + fsl.ExtractROI( + x_min=0, x_size=-1, y_min=0, y_size=-1, z_min=z_min, + z_size=z_size), + name="slice_mask") + slice_mask.inputs.in_file = 
mask_file + + slice_dwi = pe.Node( + fsl.ExtractROI( + x_min=0, x_size=-1, y_min=0, y_size=-1, z_min=z_min, + z_size=z_size), + name="slice_dwi") + slice_dwi.inputs.in_file = dwi_file + + nipype_bedpostx = create_bedpostx_pipeline("nipype_bedpostx") + nipype_bedpostx.inputs.inputnode.bvecs = bvecs_file + nipype_bedpostx.inputs.inputnode.bvals = bvals_file + nipype_bedpostx.inputs.xfibres.n_fibres = 1 + nipype_bedpostx.inputs.xfibres.fudge = 1 + nipype_bedpostx.inputs.xfibres.burn_in = 0 + nipype_bedpostx.inputs.xfibres.n_jumps = 1 + nipype_bedpostx.inputs.xfibres.sample_every = 1 + nipype_bedpostx.inputs.xfibres.cnlinear = True + nipype_bedpostx.inputs.xfibres.seed = 0 + nipype_bedpostx.inputs.xfibres.model = 2 + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + original_bedpostx = pe.Node( + interface=fsl.BEDPOSTX(), name="original_bedpostx") + original_bedpostx.inputs.bvecs = bvecs_file + original_bedpostx.inputs.bvals = bvals_file + original_bedpostx.inputs.environ['FSLPARALLEL'] = "" + original_bedpostx.inputs.n_fibres = 1 + original_bedpostx.inputs.fudge = 1 + original_bedpostx.inputs.burn_in = 0 + original_bedpostx.inputs.n_jumps = 1 + original_bedpostx.inputs.sample_every = 1 + original_bedpostx.inputs.seed = 0 + original_bedpostx.inputs.model = 2 + + test_f1 = pe.Node(util.AssertEqual(), name="mean_f1_test") + + pipeline = pe.Workflow(name="test_bedpostx") + pipeline.base_dir = tmpdir.mkdir("nipype_test_bedpostx_").strpath + + pipeline.connect([ + (slice_mask, original_bedpostx, [("roi_file", "mask")]), + (slice_mask, nipype_bedpostx, [("roi_file", "inputnode.mask")]), + (slice_dwi, original_bedpostx, [("roi_file", "dwi")]), + (slice_dwi, nipype_bedpostx, [("roi_file", "inputnode.dwi")]), + (nipype_bedpostx, test_f1, [(("outputnode.mean_fsamples", + simplify_list), "volume1")]), + (original_bedpostx, test_f1, [("mean_fsamples", "volume2")]), + ]) + + pipeline.run(plugin='Linear') diff --git a/nipype/workflows/dmri/fsl/tests/test_epi.py b/nipype/workflows/dmri/fsl/tests/test_epi.py new file mode 100644 index 0000000000..24400d0747 --- /dev/null +++ b/nipype/workflows/dmri/fsl/tests/test_epi.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +import os + +import pytest +import nipype.workflows.fmri.fsl as fsl_wf +import nipype.interfaces.fsl as fsl +import nipype.interfaces.utility as util +from nipype.interfaces.fsl import no_fsl, no_fsl_course_data + +import nipype.pipeline.engine as pe +import warnings +from nipype.workflows.dmri.fsl.epi import create_eddy_correct_pipeline + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") +def test_create_eddy_correct_pipeline(tmpdir): + fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) + + dwi_file = os.path.join(fsl_course_dir, "fdt1/subj1/data.nii.gz") + + trim_dwi = pe.Node(fsl.ExtractROI(t_min=0, t_size=2), name="trim_dwi") + trim_dwi.inputs.in_file = dwi_file + + nipype_eddycorrect = create_eddy_correct_pipeline("nipype_eddycorrect") + nipype_eddycorrect.inputs.inputnode.ref_num = 0 + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + original_eddycorrect = pe.Node( + interface=fsl.EddyCorrect(), name="original_eddycorrect") + original_eddycorrect.inputs.ref_num = 0 + + test = pe.Node(util.AssertEqual(), name="eddy_corrected_dwi_test") + + pipeline = pe.Workflow(name="test_eddycorrect") + pipeline.base_dir = tmpdir.mkdir("nipype_test_eddycorrect_").strpath + + pipeline.connect([ + (trim_dwi, 
original_eddycorrect, [("roi_file", "in_file")]), + (trim_dwi, nipype_eddycorrect, [("roi_file", "inputnode.in_file")]), + (nipype_eddycorrect, test, [("outputnode.eddy_corrected", "volume1")]), + (original_eddycorrect, test, [("eddy_corrected", "volume2")]), + ]) + + pipeline.run(plugin='Linear') diff --git a/nipype/workflows/dmri/fsl/tests/test_tbss.py b/nipype/workflows/dmri/fsl/tests/test_tbss.py new file mode 100644 index 0000000000..34b49a1f86 --- /dev/null +++ b/nipype/workflows/dmri/fsl/tests/test_tbss.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os +from nipype.interfaces.fsl.base import no_fsl, no_fsl_course_data +import nipype.pipeline.engine as pe +import nipype.interfaces.utility as util +import pytest +import tempfile +import shutil +from subprocess import call +from nipype.workflows.dmri.fsl.tbss import create_tbss_all +import nipype.interfaces.io as nio +from nipype.interfaces import fsl + + +def _tbss_test_helper(estimate_skeleton): + fsl_course_dir = os.path.abspath(os.environ['FSL_COURSE_DATA']) + fsl.FSLCommand.set_default_output_type('NIFTI_GZ') + test_dir = tempfile.mkdtemp(prefix="nipype_test_tbss_") + tbss_orig_dir = os.path.join(test_dir, "tbss_all_original") + os.mkdir(tbss_orig_dir) + old_dir = os.getcwd() + os.chdir(tbss_orig_dir) + + subjects = ['1260', '1549'] + FA_list = [ + os.path.join(fsl_course_dir, 'tbss', subject_id + '.nii.gz') + for subject_id in subjects + ] + for f in FA_list: + shutil.copy(f, os.getcwd()) + + call( + ['tbss_1_preproc'] + + [subject_id + '.nii.gz' for subject_id in subjects], + env=os.environ.update({ + 'FSLOUTPUTTYPE': 'NIFTI_GZ' + })) + tbss1_orig_dir = os.path.join(test_dir, "tbss1_original") + shutil.copytree(tbss_orig_dir, tbss1_orig_dir) + + call( + ['tbss_2_reg', '-T'], + env=os.environ.update({ + 'FSLOUTPUTTYPE': 'NIFTI_GZ' + })) + tbss2_orig_dir = os.path.join(test_dir, "tbss2_original") + shutil.copytree(tbss_orig_dir, tbss2_orig_dir) + + if estimate_skeleton: + call( + ['tbss_3_postreg', '-S'], + env=os.environ.update({ + 'FSLOUTPUTTYPE': 'NIFTI_GZ' + })) + else: + call( + ['tbss_3_postreg', '-T'], + env=os.environ.update({ + 'FSLOUTPUTTYPE': 'NIFTI_GZ' + })) + tbss3_orig_dir = os.path.join(test_dir, "tbss3_original") + shutil.copytree(tbss_orig_dir, tbss3_orig_dir) + + call( + ['tbss_4_prestats', '0.2'], + env=os.environ.update({ + 'FSLOUTPUTTYPE': 'NIFTI_GZ' + })) + tbss4_orig_dir = os.path.join(test_dir, "tbss4_original") + shutil.copytree(tbss_orig_dir, tbss4_orig_dir) + + pipeline = pe.Workflow(name="test_tbss") + pipeline.base_dir = os.path.join(test_dir, "tbss_nipype") + + tbss = create_tbss_all(estimate_skeleton=estimate_skeleton) + tbss.inputs.inputnode.fa_list = FA_list + tbss.inputs.inputnode.skeleton_thresh = 0.2 + + tbss1_original_datasource = pe.Node( + nio.DataGrabber( + outfields=['fa_list', 'mask_list'], sort_filelist=False), + name='tbss1_original_datasource') + tbss1_original_datasource.inputs.base_directory = tbss1_orig_dir + tbss1_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' + tbss1_original_datasource.inputs.template_args = dict( + fa_list=[[subjects, '']], mask_list=[[subjects, '_mask']]) + + tbss1_test_fa = pe.MapNode( + util.AssertEqual(), + name="tbss1_fa_test", + iterfield=['volume1', 'volume2']) + tbss1_test_mask = pe.MapNode( + util.AssertEqual(), + name="tbss1_mask_test", + iterfield=['volume1', 'volume2']) + + pipeline.connect(tbss, 
'tbss1.outputnode.fa_list', tbss1_test_fa, + 'volume1') + pipeline.connect(tbss, 'tbss1.outputnode.mask_list', tbss1_test_mask, + 'volume1') + pipeline.connect(tbss1_original_datasource, 'fa_list', tbss1_test_fa, + 'volume2') + pipeline.connect(tbss1_original_datasource, 'mask_list', tbss1_test_mask, + 'volume2') + tbss2_original_datasource = pe.Node( + nio.DataGrabber(outfields=['field_list'], sort_filelist=False), + name='tbss2_original_datasource') + + tbss2_original_datasource.inputs.base_directory = tbss2_orig_dir + tbss2_original_datasource.inputs.template = 'FA/%s_FA%s.nii.gz' + tbss2_original_datasource.inputs.template_args = dict( + field_list=[[subjects, '_to_target_warp']]) + tbss2_test_field = pe.MapNode( + util.AssertEqual(), + name="tbss2_test_field", + iterfield=['volume1', 'volume2']) + + pipeline.connect(tbss, 'tbss2.outputnode.field_list', tbss2_test_field, + 'volume1') + pipeline.connect(tbss2_original_datasource, 'field_list', tbss2_test_field, + 'volume2') + + tbss3_original_datasource = pe.Node( + nio.DataGrabber( + outfields=[ + 'groupmask', 'skeleton_file', 'meanfa_file', 'mergefa_file' + ], + sort_filelist=False), + name='tbss3_original_datasource') + tbss3_original_datasource.inputs.base_directory = tbss3_orig_dir + tbss3_original_datasource.inputs.template = 'stats/%s.nii.gz' + tbss3_original_datasource.inputs.template_args = dict( + groupmask=[['mean_FA_mask']], + skeleton_file=[['mean_FA_skeleton']], + meanfa_file=[['mean_FA']], + mergefa_file=[['all_FA']]) + + tbss3_test_groupmask = pe.Node( + util.AssertEqual(), name="tbss3_test_groupmask") + tbss3_test_skeleton_file = pe.Node( + util.AssertEqual(), name="tbss3_test_skeleton_file") + tbss3_test_meanfa_file = pe.Node( + util.AssertEqual(), name="tbss3_test_meanfa_file") + tbss3_test_mergefa_file = pe.Node( + util.AssertEqual(), name="tbss3_test_mergefa_file") + + pipeline.connect(tbss, 'tbss3.outputnode.groupmask', tbss3_test_groupmask, + 'volume1') + pipeline.connect(tbss3_original_datasource, 'groupmask', + tbss3_test_groupmask, 'volume2') + pipeline.connect(tbss, 'tbss3.outputnode.skeleton_file', + tbss3_test_skeleton_file, 'volume1') + pipeline.connect(tbss3_original_datasource, 'skeleton_file', + tbss3_test_skeleton_file, 'volume2') + pipeline.connect(tbss, 'tbss3.outputnode.meanfa_file', + tbss3_test_meanfa_file, 'volume1') + pipeline.connect(tbss3_original_datasource, 'meanfa_file', + tbss3_test_meanfa_file, 'volume2') + pipeline.connect(tbss, 'tbss3.outputnode.mergefa_file', + tbss3_test_mergefa_file, 'volume1') + pipeline.connect(tbss3_original_datasource, 'mergefa_file', + tbss3_test_mergefa_file, 'volume2') + + tbss4_original_datasource = pe.Node( + nio.DataGrabber( + outfields=['all_FA_skeletonised', 'mean_FA_skeleton_mask'], + sort_filelist=False), + name='tbss4_original_datasource') + tbss4_original_datasource.inputs.base_directory = tbss4_orig_dir + tbss4_original_datasource.inputs.template = 'stats/%s.nii.gz' + tbss4_original_datasource.inputs.template_args = dict( + all_FA_skeletonised=[['all_FA_skeletonised']], + mean_FA_skeleton_mask=[['mean_FA_skeleton_mask']]) + tbss4_test_all_FA_skeletonised = pe.Node( + util.AssertEqual(), name="tbss4_test_all_FA_skeletonised") + tbss4_test_mean_FA_skeleton_mask = pe.Node( + util.AssertEqual(), name="tbss4_test_mean_FA_skeleton_mask") + + pipeline.connect(tbss, 'tbss4.outputnode.projectedfa_file', + tbss4_test_all_FA_skeletonised, 'volume1') + pipeline.connect(tbss4_original_datasource, 'all_FA_skeletonised', + tbss4_test_all_FA_skeletonised, 'volume2') 
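+    # Each workflow output above is wired into an AssertEqual node together
+    # with the file produced by the original command-line run, so any
+    # numerical difference between the two TBSS implementations fails the
+    # pipeline itself.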
+ pipeline.connect(tbss, 'tbss4.outputnode.skeleton_mask', + tbss4_test_mean_FA_skeleton_mask, 'volume1') + pipeline.connect(tbss4_original_datasource, 'mean_FA_skeleton_mask', + tbss4_test_mean_FA_skeleton_mask, 'volume2') + + pipeline.run(plugin='Linear') + os.chdir(old_dir) + shutil.rmtree(test_dir) + + +# this test is disabled until we figure out what is wrong with TBSS in 5.0.9 + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") +def test_disabled_tbss_est_skeleton(): + _tbss_test_helper(True) + + +# this test is disabled until we figure out what is wrong with TBSS in 5.0.9 + + +@pytest.mark.skipif(no_fsl(), reason="fsl is not installed") +@pytest.mark.skipif(no_fsl_course_data(), reason="fsl data not available") +def test_disabled_tbss_est_skeleton_use_precomputed_skeleton(): + _tbss_test_helper(False) diff --git a/nipype/workflows/dmri/fsl/utils.py b/nipype/workflows/dmri/fsl/utils.py new file mode 100644 index 0000000000..bd53f5cb55 --- /dev/null +++ b/nipype/workflows/dmri/fsl/utils.py @@ -0,0 +1,847 @@ +# -*- coding: utf-8 -*- +# coding: utf-8 +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import zip, next, range, str + +from ....pipeline import engine as pe +from ....interfaces import utility as niu +from ....interfaces import fsl +from ....interfaces import ants + + +def cleanup_edge_pipeline(name='Cleanup'): + """ + Perform some de-spiking filtering to clean up the edge of the fieldmap + (copied from fsl_prepare_fieldmap) + """ + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_file', 'in_mask']), name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file']), name='outputnode') + + fugue = pe.Node( + fsl.FUGUE( + save_fmap=True, despike_2dfilter=True, despike_threshold=2.1), + name='Despike') + erode = pe.Node( + fsl.maths.MathsCommand(nan2zeros=True, args='-kernel 2D -ero'), + name='MskErode') + newmsk = pe.Node( + fsl.MultiImageMaths(op_string='-sub %s -thr 0.5 -bin'), name='NewMask') + applymsk = pe.Node(fsl.ApplyMask(nan2zeros=True), name='ApplyMask') + join = pe.Node(niu.Merge(2), name='Merge') + addedge = pe.Node( + fsl.MultiImageMaths(op_string='-mas %s -add %s'), name='AddEdge') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, fugue, [ + ('in_file', 'fmap_in_file'), ('in_mask', 'mask_file') + ]), (inputnode, erode, [('in_mask', 'in_file')]), (inputnode, newmsk, [ + ('in_mask', 'in_file') + ]), (erode, newmsk, [('out_file', 'operand_files')]), (fugue, applymsk, [ + ('fmap_out_file', 'in_file') + ]), (newmsk, applymsk, + [('out_file', 'mask_file')]), (erode, join, [('out_file', 'in1')]), + (applymsk, join, [('out_file', 'in2')]), (inputnode, addedge, [ + ('in_file', 'in_file') + ]), (join, addedge, [('out', 'operand_files')]), + (addedge, outputnode, [('out_file', 'out_file')])]) + return wf + + +def vsm2warp(name='Shiftmap2Warping'): + """ + Converts a voxel shift map (vsm) to a displacements field (warp). 
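+
+    Example
+    -------
+
+    A minimal usage sketch (the input file names below are hypothetical):
+
+    >>> vsm2dfm = vsm2warp()
+    >>> vsm2dfm.inputs.inputnode.in_vsm = 'vsm.nii.gz'
+    >>> vsm2dfm.inputs.inputnode.in_ref = 'epi.nii.gz'
+    >>> vsm2dfm.inputs.inputnode.scaling = 1.0
+    >>> vsm2dfm.inputs.inputnode.enc_dir = 'y'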
+ """ + inputnode = pe.Node( + niu.IdentityInterface( + fields=['in_vsm', 'in_ref', 'scaling', 'enc_dir']), + name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_warp']), name='outputnode') + fixhdr = pe.Node( + niu.Function( + input_names=['in_file', 'in_file_hdr'], + output_names=['out_file'], + function=copy_hdr), + name='Fix_hdr') + vsm = pe.Node(fsl.maths.BinaryMaths(operation='mul'), name='ScaleField') + vsm2dfm = pe.Node( + fsl.ConvertWarp(relwarp=True, out_relwarp=True), name='vsm2dfm') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, fixhdr, [('in_vsm', 'in_file'), ('in_ref', + 'in_file_hdr')]), + (inputnode, vsm, + [('scaling', 'operand_value')]), (fixhdr, vsm, [('out_file', + 'in_file')]), + (vsm, vsm2dfm, + [('out_file', 'shift_in_file')]), (inputnode, vsm2dfm, [ + ('in_ref', 'reference'), ('enc_dir', 'shift_direction') + ]), (vsm2dfm, outputnode, [('out_file', 'out_warp')])]) + return wf + + +def dwi_flirt(name='DWICoregistration', excl_nodiff=False, flirt_param={}): + """ + Generates a workflow for linear registration of dwi volumes + """ + inputnode = pe.Node( + niu.IdentityInterface( + fields=['reference', 'in_file', 'ref_mask', 'in_xfms', 'in_bval']), + name='inputnode') + + initmat = pe.Node( + niu.Function( + input_names=['in_bval', 'in_xfms', 'excl_nodiff'], + output_names=['init_xfms'], + function=_checkinitxfm), + name='InitXforms') + initmat.inputs.excl_nodiff = excl_nodiff + dilate = pe.Node( + fsl.maths.MathsCommand(nan2zeros=True, args='-kernel sphere 5 -dilM'), + name='MskDilate') + split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') + n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') + enhb0 = pe.Node( + niu.Function( + input_names=['in_file', 'in_mask', 'clip_limit'], + output_names=['out_file'], + function=enhance), + name='B0Equalize') + enhb0.inputs.clip_limit = 0.015 + enhdw = pe.MapNode( + niu.Function( + input_names=['in_file', 'in_mask'], + output_names=['out_file'], + function=enhance), + name='DWEqualize', + iterfield=['in_file']) + flirt = pe.MapNode( + fsl.FLIRT(**flirt_param), + name='CoRegistration', + iterfield=['in_file', 'in_matrix_file']) + apply_xfms = pe.MapNode( + fsl.ApplyXFM( + apply_xfm=True, + interp='spline', + bgvalue=0), + name='ApplyXFMs', + iterfield=['in_file', 'in_matrix_file'] + ) + thres = pe.MapNode( + fsl.Threshold(thresh=0.0), + iterfield=['in_file'], + name='RemoveNegative') + merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') + outputnode = pe.Node( + niu.IdentityInterface(fields=['out_file', 'out_xfms']), + name='outputnode') + wf = pe.Workflow(name=name) + wf.connect([ + (inputnode, split, [('in_file', 'in_file')]), + (inputnode, dilate, [('ref_mask', 'in_file')]), + (inputnode, enhb0, [('ref_mask', 'in_mask')]), + (inputnode, initmat, [('in_xfms', 'in_xfms'), + ('in_bval', 'in_bval')]), + (inputnode, n4, [('reference', 'input_image'), + ('ref_mask', 'mask_image')]), + (dilate, flirt, [('out_file', 'ref_weight'), + ('out_file', 'in_weight')]), + (n4, enhb0, [('output_image', 'in_file')]), + (split, enhdw, [('out_files', 'in_file')]), + (split, apply_xfms, [('out_files', 'in_file')]), + (dilate, enhdw, [('out_file', 'in_mask')]), + (enhb0, flirt, [('out_file', 'reference')]), + (enhb0, apply_xfms, [('out_file', 'reference')]), + (enhdw, flirt, [('out_file', 'in_file')]), + (initmat, flirt, [('init_xfms', 'in_matrix_file')]), + (flirt, apply_xfms, [('out_matrix_file', 'in_matrix_file')]), + (apply_xfms, thres, [('out_file', 'in_file')]), + (thres, merge, 
[('out_file', 'in_files')]), + (merge, outputnode, [('merged_file', 'out_file')]), + (flirt, outputnode, [('out_matrix_file', 'out_xfms')]) + ]) + return wf + + +def apply_all_corrections(name='UnwarpArtifacts'): + """ + Combines two lists of linear transforms with the deformation field + map obtained typically after the SDC process. + Additionally, computes the corresponding bspline coefficients and + the map of determinants of the jacobian. + """ + + inputnode = pe.Node( + niu.IdentityInterface(fields=['in_sdc', 'in_hmc', 'in_ecc', 'in_dwi']), + name='inputnode') + outputnode = pe.Node( + niu.IdentityInterface( + fields=['out_file', 'out_warp', 'out_coeff', 'out_jacobian']), + name='outputnode') + warps = pe.MapNode( + fsl.ConvertWarp(relwarp=True), + iterfield=['premat', 'postmat'], + name='ConvertWarp') + + selref = pe.Node(niu.Select(index=[0]), name='Reference') + + split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') + unwarp = pe.MapNode( + fsl.ApplyWarp(), + iterfield=['in_file', 'field_file'], + name='UnwarpDWIs') + + coeffs = pe.MapNode( + fsl.WarpUtils(out_format='spline'), + iterfield=['in_file'], + name='CoeffComp') + jacobian = pe.MapNode( + fsl.WarpUtils(write_jacobian=True), + iterfield=['in_file'], + name='JacobianComp') + jacmult = pe.MapNode( + fsl.MultiImageMaths(op_string='-mul %s'), + iterfield=['in_file', 'operand_files'], + name='ModulateDWIs') + + thres = pe.MapNode( + fsl.Threshold(thresh=0.0), + iterfield=['in_file'], + name='RemoveNegative') + merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') + + wf = pe.Workflow(name=name) + wf.connect([(inputnode, warps, [ + ('in_sdc', 'warp1'), ('in_hmc', 'premat'), ('in_ecc', 'postmat'), + ('in_dwi', 'reference') + ]), (inputnode, split, [('in_dwi', 'in_file')]), (split, selref, [ + ('out_files', 'inlist') + ]), (warps, unwarp, [('out_file', 'field_file')]), (split, unwarp, [ + ('out_files', 'in_file') + ]), (selref, unwarp, [('out', 'ref_file')]), (selref, coeffs, [ + ('out', 'reference') + ]), (warps, coeffs, [('out_file', 'in_file')]), (selref, jacobian, [ + ('out', 'reference') + ]), (coeffs, jacobian, [('out_file', 'in_file')]), (unwarp, jacmult, [ + ('out_file', 'in_file') + ]), (jacobian, jacmult, [('out_jacobian', 'operand_files')]), + (jacmult, thres, [('out_file', 'in_file')]), (thres, merge, [ + ('out_file', 'in_files') + ]), (warps, outputnode, [('out_file', 'out_warp')]), + (coeffs, outputnode, + [('out_file', 'out_coeff')]), (jacobian, outputnode, [ + ('out_jacobian', 'out_jacobian') + ]), (merge, outputnode, [('merged_file', 'out_file')])]) + return wf + + +def extract_bval(in_dwi, in_bval, b=0, out_file=None): + """ + Writes an image containing only the volumes with b-value specified at + input + """ + import numpy as np + import nibabel as nb + import os.path as op + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, ext = op.splitext(op.basename(in_dwi)) + if ext == ".gz": + fname, ext2 = op.splitext(fname) + ext = ext2 + ext + out_file = op.abspath("%s_tsoi%s" % (fname, ext)) + + im = nb.load(in_dwi, mmap=NUMPY_MMAP) + dwidata = im.get_data() + bvals = np.loadtxt(in_bval) + + if b == 'diff': + selection = np.where(bvals != 0) + elif b == 'nodiff': + selection = np.where(bvals == 0) + else: + selection = np.where(bvals == b) + + extdata = np.squeeze(dwidata.take(selection, axis=3)) + hdr = im.header.copy() + hdr.set_data_shape(extdata.shape) + nb.Nifti1Image(extdata, im.affine, hdr).to_filename(out_file) + return out_file + + +def hmc_split(in_file, in_bval, ref_num=0, 
lowbval=5.0):
+    """
+    Selects the reference and moving volumes from a dwi dataset
+    for the purpose of HMC.
+    """
+    import numpy as np
+    import nibabel as nb
+    import os.path as op
+    from nipype.interfaces.base import isdefined
+    from nipype.utils import NUMPY_MMAP
+
+    im = nb.load(in_file, mmap=NUMPY_MMAP)
+    data = im.get_data()
+    hdr = im.header.copy()
+    bval = np.loadtxt(in_bval)
+
+    lowbs = np.where(bval <= lowbval)[0]
+
+    volid = lowbs[0]
+    if (isdefined(ref_num) and (ref_num < len(lowbs))):
+        volid = ref_num
+
+    if volid == 0:
+        data = data[..., 1:]
+        bval = bval[1:]
+    elif volid == (data.shape[-1] - 1):
+        data = data[..., :-1]
+        bval = bval[:-1]
+    else:
+        data = np.concatenate(
+            (data[..., :volid], data[..., (volid + 1):]), axis=3)
+        bval = np.hstack((bval[:volid], bval[(volid + 1):]))
+
+    out_ref = op.abspath('hmc_ref.nii.gz')
+    out_mov = op.abspath('hmc_mov.nii.gz')
+    out_bval = op.abspath('bval_split.txt')
+
+    refdata = data[..., volid]
+    hdr.set_data_shape(refdata.shape)
+    nb.Nifti1Image(refdata, im.affine, hdr).to_filename(out_ref)
+
+    hdr.set_data_shape(data.shape)
+    nb.Nifti1Image(data, im.affine, hdr).to_filename(out_mov)
+    np.savetxt(out_bval, bval)
+    return [out_ref, out_mov, out_bval, volid]
+
+
+def remove_comp(in_file, in_bval, volid=0, out_file=None):
+    """
+    Removes the volume ``volid`` from the 4D nifti file
+    """
+    import numpy as np
+    import nibabel as nb
+    import os.path as op
+    from nipype.utils import NUMPY_MMAP
+
+    if out_file is None:
+        fname, ext = op.splitext(op.basename(in_file))
+        if ext == ".gz":
+            fname, ext2 = op.splitext(fname)
+            ext = ext2 + ext
+        out_file = op.abspath("%s_extract%s" % (fname, ext))
+
+    im = nb.load(in_file, mmap=NUMPY_MMAP)
+    data = im.get_data()
+    hdr = im.header.copy()
+    bval = np.loadtxt(in_bval)
+
+    if volid == 0:
+        data = data[..., 1:]
+        bval = bval[1:]
+    elif volid == (data.shape[-1] - 1):
+        data = data[..., :-1]
+        bval = bval[:-1]
+    else:
+        data = np.concatenate(
+            (data[..., :volid], data[..., (volid + 1):]), axis=3)
+        bval = np.hstack((bval[:volid], bval[(volid + 1):]))
+    hdr.set_data_shape(data.shape)
+    nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file)
+
+    out_bval = op.abspath('bval_extract.txt')
+    np.savetxt(out_bval, bval)
+    return out_file, out_bval
+
+
+def insert_mat(inlist, volid=0):
+    import numpy as np
+    import os.path as op
+    idfname = op.abspath('identity.mat')
+    out = inlist
+    np.savetxt(idfname, np.eye(4))
+    out.insert(volid, idfname)
+    return out
+
+
+def recompose_dwi(in_dwi, in_bval, in_corrected, out_file=None):
+    """
+    Recompose back the dMRI data according to the b-values table after EC
+    correction
+    """
+    import numpy as np
+    import nibabel as nb
+    import os.path as op
+    from nipype.utils import NUMPY_MMAP
+
+    if out_file is None:
+        fname, ext = op.splitext(op.basename(in_dwi))
+        if ext == ".gz":
+            fname, ext2 = op.splitext(fname)
+            ext = ext2 + ext
+        out_file = op.abspath("%s_eccorrect%s" % (fname, ext))
+
+    im = nb.load(in_dwi, mmap=NUMPY_MMAP)
+    dwidata = im.get_data()
+    bvals = np.loadtxt(in_bval)
+    dwis = np.where(bvals != 0)[0].tolist()
+
+    if len(dwis) != len(in_corrected):
+        raise RuntimeError(('Length of DWIs in b-values table and after '
+                            'correction should match'))
+
+    for bindex, dwi in zip(dwis, in_corrected):
+        dwidata[..., bindex] = nb.load(dwi, mmap=NUMPY_MMAP).get_data()
+
+    nb.Nifti1Image(dwidata, im.affine, im.header).to_filename(out_file)
+    return out_file
+
+
+def recompose_xfm(in_bval, in_xfms):
+    """
+    Insert identity transformation matrices in b0 volumes to build up a
list + """ + import numpy as np + import os.path as op + + bvals = np.loadtxt(in_bval) + xfms = iter([np.loadtxt(xfm) for xfm in in_xfms]) + out_files = [] + + for i, b in enumerate(bvals): + if b == 0.0: + mat = np.eye(4) + else: + mat = next(xfms) + + out_name = op.abspath('eccor_%04d.mat' % i) + out_files.append(out_name) + np.savetxt(out_name, mat) + + return out_files + + +def time_avg(in_file, index=[0], out_file=None): + """ + Average the input time-series, selecting the indices given in index + + .. warning:: time steps should be already registered (corrected for + head motion artifacts). + + """ + import numpy as np + import nibabel as nb + import os.path as op + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, ext = op.splitext(op.basename(in_file)) + if ext == ".gz": + fname, ext2 = op.splitext(fname) + ext = ext2 + ext + out_file = op.abspath("%s_baseline%s" % (fname, ext)) + + index = np.atleast_1d(index).tolist() + + imgs = np.array(nb.four_to_three(nb.load(in_file, mmap=NUMPY_MMAP)))[index] + if len(index) == 1: + data = imgs[0].get_data().astype(np.float32) + else: + data = np.average( + np.array([im.get_data().astype(np.float32) for im in imgs]), + axis=0) + + hdr = imgs[0].header.copy() + hdr.set_data_shape(data.shape) + hdr.set_xyzt_units('mm') + hdr.set_data_dtype(np.float32) + nb.Nifti1Image(data, imgs[0].affine, hdr).to_filename(out_file) + return out_file + + +def b0_indices(in_bval, max_b=10.0): + """ + Extract the indices of slices in a b-values file with a low b value + """ + import numpy as np + bval = np.loadtxt(in_bval) + return np.argwhere(bval <= max_b).flatten().tolist() + + +def b0_average(in_dwi, in_bval, max_b=10.0, out_file=None): + """ + A function that averages the *b0* volumes from a DWI dataset. + As current dMRI data are being acquired with all b-values > 0.0, + the *lowb* volumes are selected by specifying the parameter max_b. + + .. warning:: *b0* should be already registered (head motion artifact should + be corrected). + + """ + import numpy as np + import nibabel as nb + import os.path as op + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, ext = op.splitext(op.basename(in_dwi)) + if ext == ".gz": + fname, ext2 = op.splitext(fname) + ext = ext2 + ext + out_file = op.abspath("%s_avg_b0%s" % (fname, ext)) + + imgs = np.array(nb.four_to_three(nb.load(in_dwi, mmap=NUMPY_MMAP))) + bval = np.loadtxt(in_bval) + index = np.argwhere(bval <= max_b).flatten().tolist() + + b0s = [im.get_data().astype(np.float32) for im in imgs[index]] + b0 = np.average(np.array(b0s), axis=0) + + hdr = imgs[0].header.copy() + hdr.set_data_shape(b0.shape) + hdr.set_xyzt_units('mm') + hdr.set_data_dtype(np.float32) + nb.Nifti1Image(b0, imgs[0].affine, hdr).to_filename(out_file) + return out_file + + +def rotate_bvecs(in_bvec, in_matrix): + """ + Rotates the input bvec file accordingly with a list of matrices. + + .. note:: the input affine matrix transforms points in the destination + image to their corresponding coordinates in the original image. + Therefore, this matrix should be inverted first, as we want to know + the target position of :math:`\\vec{r}`. 
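+      Concretely, for each non-zero b-vector :math:`\\vec{r}` and its
+      per-volume matrix :math:`M`, the code below applies the inverted
+      rotation block, :math:`M^{-1}_{3\\times 3}\\,\\vec{r}`, and rescales
+      the result to unit norm; null (b0) vectors are passed through
+      unchanged.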
+ + """ + import os + import numpy as np + + name, fext = os.path.splitext(os.path.basename(in_bvec)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('%s_rotated.bvec' % name) + bvecs = np.loadtxt(in_bvec).T + new_bvecs = [] + + if len(bvecs) != len(in_matrix): + raise RuntimeError(('Number of b-vectors (%d) and rotation ' + 'matrices (%d) should match.') % (len(bvecs), + len(in_matrix))) + + for bvec, mat in zip(bvecs, in_matrix): + if np.all(bvec == 0.0): + new_bvecs.append(bvec) + else: + invrot = np.linalg.inv(np.loadtxt(mat))[:3, :3] + newbvec = invrot.dot(bvec) + new_bvecs.append((newbvec / np.linalg.norm(newbvec))) + + np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') + return out_file + + +def eddy_rotate_bvecs(in_bvec, eddy_params): + """ + Rotates the input bvec file accordingly with a list of parameters sourced + from ``eddy``, as explained `here + `_. + """ + import os + import numpy as np + from math import sin, cos + + name, fext = os.path.splitext(os.path.basename(in_bvec)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('%s_rotated.bvec' % name) + bvecs = np.loadtxt(in_bvec).T + new_bvecs = [] + + params = np.loadtxt(eddy_params) + + if len(bvecs) != len(params): + raise RuntimeError(('Number of b-vectors and rotation ' + 'matrices should match.')) + + for bvec, row in zip(bvecs, params): + if np.all(bvec == 0.0): + new_bvecs.append(bvec) + else: + ax = row[3] + ay = row[4] + az = row[5] + + Rx = np.array([[1.0, 0.0, 0.0], [0.0, cos(ax), -sin(ax)], + [0.0, sin(ax), cos(ax)]]) + Ry = np.array([[cos(ay), 0.0, sin(ay)], [0.0, 1.0, 0.0], + [-sin(ay), 0.0, cos(ay)]]) + Rz = np.array([[cos(az), -sin(az), 0.0], [sin(az), + cos(az), 0.0], + [0.0, 0.0, 1.0]]) + R = Rx.dot(Ry).dot(Rz) + + invrot = np.linalg.inv(R) + newbvec = invrot.dot(bvec) + new_bvecs.append(newbvec / np.linalg.norm(newbvec)) + + np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') + return out_file + + +def compute_readout(params): + """ + Computes readout time from epi params (see `eddy documentation + `_). + + .. warning:: ``params['echospacing']`` should be in *sec* units. 
+ + + """ + epi_factor = 1.0 + acc_factor = 1.0 + try: + if params['epi_factor'] > 1: + epi_factor = float(params['epi_factor'] - 1) + except: + pass + try: + if params['acc_factor'] > 1: + acc_factor = 1.0 / params['acc_factor'] + except: + pass + return acc_factor * epi_factor * params['echospacing'] + + +def siemens2rads(in_file, out_file=None): + """ + Converts input phase difference map to rads + """ + import numpy as np + import nibabel as nb + import os.path as op + import math + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_rads.nii.gz' % fname) + + in_file = np.atleast_1d(in_file).tolist() + im = nb.load(in_file[0]) + data = im.get_data().astype(np.float32) + hdr = im.header.copy() + + if len(in_file) == 2: + data = nb.load(in_file[1]).get_data().astype(np.float32) - data + elif (data.ndim == 4) and (data.shape[-1] == 2): + data = np.squeeze(data[..., 1] - data[..., 0]) + hdr.set_data_shape(data.shape[:3]) + + imin = data.min() + imax = data.max() + data = (2.0 * math.pi * (data - imin) / (imax - imin)) - math.pi + hdr.set_data_dtype(np.float32) + hdr.set_xyzt_units('mm') + hdr['datatype'] = 16 + nb.Nifti1Image(data, im.affine, hdr).to_filename(out_file) + return out_file + + +def rads2radsec(in_file, delta_te, out_file=None): + """ + Converts input phase difference map to rads + """ + import numpy as np + import nibabel as nb + import os.path as op + import math + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_radsec.nii.gz' % fname) + + im = nb.load(in_file, mmap=NUMPY_MMAP) + data = im.get_data().astype(np.float32) * (1.0 / delta_te) + nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file) + return out_file + + +def demean_image(in_file, in_mask=None, out_file=None): + """ + Demean image data inside mask + """ + import numpy as np + import nibabel as nb + import os.path as op + import math + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_demean.nii.gz' % fname) + + im = nb.load(in_file, mmap=NUMPY_MMAP) + data = im.get_data().astype(np.float32) + msk = np.ones_like(data) + + if in_mask is not None: + msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data().astype(np.float32) + msk[msk > 0] = 1.0 + msk[msk < 1] = 0.0 + + mean = np.median(data[msk == 1].reshape(-1)) + data[msk == 1] = data[msk == 1] - mean + nb.Nifti1Image(data, im.affine, im.header).to_filename(out_file) + return out_file + + +def add_empty_vol(in_file, out_file=None): + """ + Adds an empty vol to the phase difference image + """ + import nibabel as nb + import os.path as op + import numpy as np + import math + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_4D.nii.gz' % fname) + + im = nb.load(in_file, mmap=NUMPY_MMAP) + zim = nb.Nifti1Image(np.zeros_like(im.get_data()), im.affine, im.header) + nb.funcs.concat_images([im, zim]).to_filename(out_file) + return out_file + + +def reorient_bvecs(in_dwi, old_dwi, in_bvec): + """ + Checks reorientations of ``in_dwi`` w.r.t. ``old_dwi`` and + reorients the in_bvec table accordingly. 
+ """ + import os + import numpy as np + import nibabel as nb + from nipype.utils import NUMPY_MMAP + + name, fext = os.path.splitext(os.path.basename(in_bvec)) + if fext == '.gz': + name, _ = os.path.splitext(name) + out_file = os.path.abspath('%s_reorient.bvec' % name) + bvecs = np.loadtxt(in_bvec).T + new_bvecs = [] + + N = nb.load(in_dwi, mmap=NUMPY_MMAP).affine + O = nb.load(old_dwi, mmap=NUMPY_MMAP).affine + RS = N.dot(np.linalg.inv(O))[:3, :3] + sc_idx = np.where((np.abs(RS) != 1) & (RS != 0)) + S = np.ones_like(RS) + S[sc_idx] = RS[sc_idx] + R = RS / S + + new_bvecs = [R.dot(b) for b in bvecs] + np.savetxt(out_file, np.array(new_bvecs).T, fmt=b'%0.15f') + return out_file + + +def copy_hdr(in_file, in_file_hdr, out_file=None): + import numpy as np + import nibabel as nb + import os.path as op + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_fixhdr.nii.gz' % fname) + + imref = nb.load(in_file_hdr, mmap=NUMPY_MMAP) + hdr = imref.header.copy() + hdr.set_data_dtype(np.float32) + vsm = nb.load(in_file, mmap=NUMPY_MMAP).get_data().astype(np.float32) + hdr.set_data_shape(vsm.shape) + hdr.set_xyzt_units('mm') + nii = nb.Nifti1Image(vsm, imref.affine, hdr) + nii.to_filename(out_file) + return out_file + + +def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None): + import numpy as np + import nibabel as nb + import os.path as op + from skimage import exposure, img_as_int + from nipype.utils import NUMPY_MMAP + + if out_file is None: + fname, fext = op.splitext(op.basename(in_file)) + if fext == '.gz': + fname, _ = op.splitext(fname) + out_file = op.abspath('./%s_enh.nii.gz' % fname) + + im = nb.load(in_file, mmap=NUMPY_MMAP) + imdata = im.get_data() + imshape = im.shape + + if in_mask is not None: + msk = nb.load(in_mask, mmap=NUMPY_MMAP).get_data() + msk[msk > 0] = 1 + msk[msk < 1] = 0 + imdata = imdata * msk + + immin = imdata.min() + imdata = (imdata - immin).astype(np.uint16) + + adapted = exposure.equalize_adapthist( + imdata.reshape(imshape[0], -1), clip_limit=clip_limit) + + nb.Nifti1Image(adapted.reshape(imshape), im.affine, + im.header).to_filename(out_file) + + return out_file + + +def _checkinitxfm(in_bval, excl_nodiff, in_xfms=None): + from nipype.interfaces.base import isdefined + import numpy as np + import os.path as op + bvals = np.loadtxt(in_bval) + + gen_id = ((in_xfms is None) or (not isdefined(in_xfms)) + or (len(in_xfms) != len(bvals))) + + init_xfms = [] + if excl_nodiff: + dws = np.where(bvals != 0)[0].tolist() + else: + dws = list(range(len(bvals))) + + if gen_id: + for i in dws: + xfm_file = op.abspath('init_%04d.mat' % i) + np.savetxt(xfm_file, np.eye(4)) + init_xfms.append(xfm_file) + else: + init_xfms = [in_xfms[i] for i in dws] + + return init_xfms diff --git a/nipype/workflows/dmri/mrtrix/__init__.py b/nipype/workflows/dmri/mrtrix/__init__.py new file mode 100644 index 0000000000..6851021111 --- /dev/null +++ b/nipype/workflows/dmri/mrtrix/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from .diffusion import create_mrtrix_dti_pipeline +from .connectivity_mapping import create_connectivity_pipeline +from .group_connectivity import (create_group_connectivity_pipeline) diff --git a/nipype/workflows/dmri/mrtrix/connectivity_mapping.py b/nipype/workflows/dmri/mrtrix/connectivity_mapping.py new file mode 100644 index 0000000000..e47dcb9531 --- /dev/null +++ 
b/nipype/workflows/dmri/mrtrix/connectivity_mapping.py @@ -0,0 +1,639 @@ +# -*- coding: utf-8 -*- +import inspect +import os.path as op # system functions + +from ....interfaces import io as nio # Data i/o +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine +from ....interfaces import fsl as fsl +from ....interfaces import freesurfer as fs # freesurfer +from ....interfaces import mrtrix as mrtrix +from ....interfaces import cmtk as cmtk +from ....interfaces import dipy as dipy +from ....algorithms import misc as misc +from ..fsl.epi import create_eddy_correct_pipeline +from ..connectivity.nx import create_networkx_pipeline, create_cmats_to_csv_pipeline +from ....interfaces.utility import Function +from ...misc.utils import select_aparc_annot + + +def create_connectivity_pipeline(name="connectivity", + parcellation_name='scale500'): + """Creates a pipeline that does the same connectivity processing as in the + :ref:`example_dmri_connectivity_advanced` example script. Given a subject id (and completed Freesurfer reconstruction) + diffusion-weighted image, b-values, and b-vectors, the workflow will return the subject's connectome + as a Connectome File Format (CFF) file for use in Connectome Viewer (http://www.cmtk.org). + + Example + ------- + + >>> from nipype.workflows.dmri.mrtrix.connectivity_mapping import create_connectivity_pipeline + >>> conmapper = create_connectivity_pipeline("nipype_conmap") + >>> conmapper.inputs.inputnode.subjects_dir = '.' + >>> conmapper.inputs.inputnode.subject_id = 'subj1' + >>> conmapper.inputs.inputnode.dwi = 'data.nii.gz' + >>> conmapper.inputs.inputnode.bvecs = 'bvecs' + >>> conmapper.inputs.inputnode.bvals = 'bvals' + >>> conmapper.run() # doctest: +SKIP + + Inputs:: + + inputnode.subject_id + inputnode.subjects_dir + inputnode.dwi + inputnode.bvecs + inputnode.bvals + inputnode.resolution_network_file + + Outputs:: + + outputnode.connectome + outputnode.cmatrix + outputnode.networks + outputnode.fa + outputnode.struct + outputnode.tracts + outputnode.rois + outputnode.odfs + outputnode.filtered_tractography + outputnode.tdi + outputnode.nxstatscff + outputnode.nxcsv + outputnode.cmatrices_csv + outputnode.mean_fiber_length + outputnode.median_fiber_length + outputnode.fiber_length_std + """ + + inputnode_within = pe.Node( + util.IdentityInterface(fields=[ + "subject_id", "dwi", "bvecs", "bvals", "subjects_dir", + "resolution_network_file" + ]), + name="inputnode_within") + + FreeSurferSource = pe.Node( + interface=nio.FreeSurferSource(), name='fssource') + FreeSurferSourceLH = pe.Node( + interface=nio.FreeSurferSource(), name='fssourceLH') + FreeSurferSourceLH.inputs.hemi = 'lh' + + FreeSurferSourceRH = pe.Node( + interface=nio.FreeSurferSource(), name='fssourceRH') + FreeSurferSourceRH.inputs.hemi = 'rh' + """ + Creating the workflow's nodes + ============================= + """ + """ + Conversion nodes + ---------------- + """ + """ + A number of conversion operations are required to obtain NIFTI files from the FreesurferSource for each subject. 
+ Nodes are used to convert the following: + * Original structural image to NIFTI + * Pial, white, inflated, and spherical surfaces for both the left and right hemispheres are converted to GIFTI for visualization in ConnectomeViewer + * Parcellated annotation files for the left and right hemispheres are also converted to GIFTI + + """ + + mri_convert_Brain = pe.Node( + interface=fs.MRIConvert(), name='mri_convert_Brain') + mri_convert_Brain.inputs.out_type = 'nii' + mri_convert_ROI_scale500 = mri_convert_Brain.clone( + 'mri_convert_ROI_scale500') + + mris_convertLH = pe.Node(interface=fs.MRIsConvert(), name='mris_convertLH') + mris_convertLH.inputs.out_datatype = 'gii' + mris_convertRH = mris_convertLH.clone('mris_convertRH') + mris_convertRHwhite = mris_convertLH.clone('mris_convertRHwhite') + mris_convertLHwhite = mris_convertLH.clone('mris_convertLHwhite') + mris_convertRHinflated = mris_convertLH.clone('mris_convertRHinflated') + mris_convertLHinflated = mris_convertLH.clone('mris_convertLHinflated') + mris_convertRHsphere = mris_convertLH.clone('mris_convertRHsphere') + mris_convertLHsphere = mris_convertLH.clone('mris_convertLHsphere') + mris_convertLHlabels = mris_convertLH.clone('mris_convertLHlabels') + mris_convertRHlabels = mris_convertLH.clone('mris_convertRHlabels') + """ + Diffusion processing nodes + -------------------------- + + .. seealso:: + + dmri_mrtrix_dti.py + Tutorial that focuses solely on the MRtrix diffusion processing + + http://www.brain.org.au/software/mrtrix/index.html + MRtrix's online documentation + """ + """ + b-values and b-vectors stored in FSL's format are converted into a single encoding file for MRTrix. + """ + + fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') + """ + Distortions induced by eddy currents are corrected prior to fitting the tensors. + The first image is used as a reference for which to warp the others. + """ + + eddycorrect = create_eddy_correct_pipeline(name='eddycorrect') + eddycorrect.inputs.inputnode.ref_num = 1 + """ + Tensors are fitted to each voxel in the diffusion-weighted image and from these three maps are created: + * Major eigenvector in each voxel + * Apparent diffusion coefficient + * Fractional anisotropy + """ + + dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') + tensor2vector = pe.Node( + interface=mrtrix.Tensor2Vector(), name='tensor2vector') + tensor2adc = pe.Node( + interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') + tensor2fa = pe.Node( + interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') + MRconvert_fa = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert_fa') + MRconvert_fa.inputs.extension = 'nii' + """ + + These nodes are used to create a rough brain mask from the b0 image. + The b0 image is extracted from the original diffusion-weighted image, + put through a simple thresholding routine, and smoothed using a 3x3 median filter. + """ + + MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') + MRconvert.inputs.extract_at_axis = 3 + MRconvert.inputs.extract_at_coordinate = [0] + threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') + median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3d') + """ + The brain mask is also used to help identify single-fiber voxels. + This is done by passing the brain mask through two erosion steps, + multiplying the remaining mask with the fractional anisotropy map, and + thresholding the result to obtain some highly anisotropic within-brain voxels. 
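+
+    Conceptually, the resulting mask amounts to the following (a rough
+    NumPy sketch, where ``fa`` and ``eroded_mask`` stand for hypothetical
+    arrays and 0.7 is the absolute threshold configured below):
+
+    >>> single_fiber_mask = (fa * eroded_mask) > 0.7  # doctest: +SKIP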
+ """ + + erode_mask_firstpass = pe.Node( + interface=mrtrix.Erode(), name='erode_mask_firstpass') + erode_mask_secondpass = pe.Node( + interface=mrtrix.Erode(), name='erode_mask_secondpass') + MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') + MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') + threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') + threshold_FA.inputs.absolute_threshold_value = 0.7 + """ + For whole-brain tracking we also require a broad white-matter seed mask. + This is created by generating a white matter mask, given a brainmask, and + thresholding it at a reasonably high level. + """ + + bet = pe.Node(interface=fsl.BET(mask=True), name='bet_b0') + gen_WM_mask = pe.Node( + interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') + threshold_wmmask = pe.Node( + interface=mrtrix.Threshold(), name='threshold_wmmask') + threshold_wmmask.inputs.absolute_threshold_value = 0.4 + """ + The spherical deconvolution step depends on the estimate of the response function + in the highly anisotropic voxels we obtained above. + + .. warning:: + + For damaged or pathological brains one should take care to lower the maximum harmonic order of these steps. + + """ + + estimateresponse = pe.Node( + interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') + estimateresponse.inputs.maximum_harmonic_order = 6 + csdeconv = pe.Node( + interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') + csdeconv.inputs.maximum_harmonic_order = 6 + """ + Finally, we track probabilistically using the orientation distribution functions obtained earlier. + The tracts are then used to generate a tract-density image, and they are also converted to TrackVis format. + """ + + probCSDstreamtrack = pe.Node( + interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(), + name='probCSDstreamtrack') + probCSDstreamtrack.inputs.inputmodel = 'SD_PROB' + probCSDstreamtrack.inputs.desired_number_of_tracks = 150000 + tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') + tracks2prob.inputs.colour = True + MRconvert_tracks2prob = MRconvert_fa.clone(name='MRconvert_tracks2prob') + tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') + trk2tdi = pe.Node(interface=dipy.TrackDensityMap(), name='trk2tdi') + """ + Structural segmentation nodes + ----------------------------- + """ + """ + The following node identifies the transformation between the diffusion-weighted + image and the structural image. This transformation is then applied to the tracts + so that they are in the same space as the regions of interest. + """ + + coregister = pe.Node(interface=fsl.FLIRT(dof=6), name='coregister') + coregister.inputs.cost = ('normmi') + """ + Parcellation is performed given the aparc+aseg image from Freesurfer. + The CMTK Parcellation step subdivides these regions to return a higher-resolution parcellation scheme. + The parcellation used here is entitled "scale500" and returns 1015 regions. + """ + + parcellate = pe.Node(interface=cmtk.Parcellate(), name="Parcellate") + parcellate.inputs.parcellation_name = parcellation_name + """ + The CreateMatrix interface takes in the remapped aparc+aseg image as well as the label dictionary and fiber tracts + and outputs a number of different files. The most important of which is the connectivity network itself, which is stored + as a 'gpickle' and can be loaded using Python's NetworkX package (see CreateMatrix docstring). 
Also outputted are various + NumPy arrays containing detailed tract information, such as the start and endpoint regions, and statistics on the mean and + standard deviation for the fiber length of each connection. These matrices can be used in the ConnectomeViewer to plot the + specific tracts that connect between user-selected regions. + + Here we choose the Lausanne2008 parcellation scheme, since we are incorporating the CMTK parcellation step. + """ + + creatematrix = pe.Node(interface=cmtk.CreateMatrix(), name="CreateMatrix") + creatematrix.inputs.count_region_intersections = True + """ + Next we define the endpoint of this tutorial, which is the CFFConverter node, as well as a few nodes which use + the Nipype Merge utility. These are useful for passing lists of the files we want packaged in our CFF file. + The inspect.getfile command is used to package this script into the resulting CFF file, so that it is easy to + look back at the processing parameters that were used. + """ + + CFFConverter = pe.Node(interface=cmtk.CFFConverter(), name="CFFConverter") + CFFConverter.inputs.script_files = op.abspath( + inspect.getfile(inspect.currentframe())) + giftiSurfaces = pe.Node(interface=util.Merge(8), name="GiftiSurfaces") + giftiLabels = pe.Node(interface=util.Merge(2), name="GiftiLabels") + niftiVolumes = pe.Node(interface=util.Merge(3), name="NiftiVolumes") + fiberDataArrays = pe.Node(interface=util.Merge(4), name="FiberDataArrays") + """ + We also create a node to calculate several network metrics on our resulting file, and another CFF converter + which will be used to package these networks into a single file. + """ + + networkx = create_networkx_pipeline(name='networkx') + cmats_to_csv = create_cmats_to_csv_pipeline(name='cmats_to_csv') + nfibs_to_csv = pe.Node(interface=misc.Matlab2CSV(), name='nfibs_to_csv') + merge_nfib_csvs = pe.Node( + interface=misc.MergeCSVFiles(), name='merge_nfib_csvs') + merge_nfib_csvs.inputs.extra_column_heading = 'Subject' + merge_nfib_csvs.inputs.out_file = 'fibers.csv' + NxStatsCFFConverter = pe.Node( + interface=cmtk.CFFConverter(), name="NxStatsCFFConverter") + NxStatsCFFConverter.inputs.script_files = op.abspath( + inspect.getfile(inspect.currentframe())) + """ + Connecting the workflow + ======================= + Here we connect our processing pipeline. + """ + """ + Connecting the inputs, FreeSurfer nodes, and conversions + -------------------------------------------------------- + """ + + mapping = pe.Workflow(name='mapping') + """ + First, we connect the input node to the FreeSurfer input nodes. 
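+
+    Every connection below follows the same schematic pattern (shown here
+    with placeholder names rather than actual nodes of this workflow):
+
+    >>> mapping.connect([(src_node, dst_node, [('out_field', 'in_field')])])  # doctest: +SKIP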
+ """ + + mapping.connect([(inputnode_within, FreeSurferSource, [("subjects_dir", + "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSource, [("subject_id", + "subject_id")])]) + + mapping.connect([(inputnode_within, FreeSurferSourceLH, + [("subjects_dir", "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSourceLH, [("subject_id", + "subject_id")])]) + + mapping.connect([(inputnode_within, FreeSurferSourceRH, + [("subjects_dir", "subjects_dir")])]) + mapping.connect([(inputnode_within, FreeSurferSourceRH, [("subject_id", + "subject_id")])]) + + mapping.connect([(inputnode_within, parcellate, [("subjects_dir", + "subjects_dir")])]) + mapping.connect([(inputnode_within, parcellate, [("subject_id", + "subject_id")])]) + mapping.connect([(parcellate, mri_convert_ROI_scale500, [('roi_file', + 'in_file')])]) + """ + Nifti conversion for subject's stripped brain image from Freesurfer: + """ + + mapping.connect([(FreeSurferSource, mri_convert_Brain, [('brain', + 'in_file')])]) + """ + Surface conversions to GIFTI (pial, white, inflated, and sphere for both hemispheres) + """ + + mapping.connect([(FreeSurferSourceLH, mris_convertLH, [('pial', + 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRH, [('pial', + 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHwhite, [('white', + 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHwhite, [('white', + 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHinflated, + [('inflated', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHinflated, + [('inflated', 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHsphere, + [('sphere', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHsphere, + [('sphere', 'in_file')])]) + """ + The annotation files are converted using the pial surface as a map via the MRIsConvert interface. + One of the functions defined earlier is used to select the lh.aparc.annot and rh.aparc.annot files + specifically (rather than e.g. rh.aparc.a2009s.annot) from the output list given by the FreeSurferSource. + """ + + mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, + [('pial', 'in_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, + [('pial', 'in_file')])]) + mapping.connect([(FreeSurferSourceLH, mris_convertLHlabels, + [(('annot', select_aparc_annot), 'annot_file')])]) + mapping.connect([(FreeSurferSourceRH, mris_convertRHlabels, + [(('annot', select_aparc_annot), 'annot_file')])]) + """ + Diffusion Processing + -------------------- + Now we connect the tensor computations: + """ + + mapping.connect([(inputnode_within, fsl2mrtrix, [("bvecs", "bvec_file"), + ("bvals", "bval_file")])]) + mapping.connect([(inputnode_within, eddycorrect, [("dwi", + "inputnode.in_file")])]) + mapping.connect([(eddycorrect, dwi2tensor, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", + "encoding_file")])]) + + mapping.connect([ + (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), + (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), + (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), + ]) + mapping.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) + mapping.connect([(tensor2fa, MRconvert_fa, [("FA", "in_file")])]) + """ + + This block creates the rough brain mask to be multiplied, mulitplies it with the + fractional anisotropy image, and thresholds it to get the single-fiber voxels. 
+ """ + + mapping.connect([(eddycorrect, MRconvert, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) + mapping.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) + mapping.connect([(median3d, erode_mask_firstpass, [("out_file", + "in_file")])]) + mapping.connect([(erode_mask_firstpass, erode_mask_secondpass, + [("out_file", "in_file")])]) + mapping.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", + "in2")])]) + mapping.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) + mapping.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) + """ + Here the thresholded white matter mask is created for seeding the tractography. + """ + + mapping.connect([(eddycorrect, bet, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(eddycorrect, gen_WM_mask, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) + mapping.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", + "encoding_file")])]) + mapping.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", + "in_file")])]) + """ + Next we estimate the fiber response distribution. + """ + + mapping.connect([(eddycorrect, estimateresponse, + [("outputnode.eddy_corrected", "in_file")])]) + mapping.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", + "encoding_file")])]) + mapping.connect([(threshold_FA, estimateresponse, [("out_file", + "mask_image")])]) + """ + Run constrained spherical deconvolution. + """ + + mapping.connect([(eddycorrect, csdeconv, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", + "mask_image")])]) + mapping.connect([(estimateresponse, csdeconv, [("response", + "response_file")])]) + mapping.connect([(fsl2mrtrix, csdeconv, [("encoding_file", + "encoding_file")])]) + """ + Connect the tractography and compute the tract density image. + """ + + mapping.connect([(threshold_wmmask, probCSDstreamtrack, [("out_file", + "seed_file")])]) + mapping.connect([(csdeconv, probCSDstreamtrack, + [("spherical_harmonics_image", "in_file")])]) + mapping.connect([(probCSDstreamtrack, tracks2prob, [("tracked", + "in_file")])]) + mapping.connect([(eddycorrect, tracks2prob, [("outputnode.eddy_corrected", + "template_file")])]) + mapping.connect([(tracks2prob, MRconvert_tracks2prob, [("tract_image", + "in_file")])]) + """ + Structural Processing + --------------------- + First, we coregister the diffusion image to the structural image + """ + + mapping.connect([(eddycorrect, coregister, [("outputnode.eddy_corrected", + "in_file")])]) + mapping.connect([(mri_convert_Brain, coregister, [('out_file', + 'reference')])]) + """ + The MRtrix-tracked fibers are converted to TrackVis format (with voxel and data dimensions grabbed from the DWI). + The connectivity matrix is created with the transformed .trk fibers and the parcellation file. 
+ """ + + mapping.connect([(eddycorrect, tck2trk, [("outputnode.eddy_corrected", + "image_file")])]) + mapping.connect([(mri_convert_Brain, tck2trk, + [("out_file", "registration_image_file")])]) + mapping.connect([(coregister, tck2trk, [("out_matrix_file", + "matrix_file")])]) + mapping.connect([(probCSDstreamtrack, tck2trk, [("tracked", "in_file")])]) + mapping.connect([(tck2trk, creatematrix, [("out_file", "tract_file")])]) + mapping.connect([(tck2trk, trk2tdi, [("out_file", "in_file")])]) + mapping.connect(inputnode_within, 'resolution_network_file', creatematrix, + 'resolution_network_file') + mapping.connect([(inputnode_within, creatematrix, [("subject_id", + "out_matrix_file")])]) + mapping.connect([(inputnode_within, creatematrix, + [("subject_id", "out_matrix_mat_file")])]) + mapping.connect([(parcellate, creatematrix, [("roi_file", "roi_file")])]) + """ + The merge nodes defined earlier are used here to create lists of the files which are + destined for the CFFConverter. + """ + + mapping.connect([(mris_convertLH, giftiSurfaces, [("converted", "in1")])]) + mapping.connect([(mris_convertRH, giftiSurfaces, [("converted", "in2")])]) + mapping.connect([(mris_convertLHwhite, giftiSurfaces, [("converted", + "in3")])]) + mapping.connect([(mris_convertRHwhite, giftiSurfaces, [("converted", + "in4")])]) + mapping.connect([(mris_convertLHinflated, giftiSurfaces, [("converted", + "in5")])]) + mapping.connect([(mris_convertRHinflated, giftiSurfaces, [("converted", + "in6")])]) + mapping.connect([(mris_convertLHsphere, giftiSurfaces, [("converted", + "in7")])]) + mapping.connect([(mris_convertRHsphere, giftiSurfaces, [("converted", + "in8")])]) + + mapping.connect([(mris_convertLHlabels, giftiLabels, [("converted", + "in1")])]) + mapping.connect([(mris_convertRHlabels, giftiLabels, [("converted", + "in2")])]) + + mapping.connect([(parcellate, niftiVolumes, [("roi_file", "in1")])]) + mapping.connect([(eddycorrect, niftiVolumes, [("outputnode.eddy_corrected", + "in2")])]) + mapping.connect([(mri_convert_Brain, niftiVolumes, [("out_file", "in3")])]) + + mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file", + "in1")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("endpoint_file_mm", + "in2")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("fiber_length_file", + "in3")])]) + mapping.connect([(creatematrix, fiberDataArrays, [("fiber_label_file", + "in4")])]) + """ + This block actually connects the merged lists to the CFF converter. We pass the surfaces + and volumes that are to be included, as well as the tracts and the network itself. The currently + running pipeline (dmri_connectivity_advanced.py) is also scraped and included in the CFF file. This + makes it easy for the user to examine the entire processing pathway used to generate the end + product. + """ + + mapping.connect([(giftiSurfaces, CFFConverter, [("out", + "gifti_surfaces")])]) + mapping.connect([(giftiLabels, CFFConverter, [("out", "gifti_labels")])]) + mapping.connect([(creatematrix, CFFConverter, [("matrix_files", + "gpickled_networks")])]) + mapping.connect([(niftiVolumes, CFFConverter, [("out", "nifti_volumes")])]) + mapping.connect([(fiberDataArrays, CFFConverter, [("out", "data_files")])]) + mapping.connect([(creatematrix, CFFConverter, [("filtered_tractography", + "tract_files")])]) + mapping.connect([(inputnode_within, CFFConverter, [("subject_id", + "title")])]) + """ + The graph theoretical metrics which have been generated are placed into another CFF file. 
+ """ + + mapping.connect([(inputnode_within, networkx, + [("subject_id", "inputnode.extra_field")])]) + mapping.connect([(creatematrix, networkx, [("intersection_matrix_file", + "inputnode.network_file")])]) + + mapping.connect([(networkx, NxStatsCFFConverter, + [("outputnode.network_files", "gpickled_networks")])]) + mapping.connect([(giftiSurfaces, NxStatsCFFConverter, + [("out", "gifti_surfaces")])]) + mapping.connect([(giftiLabels, NxStatsCFFConverter, [("out", + "gifti_labels")])]) + mapping.connect([(niftiVolumes, NxStatsCFFConverter, [("out", + "nifti_volumes")])]) + mapping.connect([(fiberDataArrays, NxStatsCFFConverter, [("out", + "data_files")])]) + mapping.connect([(inputnode_within, NxStatsCFFConverter, [("subject_id", + "title")])]) + + mapping.connect([(inputnode_within, cmats_to_csv, + [("subject_id", "inputnode.extra_field")])]) + mapping.connect([(creatematrix, cmats_to_csv, + [("matlab_matrix_files", + "inputnode.matlab_matrix_files")])]) + mapping.connect([(creatematrix, nfibs_to_csv, [("stats_file", + "in_file")])]) + mapping.connect([(nfibs_to_csv, merge_nfib_csvs, [("csv_files", + "in_files")])]) + mapping.connect([(inputnode_within, merge_nfib_csvs, [("subject_id", + "extra_field")])]) + """ + Create a higher-level workflow + -------------------------------------- + Finally, we create another higher-level workflow to connect our mapping workflow with the info and datagrabbing nodes + declared at the beginning. Our tutorial can is now extensible to any arbitrary number of subjects by simply adding + their names to the subject list and their data to the proper folders. + """ + + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=["subject_id", "dwi", "bvecs", "bvals", "subjects_dir"]), + name="inputnode") + + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + "fa", "struct", "tracts", "tracks2prob", "connectome", + "nxstatscff", "nxmatlab", "nxcsv", "fiber_csv", "cmatrices_csv", + "nxmergedcsv", "cmatrix", "networks", "filtered_tracts", "rois", + "odfs", "tdi", "mean_fiber_length", "median_fiber_length", + "fiber_length_std" + ]), + name="outputnode") + + connectivity = pe.Workflow(name="connectivity") + connectivity.base_output_dir = name + connectivity.base_dir = name + + connectivity.connect([ + (inputnode, mapping, + [("dwi", "inputnode_within.dwi"), ("bvals", "inputnode_within.bvals"), + ("bvecs", "inputnode_within.bvecs"), ("subject_id", + "inputnode_within.subject_id"), + ("subjects_dir", "inputnode_within.subjects_dir")]) + ]) + + connectivity.connect( + [(mapping, outputnode, + [("tck2trk.out_file", + "tracts"), ("CFFConverter.connectome_file", + "connectome"), ("NxStatsCFFConverter.connectome_file", + "nxstatscff"), + ("CreateMatrix.matrix_mat_file", + "cmatrix"), ("CreateMatrix.mean_fiber_length_matrix_mat_file", + "mean_fiber_length"), + ("CreateMatrix.median_fiber_length_matrix_mat_file", + "median_fiber_length"), + ("CreateMatrix.fiber_length_std_matrix_mat_file", + "fiber_length_std"), ("CreateMatrix.matrix_files", "networks"), + ("CreateMatrix.filtered_tractographies", + "filtered_tracts"), ("merge_nfib_csvs.csv_file", "fiber_csv"), + ("mri_convert_ROI_scale500.out_file", + "rois"), ("trk2tdi.out_file", + "tdi"), ("csdeconv.spherical_harmonics_image", "odfs"), + ("mri_convert_Brain.out_file", + "struct"), ("MRconvert_fa.converted", + "fa"), ("MRconvert_tracks2prob.converted", + "tracks2prob")])]) + + connectivity.connect([(cmats_to_csv, outputnode, [("outputnode.csv_file", + "cmatrices_csv")])]) + 
connectivity.connect([(networkx, outputnode, [("outputnode.csv_files", + "nxcsv")])]) + return connectivity diff --git a/nipype/workflows/dmri/mrtrix/diffusion.py b/nipype/workflows/dmri/mrtrix/diffusion.py new file mode 100644 index 0000000000..a4305bf04e --- /dev/null +++ b/nipype/workflows/dmri/mrtrix/diffusion.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine +from ....interfaces import fsl as fsl +from ....interfaces import mrtrix as mrtrix + + +def create_mrtrix_dti_pipeline(name="dtiproc", + tractography_type='probabilistic'): + """Creates a pipeline that does the same diffusion processing as in the + :doc:`../../users/examples/dmri_mrtrix_dti` example script. Given a diffusion-weighted image, + b-values, and b-vectors, the workflow will return the tractography + computed from spherical deconvolution and probabilistic streamline tractography + + Example + ------- + + >>> dti = create_mrtrix_dti_pipeline("mrtrix_dti") + >>> dti.inputs.inputnode.dwi = 'data.nii' + >>> dti.inputs.inputnode.bvals = 'bvals' + >>> dti.inputs.inputnode.bvecs = 'bvecs' + >>> dti.run() # doctest: +SKIP + + Inputs:: + + inputnode.dwi + inputnode.bvecs + inputnode.bvals + + Outputs:: + + outputnode.fa + outputnode.tdi + outputnode.tracts_tck + outputnode.tracts_trk + outputnode.csdeconv + + """ + + inputnode = pe.Node( + interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]), + name="inputnode") + + bet = pe.Node(interface=fsl.BET(), name="bet") + bet.inputs.mask = True + + fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix') + fsl2mrtrix.inputs.invert_y = True + + dwi2tensor = pe.Node(interface=mrtrix.DWI2Tensor(), name='dwi2tensor') + + tensor2vector = pe.Node( + interface=mrtrix.Tensor2Vector(), name='tensor2vector') + tensor2adc = pe.Node( + interface=mrtrix.Tensor2ApparentDiffusion(), name='tensor2adc') + tensor2fa = pe.Node( + interface=mrtrix.Tensor2FractionalAnisotropy(), name='tensor2fa') + + erode_mask_firstpass = pe.Node( + interface=mrtrix.Erode(), name='erode_mask_firstpass') + erode_mask_secondpass = pe.Node( + interface=mrtrix.Erode(), name='erode_mask_secondpass') + + threshold_b0 = pe.Node(interface=mrtrix.Threshold(), name='threshold_b0') + + threshold_FA = pe.Node(interface=mrtrix.Threshold(), name='threshold_FA') + threshold_FA.inputs.absolute_threshold_value = 0.7 + + threshold_wmmask = pe.Node( + interface=mrtrix.Threshold(), name='threshold_wmmask') + threshold_wmmask.inputs.absolute_threshold_value = 0.4 + + MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply') + MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge') + + median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D') + + MRconvert = pe.Node(interface=mrtrix.MRConvert(), name='MRconvert') + MRconvert.inputs.extract_at_axis = 3 + MRconvert.inputs.extract_at_coordinate = [0] + + csdeconv = pe.Node( + interface=mrtrix.ConstrainedSphericalDeconvolution(), name='csdeconv') + + gen_WM_mask = pe.Node( + interface=mrtrix.GenerateWhiteMatterMask(), name='gen_WM_mask') + + estimateresponse = pe.Node( + interface=mrtrix.EstimateResponseForSH(), name='estimateresponse') + + if tractography_type == 'probabilistic': + CSDstreamtrack = pe.Node( + interface=mrtrix. 
+ ProbabilisticSphericallyDeconvolutedStreamlineTrack(), + name='CSDstreamtrack') + else: + CSDstreamtrack = pe.Node( + interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(), + name='CSDstreamtrack') + CSDstreamtrack.inputs.desired_number_of_tracks = 15000 + + tracks2prob = pe.Node(interface=mrtrix.Tracks2Prob(), name='tracks2prob') + tracks2prob.inputs.colour = True + tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk') + + workflow = pe.Workflow(name=name) + workflow.base_output_dir = name + + workflow.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"), + ("bvals", "bval_file")])]) + workflow.connect([(inputnode, dwi2tensor, [("dwi", "in_file")])]) + workflow.connect([(fsl2mrtrix, dwi2tensor, [("encoding_file", + "encoding_file")])]) + + workflow.connect([ + (dwi2tensor, tensor2vector, [['tensor', 'in_file']]), + (dwi2tensor, tensor2adc, [['tensor', 'in_file']]), + (dwi2tensor, tensor2fa, [['tensor', 'in_file']]), + ]) + + workflow.connect([(inputnode, MRconvert, [("dwi", "in_file")])]) + workflow.connect([(MRconvert, threshold_b0, [("converted", "in_file")])]) + workflow.connect([(threshold_b0, median3d, [("out_file", "in_file")])]) + workflow.connect([(median3d, erode_mask_firstpass, [("out_file", + "in_file")])]) + workflow.connect([(erode_mask_firstpass, erode_mask_secondpass, + [("out_file", "in_file")])]) + + workflow.connect([(tensor2fa, MRmult_merge, [("FA", "in1")])]) + workflow.connect([(erode_mask_secondpass, MRmult_merge, [("out_file", + "in2")])]) + workflow.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])]) + workflow.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])]) + workflow.connect([(threshold_FA, estimateresponse, [("out_file", + "mask_image")])]) + + workflow.connect([(inputnode, bet, [("dwi", "in_file")])]) + workflow.connect([(inputnode, gen_WM_mask, [("dwi", "in_file")])]) + workflow.connect([(bet, gen_WM_mask, [("mask_file", "binary_mask")])]) + workflow.connect([(fsl2mrtrix, gen_WM_mask, [("encoding_file", + "encoding_file")])]) + + workflow.connect([(inputnode, estimateresponse, [("dwi", "in_file")])]) + workflow.connect([(fsl2mrtrix, estimateresponse, [("encoding_file", + "encoding_file")])]) + + workflow.connect([(inputnode, csdeconv, [("dwi", "in_file")])]) + workflow.connect([(gen_WM_mask, csdeconv, [("WMprobabilitymap", + "mask_image")])]) + workflow.connect([(estimateresponse, csdeconv, [("response", + "response_file")])]) + workflow.connect([(fsl2mrtrix, csdeconv, [("encoding_file", + "encoding_file")])]) + + workflow.connect([(gen_WM_mask, threshold_wmmask, [("WMprobabilitymap", + "in_file")])]) + workflow.connect([(threshold_wmmask, CSDstreamtrack, [("out_file", + "seed_file")])]) + workflow.connect([(csdeconv, CSDstreamtrack, [("spherical_harmonics_image", + "in_file")])]) + + if tractography_type == 'probabilistic': + workflow.connect([(CSDstreamtrack, tracks2prob, [("tracked", + "in_file")])]) + workflow.connect([(inputnode, tracks2prob, [("dwi", + "template_file")])]) + + workflow.connect([(CSDstreamtrack, tck2trk, [("tracked", "in_file")])]) + workflow.connect([(inputnode, tck2trk, [("dwi", "image_file")])]) + + output_fields = ["fa", "tracts_trk", "csdeconv", "tracts_tck"] + if tractography_type == 'probabilistic': + output_fields.append("tdi") + outputnode = pe.Node( + interface=util.IdentityInterface(fields=output_fields), + name="outputnode") + + workflow.connect([(CSDstreamtrack, outputnode, + [("tracked", "tracts_tck")]), (csdeconv, outputnode, [ + ("spherical_harmonics_image", "csdeconv") + 
]), (tensor2fa, outputnode, [("FA", "fa")]), + (tck2trk, outputnode, [("out_file", "tracts_trk")])]) + if tractography_type == 'probabilistic': + workflow.connect([(tracks2prob, outputnode, [("tract_image", "tdi")])]) + + return workflow diff --git a/nipype/workflows/dmri/mrtrix/group_connectivity.py b/nipype/workflows/dmri/mrtrix/group_connectivity.py new file mode 100644 index 0000000000..10d961a18c --- /dev/null +++ b/nipype/workflows/dmri/mrtrix/group_connectivity.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +import os.path as op +import warnings + +from ....interfaces import io as nio # Data i/o +from ....interfaces import utility as util # utility +from ....interfaces import cmtk as cmtk +from ....algorithms import misc as misc +from ....pipeline import engine as pe # pipeline engine +from ....utils.misc import package_check +from .connectivity_mapping import create_connectivity_pipeline + +try: + package_check('cmp') +except Exception as e: + warnings.warn('cmp not installed') +else: + import cmp + + +def create_group_connectivity_pipeline(group_list, + group_id, + data_dir, + subjects_dir, + output_dir, + template_args_dict=0): + """Creates a pipeline that performs MRtrix structural connectivity processing + on groups of subjects. Given a diffusion-weighted image, and text files containing + the associated b-values and b-vectors, the workflow will return each subjects' connectomes + in a Connectome File Format (CFF) file, for use in Connectome Viewer (http://www.cmtk.org). + + Example + ------- + + >>> import nipype.interfaces.freesurfer as fs + >>> import nipype.workflows.dmri.mrtrix.group_connectivity as groupwork + >>> import cmp # doctest: +SKIP + >>> from nipype.testing import example_data + >>> subjects_dir = '.' + >>> data_dir = '.' + >>> output_dir = '.' + >>> fs.FSCommand.set_default_subjects_dir(subjects_dir) + >>> group_list = {} + >>> group_list['group1'] = ['subj1', 'subj2'] + >>> group_list['group2'] = ['subj3', 'subj4'] + >>> template_args = dict(dwi=[['subject_id', 'dwi']], bvecs=[['subject_id', 'bvecs']], bvals=[['subject_id', 'bvals']]) + >>> group_id = 'group1' + >>> l1pipeline = groupwork.create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, template_args) + >>> parcellation_name = 'scale500' + >>> l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name + >>> cmp_config = cmp.configuration.PipelineConfiguration() # doctest: +SKIP + >>> cmp_config.parcellation_scheme = "Lausanne2008" # doctest: +SKIP + >>> l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml'] # doctest: +SKIP + >>> l1pipeline.run() # doctest: +SKIP + + + Inputs:: + + group_list: Dictionary of subject lists, keyed by group name + group_id: String containing the group name + data_dir: Path to the data directory + subjects_dir: Path to the Freesurfer 'subjects' directory + output_dir: Path for the output files + template_args_dict: Dictionary of template arguments for the connectivity pipeline datasource + e.g. 
info = dict(dwi=[['subject_id', 'dwi']], + bvecs=[['subject_id','bvecs']], + bvals=[['subject_id','bvals']]) + """ + group_infosource = pe.Node( + interface=util.IdentityInterface(fields=['group_id']), + name="group_infosource") + group_infosource.inputs.group_id = group_id + subject_list = group_list[group_id] + subj_infosource = pe.Node( + interface=util.IdentityInterface(fields=['subject_id']), + name="subj_infosource") + subj_infosource.iterables = ('subject_id', subject_list) + + if template_args_dict == 0: + info = dict( + dwi=[['subject_id', 'dwi']], + bvecs=[['subject_id', 'bvecs']], + bvals=[['subject_id', 'bvals']]) + else: + info = template_args_dict + + datasource = pe.Node( + interface=nio.DataGrabber( + infields=['subject_id'], outfields=list(info.keys())), + name='datasource') + + datasource.inputs.template = "%s/%s" + datasource.inputs.base_directory = data_dir + datasource.inputs.field_template = dict(dwi='%s/%s.nii') + datasource.inputs.template_args = info + datasource.inputs.sort_filelist = True + """ + Create a connectivity mapping workflow + """ + conmapper = create_connectivity_pipeline("nipype_conmap") + conmapper.inputs.inputnode.subjects_dir = subjects_dir + conmapper.base_dir = op.abspath('conmapper') + + datasink = pe.Node(interface=nio.DataSink(), name="datasink") + datasink.inputs.base_directory = output_dir + datasink.inputs.container = group_id + + l1pipeline = pe.Workflow(name="l1pipeline_" + group_id) + l1pipeline.base_dir = output_dir + l1pipeline.base_output_dir = group_id + l1pipeline.connect([(subj_infosource, conmapper, + [('subject_id', 'inputnode.subject_id')])]) + l1pipeline.connect([(subj_infosource, datasource, [('subject_id', + 'subject_id')])]) + l1pipeline.connect([(datasource, conmapper, [ + ("dwi", "inputnode.dwi"), + ("bvals", "inputnode.bvals"), + ("bvecs", "inputnode.bvecs"), + ])]) + l1pipeline.connect([(conmapper, datasink, [ + ("outputnode.connectome", "@l1output.cff"), + ("outputnode.nxstatscff", "@l1output.nxstatscff"), + ("outputnode.nxmatlab", "@l1output.nxmatlab"), + ("outputnode.nxcsv", "@l1output.nxcsv"), + ("outputnode.fiber_csv", "@l1output.fiber_csv"), + ("outputnode.cmatrices_csv", "@l1output.cmatrices_csv"), + ("outputnode.fa", "@l1output.fa"), + ("outputnode.filtered_tracts", "@l1output.filtered_tracts"), + ("outputnode.cmatrix", "@l1output.cmatrix"), + ("outputnode.rois", "@l1output.rois"), + ("outputnode.odfs", "@l1output.odfs"), + ("outputnode.struct", "@l1output.struct"), + ("outputnode.networks", "@l1output.networks"), + ("outputnode.mean_fiber_length", "@l1output.mean_fiber_length"), + ("outputnode.fiber_length_std", "@l1output.fiber_length_std"), + ])]) + l1pipeline.connect([(group_infosource, datasink, [('group_id', + '@group_id')])]) + return l1pipeline diff --git a/nipype/workflows/fmri/__init__.py b/nipype/workflows/fmri/__init__.py new file mode 100644 index 0000000000..5523a0c412 --- /dev/null +++ b/nipype/workflows/fmri/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from . 
import fsl, spm diff --git a/nipype/workflows/fmri/fsl/__init__.py b/nipype/workflows/fmri/fsl/__init__.py new file mode 100644 index 0000000000..9f6ca78ee8 --- /dev/null +++ b/nipype/workflows/fmri/fsl/__init__.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- +from .preprocess import (create_susan_smooth, create_fsl_fs_preproc, + create_parallelfeat_preproc, create_featreg_preproc, + create_reg_workflow) +from .estimate import create_modelfit_workflow, create_fixed_effects_flow + +# backwards compatibility +from ...rsfmri.fsl.resting import create_resting_preproc diff --git a/nipype/workflows/fmri/fsl/estimate.py b/nipype/workflows/fmri/fsl/estimate.py new file mode 100644 index 0000000000..638e422bfc --- /dev/null +++ b/nipype/workflows/fmri/fsl/estimate.py @@ -0,0 +1,298 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from builtins import range + +from ....interfaces import fsl as fsl # fsl +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine + +from .... import LooseVersion + + +def create_modelfit_workflow(name='modelfit', f_contrasts=False): + """Create an FSL individual modelfitting workflow + + Example + ------- + + >>> modelfit = create_modelfit_workflow() + >>> modelfit.base_dir = '.' + >>> info = dict() + >>> modelfit.inputs.inputspec.session_info = info + >>> modelfit.inputs.inputspec.interscan_interval = 3. + >>> modelfit.inputs.inputspec.film_threshold = 1000 + >>> modelfit.run() #doctest: +SKIP + + Inputs:: + + inputspec.session_info : info generated by modelgen.SpecifyModel + inputspec.interscan_interval : interscan interval + inputspec.contrasts : list of contrasts + inputspec.film_threshold : image threshold for FILM estimation + inputspec.model_serial_correlations + inputspec.bases + + Outputs:: + + outputspec.copes + outputspec.varcopes + outputspec.dof_file + outputspec.pfiles + outputspec.zfiles + outputspec.parameter_estimates + """ + + version = 0 + if fsl.Info.version() and \ + LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): + version = 507 + + modelfit = pe.Workflow(name=name) + """ + Create the nodes + """ + + inputspec = pe.Node( + util.IdentityInterface(fields=[ + 'session_info', 'interscan_interval', 'contrasts', + 'film_threshold', 'functional_data', 'bases', + 'model_serial_correlations' + ]), + name='inputspec') + level1design = pe.Node(interface=fsl.Level1Design(), name="level1design") + modelgen = pe.MapNode( + interface=fsl.FEATModel(), + name='modelgen', + iterfield=['fsf_file', 'ev_files']) + if version < 507: + modelestimate = pe.MapNode( + interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5), + name='modelestimate', + iterfield=['design_file', 'in_file']) + else: + if f_contrasts: + iterfield = ['design_file', 'in_file', 'tcon_file', 'fcon_file'] + else: + iterfield = ['design_file', 'in_file', 'tcon_file'] + modelestimate = pe.MapNode( + interface=fsl.FILMGLS(smooth_autocorr=True, mask_size=5), + name='modelestimate', + iterfield=iterfield) + + if version < 507: + if f_contrasts: + iterfield = [ + 'tcon_file', 'fcon_file', 'param_estimates', 'sigmasquareds', + 'corrections', 'dof_file' + ] + else: + iterfield = [ + 'tcon_file', 'param_estimates', 'sigmasquareds', 'corrections', + 'dof_file' + ] + conestimate = pe.MapNode( + interface=fsl.ContrastMgr(), + name='conestimate', + iterfield=[ + 'tcon_file', 'fcon_file', 'param_estimates', 'sigmasquareds', + 'corrections', 'dof_file' + ]) + + 
if f_contrasts: + iterfield = ['in1', 'in2'] + else: + iterfield = ['in1'] + merge_contrasts = pe.MapNode( + interface=util.Merge(2), name='merge_contrasts', iterfield=iterfield) + ztopval = pe.MapNode( + interface=fsl.ImageMaths(op_string='-ztop', suffix='_pval'), + nested=True, + name='ztop', + iterfield=['in_file']) + outputspec = pe.Node( + util.IdentityInterface(fields=[ + 'copes', 'varcopes', 'dof_file', 'pfiles', 'zfiles', + 'parameter_estimates' + ]), + name='outputspec') + """ + Setup the connections + """ + + modelfit.connect([ + (inputspec, level1design, + [('interscan_interval', 'interscan_interval'), + ('session_info', 'session_info'), ('contrasts', 'contrasts'), + ('bases', 'bases'), ('model_serial_correlations', + 'model_serial_correlations')]), + (inputspec, modelestimate, [('film_threshold', 'threshold'), + ('functional_data', 'in_file')]), + (level1design, modelgen, [('fsf_files', 'fsf_file'), ('ev_files', + 'ev_files')]), + (modelgen, modelestimate, [('design_file', 'design_file')]), + (merge_contrasts, ztopval, [('out', 'in_file')]), + (ztopval, outputspec, [('out_file', 'pfiles')]), + (merge_contrasts, outputspec, [('out', 'zfiles')]), + (modelestimate, outputspec, [('param_estimates', + 'parameter_estimates'), ('dof_file', + 'dof_file')]), + ]) + if version < 507: + modelfit.connect([ + (modelgen, conestimate, [('con_file', 'tcon_file'), + ('fcon_file', 'fcon_file')]), + (modelestimate, conestimate, + [('param_estimates', 'param_estimates'), ('sigmasquareds', + 'sigmasquareds'), + ('corrections', 'corrections'), ('dof_file', 'dof_file')]), + (conestimate, merge_contrasts, [('zstats', 'in1'), ('zfstats', + 'in2')]), + (conestimate, outputspec, [('copes', 'copes'), ('varcopes', + 'varcopes')]), + ]) + else: + modelfit.connect([ + (modelgen, modelestimate, [('con_file', 'tcon_file'), + ('fcon_file', 'fcon_file')]), + (modelestimate, merge_contrasts, [('zstats', 'in1'), ('zfstats', + 'in2')]), + (modelestimate, outputspec, [('copes', 'copes'), ('varcopes', + 'varcopes')]), + ]) + return modelfit + + +def create_overlay_workflow(name='overlay'): + """Setup overlay workflow + """ + + overlay = pe.Workflow(name='overlay') + overlaystats = pe.MapNode( + interface=fsl.Overlay(), name="overlaystats", iterfield=['stat_image']) + overlaystats.inputs.show_negative_stats = True + overlaystats.inputs.auto_thresh_bg = True + + slicestats = pe.MapNode( + interface=fsl.Slicer(), name="slicestats", iterfield=['in_file']) + slicestats.inputs.all_axial = True + slicestats.inputs.image_width = 512 + + overlay.connect(overlaystats, 'out_file', slicestats, 'in_file') + return overlay + + +def create_fixed_effects_flow(name='fixedfx'): + """Create a fixed-effects workflow + + This workflow is used to combine registered copes and varcopes across runs + for an individual subject + + Example + ------- + + >>> fixedfx = create_fixed_effects_flow() + >>> fixedfx.base_dir = '.' 
+ >>> fixedfx.inputs.inputspec.copes = [['cope1run1.nii.gz', 'cope1run2.nii.gz'], ['cope2run1.nii.gz', 'cope2run2.nii.gz']] # per contrast + >>> fixedfx.inputs.inputspec.varcopes = [['varcope1run1.nii.gz', 'varcope1run2.nii.gz'], ['varcope2run1.nii.gz', 'varcope2run2.nii.gz']] # per contrast + >>> fixedfx.inputs.inputspec.dof_files = ['dofrun1', 'dofrun2'] # per run + >>> fixedfx.run() #doctest: +SKIP + + Inputs:: + + inputspec.copes : list of list of cope files (one list per contrast) + inputspec.varcopes : list of list of varcope files (one list per + contrast) + inputspec.dof_files : degrees of freedom files for each run + + Outputs:: + + outputspec.res4d : 4d residual time series + outputspec.copes : contrast parameter estimates + outputspec.varcopes : variance of contrast parameter estimates + outputspec.zstats : z statistics of contrasts + outputspec.tstats : t statistics of contrasts + """ + + fixed_fx = pe.Workflow(name=name) + + inputspec = pe.Node( + util.IdentityInterface(fields=['copes', 'varcopes', 'dof_files']), + name='inputspec') + """ + Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and + varcopes for each condition + """ + + copemerge = pe.MapNode( + interface=fsl.Merge(dimension='t'), + iterfield=['in_files'], + name="copemerge") + + varcopemerge = pe.MapNode( + interface=fsl.Merge(dimension='t'), + iterfield=['in_files'], + name="varcopemerge") + """ + Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition + specific level 2 model design files + """ + + level2model = pe.Node(interface=fsl.L2Model(), name='l2model') + """ + Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model + """ + + flameo = pe.MapNode( + interface=fsl.FLAMEO(run_mode='fe'), + name="flameo", + iterfield=['cope_file', 'var_cope_file']) + + def get_dofvolumes(dof_files, cope_files): + import os + import nibabel as nb + import numpy as np + img = nb.load(cope_files[0]) + if len(img.shape) > 3: + out_data = np.zeros(img.shape) + else: + out_data = np.zeros(list(img.shape) + [1]) + for i in range(out_data.shape[-1]): + dof = np.loadtxt(dof_files[i]) + out_data[:, :, :, i] = dof + filename = os.path.join(os.getcwd(), 'dof_file.nii.gz') + newimg = nb.Nifti1Image(out_data, None, img.header) + newimg.to_filename(filename) + return filename + + gendof = pe.Node( + util.Function( + input_names=['dof_files', 'cope_files'], + output_names=['dof_volume'], + function=get_dofvolumes), + name='gendofvolume') + + outputspec = pe.Node( + util.IdentityInterface( + fields=['res4d', 'copes', 'varcopes', 'zstats', 'tstats']), + name='outputspec') + + fixed_fx.connect( + [(inputspec, copemerge, + [('copes', 'in_files')]), (inputspec, varcopemerge, [('varcopes', + 'in_files')]), + (inputspec, gendof, [('dof_files', 'dof_files')]), (copemerge, gendof, + [('merged_file', + 'cope_files')]), + (copemerge, flameo, + [('merged_file', 'cope_file')]), (varcopemerge, flameo, [ + ('merged_file', 'var_cope_file') + ]), (level2model, flameo, + [('design_mat', 'design_file'), ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file')]), (gendof, flameo, + [('dof_volume', + 'dof_var_cope_file')]), + (flameo, outputspec, + [('res4d', 'res4d'), ('copes', 'copes'), ('var_copes', 'varcopes'), + ('zstats', 'zstats'), ('tstats', 'tstats')])]) + return fixed_fx diff --git a/nipype/workflows/fmri/fsl/preprocess.py b/nipype/workflows/fmri/fsl/preprocess.py new file mode 100644 index 0000000000..ac235bdba1 --- /dev/null +++ b/nipype/workflows/fmri/fsl/preprocess.py @@ -0,0 +1,1293 @@ +# 
-*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+from __future__ import division
+
+import os
+from ....interfaces import fsl as fsl  # fsl
+from ....interfaces import utility as util  # utility
+from ....pipeline import engine as pe  # pypeline engine
+from ....interfaces import freesurfer as fs  # freesurfer
+from ....interfaces import spm as spm
+from ...smri.freesurfer.utils import create_getmask_flow
+from .... import LooseVersion
+
+
+def getthreshop(thresh):
+    return ['-thr %.10f -Tmin -bin' % (0.1 * val[1]) for val in thresh]
+
+
+def pickrun(files, whichrun):
+    """pick file from list of files"""
+
+    filemap = {'first': 0, 'last': -1, 'middle': len(files) // 2}
+
+    if isinstance(files, list):
+
+        # whichrun is given as integer
+        if isinstance(whichrun, int):
+            return files[whichrun]
+        # whichrun is given as string
+        elif isinstance(whichrun, str):
+            if whichrun not in filemap.keys():
+                raise KeyError('Sorry, whichrun must be either an integer '
+                               'index or a string: "first", "last" or '
+                               '"middle"')
+            else:
+                return files[filemap[whichrun]]
+    else:
+        # in case single file name is given
+        return files
+
+
+def pickfirst(files):
+    if isinstance(files, list):
+        return files[0]
+    else:
+        return files
+
+
+def pickmiddle(files):
+    from nibabel import load
+    import numpy as np
+    from nipype.utils import NUMPY_MMAP
+    middlevol = []
+    for f in files:
+        middlevol.append(int(np.ceil(load(f, mmap=NUMPY_MMAP).shape[3] / 2)))
+    return middlevol
+
+
+def pickvol(filenames, fileidx, which):
+    from nibabel import load
+    import numpy as np
+    from nipype.utils import NUMPY_MMAP
+    if which.lower() == 'first':
+        idx = 0
+    elif which.lower() == 'middle':
+        idx = int(
+            np.ceil(load(filenames[fileidx], mmap=NUMPY_MMAP).shape[3] / 2))
+    elif which.lower() == 'last':
+        idx = load(filenames[fileidx]).shape[3] - 1
+    else:
+        raise Exception('unknown value for volume selection : %s' % which)
+    return idx
+
+
+def getbtthresh(medianvals):
+    return [0.75 * val for val in medianvals]
+
+
+def chooseindex(fwhm):
+    if fwhm < 1:
+        return [0]
+    else:
+        return [1]
+
+
+def getmeanscale(medianvals):
+    return ['-mul %.10f' % (10000.
/ val) for val in medianvals] + + +def getusans(x): + return [[tuple([val[0], 0.75 * val[1]])] for val in x] + + +tolist = lambda x: [x] +highpass_operand = lambda x: '-bptf %.10f -1' % x + + +def create_parallelfeat_preproc(name='featpreproc', highpass=True): + """Preprocess each run with FSL independently of the others + + Parameters + ---------- + + :: + + name : name of workflow (default: featpreproc) + highpass : boolean (default: True) + + Inputs:: + + inputspec.func : functional runs (filename or list of filenames) + inputspec.fwhm : fwhm for smoothing with SUSAN + inputspec.highpass : HWHM in TRs (if created with highpass=True) + + Outputs:: + + outputspec.reference : volume to which runs are realigned + outputspec.motion_parameters : motion correction parameters + outputspec.realigned_files : motion corrected files + outputspec.motion_plots : plots of motion correction parameters + outputspec.mask : mask file used to mask the brain + outputspec.smoothed_files : smoothed functional data + outputspec.highpassed_files : highpassed functional data (if highpass=True) + outputspec.mean : mean file + + Example + ------- + + >>> preproc = create_parallelfeat_preproc() + >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] + >>> preproc.inputs.inputspec.fwhm = 5 + >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) + >>> preproc.base_dir = '/tmp' + >>> preproc.run() # doctest: +SKIP + + >>> preproc = create_parallelfeat_preproc(highpass=False) + >>> preproc.inputs.inputspec.func = 'f3.nii' + >>> preproc.inputs.inputspec.fwhm = 5 + >>> preproc.base_dir = '/tmp' + >>> preproc.run() # doctest: +SKIP + """ + version = 0 + if fsl.Info.version() and \ + LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): + version = 507 + + featpreproc = pe.Workflow(name=name) + """ + Set up a node to define all inputs required for the preprocessing workflow + + """ + + if highpass: + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=['func', 'fwhm', 'highpass']), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', + 'mean' + ]), + name='outputspec') + else: + inputnode = pe.Node( + interface=util.IdentityInterface(fields=['func', 'fwhm']), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask', 'smoothed_files', 'mean' + ]), + name='outputspec') + """ + Set up a node to define outputs for the preprocessing workflow + + """ + """ + Convert functional images to float representation. Since there can + be more than one functional run we use a MapNode to convert each + run. 
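+
+    Schematically, a MapNode expands over each entry of its ``iterfield``
+    and returns a matching list of outputs (placeholder names, not an
+    actual node of this workflow):
+
+    >>> n = pe.MapNode(interface=fsl.ImageMaths(), iterfield=['in_file'], name='n')  # doctest: +SKIP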
+ """ + + img2float = pe.MapNode( + interface=fsl.ImageMaths( + out_data_type='float', op_string='', suffix='_dtype'), + iterfield=['in_file'], + name='img2float') + featpreproc.connect(inputnode, 'func', img2float, 'in_file') + """ + Extract the first volume of the first run as the reference + """ + + extract_ref = pe.MapNode( + interface=fsl.ExtractROI(t_size=1), + iterfield=['in_file', 't_min'], + name='extractref') + + featpreproc.connect(img2float, 'out_file', extract_ref, 'in_file') + featpreproc.connect(img2float, ('out_file', pickmiddle), extract_ref, + 't_min') + featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') + """ + Realign the functional runs to the reference (1st volume of first run) + """ + + motion_correct = pe.MapNode( + interface=fsl.MCFLIRT(save_mats=True, save_plots=True), + name='realign', + iterfield=['in_file', 'ref_file']) + featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') + featpreproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file') + featpreproc.connect(motion_correct, 'par_file', outputnode, + 'motion_parameters') + featpreproc.connect(motion_correct, 'out_file', outputnode, + 'realigned_files') + """ + Plot the estimated motion parameters + """ + + plot_motion = pe.MapNode( + interface=fsl.PlotMotionParams(in_source='fsl'), + name='plot_motion', + iterfield=['in_file']) + plot_motion.iterables = ('plot_type', ['rotations', 'translations']) + featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') + featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') + """ + Extract the mean volume of the first functional run + """ + + meanfunc = pe.MapNode( + interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), + iterfield=['in_file'], + name='meanfunc') + featpreproc.connect(motion_correct, 'out_file', meanfunc, 'in_file') + """ + Strip the skull from the mean functional to generate a mask + """ + + meanfuncmask = pe.MapNode( + interface=fsl.BET(mask=True, no_output=True, frac=0.3), + iterfield=['in_file'], + name='meanfuncmask') + featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') + """ + Mask the functional runs with the extracted mask + """ + + maskfunc = pe.MapNode( + interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), + iterfield=['in_file', 'in_file2'], + name='maskfunc') + featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') + featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') + """ + Determine the 2nd and 98th percentile intensities of each functional run + """ + + getthresh = pe.MapNode( + interface=fsl.ImageStats(op_string='-p 2 -p 98'), + iterfield=['in_file'], + name='getthreshold') + featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') + """ + Threshold the first run of the functional data at 10% of the 98th percentile + """ + + threshold = pe.MapNode( + interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), + iterfield=['in_file', 'op_string'], + name='threshold') + featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') + """ + Define a function to get 10% of the intensity + """ + + featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, + 'op_string') + """ + Determine the median value of the functional runs using the mask + """ + + medianval = pe.MapNode( + interface=fsl.ImageStats(op_string='-k %s -p 50'), + iterfield=['in_file', 'mask_file'], + name='medianval') + featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') + 
featpreproc.connect(threshold, 'out_file', medianval, 'mask_file')
+    """
+    Dilate the mask
+    """
+
+    dilatemask = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'),
+        iterfield=['in_file'],
+        name='dilatemask')
+    featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file')
+    featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask')
+    """
+    Mask the motion corrected functional runs with the dilated mask
+    """
+
+    maskfunc2 = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
+        iterfield=['in_file', 'in_file2'],
+        name='maskfunc2')
+    featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
+    featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
+    """
+    Smooth each run using SUSAN with the brightness threshold set to 75%
+    of the median value for each run and a mask constituting the mean
+    functional
+    """
+
+    smooth = create_susan_smooth()
+
+    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
+    featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files')
+    featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file')
+    """
+    Mask the smoothed data with the dilated mask
+    """
+
+    maskfunc3 = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
+        iterfield=['in_file', 'in_file2'],
+        name='maskfunc3')
+    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3,
+                        'in_file')
+
+    featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
+
+    concatnode = pe.Node(interface=util.Merge(2), name='concat')
+    featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1')
+    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
+    """
+    The following nodes select smooth or unsmoothed data depending on the
+    fwhm. This is because SUSAN defaults to smoothing the data with about the
+    voxel size of the input data if the fwhm parameter is less than 1/3 of the
+    voxel size.
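+
+    A sketch of the selection logic (``chooseindex`` is defined earlier in
+    this module; the exact cut-off shown here is an assumption)::
+
+        def chooseindex_sketch(fwhm):
+            # index 0 -> unsmoothed branch, index 1 -> smoothed branch
+            return [0] if float(fwhm) < 1 else [1]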
+    """
+    selectnode = pe.Node(interface=util.Select(), name='select')
+
+    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
+
+    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
+    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
+    """
+    Scale each run so that the median value of the run is set to 10000
+    """
+
+    meanscale = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_gms'),
+        iterfield=['in_file', 'op_string'],
+        name='meanscale')
+    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
+    """
+    Define a function to get the scaling factor for intensity normalization
+    """
+
+    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale,
+                        'op_string')
+    """
+    Perform temporal highpass filtering on the data
+    """
+
+    if highpass:
+        highpass = pe.MapNode(
+            interface=fsl.ImageMaths(suffix='_tempfilt'),
+            iterfield=['in_file'],
+            name='highpass')
+        featpreproc.connect(inputnode, ('highpass', highpass_operand),
+                            highpass, 'op_string')
+        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
+
+        if version < 507:
+            featpreproc.connect(highpass, 'out_file', outputnode,
+                                'highpassed_files')
+        else:
+            """
+            Add back the mean removed by the highpass filter operation as of FSL 5.0.7
+            """
+            meanfunc4 = pe.MapNode(
+                interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
+                iterfield=['in_file'],
+                name='meanfunc4')
+
+            featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file')
+            addmean = pe.MapNode(
+                interface=fsl.BinaryMaths(operation='add'),
+                iterfield=['in_file', 'operand_file'],
+                name='addmean')
+            featpreproc.connect(highpass, 'out_file', addmean, 'in_file')
+            featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file')
+            featpreproc.connect(addmean, 'out_file', outputnode,
+                                'highpassed_files')
+    """
+    Generate a mean functional image from the first run
+    """
+
+    meanfunc3 = pe.MapNode(
+        interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
+        iterfield=['in_file'],
+        name='meanfunc3')
+
+    featpreproc.connect(meanscale, 'out_file', meanfunc3, 'in_file')
+    featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
+
+    return featpreproc
+
+
+def create_featreg_preproc(name='featpreproc',
+                           highpass=True,
+                           whichvol='middle',
+                           whichrun=0):
+    """Create a FEAT preprocessing workflow with registration to one volume of the first run
+
+    Parameters
+    ----------
+
+    ::
+
+        name : name of workflow (default: featpreproc)
+        highpass : boolean (default: True)
+        whichvol : which volume of the first run to register to ('first', 'middle', 'last', 'mean')
+        whichrun : which run to draw reference volume from (integer index or 'first', 'middle', 'last')
+
+    Inputs::
+
+        inputspec.func : functional runs (filename or list of filenames)
+        inputspec.fwhm : fwhm for smoothing with SUSAN
+        inputspec.highpass : HWHM in TRs (if created with highpass=True)
+
+    Outputs::
+
+        outputspec.reference : volume to which runs are realigned
+        outputspec.motion_parameters : motion correction parameters
+        outputspec.realigned_files : motion corrected files
+        outputspec.motion_plots : plots of motion correction parameters
+        outputspec.mask : mask file used to mask the brain
+        outputspec.smoothed_files : smoothed functional data
+        outputspec.highpassed_files : highpassed functional data (if highpass=True)
+        outputspec.mean : mean file
+
+    Example
+    -------
+
+    >>> preproc = create_featreg_preproc()
+    >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii']
+    >>> preproc.inputs.inputspec.fwhm = 5
+    >>> preproc.inputs.inputspec.highpass
= 128./(2*2.5) + >>> preproc.base_dir = '/tmp' + >>> preproc.run() # doctest: +SKIP + + >>> preproc = create_featreg_preproc(highpass=False, whichvol='mean') + >>> preproc.inputs.inputspec.func = 'f3.nii' + >>> preproc.inputs.inputspec.fwhm = 5 + >>> preproc.base_dir = '/tmp' + >>> preproc.run() # doctest: +SKIP + """ + + version = 0 + if fsl.Info.version() and \ + LooseVersion(fsl.Info.version()) > LooseVersion('5.0.6'): + version = 507 + + featpreproc = pe.Workflow(name=name) + """ + Set up a node to define all inputs required for the preprocessing workflow + + """ + + if highpass: + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=['func', 'fwhm', 'highpass']), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask', 'smoothed_files', 'highpassed_files', + 'mean' + ]), + name='outputspec') + else: + inputnode = pe.Node( + interface=util.IdentityInterface(fields=['func', 'fwhm']), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask', 'smoothed_files', 'mean' + ]), + name='outputspec') + """ + Set up a node to define outputs for the preprocessing workflow + + """ + """ + Convert functional images to float representation. Since there can + be more than one functional run we use a MapNode to convert each + run. + """ + + img2float = pe.MapNode( + interface=fsl.ImageMaths( + out_data_type='float', op_string='', suffix='_dtype'), + iterfield=['in_file'], + name='img2float') + featpreproc.connect(inputnode, 'func', img2float, 'in_file') + """ + Extract the middle (or what whichvol points to) volume of the first run as the reference + """ + + if whichvol != 'mean': + extract_ref = pe.Node( + interface=fsl.ExtractROI(t_size=1), + iterfield=['in_file'], + name='extractref') + featpreproc.connect(img2float, ('out_file', pickrun, whichrun), + extract_ref, 'in_file') + featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol), + extract_ref, 't_min') + featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference') + """ + Realign the functional runs to the reference (`whichvol` volume of first run) + """ + + motion_correct = pe.MapNode( + interface=fsl.MCFLIRT( + save_mats=True, save_plots=True, interpolation='spline'), + name='realign', + iterfield=['in_file']) + featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file') + if whichvol != 'mean': + featpreproc.connect(extract_ref, 'roi_file', motion_correct, + 'ref_file') + else: + motion_correct.inputs.mean_vol = True + featpreproc.connect(motion_correct, ('mean_img', pickrun, whichrun), + outputnode, 'reference') + + featpreproc.connect(motion_correct, 'par_file', outputnode, + 'motion_parameters') + featpreproc.connect(motion_correct, 'out_file', outputnode, + 'realigned_files') + """ + Plot the estimated motion parameters + """ + + plot_motion = pe.MapNode( + interface=fsl.PlotMotionParams(in_source='fsl'), + name='plot_motion', + iterfield=['in_file']) + plot_motion.iterables = ('plot_type', ['rotations', 'translations']) + featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file') + featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots') + """ + Extract the mean volume of the first functional run + """ + + meanfunc = pe.Node( + interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'), + name='meanfunc') + featpreproc.connect(motion_correct, 
('out_file', pickrun, whichrun), + meanfunc, 'in_file') + """ + Strip the skull from the mean functional to generate a mask + """ + + meanfuncmask = pe.Node( + interface=fsl.BET(mask=True, no_output=True, frac=0.3), + name='meanfuncmask') + featpreproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file') + """ + Mask the functional runs with the extracted mask + """ + + maskfunc = pe.MapNode( + interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'), + iterfield=['in_file'], + name='maskfunc') + featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file') + featpreproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2') + """ + Determine the 2nd and 98th percentile intensities of each functional run + """ + + getthresh = pe.MapNode( + interface=fsl.ImageStats(op_string='-p 2 -p 98'), + iterfield=['in_file'], + name='getthreshold') + featpreproc.connect(maskfunc, 'out_file', getthresh, 'in_file') + """ + Threshold the first run of the functional data at 10% of the 98th percentile + """ + + threshold = pe.MapNode( + interface=fsl.ImageMaths(out_data_type='char', suffix='_thresh'), + iterfield=['in_file', 'op_string'], + name='threshold') + featpreproc.connect(maskfunc, 'out_file', threshold, 'in_file') + """ + Define a function to get 10% of the intensity + """ + + featpreproc.connect(getthresh, ('out_stat', getthreshop), threshold, + 'op_string') + """ + Determine the median value of the functional runs using the mask + """ + + medianval = pe.MapNode( + interface=fsl.ImageStats(op_string='-k %s -p 50'), + iterfield=['in_file', 'mask_file'], + name='medianval') + featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file') + featpreproc.connect(threshold, 'out_file', medianval, 'mask_file') + """ + Dilate the mask + """ + + dilatemask = pe.MapNode( + interface=fsl.ImageMaths(suffix='_dil', op_string='-dilF'), + iterfield=['in_file'], + name='dilatemask') + featpreproc.connect(threshold, 'out_file', dilatemask, 'in_file') + featpreproc.connect(dilatemask, 'out_file', outputnode, 'mask') + """ + Mask the motion corrected functional runs with the dilated mask + """ + + maskfunc2 = pe.MapNode( + interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), + iterfield=['in_file', 'in_file2'], + name='maskfunc2') + featpreproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file') + featpreproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2') + """ + Smooth each run using SUSAN with the brightness threshold set to 75% + of the median value for each run and a mask constituting the mean + functional + """ + + smooth = create_susan_smooth() + + featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm') + featpreproc.connect(maskfunc2, 'out_file', smooth, 'inputnode.in_files') + featpreproc.connect(dilatemask, 'out_file', smooth, 'inputnode.mask_file') + """ + Mask the smoothed data with the dilated mask + """ + + maskfunc3 = pe.MapNode( + interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'), + iterfield=['in_file', 'in_file2'], + name='maskfunc3') + featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3, + 'in_file') + + featpreproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2') + + concatnode = pe.Node(interface=util.Merge(2), name='concat') + featpreproc.connect(maskfunc2, ('out_file', tolist), concatnode, 'in1') + featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2') + """ + The following nodes select smooth or unsmoothed data depending on the + fwhm. 
This is because SUSAN defaults to smoothing the data with about the
+    voxel size of the input data if the fwhm parameter is less than 1/3 of the
+    voxel size.
+    """
+    selectnode = pe.Node(interface=util.Select(), name='select')
+
+    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
+
+    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
+    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
+    """
+    Scale each run so that the median value of the run is set to 10000
+    """
+
+    meanscale = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_gms'),
+        iterfield=['in_file', 'op_string'],
+        name='meanscale')
+    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
+    """
+    Define a function to get the scaling factor for intensity normalization
+    """
+
+    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale,
+                        'op_string')
+    """
+    Generate a mean functional image from the first run
+    """
+
+    meanfunc3 = pe.Node(
+        interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
+        iterfield=['in_file'],
+        name='meanfunc3')
+
+    featpreproc.connect(meanscale, ('out_file', pickrun, whichrun), meanfunc3,
+                        'in_file')
+    featpreproc.connect(meanfunc3, 'out_file', outputnode, 'mean')
+    """
+    Perform temporal highpass filtering on the data
+    """
+
+    if highpass:
+        highpass = pe.MapNode(
+            interface=fsl.ImageMaths(suffix='_tempfilt'),
+            iterfield=['in_file'],
+            name='highpass')
+        featpreproc.connect(inputnode, ('highpass', highpass_operand),
+                            highpass, 'op_string')
+        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
+
+        if version < 507:
+            featpreproc.connect(highpass, 'out_file', outputnode,
+                                'highpassed_files')
+        else:
+            """
+            Add back the mean removed by the highpass filter operation as of FSL 5.0.7
+            """
+            meanfunc4 = pe.MapNode(
+                interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
+                iterfield=['in_file'],
+                name='meanfunc4')
+
+            featpreproc.connect(meanscale, 'out_file', meanfunc4, 'in_file')
+            addmean = pe.MapNode(
+                interface=fsl.BinaryMaths(operation='add'),
+                iterfield=['in_file', 'operand_file'],
+                name='addmean')
+            featpreproc.connect(highpass, 'out_file', addmean, 'in_file')
+            featpreproc.connect(meanfunc4, 'out_file', addmean, 'operand_file')
+            featpreproc.connect(addmean, 'out_file', outputnode,
+                                'highpassed_files')
+
+    return featpreproc
+
+
+def create_susan_smooth(name="susan_smooth", separate_masks=True):
+    """Create a SUSAN smoothing workflow
+
+    Parameters
+    ----------
+
+    ::
+
+        name : name of workflow (default: susan_smooth)
+        separate_masks : separate masks for each run
+
+    Inputs::
+
+        inputnode.in_files : functional runs (filename or list of filenames)
+        inputnode.fwhm : fwhm for smoothing with SUSAN (float or list of floats)
+        inputnode.mask_file : mask used for estimating SUSAN thresholds (but not for smoothing)
+
+    Outputs::
+
+        outputnode.smoothed_files : functional runs (filename or list of filenames)
+
+    Example
+    -------
+
+    >>> smooth = create_susan_smooth()
+    >>> smooth.inputs.inputnode.in_files = 'f3.nii'
+    >>> smooth.inputs.inputnode.fwhm = 5
+    >>> smooth.inputs.inputnode.mask_file = 'mask.nii'
+    >>> smooth.run() # doctest: +SKIP
+
+    """
+
+    # replaces the functionality of a "for loop"
+    def cartesian_product(fwhms, in_files, usans, btthresh):
+        from nipype.utils.filemanip import ensure_list
+        # ensure all inputs are lists
+        in_files = ensure_list(in_files)
+        fwhms = [fwhms] if isinstance(fwhms, (int, float)) else fwhms
+        # create cartesian product lists (s_ = single element of list)
+        cart_in_file = [
+            s_in_file for s_in_file in in_files for s_fwhm in fwhms
+        ]
+        cart_fwhm = [s_fwhm for s_in_file in in_files for s_fwhm in fwhms]
+        cart_usans = [s_usans for s_usans in usans for s_fwhm in fwhms]
+        cart_btthresh = [
+            s_btthresh for s_btthresh in btthresh for s_fwhm in fwhms
+        ]
+
+        return cart_in_file, cart_fwhm, cart_usans, cart_btthresh
+
+    susan_smooth = pe.Workflow(name=name)
+    """
+    Set up a node to define all inputs required for the preprocessing workflow
+
+    """
+
+    inputnode = pe.Node(
+        interface=util.IdentityInterface(
+            fields=['in_files', 'fwhm', 'mask_file']),
+        name='inputnode')
+    """
+    Smooth each run using SUSAN with the brightness threshold set to 75%
+    of the median value for each run and a mask constituting the mean
+    functional
+    """
+
+    multi_inputs = pe.Node(
+        util.Function(
+            function=cartesian_product,
+            output_names=[
+                'cart_in_file', 'cart_fwhm', 'cart_usans', 'cart_btthresh'
+            ]),
+        name='multi_inputs')
+
+    smooth = pe.MapNode(
+        interface=fsl.SUSAN(),
+        iterfield=['in_file', 'brightness_threshold', 'usans', 'fwhm'],
+        name='smooth')
+    """
+    Determine the median value of the functional runs using the mask
+    """
+
+    if separate_masks:
+        median = pe.MapNode(
+            interface=fsl.ImageStats(op_string='-k %s -p 50'),
+            iterfield=['in_file', 'mask_file'],
+            name='median')
+    else:
+        median = pe.MapNode(
+            interface=fsl.ImageStats(op_string='-k %s -p 50'),
+            iterfield=['in_file'],
+            name='median')
+    susan_smooth.connect(inputnode, 'in_files', median, 'in_file')
+    susan_smooth.connect(inputnode, 'mask_file', median, 'mask_file')
+    """
+    Mask the motion corrected functional runs with the dilated mask
+    """
+
+    if separate_masks:
+        mask = pe.MapNode(
+            interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
+            iterfield=['in_file', 'in_file2'],
+            name='mask')
+    else:
+        mask = pe.MapNode(
+            interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
+            iterfield=['in_file'],
+            name='mask')
+    susan_smooth.connect(inputnode, 'in_files', mask, 'in_file')
+    susan_smooth.connect(inputnode, 'mask_file', mask, 'in_file2')
+    """
+    Determine the mean image from each functional run
+    """
+
+    meanfunc = pe.MapNode(
+        interface=fsl.ImageMaths(op_string='-Tmean', suffix='_mean'),
+        iterfield=['in_file'],
+        name='meanfunc2')
+    susan_smooth.connect(mask, 'out_file', meanfunc, 'in_file')
+    """
+    Merge the median values with the mean functional images into a coupled list
+    """
+
+    merge = pe.Node(interface=util.Merge(2, axis='hstack'), name='merge')
+    susan_smooth.connect(meanfunc, 'out_file', merge, 'in1')
+    susan_smooth.connect(median, 'out_stat', merge, 'in2')
+    """
+    Define a function to get the brightness threshold for SUSAN
+    """
+
+    susan_smooth.connect([
+        (inputnode, multi_inputs, [('in_files', 'in_files'), ('fwhm',
+                                                              'fwhms')]),
+        (median, multi_inputs, [(('out_stat', getbtthresh), 'btthresh')]),
+        (merge, multi_inputs, [(('out', getusans), 'usans')]),
+        (multi_inputs, smooth,
+         [('cart_in_file', 'in_file'), ('cart_fwhm', 'fwhm'),
+          ('cart_btthresh', 'brightness_threshold'), ('cart_usans', 'usans')]),
+    ])
+
+    outputnode = pe.Node(
+        interface=util.IdentityInterface(fields=['smoothed_files']),
+        name='outputnode')
+
+    susan_smooth.connect(smooth, 'smoothed_file', outputnode, 'smoothed_files')
+
+    return susan_smooth
+
+
+def create_fsl_fs_preproc(name='preproc', highpass=True, whichvol='middle'):
+    """Create a FEAT preprocessing workflow together with freesurfer
+
+    Parameters
+    ----------
+
+    ::
+
+        name : name of workflow (default: preproc)
+        highpass : boolean (default: True)
+ whichvol : which volume of the first run to register to ('first', 'middle', 'mean') + + Inputs:: + + inputspec.func : functional runs (filename or list of filenames) + inputspec.fwhm : fwhm for smoothing with SUSAN + inputspec.highpass : HWHM in TRs (if created with highpass=True) + inputspec.subject_id : freesurfer subject id + inputspec.subjects_dir : freesurfer subjects dir + + Outputs:: + + outputspec.reference : volume to which runs are realigned + outputspec.motion_parameters : motion correction parameters + outputspec.realigned_files : motion corrected files + outputspec.motion_plots : plots of motion correction parameters + outputspec.mask_file : mask file used to mask the brain + outputspec.smoothed_files : smoothed functional data + outputspec.highpassed_files : highpassed functional data (if highpass=True) + outputspec.reg_file : bbregister registration files + outputspec.reg_cost : bbregister registration cost files + + Example + ------- + + >>> preproc = create_fsl_fs_preproc(whichvol='first') + >>> preproc.inputs.inputspec.highpass = 128./(2*2.5) + >>> preproc.inputs.inputspec.func = ['f3.nii', 'f5.nii'] + >>> preproc.inputs.inputspec.subjects_dir = '.' + >>> preproc.inputs.inputspec.subject_id = 's1' + >>> preproc.inputs.inputspec.fwhm = 6 + >>> preproc.run() # doctest: +SKIP + """ + + featpreproc = pe.Workflow(name=name) + """ + Set up a node to define all inputs required for the preprocessing workflow + + """ + + if highpass: + inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'func', 'fwhm', 'subject_id', 'subjects_dir', 'highpass' + ]), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask_file', 'smoothed_files', + 'highpassed_files', 'reg_file', 'reg_cost' + ]), + name='outputspec') + else: + inputnode = pe.Node( + interface=util.IdentityInterface( + fields=['func', 'fwhm', 'subject_id', 'subjects_dir']), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'reference', 'motion_parameters', 'realigned_files', + 'motion_plots', 'mask_file', 'smoothed_files', 'reg_file', + 'reg_cost' + ]), + name='outputspec') + """ + Set up a node to define outputs for the preprocessing workflow + + """ + """ + Convert functional images to float representation. Since there can + be more than one functional run we use a MapNode to convert each + run. 
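+
+    The reference volume extracted below is chosen by ``whichvol`` through
+    the module-level ``pickvol`` helper. A rough sketch of that selection (a
+    simplification for illustration, not the exact implementation)::
+
+        import nibabel as nb
+
+        def pickvol_sketch(filenames, fileidx, which):
+            # index of the reference volume within the 4D series:
+            # 0 for 'first', the midpoint for 'middle'
+            nvols = nb.load(filenames[fileidx]).shape[3]
+            return 0 if which == 'first' else nvols // 2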
+    """
+
+    img2float = pe.MapNode(
+        interface=fsl.ImageMaths(
+            out_data_type='float', op_string='', suffix='_dtype'),
+        iterfield=['in_file'],
+        name='img2float')
+    featpreproc.connect(inputnode, 'func', img2float, 'in_file')
+    """
+    Extract the middle (or what whichvol points to) volume of the first run as the reference
+    """
+
+    if whichvol != 'mean':
+        extract_ref = pe.Node(
+            interface=fsl.ExtractROI(t_size=1),
+            iterfield=['in_file'],
+            name='extractref')
+        featpreproc.connect(img2float, ('out_file', pickfirst), extract_ref,
+                            'in_file')
+        featpreproc.connect(img2float, ('out_file', pickvol, 0, whichvol),
+                            extract_ref, 't_min')
+        featpreproc.connect(extract_ref, 'roi_file', outputnode, 'reference')
+    """
+    Realign the functional runs to the reference (`whichvol` volume of first run)
+    """
+
+    motion_correct = pe.MapNode(
+        interface=fsl.MCFLIRT(
+            save_mats=True, save_plots=True, interpolation='sinc'),
+        name='realign',
+        iterfield=['in_file'])
+    featpreproc.connect(img2float, 'out_file', motion_correct, 'in_file')
+    if whichvol != 'mean':
+        featpreproc.connect(extract_ref, 'roi_file', motion_correct,
+                            'ref_file')
+    else:
+        motion_correct.inputs.mean_vol = True
+        featpreproc.connect(motion_correct, 'mean_img', outputnode,
+                            'reference')
+
+    featpreproc.connect(motion_correct, 'par_file', outputnode,
+                        'motion_parameters')
+    featpreproc.connect(motion_correct, 'out_file', outputnode,
+                        'realigned_files')
+    """
+    Plot the estimated motion parameters
+    """
+
+    plot_motion = pe.MapNode(
+        interface=fsl.PlotMotionParams(in_source='fsl'),
+        name='plot_motion',
+        iterfield=['in_file'])
+    plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
+    featpreproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
+    featpreproc.connect(plot_motion, 'out_file', outputnode, 'motion_plots')
+    """Get the mask from subject for each run
+    """
+
+    maskflow = create_getmask_flow()
+    featpreproc.connect([(inputnode, maskflow,
+                          [('subject_id', 'inputspec.subject_id'),
+                           ('subjects_dir', 'inputspec.subjects_dir')])])
+    maskflow.inputs.inputspec.contrast_type = 't2'
+    if whichvol != 'mean':
+        featpreproc.connect(extract_ref, 'roi_file', maskflow,
+                            'inputspec.source_file')
+    else:
+        featpreproc.connect(motion_correct, ('mean_img', pickfirst), maskflow,
+                            'inputspec.source_file')
+    """
+    Mask the functional runs with the extracted mask
+    """
+
+    maskfunc = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_bet', op_string='-mas'),
+        iterfield=['in_file'],
+        name='maskfunc')
+    featpreproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
+    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst),
+                        maskfunc, 'in_file2')
+    """
+    Smooth each run using SUSAN with the brightness threshold set to 75%
+    of the median value for each run and a mask constituting the mean
+    functional
+    """
+
+    smooth = create_susan_smooth(separate_masks=False)
+
+    featpreproc.connect(inputnode, 'fwhm', smooth, 'inputnode.fwhm')
+    featpreproc.connect(maskfunc, 'out_file', smooth, 'inputnode.in_files')
+    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst), smooth,
+                        'inputnode.mask_file')
+    """
+    Mask the smoothed data with the dilated mask
+    """
+
+    maskfunc3 = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_mask', op_string='-mas'),
+        iterfield=['in_file'],
+        name='maskfunc3')
+    featpreproc.connect(smooth, 'outputnode.smoothed_files', maskfunc3,
+                        'in_file')
+    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst),
+                        maskfunc3, 'in_file2')
+
+    concatnode = pe.Node(interface=util.Merge(2), name='concat')
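+    # tolist wraps each branch's file list in an outer list, so that
+    # Merge(2) produces [unsmoothed_files, smoothed_files] and Select can
+    # later pick exactly one branch by index.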
+    featpreproc.connect(maskfunc, ('out_file', tolist), concatnode, 'in1')
+    featpreproc.connect(maskfunc3, ('out_file', tolist), concatnode, 'in2')
+    """
+    The following nodes select smooth or unsmoothed data depending on the
+    fwhm. This is because SUSAN defaults to smoothing the data with about the
+    voxel size of the input data if the fwhm parameter is less than 1/3 of the
+    voxel size.
+    """
+    selectnode = pe.Node(interface=util.Select(), name='select')
+
+    featpreproc.connect(concatnode, 'out', selectnode, 'inlist')
+
+    featpreproc.connect(inputnode, ('fwhm', chooseindex), selectnode, 'index')
+    featpreproc.connect(selectnode, 'out', outputnode, 'smoothed_files')
+    """
+    Scale each run so that the median value of the run is set to 10000
+    """
+
+    meanscale = pe.MapNode(
+        interface=fsl.ImageMaths(suffix='_gms'),
+        iterfield=['in_file', 'op_string'],
+        name='meanscale')
+    featpreproc.connect(selectnode, 'out', meanscale, 'in_file')
+    """
+    Determine the median value of the functional runs using the mask
+    """
+
+    medianval = pe.MapNode(
+        interface=fsl.ImageStats(op_string='-k %s -p 50'),
+        iterfield=['in_file'],
+        name='medianval')
+    featpreproc.connect(motion_correct, 'out_file', medianval, 'in_file')
+    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst),
+                        medianval, 'mask_file')
+    """
+    Define a function to get the scaling factor for intensity normalization
+    """
+
+    featpreproc.connect(medianval, ('out_stat', getmeanscale), meanscale,
+                        'op_string')
+    """
+    Perform temporal highpass filtering on the data
+    """
+
+    if highpass:
+        highpass = pe.MapNode(
+            interface=fsl.ImageMaths(suffix='_tempfilt'),
+            iterfield=['in_file'],
+            name='highpass')
+        featpreproc.connect(inputnode, ('highpass', highpass_operand),
+                            highpass, 'op_string')
+        featpreproc.connect(meanscale, 'out_file', highpass, 'in_file')
+        featpreproc.connect(highpass, 'out_file', outputnode,
+                            'highpassed_files')
+
+    featpreproc.connect(maskflow, ('outputspec.mask_file', pickfirst),
+                        outputnode, 'mask_file')
+    featpreproc.connect(maskflow, 'outputspec.reg_file', outputnode,
+                        'reg_file')
+    featpreproc.connect(maskflow, 'outputspec.reg_cost', outputnode,
+                        'reg_cost')
+
+    return featpreproc
+
+
+def create_reg_workflow(name='registration'):
+    """Create a registration workflow
+
+    Parameters
+    ----------
+
+    ::
+
+        name : name of workflow (default: 'registration')
+
+    Inputs::
+
+        inputspec.source_files : files (filename or list of filenames to register)
+        inputspec.mean_image : reference image to use
+        inputspec.anatomical_image : anatomical image to coregister to
+        inputspec.target_image : registration target
+
+    Outputs::
+
+        outputspec.func2anat_transform : FLIRT transform
+        outputspec.anat2target_transform : FLIRT+FNIRT transform
+        outputspec.transformed_files : transformed files in target space
+        outputspec.transformed_mean : mean image in target space
+
+    Example
+    -------
+
+    """
+
+    register = pe.Workflow(name=name)
+
+    inputnode = pe.Node(
+        interface=util.IdentityInterface(fields=[
+            'source_files', 'mean_image', 'anatomical_image', 'target_image',
+            'target_image_brain', 'config_file'
+        ]),
+        name='inputspec')
+    outputnode = pe.Node(
+        interface=util.IdentityInterface(fields=[
+            'func2anat_transform',
+            'anat2target_transform',
+            'transformed_files',
+            'transformed_mean',
+        ]),
+        name='outputspec')
+    """
+    Estimate the tissue classes from the anatomical image, using FSL FAST on
+    the skull-stripped anatomical image.
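+
+    FAST emits one partial-volume map per class; with the default three-class
+    segmentation they are ordered CSF, GM, WM, so index 2 below selects the
+    white-matter map for the BBR step (file names here are hypothetical)::
+
+        pv_files = ['T1_pve_0.nii.gz', 'T1_pve_1.nii.gz', 'T1_pve_2.nii.gz']
+        wm_pve = pv_files[2]  # white-matter partial-volume map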
+ """ + + stripper = pe.Node(fsl.BET(), name='stripper') + register.connect(inputnode, 'anatomical_image', stripper, 'in_file') + fast = pe.Node(fsl.FAST(), name='fast') + register.connect(stripper, 'out_file', fast, 'in_files') + """ + Binarize the segmentation + """ + + binarize = pe.Node( + fsl.ImageMaths(op_string='-nan -thr 0.5 -bin'), name='binarize') + pickindex = lambda x, i: x[i] + register.connect(fast, ('partial_volume_files', pickindex, 2), binarize, + 'in_file') + """ + Calculate rigid transform from mean image to anatomical image + """ + + mean2anat = pe.Node(fsl.FLIRT(), name='mean2anat') + mean2anat.inputs.dof = 6 + register.connect(inputnode, 'mean_image', mean2anat, 'in_file') + register.connect(stripper, 'out_file', mean2anat, 'reference') + """ + Now use bbr cost function to improve the transform + """ + + mean2anatbbr = pe.Node(fsl.FLIRT(), name='mean2anatbbr') + mean2anatbbr.inputs.dof = 6 + mean2anatbbr.inputs.cost = 'bbr' + mean2anatbbr.inputs.schedule = os.path.join( + os.getenv('FSLDIR'), 'etc/flirtsch/bbr.sch') + register.connect(inputnode, 'mean_image', mean2anatbbr, 'in_file') + register.connect(binarize, 'out_file', mean2anatbbr, 'wm_seg') + register.connect(inputnode, 'anatomical_image', mean2anatbbr, 'reference') + register.connect(mean2anat, 'out_matrix_file', mean2anatbbr, + 'in_matrix_file') + """ + Calculate affine transform from anatomical to target + """ + + anat2target_affine = pe.Node(fsl.FLIRT(), name='anat2target_linear') + anat2target_affine.inputs.searchr_x = [-180, 180] + anat2target_affine.inputs.searchr_y = [-180, 180] + anat2target_affine.inputs.searchr_z = [-180, 180] + register.connect(stripper, 'out_file', anat2target_affine, 'in_file') + register.connect(inputnode, 'target_image_brain', anat2target_affine, + 'reference') + """ + Calculate nonlinear transform from anatomical to target + """ + + anat2target_nonlinear = pe.Node(fsl.FNIRT(), name='anat2target_nonlinear') + anat2target_nonlinear.inputs.fieldcoeff_file = True + register.connect(anat2target_affine, 'out_matrix_file', + anat2target_nonlinear, 'affine_file') + register.connect(inputnode, 'anatomical_image', anat2target_nonlinear, + 'in_file') + register.connect(inputnode, 'config_file', anat2target_nonlinear, + 'config_file') + register.connect(inputnode, 'target_image', anat2target_nonlinear, + 'ref_file') + """ + Transform the mean image. First to anatomical and then to target + """ + + warpmean = pe.Node(fsl.ApplyWarp(interp='spline'), name='warpmean') + register.connect(inputnode, 'mean_image', warpmean, 'in_file') + register.connect(mean2anatbbr, 'out_matrix_file', warpmean, 'premat') + register.connect(inputnode, 'target_image', warpmean, 'ref_file') + register.connect(anat2target_nonlinear, 'fieldcoeff_file', warpmean, + 'field_file') + """ + Transform the remaining images. 
First to anatomical and then to target + """ + + warpall = pe.MapNode( + fsl.ApplyWarp(interp='spline'), + iterfield=['in_file'], + nested=True, + name='warpall') + register.connect(inputnode, 'source_files', warpall, 'in_file') + register.connect(mean2anatbbr, 'out_matrix_file', warpall, 'premat') + register.connect(inputnode, 'target_image', warpall, 'ref_file') + register.connect(anat2target_nonlinear, 'fieldcoeff_file', warpall, + 'field_file') + """ + Assign all the output files + """ + + register.connect(warpmean, 'out_file', outputnode, 'transformed_mean') + register.connect(warpall, 'out_file', outputnode, 'transformed_files') + register.connect(mean2anatbbr, 'out_matrix_file', outputnode, + 'func2anat_transform') + register.connect(anat2target_nonlinear, 'fieldcoeff_file', outputnode, + 'anat2target_transform') + + return register diff --git a/nipype/workflows/fmri/fsl/tests/__init__.py b/nipype/workflows/fmri/fsl/tests/__init__.py new file mode 100644 index 0000000000..2986294d9d --- /dev/null +++ b/nipype/workflows/fmri/fsl/tests/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +__author__ = 'satra' diff --git a/nipype/workflows/fmri/fsl/tests/test_preprocess.py b/nipype/workflows/fmri/fsl/tests/test_preprocess.py new file mode 100644 index 0000000000..4f382bdc1a --- /dev/null +++ b/nipype/workflows/fmri/fsl/tests/test_preprocess.py @@ -0,0 +1,25 @@ +__author__ = 'oliver' + +from ..preprocess import create_featreg_preproc, pickrun + + +def test_pickrun(): + files = ['1', '2', '3', '4'] + assert pickrun(files, 0) == '1' + assert pickrun(files, 'first') == '1' + assert pickrun(files, -1) == '4' + assert pickrun(files, 'last') == '4' + assert pickrun(files, 'middle') == '3' + + +def test_create_featreg_preproc(): + """smoke test""" + wf = create_featreg_preproc(whichrun=0) + + # test type + import nipype + assert type(wf) == nipype.pipeline.engine.Workflow + + # test methods + assert wf.get_node('extractref') + assert wf._get_dot() diff --git a/nipype/workflows/fmri/spm/__init__.py b/nipype/workflows/fmri/spm/__init__.py new file mode 100644 index 0000000000..f974a663db --- /dev/null +++ b/nipype/workflows/fmri/spm/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from .preprocess import (create_spm_preproc, create_vbm_preproc, + create_DARTEL_template) diff --git a/nipype/workflows/fmri/spm/estimate.py b/nipype/workflows/fmri/spm/estimate.py new file mode 100644 index 0000000000..99fb243f19 --- /dev/null +++ b/nipype/workflows/fmri/spm/estimate.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: diff --git a/nipype/workflows/fmri/spm/preprocess.py b/nipype/workflows/fmri/spm/preprocess.py new file mode 100644 index 0000000000..f2957e4b03 --- /dev/null +++ b/nipype/workflows/fmri/spm/preprocess.py @@ -0,0 +1,332 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import os + +from ....algorithms import rapidart as ra +from ....interfaces import spm as spm +from ....interfaces import utility as niu +from ....pipeline import engine as pe +from ...smri.freesurfer.utils import create_getmask_flow + +from .... import logging +logger = logging.getLogger('nipype.workflow') + + +def create_spm_preproc(name='preproc'): + """Create an spm preprocessing workflow with freesurfer registration and + artifact detection. 
+ + The workflow realigns and smooths and registers the functional images with + the subject's freesurfer space. + + Example + ------- + + >>> preproc = create_spm_preproc() + >>> preproc.base_dir = '.' + >>> preproc.inputs.inputspec.fwhm = 6 + >>> preproc.inputs.inputspec.subject_id = 's1' + >>> preproc.inputs.inputspec.subjects_dir = '.' + >>> preproc.inputs.inputspec.functionals = ['f3.nii', 'f5.nii'] + >>> preproc.inputs.inputspec.norm_threshold = 1 + >>> preproc.inputs.inputspec.zintensity_threshold = 3 + + Inputs:: + + inputspec.functionals : functional runs use 4d nifti + inputspec.subject_id : freesurfer subject id + inputspec.subjects_dir : freesurfer subjects dir + inputspec.fwhm : smoothing fwhm + inputspec.norm_threshold : norm threshold for outliers + inputspec.zintensity_threshold : intensity threshold in z-score + + Outputs:: + + outputspec.realignment_parameters : realignment parameter files + outputspec.smoothed_files : smoothed functional files + outputspec.outlier_files : list of outliers + outputspec.outlier_stats : statistics of outliers + outputspec.outlier_plots : images of outliers + outputspec.mask_file : binary mask file in reference image space + outputspec.reg_file : registration file that maps reference image to + freesurfer space + outputspec.reg_cost : cost of registration (useful for detecting misalignment) + """ + """ + Initialize the workflow + """ + + workflow = pe.Workflow(name=name) + """ + Define the inputs to this workflow + """ + + inputnode = pe.Node( + niu.IdentityInterface(fields=[ + 'functionals', 'subject_id', 'subjects_dir', 'fwhm', + 'norm_threshold', 'zintensity_threshold' + ]), + name='inputspec') + """ + Setup the processing nodes and create the mask generation and coregistration + workflow + """ + + poplist = lambda x: x.pop() + realign = pe.Node(spm.Realign(), name='realign') + workflow.connect(inputnode, 'functionals', realign, 'in_files') + maskflow = create_getmask_flow() + workflow.connect([(inputnode, maskflow, + [('subject_id', 'inputspec.subject_id'), + ('subjects_dir', 'inputspec.subjects_dir')])]) + maskflow.inputs.inputspec.contrast_type = 't2' + workflow.connect(realign, 'mean_image', maskflow, 'inputspec.source_file') + smooth = pe.Node(spm.Smooth(), name='smooth') + workflow.connect(inputnode, 'fwhm', smooth, 'fwhm') + workflow.connect(realign, 'realigned_files', smooth, 'in_files') + artdetect = pe.Node( + ra.ArtifactDetect( + mask_type='file', + parameter_source='SPM', + use_differences=[True, False], + use_norm=True, + save_plot=True), + name='artdetect') + workflow.connect([(inputnode, artdetect, + [('norm_threshold', 'norm_threshold'), + ('zintensity_threshold', 'zintensity_threshold')])]) + workflow.connect([(realign, artdetect, [('realigned_files', + 'realigned_files'), + ('realignment_parameters', + 'realignment_parameters')])]) + workflow.connect(maskflow, ('outputspec.mask_file', poplist), artdetect, + 'mask_file') + """ + Define the outputs of the workflow and connect the nodes to the outputnode + """ + + outputnode = pe.Node( + niu.IdentityInterface(fields=[ + "realignment_parameters", "smoothed_files", "mask_file", + "reg_file", "reg_cost", 'outlier_files', 'outlier_stats', + 'outlier_plots' + ]), + name="outputspec") + workflow.connect( + [(maskflow, outputnode, [("outputspec.reg_file", "reg_file")]), + (maskflow, outputnode, + [("outputspec.reg_cost", "reg_cost")]), (maskflow, outputnode, [ + (("outputspec.mask_file", poplist), "mask_file") + ]), (realign, outputnode, [('realignment_parameters', + 
'realignment_parameters')]), + (smooth, outputnode, [('smoothed_files', 'smoothed_files')]), + (artdetect, outputnode, [('outlier_files', 'outlier_files'), + ('statistic_files', 'outlier_stats'), + ('plot_files', 'outlier_plots')])]) + return workflow + + +def create_vbm_preproc(name='vbmpreproc'): + """Create a vbm workflow that generates DARTEL-based warps to MNI space + + Based on: http://www.fil.ion.ucl.ac.uk/~john/misc/VBMclass10.pdf + + Example + ------- + + >>> preproc = create_vbm_preproc() + >>> preproc.inputs.inputspec.fwhm = 8 + >>> preproc.inputs.inputspec.structural_files = [ + ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] + >>> preproc.inputs.inputspec.template_prefix = 'Template' + >>> preproc.run() # doctest: +SKIP + + Inputs:: + + inputspec.structural_files : structural data to be used to create templates + inputspec.fwhm: single of triplet for smoothing when normalizing to MNI space + inputspec.template_prefix : prefix for dartel template + + Outputs:: + + outputspec.normalized_files : normalized gray matter files + outputspec.template_file : DARTEL template + outputspec.icv : intracranial volume (cc - assuming dimensions in mm) + + """ + + workflow = pe.Workflow(name=name) + """ + Define the inputs to this workflow + """ + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['structural_files', 'fwhm', 'template_prefix']), + name='inputspec') + + dartel_template = create_DARTEL_template() + + workflow.connect(inputnode, 'template_prefix', dartel_template, + 'inputspec.template_prefix') + workflow.connect(inputnode, 'structural_files', dartel_template, + 'inputspec.structural_files') + + norm2mni = pe.Node(spm.DARTELNorm2MNI(modulate=True), name='norm2mni') + workflow.connect(dartel_template, 'outputspec.template_file', norm2mni, + 'template_file') + workflow.connect(dartel_template, 'outputspec.flow_fields', norm2mni, + 'flowfield_files') + + def getclass1images(class_images): + class1images = [] + for session in class_images: + class1images.extend(session[0]) + return class1images + + workflow.connect(dartel_template, + ('segment.native_class_images', getclass1images), + norm2mni, 'apply_to_files') + workflow.connect(inputnode, 'fwhm', norm2mni, 'fwhm') + + def compute_icv(class_images): + from nibabel import load + from numpy import prod + icv = [] + for session in class_images: + voxel_volume = prod(load(session[0][0]).header.get_zooms()) + img = load(session[0][0]).get_data() + \ + load(session[1][0]).get_data() + \ + load(session[2][0]).get_data() + img_icv = (img > 0.5).astype(int).sum() * voxel_volume * 1e-3 + icv.append(img_icv) + return icv + + calc_icv = pe.Node( + niu.Function( + function=compute_icv, + input_names=['class_images'], + output_names=['icv']), + name='calc_icv') + + workflow.connect(dartel_template, 'segment.native_class_images', calc_icv, + 'class_images') + """ + Define the outputs of the workflow and connect the nodes to the outputnode + """ + + outputnode = pe.Node( + niu.IdentityInterface( + fields=["normalized_files", "template_file", "icv"]), + name="outputspec") + workflow.connect([ + (dartel_template, outputnode, [('outputspec.template_file', + 'template_file')]), + (norm2mni, outputnode, [("normalized_files", "normalized_files")]), + (calc_icv, outputnode, [("icv", "icv")]), + ]) + + return workflow + + +def create_DARTEL_template(name='dartel_template'): + """Create a vbm workflow that generates DARTEL-based template + + + Example + ------- + + >>> preproc = create_DARTEL_template() + >>> 
preproc.inputs.inputspec.structural_files = [ + ... os.path.abspath('s1.nii'), os.path.abspath('s3.nii')] + >>> preproc.inputs.inputspec.template_prefix = 'Template' + >>> preproc.run() # doctest: +SKIP + + Inputs:: + + inputspec.structural_files : structural data to be used to create templates + inputspec.template_prefix : prefix for dartel template + + Outputs:: + + outputspec.template_file : DARTEL template + outputspec.flow_fields : warps from input struct files to the template + + """ + + workflow = pe.Workflow(name=name) + + inputnode = pe.Node( + niu.IdentityInterface(fields=['structural_files', 'template_prefix']), + name='inputspec') + + segment = pe.MapNode( + spm.NewSegment(), iterfield=['channel_files'], name='segment') + workflow.connect(inputnode, 'structural_files', segment, 'channel_files') + + spm_info = spm.Info.getinfo() + if spm_info: + spm_path = spm_info['path'] + if spm_info['name'] == 'SPM8': + tissue1 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 1), 2, + (True, True), (False, False)) + tissue2 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 2), 2, + (True, True), (False, False)) + tissue3 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 3), 2, + (True, False), (False, False)) + tissue4 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 4), 3, + (False, False), (False, False)) + tissue5 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 5), 4, + (False, False), (False, False)) + tissue6 = ((os.path.join(spm_path, 'toolbox/Seg/TPM.nii'), 6), 2, + (False, False), (False, False)) + elif spm_info['name'] == 'SPM12': + spm_path = spm_info['path'] + tissue1 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 1), 1, + (True, True), (False, False)) + tissue2 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 2), 1, + (True, True), (False, False)) + tissue3 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 3), 2, + (True, False), (False, False)) + tissue4 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 4), 3, + (False, False), (False, False)) + tissue5 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 5), 4, + (False, False), (False, False)) + tissue6 = ((os.path.join(spm_path, 'tpm/TPM.nii'), 6), 2, + (False, False), (False, False)) + else: + logger.critical('Unsupported version of SPM') + + segment.inputs.tissues = [ + tissue1, tissue2, tissue3, tissue4, tissue5, tissue6 + ] + else: + logger.critical('SPM not found') + + dartel = pe.Node(spm.DARTEL(), name='dartel') + """Get the gray and white segmentation classes generated by NewSegment + """ + + def get2classes(dartel_files): + class1images = [] + class2images = [] + for session in dartel_files: + class1images.extend(session[0]) + class2images.extend(session[1]) + return [class1images, class2images] + + workflow.connect(segment, ('dartel_input_images', get2classes), dartel, + 'image_files') + workflow.connect(inputnode, 'template_prefix', dartel, 'template_prefix') + + outputnode = pe.Node( + niu.IdentityInterface(fields=["template_file", "flow_fields"]), + name="outputspec") + workflow.connect([ + (dartel, outputnode, [('final_template_file', 'template_file'), + ('dartel_flow_fields', 'flow_fields')]), + ]) + + return workflow diff --git a/nipype/workflows/fmri/spm/tests/__init__.py b/nipype/workflows/fmri/spm/tests/__init__.py new file mode 100644 index 0000000000..2986294d9d --- /dev/null +++ b/nipype/workflows/fmri/spm/tests/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +__author__ = 'satra' diff --git a/nipype/workflows/graph/__init__.py b/nipype/workflows/graph/__init__.py new file mode 100644 index 0000000000..ead6180dc8 --- 
/dev/null +++ b/nipype/workflows/graph/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) diff --git a/nipype/workflows/misc/__init__.py b/nipype/workflows/misc/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/workflows/misc/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/nipype/workflows/misc/utils.py b/nipype/workflows/misc/utils.py new file mode 100644 index 0000000000..b581ec8c54 --- /dev/null +++ b/nipype/workflows/misc/utils.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from builtins import map, range + + +def get_vox_dims(volume): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): + volume = volume[0] + nii = nb.load(volume, mmap=NUMPY_MMAP) + hdr = nii.header + voxdims = hdr.get_zooms() + return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] + + +def get_data_dims(volume): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + if isinstance(volume, list): + volume = volume[0] + nii = nb.load(volume, mmap=NUMPY_MMAP) + hdr = nii.header + datadims = hdr.get_data_shape() + return [int(datadims[0]), int(datadims[1]), int(datadims[2])] + + +def get_affine(volume): + import nibabel as nb + from nipype.utils import NUMPY_MMAP + nii = nb.load(volume, mmap=NUMPY_MMAP) + return nii.affine + + +def select_aparc(list_of_files): + for in_file in list_of_files: + if 'aparc+aseg.mgz' in in_file: + idx = list_of_files.index(in_file) + return list_of_files[idx] + + +def select_aparc_annot(list_of_files): + for in_file in list_of_files: + if '.aparc.annot' in in_file: + idx = list_of_files.index(in_file) + return list_of_files[idx] + + +def region_list_from_volume(in_file): + import nibabel as nb + import numpy as np + from nipype.utils import NUMPY_MMAP + segmentation = nb.load(in_file, mmap=NUMPY_MMAP) + segmentationdata = segmentation.get_data() + rois = np.unique(segmentationdata) + region_list = list(rois) + region_list.sort() + region_list.remove(0) + region_list = list(map(int, region_list)) + return region_list + + +def id_list_from_lookup_table(lookup_file, region_list): + import numpy as np + LUTlabelsRGBA = np.loadtxt( + lookup_file, + skiprows=4, + usecols=[0, 1, 2, 3, 4, 5], + comments='#', + dtype={ + 'names': ('index', 'label', 'R', 'G', 'B', 'A'), + 'formats': ('int', '|S30', 'int', 'int', 'int', 'int') + }) + numLUTLabels = np.size(LUTlabelsRGBA) + LUTlabelDict = {} + for labels in range(0, numLUTLabels): + LUTlabelDict[LUTlabelsRGBA[labels][0]] = [ + LUTlabelsRGBA[labels][1], LUTlabelsRGBA[labels][2], + LUTlabelsRGBA[labels][3], LUTlabelsRGBA[labels][4], + LUTlabelsRGBA[labels][5] + ] + id_list = [] + for region in region_list: + label = LUTlabelDict[region][0] + id_list.append(label) + id_list = list(map(str, id_list)) + return id_list diff --git a/nipype/workflows/rsfmri/__init__.py b/nipype/workflows/rsfmri/__init__.py new file mode 100644 index 0000000000..bd58039343 --- /dev/null +++ b/nipype/workflows/rsfmri/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from . 
import fsl diff --git a/nipype/workflows/rsfmri/fsl/__init__.py b/nipype/workflows/rsfmri/fsl/__init__.py new file mode 100644 index 0000000000..2e17899066 --- /dev/null +++ b/nipype/workflows/rsfmri/fsl/__init__.py @@ -0,0 +1,2 @@ +# -*- coding: utf-8 -*- +from .resting import create_resting_preproc diff --git a/nipype/workflows/rsfmri/fsl/resting.py b/nipype/workflows/rsfmri/fsl/resting.py new file mode 100644 index 0000000000..12d44a83cf --- /dev/null +++ b/nipype/workflows/rsfmri/fsl/resting.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str + +from ....interfaces import fsl as fsl # fsl +from ....interfaces import utility as util # utility +from ....pipeline import engine as pe # pypeline engine +from ....algorithms import confounds + + +def select_volume(filename, which): + """Return the middle index of a file + """ + from nibabel import load + import numpy as np + from nipype.utils import NUMPY_MMAP + + if which.lower() == 'first': + idx = 0 + elif which.lower() == 'middle': + idx = int(np.ceil(load(filename, mmap=NUMPY_MMAP).shape[3] / 2)) + else: + raise Exception('unknown value for volume selection : %s' % which) + return idx + + +def create_realign_flow(name='realign'): + """Realign a time series to the middle volume using spline interpolation + + Uses MCFLIRT to realign the time series and ApplyWarp to apply the rigid + body transformations using spline interpolation (unknown order). + + Example + ------- + + >>> wf = create_realign_flow() + >>> wf.inputs.inputspec.func = 'f3.nii' + >>> wf.run() # doctest: +SKIP + + """ + realignflow = pe.Workflow(name=name) + inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'func', + ]), name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'realigned_file', + ]), + name='outputspec') + realigner = pe.Node( + fsl.MCFLIRT(save_mats=True, stats_imgs=True), name='realigner') + splitter = pe.Node(fsl.Split(dimension='t'), name='splitter') + warper = pe.MapNode( + fsl.ApplyWarp(interp='spline'), + iterfield=['in_file', 'premat'], + name='warper') + joiner = pe.Node(fsl.Merge(dimension='t'), name='joiner') + + realignflow.connect(inputnode, 'func', realigner, 'in_file') + realignflow.connect(inputnode, ('func', select_volume, 'middle'), + realigner, 'ref_vol') + realignflow.connect(realigner, 'out_file', splitter, 'in_file') + realignflow.connect(realigner, 'mat_file', warper, 'premat') + realignflow.connect(realigner, 'variance_img', warper, 'ref_file') + realignflow.connect(splitter, 'out_files', warper, 'in_file') + realignflow.connect(warper, 'out_file', joiner, 'in_files') + realignflow.connect(joiner, 'merged_file', outputnode, 'realigned_file') + return realignflow + + +def create_resting_preproc(name='restpreproc', base_dir=None): + """Create a "resting" time series preprocessing workflow + + The noise removal is based on Behzadi et al. 
(2007) + + Parameters + ---------- + + name : name of workflow (default: restpreproc) + + Inputs:: + + inputspec.func : functional run (filename or list of filenames) + + Outputs:: + + outputspec.noise_mask_file : voxels used for PCA to derive noise + components + outputspec.filtered_file : bandpass filtered and noise-reduced time + series + + Example + ------- + + >>> TR = 3.0 + >>> wf = create_resting_preproc() + >>> wf.inputs.inputspec.func = 'f3.nii' + >>> wf.inputs.inputspec.num_noise_components = 6 + >>> wf.inputs.inputspec.highpass_sigma = 100/(2*TR) + >>> wf.inputs.inputspec.lowpass_sigma = 12.5/(2*TR) + >>> wf.run() # doctest: +SKIP + + """ + + restpreproc = pe.Workflow(name=name, base_dir=base_dir) + + # Define nodes + inputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'func', 'num_noise_components', 'highpass_sigma', 'lowpass_sigma' + ]), + name='inputspec') + outputnode = pe.Node( + interface=util.IdentityInterface(fields=[ + 'noise_mask_file', + 'filtered_file', + ]), + name='outputspec') + slicetimer = pe.Node(fsl.SliceTimer(), name='slicetimer') + realigner = create_realign_flow() + tsnr = pe.Node(confounds.TSNR(regress_poly=2), name='tsnr') + getthresh = pe.Node( + interface=fsl.ImageStats(op_string='-p 98'), name='getthreshold') + threshold_stddev = pe.Node(fsl.Threshold(), name='threshold') + compcor = pe.Node( + confounds.ACompCor( + components_file="noise_components.txt", pre_filter=False), + name='compcor') + remove_noise = pe.Node( + fsl.FilterRegressor(filter_all=True), name='remove_noise') + bandpass_filter = pe.Node(fsl.TemporalFilter(), name='bandpass_filter') + + # Define connections + restpreproc.connect(inputnode, 'func', slicetimer, 'in_file') + restpreproc.connect(slicetimer, 'slice_time_corrected_file', realigner, + 'inputspec.func') + restpreproc.connect(realigner, 'outputspec.realigned_file', tsnr, + 'in_file') + restpreproc.connect(tsnr, 'stddev_file', threshold_stddev, 'in_file') + restpreproc.connect(tsnr, 'stddev_file', getthresh, 'in_file') + restpreproc.connect(getthresh, 'out_stat', threshold_stddev, 'thresh') + restpreproc.connect(realigner, 'outputspec.realigned_file', compcor, + 'realigned_file') + restpreproc.connect(threshold_stddev, 'out_file', compcor, 'mask_files') + restpreproc.connect(inputnode, 'num_noise_components', compcor, + 'num_components') + restpreproc.connect(tsnr, 'detrended_file', remove_noise, 'in_file') + restpreproc.connect(compcor, 'components_file', remove_noise, + 'design_file') + restpreproc.connect(inputnode, 'highpass_sigma', bandpass_filter, + 'highpass_sigma') + restpreproc.connect(inputnode, 'lowpass_sigma', bandpass_filter, + 'lowpass_sigma') + restpreproc.connect(remove_noise, 'out_file', bandpass_filter, 'in_file') + restpreproc.connect(threshold_stddev, 'out_file', outputnode, + 'noise_mask_file') + restpreproc.connect(bandpass_filter, 'out_file', outputnode, + 'filtered_file') + return restpreproc diff --git a/nipype/workflows/rsfmri/fsl/tests/__init__.py b/nipype/workflows/rsfmri/fsl/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/nipype/workflows/rsfmri/fsl/tests/test_resting.py b/nipype/workflows/rsfmri/fsl/tests/test_resting.py new file mode 100644 index 0000000000..799041df37 --- /dev/null +++ b/nipype/workflows/rsfmri/fsl/tests/test_resting.py @@ -0,0 +1,106 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +import pytest +import os +import mock +import numpy as np + +from .....testing 
import utils +from .....interfaces import IdentityInterface +from .....pipeline.engine import Node, Workflow + +from ..resting import create_resting_preproc + +ALL_FIELDS = [ + 'func', 'in_file', 'slice_time_corrected_file', 'stddev_file', 'out_stat', + 'thresh', 'num_noise_components', 'detrended_file', 'design_file', + 'highpass_sigma', 'lowpass_sigma', 'out_file', 'noise_mask_file', + 'filtered_file' +] + + +def stub_node_factory(*args, **kwargs): + if 'name' not in kwargs.keys(): + raise Exception() + name = kwargs['name'] + if name == 'compcor': + return Node(*args, **kwargs) + else: # replace with an IdentityInterface + return Node(IdentityInterface(fields=ALL_FIELDS), name=name) + + +def stub_wf(*args, **kwargs): + wflow = Workflow(name='realigner') + inputnode = Node(IdentityInterface(fields=['func']), name='inputspec') + outputnode = Node( + interface=IdentityInterface(fields=['realigned_file']), + name='outputspec') + wflow.connect(inputnode, 'func', outputnode, 'realigned_file') + return wflow + + +class TestResting(): + + in_filenames = { + 'realigned_file': 'rsfmrifunc.nii', + 'mask_file': 'rsfmrimask.nii' + } + + out_filenames = { + 'components_file': 'restpreproc/compcor/noise_components.txt' + } + + num_noise_components = 6 + + @pytest.fixture(autouse=True) + def setup_class(self, tmpdir): + # setup temp folder + tmpdir.chdir() + self.in_filenames = { + key: os.path.abspath(value) + for key, value in self.in_filenames.items() + } + + # create&save input files + utils.save_toy_nii(self.fake_data, self.in_filenames['realigned_file']) + mask = np.zeros(self.fake_data.shape[:3]) + for i in range(mask.shape[0]): + for j in range(mask.shape[1]): + if i == j: + mask[i, j] = 1 + utils.save_toy_nii(mask, self.in_filenames['mask_file']) + + @mock.patch( + 'nipype.workflows.rsfmri.fsl.resting.create_realign_flow', + side_effect=stub_wf) + @mock.patch('nipype.pipeline.engine.Node', side_effect=stub_node_factory) + def test_create_resting_preproc(self, mock_node, mock_realign_wf): + wflow = create_resting_preproc(base_dir=os.getcwd()) + + wflow.inputs.inputspec.num_noise_components = self.num_noise_components + mask_in = wflow.get_node('threshold').inputs + mask_in.out_file = self.in_filenames['mask_file'] + func_in = wflow.get_node('slicetimer').inputs + func_in.slice_time_corrected_file = self.in_filenames['realigned_file'] + + wflow.run() + + # assert + expected_file = os.path.abspath(self.out_filenames['components_file']) + with open(expected_file, 'r') as components_file: + components_data = [line.split() for line in components_file] + num_got_components = len(components_data) + assert (num_got_components == self.num_noise_components + or num_got_components == self.fake_data.shape[3]) + first_two = [row[:2] for row in components_data[1:]] + assert first_two == [['-0.5172356654', '-0.6973053243'], [ + '0.2574722644', '0.1645270737' + ], ['-0.0806469590', + '0.5156853779'], ['0.7187176051', '-0.3235820287'], + ['-0.3783072450', '0.3406749013']] + + fake_data = np.array([[[[2, 4, 3, 9, 1], [3, 6, 4, 7, 4]], + [[8, 3, 4, 6, 2], [4, 0, 4, 4, 2]]], + [[[9, 7, 5, 5, 7], [7, 8, 4, 8, 4]], + [[0, 4, 7, 1, 7], [6, 8, 8, 8, 7]]]]) diff --git a/nipype/workflows/smri/__init__.py b/nipype/workflows/smri/__init__.py new file mode 100644 index 0000000000..b6d7bf5731 --- /dev/null +++ b/nipype/workflows/smri/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) + +from . import ants +from . 
import freesurfer +from . import niftyreg diff --git a/nipype/workflows/smri/ants/ANTSBuildTemplate.py b/nipype/workflows/smri/ants/ANTSBuildTemplate.py new file mode 100644 index 0000000000..5a43d47bac --- /dev/null +++ b/nipype/workflows/smri/ants/ANTSBuildTemplate.py @@ -0,0 +1,388 @@ +# -*- coding: utf-8 -*- +################################################################################# +# Program: Build Template Parallel +# Language: Python +## +# Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa +## +# This software is distributed WITHOUT ANY WARRANTY; without even +# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +# PURPOSE. +## +################################################################################# +from __future__ import print_function +from builtins import map +from builtins import zip +from builtins import range + +from ....pipeline import engine as pe +from ....interfaces import utility as util +from ....interfaces.utility import Function + +from ....interfaces.ants import (ANTS, WarpImageMultiTransform, AverageImages, + MultiplyImages, AverageAffineTransform) + + +def GetFirstListElement(this_list): + return this_list[0] + + +def MakeTransformListWithGradientWarps(averageAffineTranform, + gradientStepWarp): + return [ + averageAffineTranform, gradientStepWarp, gradientStepWarp, + gradientStepWarp, gradientStepWarp + ] + + +def RenestDeformedPassiveImages(deformedPassiveImages, + flattened_image_nametypes): + import os + """ Now make a list of lists of images where the outter list is per image type, + and the inner list is the same size as the number of subjects to be averaged. + In this case, the first element will be a list of all the deformed T2's, and + the second element will be a list of all deformed POSTERIOR_AIR, etc.. + """ + all_images_size = len(deformedPassiveImages) + image_dictionary_of_lists = dict() + nested_imagetype_list = list() + outputAverageImageName_list = list() + image_type_list = list() + # make empty_list, this is not efficient, but it works + for name in flattened_image_nametypes: + image_dictionary_of_lists[name] = list() + for index in range(0, all_images_size): + curr_name = flattened_image_nametypes[index] + curr_file = deformedPassiveImages[index] + image_dictionary_of_lists[curr_name].append(curr_file) + for image_type, image_list in list(image_dictionary_of_lists.items()): + nested_imagetype_list.append(image_list) + outputAverageImageName_list.append('AVG_' + image_type + '.nii.gz') + image_type_list.append('WARP_AVG_' + image_type) + print("\n" * 10) + print("HACK: ", nested_imagetype_list) + print("HACK: ", outputAverageImageName_list) + print("HACK: ", image_type_list) + return nested_imagetype_list, outputAverageImageName_list, image_type_list + + +# Utility Function +# This will make a list of list pairs for defining the concatenation of transforms +# wp=['wp1.nii','wp2.nii','wp3.nii'] +# af=['af1.mat','af2.mat','af3.mat'] +# ll=map(list,zip(af,wp)) +# ll +# #[['af1.mat', 'wp1.nii'], ['af2.mat', 'wp2.nii'], ['af3.mat', 'wp3.nii']] + + +def MakeListsOfTransformLists(warpTransformList, AffineTransformList): + return list(map(list, list(zip(warpTransformList, AffineTransformList)))) + + +# Flatten and return equal length transform and images lists. 
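+# For example (a hedged sketch with hypothetical filenames): passive-image
+# dictionaries [{'T2': 's1_T2.nii'}, {'T2': 's2_T2.nii'}] together with
+# transformation_series [['af1.mat', 'wp1.nii'], ['af2.mat', 'wp2.nii']]
+# flatten to images ['s1_T2.nii', 's2_T2.nii'], transforms
+# [['af1.mat', 'wp1.nii'], ['af2.mat', 'wp2.nii']] and nametypes ['T2', 'T2'],
+# i.e. one entry per (subject, image-type) pair.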
+ + +def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries, + transformation_series): + import sys + print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format( + lpi=ListOfPassiveImagesDictionaries)) + subjCount = len(ListOfPassiveImagesDictionaries) + tranCount = len(transformation_series) + if subjCount != tranCount: + print("ERROR: subjCount must equal tranCount {0} != {1}".format( + subjCount, tranCount)) + sys.exit(-1) + flattened_images = list() + flattened_image_nametypes = list() + flattened_transforms = list() + passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) + for subjIndex in range(0, subjCount): + # if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): + # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) + # sys.exit(-1) + subjImgDictionary = ListOfPassiveImagesDictionaries[subjIndex] + subjToAtlasTransform = transformation_series[subjIndex] + for imgname, img in list(subjImgDictionary.items()): + flattened_images.append(img) + flattened_image_nametypes.append(imgname) + flattened_transforms.append(subjToAtlasTransform) + print("HACK: flattened images {0}\n".format(flattened_images)) + print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) + print("HACK: flattened txfms {0}\n".format(flattened_transforms)) + return flattened_images, flattened_transforms, flattened_image_nametypes + + +def ANTSTemplateBuildSingleIterationWF(iterationPhasePrefix=''): + """ + + Inputs:: + + inputspec.images : + inputspec.fixed_image : + inputspec.ListOfPassiveImagesDictionaries : + + Outputs:: + + outputspec.template : + outputspec.transforms_list : + outputspec.passive_deformed_templates : + """ + + TemplateBuildSingleIterationWF = pe.Workflow( + name='ANTSTemplateBuildSingleIterationWF_' + + str(str(iterationPhasePrefix))) + + inputSpec = pe.Node( + interface=util.IdentityInterface(fields=[ + 'images', 'fixed_image', 'ListOfPassiveImagesDictionaries' + ]), + run_without_submitting=True, + name='inputspec') + # HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that + # they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger + # their hash to change. + # HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes + # HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. + outputSpec = pe.Node( + interface=util.IdentityInterface(fields=[ + 'template', 'transforms_list', 'passive_deformed_templates' + ]), + run_without_submitting=True, + name='outputspec') + + # NOTE MAP NODE! 
warp each of the original images to the provided fixed_image as the template + BeginANTS = pe.MapNode( + interface=ANTS(), name='BeginANTS', iterfield=['moving_image']) + BeginANTS.inputs.dimension = 3 + BeginANTS.inputs.output_transform_prefix = str( + iterationPhasePrefix) + '_tfm' + BeginANTS.inputs.metric = ['CC'] + BeginANTS.inputs.metric_weight = [1.0] + BeginANTS.inputs.radius = [5] + BeginANTS.inputs.transformation_model = 'SyN' + BeginANTS.inputs.gradient_step_length = 0.25 + BeginANTS.inputs.number_of_iterations = [50, 35, 15] + BeginANTS.inputs.number_of_affine_iterations = [ + 10000, 10000, 10000, 10000, 10000 + ] + BeginANTS.inputs.use_histogram_matching = True + BeginANTS.inputs.mi_option = [32, 16000] + BeginANTS.inputs.regularization = 'Gauss' + BeginANTS.inputs.regularization_gradient_field_sigma = 3 + BeginANTS.inputs.regularization_deformation_field_sigma = 0 + TemplateBuildSingleIterationWF.connect(inputSpec, 'images', BeginANTS, + 'moving_image') + TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, + 'fixed_image') + + MakeTransformsLists = pe.Node( + interface=util.Function( + function=MakeListsOfTransformLists, + input_names=['warpTransformList', 'AffineTransformList'], + output_names=['out']), + run_without_submitting=True, + name='MakeTransformsLists') + MakeTransformsLists.interface.ignore_exception = True + TemplateBuildSingleIterationWF.connect( + BeginANTS, 'warp_transform', MakeTransformsLists, 'warpTransformList') + TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', + MakeTransformsLists, + 'AffineTransformList') + + # Now warp all the input_images images + wimtdeformed = pe.MapNode( + interface=WarpImageMultiTransform(), + iterfield=['transformation_series', 'input_image'], + name='wimtdeformed') + TemplateBuildSingleIterationWF.connect(inputSpec, 'images', wimtdeformed, + 'input_image') + TemplateBuildSingleIterationWF.connect( + MakeTransformsLists, 'out', wimtdeformed, 'transformation_series') + + # Shape Update Next ===== + # Now Average All input_images deformed images together to create an updated template average + AvgDeformedImages = pe.Node( + interface=AverageImages(), name='AvgDeformedImages') + AvgDeformedImages.inputs.dimension = 3 + AvgDeformedImages.inputs.output_average_image = str( + iterationPhasePrefix) + '.nii.gz' + AvgDeformedImages.inputs.normalize = True + TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", + AvgDeformedImages, 'images') + + # Now average all affine transforms together + AvgAffineTransform = pe.Node( + interface=AverageAffineTransform(), name='AvgAffineTransform') + AvgAffineTransform.inputs.dimension = 3 + AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str( + iterationPhasePrefix) + '_Affine.mat' + TemplateBuildSingleIterationWF.connect(BeginANTS, 'affine_transform', + AvgAffineTransform, 'transforms') + + # Now average the warp fields togther + AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages') + AvgWarpImages.inputs.dimension = 3 + AvgWarpImages.inputs.output_average_image = str( + iterationPhasePrefix) + 'warp.nii.gz' + AvgWarpImages.inputs.normalize = True + TemplateBuildSingleIterationWF.connect(BeginANTS, 'warp_transform', + AvgWarpImages, 'images') + + # Now average the images together + # TODO: For now GradientStep is set to 0.25 as a hard coded default value. 
+ GradientStep = 0.25 + GradientStepWarpImage = pe.Node( + interface=MultiplyImages(), name='GradientStepWarpImage') + GradientStepWarpImage.inputs.dimension = 3 + GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep + GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str( + iterationPhasePrefix) + '_warp.nii.gz' + TemplateBuildSingleIterationWF.connect( + AvgWarpImages, 'output_average_image', GradientStepWarpImage, + 'first_input') + + # Now create the new template shape based on the average of all deformed images + UpdateTemplateShape = pe.Node( + interface=WarpImageMultiTransform(), name='UpdateTemplateShape') + UpdateTemplateShape.inputs.invert_affine = [1] + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', UpdateTemplateShape, + 'reference_image') + TemplateBuildSingleIterationWF.connect( + AvgAffineTransform, 'affine_transform', UpdateTemplateShape, + 'transformation_series') + TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, + 'output_product_image', + UpdateTemplateShape, 'input_image') + + ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node( + interface=util.Function( + function=MakeTransformListWithGradientWarps, + input_names=['averageAffineTranform', 'gradientStepWarp'], + output_names=['TransformListWithGradientWarps']), + run_without_submitting=True, + name='MakeTransformListWithGradientWarps') + ApplyInvAverageAndFourTimesGradientStepWarpImage.interface.ignore_exception = True + + TemplateBuildSingleIterationWF.connect( + AvgAffineTransform, 'affine_transform', + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'averageAffineTranform') + TemplateBuildSingleIterationWF.connect( + UpdateTemplateShape, 'output_image', + ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') + + ReshapeAverageImageWithShapeUpdate = pe.Node( + interface=WarpImageMultiTransform(), + name='ReshapeAverageImageWithShapeUpdate') + ReshapeAverageImageWithShapeUpdate.inputs.invert_affine = [1] + ReshapeAverageImageWithShapeUpdate.inputs.out_postfix = '_Reshaped' + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', + ReshapeAverageImageWithShapeUpdate, 'input_image') + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', + ReshapeAverageImageWithShapeUpdate, 'reference_image') + TemplateBuildSingleIterationWF.connect( + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, + 'transformation_series') + TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, + 'output_image', outputSpec, + 'template') + + ###### + ###### + # Process all the passive deformed images in a way similar to the main image used for registration + ###### + ###### + ###### + ############################################## + # Now warp all the ListOfPassiveImagesDictionaries images + FlattenTransformAndImagesListNode = pe.Node( + Function( + function=FlattenTransformAndImagesList, + input_names=[ + 'ListOfPassiveImagesDictionaries', 'transformation_series' + ], + output_names=[ + 'flattened_images', 'flattened_transforms', + 'flattened_image_nametypes' + ]), + run_without_submitting=True, + name="99_FlattenTransformAndImagesList") + TemplateBuildSingleIterationWF.connect( + inputSpec, 'ListOfPassiveImagesDictionaries', + FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries') + TemplateBuildSingleIterationWF.connect(MakeTransformsLists, 'out', + 
FlattenTransformAndImagesListNode, + 'transformation_series') + wimtPassivedeformed = pe.MapNode( + interface=WarpImageMultiTransform(), + iterfield=['transformation_series', 'input_image'], + name='wimtPassivedeformed') + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', wimtPassivedeformed, + 'reference_image') + TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, + 'flattened_images', + wimtPassivedeformed, 'input_image') + TemplateBuildSingleIterationWF.connect( + FlattenTransformAndImagesListNode, 'flattened_transforms', + wimtPassivedeformed, 'transformation_series') + + RenestDeformedPassiveImagesNode = pe.Node( + Function( + function=RenestDeformedPassiveImages, + input_names=['deformedPassiveImages', 'flattened_image_nametypes'], + output_names=[ + 'nested_imagetype_list', 'outputAverageImageName_list', + 'image_type_list' + ]), + run_without_submitting=True, + name="99_RenestDeformedPassiveImages") + TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', + RenestDeformedPassiveImagesNode, + 'deformedPassiveImages') + TemplateBuildSingleIterationWF.connect( + FlattenTransformAndImagesListNode, 'flattened_image_nametypes', + RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') + # Now Average All passive input_images deformed images together to create an updated template average + AvgDeformedPassiveImages = pe.MapNode( + interface=AverageImages(), + iterfield=['images', 'output_average_image'], + name='AvgDeformedPassiveImages') + AvgDeformedPassiveImages.inputs.dimension = 3 + AvgDeformedPassiveImages.inputs.normalize = False + TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, + "nested_imagetype_list", + AvgDeformedPassiveImages, 'images') + TemplateBuildSingleIterationWF.connect( + RenestDeformedPassiveImagesNode, "outputAverageImageName_list", + AvgDeformedPassiveImages, 'output_average_image') + + # -- TODO: Now neeed to reshape all the passive images as well + ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode( + interface=WarpImageMultiTransform(), + iterfield=['input_image', 'reference_image', 'out_postfix'], + name='ReshapeAveragePassiveImageWithShapeUpdate') + ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_affine = [1] + TemplateBuildSingleIterationWF.connect( + RenestDeformedPassiveImagesNode, "image_type_list", + ReshapeAveragePassiveImageWithShapeUpdate, 'out_postfix') + TemplateBuildSingleIterationWF.connect( + AvgDeformedPassiveImages, 'output_average_image', + ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') + TemplateBuildSingleIterationWF.connect( + AvgDeformedPassiveImages, 'output_average_image', + ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') + TemplateBuildSingleIterationWF.connect( + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'TransformListWithGradientWarps', + ReshapeAveragePassiveImageWithShapeUpdate, 'transformation_series') + TemplateBuildSingleIterationWF.connect( + ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, + 'passive_deformed_templates') + + return TemplateBuildSingleIterationWF diff --git a/nipype/workflows/smri/ants/__init__.py b/nipype/workflows/smri/ants/__init__.py new file mode 100644 index 0000000000..3cb140771c --- /dev/null +++ b/nipype/workflows/smri/ants/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +from .ANTSBuildTemplate import ANTSTemplateBuildSingleIterationWF +from .antsRegistrationBuildTemplate import antsRegistrationTemplateBuildSingleIterationWF 
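The two workflow factories exported here each build a single iteration of
template construction; callers are expected to stack several iterations,
feeding each iteration's template into the next one's fixed image. A minimal
sketch of that chaining (illustrative only: the iteration count and the wiring
of the first iteration's image inputs are assumptions, not part of this patch):

    from nipype.pipeline import engine as pe
    from nipype.workflows.smri.ants import (
        antsRegistrationTemplateBuildSingleIterationWF)

    meta_wf = pe.Workflow(name='build_template')
    prev_it = None
    for i in range(3):  # assumed iteration count
        it_wf = antsRegistrationTemplateBuildSingleIterationWF(
            iterationPhasePrefix='iter{0}'.format(i))
        if prev_it is not None:
            # the template of iteration i-1 is the fixed image of iteration i
            meta_wf.connect(prev_it, 'outputspec.template',
                            it_wf, 'inputspec.fixed_image')
        prev_it = it_wf
    # each iteration still needs inputspec.ListOfImagesDictionaries,
    # inputspec.registrationImageTypes and inputspec.interpolationMapping
    # set or connected by the caller.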
diff --git a/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py b/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py new file mode 100644 index 0000000000..3574935fc1 --- /dev/null +++ b/nipype/workflows/smri/ants/antsRegistrationBuildTemplate.py @@ -0,0 +1,535 @@ +# -*- coding: utf-8 -*- +################################################################################# +# Program: Build Template Parallel +# Language: Python +## +# Authors: Jessica Forbes, Grace Murray, and Hans Johnson, University of Iowa +## +# This software is distributed WITHOUT ANY WARRANTY; without even +# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +# PURPOSE. +## +################################################################################# +from __future__ import print_function +from builtins import range + +from ....pipeline import engine as pe +from ....interfaces import utility as util +from ....interfaces.utility import Function + +from ....interfaces.ants import (Registration, ApplyTransforms, AverageImages, + MultiplyImages, AverageAffineTransform) + + +def makeListOfOneElement(inputFile): + outputList = [inputFile] + return outputList + + +def GetFirstListElement(this_list): + return this_list[0] + + +def MakeTransformListWithGradientWarps(averageAffineTranform, + gradientStepWarp): + return [ + averageAffineTranform, gradientStepWarp, gradientStepWarp, + gradientStepWarp, gradientStepWarp + ] + + +def RenestDeformedPassiveImages(deformedPassiveImages, + flattened_image_nametypes, + interpolationMapping): + import os + """ Now make a list of lists of images where the outter list is per image type, + and the inner list is the same size as the number of subjects to be averaged. + In this case, the first element will be a list of all the deformed T2's, and + the second element will be a list of all deformed POSTERIOR_AIR, etc.. + """ + all_images_size = len(deformedPassiveImages) + image_dictionary_of_lists = dict() + nested_imagetype_list = list() + outputAverageImageName_list = list() + image_type_list = list() + nested_interpolation_type = list() + # make empty_list, this is not efficient, but it works + for name in flattened_image_nametypes: + image_dictionary_of_lists[name] = list() + for index in range(0, all_images_size): + curr_name = flattened_image_nametypes[index] + curr_file = deformedPassiveImages[index] + image_dictionary_of_lists[curr_name].append(curr_file) + for image_type, image_list in list(image_dictionary_of_lists.items()): + nested_imagetype_list.append(image_list) + outputAverageImageName_list.append('AVG_' + image_type + '.nii.gz') + image_type_list.append('WARP_AVG_' + image_type) + if image_type in interpolationMapping: + nested_interpolation_type.append(interpolationMapping[image_type]) + else: + nested_interpolation_type.append( + 'Linear') # Linear is the default. 
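+    # Hedged illustration (hypothetical names): two subjects each providing a
+    # deformed 'T2' image yield nested_imagetype_list
+    # [['s1_T2.nii', 's2_T2.nii']], outputAverageImageName_list
+    # ['AVG_T2.nii.gz'], image_type_list ['WARP_AVG_T2'], and
+    # nested_interpolation_type ['Linear'] unless interpolationMapping
+    # overrides it.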
+ print("\n" * 10) + print("HACK: ", nested_imagetype_list) + print("HACK: ", outputAverageImageName_list) + print("HACK: ", image_type_list) + print("HACK: ", nested_interpolation_type) + return nested_imagetype_list, outputAverageImageName_list, image_type_list, nested_interpolation_type + + +def SplitAffineAndWarpComponents(list_of_transforms_lists): + # Nota bene: The outputs will include the initial_moving_transform from Registration (which depends on what + # the invert_initial_moving_transform is set to) + affine_component_list = [] + warp_component_list = [] + for transform in list_of_transforms_lists: + affine_component_list.append(transform[0]) + warp_component_list.append(transform[1]) + print("HACK ", affine_component_list, " ", warp_component_list) + return affine_component_list, warp_component_list + + +# Flatten and return equal length transform and images lists. + + +def FlattenTransformAndImagesList(ListOfPassiveImagesDictionaries, transforms, + invert_transform_flags, + interpolationMapping): + import sys + print("HACK: DEBUG: ListOfPassiveImagesDictionaries\n{lpi}\n".format( + lpi=ListOfPassiveImagesDictionaries)) + subjCount = len(ListOfPassiveImagesDictionaries) + tranCount = len(transforms) + if subjCount != tranCount: + print("ERROR: subjCount must equal tranCount {0} != {1}".format( + subjCount, tranCount)) + sys.exit(-1) + invertTfmsFlagsCount = len(invert_transform_flags) + if subjCount != invertTfmsFlagsCount: + print("ERROR: subjCount must equal invertTfmsFlags {0} != {1}".format( + subjCount, invertTfmsFlagsCount)) + sys.exit(-1) + flattened_images = list() + flattened_image_nametypes = list() + flattened_transforms = list() + flattened_invert_transform_flags = list() + flattened_interpolation_type = list() + passiveImagesCount = len(ListOfPassiveImagesDictionaries[0]) + for subjIndex in range(0, subjCount): + # if passiveImagesCount != len(ListOfPassiveImagesDictionaries[subjIndex]): + # print "ERROR: all image lengths must be equal {0} != {1}".format(passiveImagesCount,len(ListOfPassiveImagesDictionaries[subjIndex])) + # sys.exit(-1) + subjImgDictionary = ListOfPassiveImagesDictionaries[subjIndex] + subjToAtlasTransform = transforms[subjIndex] + subjToAtlasInvertFlags = invert_transform_flags[subjIndex] + for imgname, img in list(subjImgDictionary.items()): + flattened_images.append(img) + flattened_image_nametypes.append(imgname) + flattened_transforms.append(subjToAtlasTransform) + flattened_invert_transform_flags.append(subjToAtlasInvertFlags) + if imgname in interpolationMapping: + flattened_interpolation_type.append( + interpolationMapping[imgname]) + else: + flattened_interpolation_type.append( + 'Linear') # Linear is the default. + print("HACK: flattened images {0}\n".format(flattened_images)) + print("HACK: flattened nametypes {0}\n".format(flattened_image_nametypes)) + print("HACK: flattened txfms {0}\n".format(flattened_transforms)) + print("HACK: flattened txfmsFlags{0}\n".format( + flattened_invert_transform_flags)) + return flattened_images, flattened_transforms, flattened_invert_transform_flags, flattened_image_nametypes, flattened_interpolation_type + + +def GetMovingImages(ListOfImagesDictionaries, registrationImageTypes, + interpolationMapping): + """ This currently ONLY works when registrationImageTypes has + length of exactly 1. When the new multi-variate registration + is introduced, it will be expanded. 
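+
+    A hedged illustration with hypothetical filenames::
+
+        GetMovingImages([{'T1': 's1_T1.nii'}, {'T1': 's2_T1.nii'}],
+                        ['T1'], {'T1': 'Linear'})
+        # -> (['s1_T1.nii', 's2_T1.nii'], 'Linear')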
+ """ + if len(registrationImageTypes) != 1: + print("ERROR: Multivariate imageing not supported yet!") + return [] + moving_images = [ + mdict[registrationImageTypes[0]] for mdict in ListOfImagesDictionaries + ] + moving_interpolation_type = interpolationMapping[registrationImageTypes[0]] + return moving_images, moving_interpolation_type + + +def GetPassiveImages(ListOfImagesDictionaries, registrationImageTypes): + if len(registrationImageTypes) != 1: + print("ERROR: Multivariate imageing not supported yet!") + return [dict()] + passive_images = list() + for mdict in ListOfImagesDictionaries: + ThisSubjectPassiveImages = dict() + for key, value in list(mdict.items()): + if key not in registrationImageTypes: + ThisSubjectPassiveImages[key] = value + passive_images.append(ThisSubjectPassiveImages) + return passive_images + + +## +# NOTE: The modes can be either 'SINGLE_IMAGE' or 'MULTI' +# 'SINGLE_IMAGE' is quick shorthand when you are building an atlas with a single subject, then registration can +# be short-circuted +# any other string indicates the normal mode that you would expect and replicates the shell script build_template_parallel.sh + + +def antsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''): + """ + + Inputs:: + + inputspec.images : + inputspec.fixed_image : + inputspec.ListOfPassiveImagesDictionaries : + inputspec.interpolationMapping : + + Outputs:: + + outputspec.template : + outputspec.transforms_list : + outputspec.passive_deformed_templates : + """ + TemplateBuildSingleIterationWF = pe.Workflow( + name='antsRegistrationTemplateBuildSingleIterationWF_' + + str(iterationPhasePrefix)) + + inputSpec = pe.Node( + interface=util.IdentityInterface(fields=[ + 'ListOfImagesDictionaries', 'registrationImageTypes', + 'interpolationMapping', 'fixed_image' + ]), + run_without_submitting=True, + name='inputspec') + # HACK: TODO: Need to move all local functions to a common untility file, or at the top of the file so that + # they do not change due to re-indenting. Otherwise re-indenting for flow control will trigger + # their hash to change. + # HACK: TODO: REMOVE 'transforms_list' it is not used. That will change all the hashes + # HACK: TODO: Need to run all python files through the code beutifiers. It has gotten pretty ugly. + outputSpec = pe.Node( + interface=util.IdentityInterface(fields=[ + 'template', 'transforms_list', 'passive_deformed_templates' + ]), + run_without_submitting=True, + name='outputspec') + + # NOTE MAP NODE! 
warp each of the original images to the provided fixed_image as the template + BeginANTS = pe.MapNode( + interface=Registration(), name='BeginANTS', iterfield=['moving_image']) + BeginANTS.inputs.dimension = 3 + BeginANTS.inputs.output_transform_prefix = str( + iterationPhasePrefix) + '_tfm' + BeginANTS.inputs.transforms = ["Affine", "SyN"] + BeginANTS.inputs.transform_parameters = [[0.9], [0.25, 3.0, 0.0]] + BeginANTS.inputs.metric = ['Mattes', 'CC'] + BeginANTS.inputs.metric_weight = [1.0, 1.0] + BeginANTS.inputs.radius_or_number_of_bins = [32, 5] + BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000], [50, 35, 15]] + BeginANTS.inputs.use_histogram_matching = [True, True] + BeginANTS.inputs.use_estimate_learning_rate_once = [False, False] + BeginANTS.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]] + BeginANTS.inputs.smoothing_sigmas = [[3, 2, 0], [3, 2, 0]] + BeginANTS.inputs.sigma_units = ["vox"] * 2 + + GetMovingImagesNode = pe.Node( + interface=util.Function( + function=GetMovingImages, + input_names=[ + 'ListOfImagesDictionaries', 'registrationImageTypes', + 'interpolationMapping' + ], + output_names=['moving_images', 'moving_interpolation_type']), + run_without_submitting=True, + name='99_GetMovingImagesNode') + TemplateBuildSingleIterationWF.connect( + inputSpec, 'ListOfImagesDictionaries', GetMovingImagesNode, + 'ListOfImagesDictionaries') + TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', + GetMovingImagesNode, + 'registrationImageTypes') + TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', + GetMovingImagesNode, + 'interpolationMapping') + + TemplateBuildSingleIterationWF.connect( + GetMovingImagesNode, 'moving_images', BeginANTS, 'moving_image') + TemplateBuildSingleIterationWF.connect(GetMovingImagesNode, + 'moving_interpolation_type', + BeginANTS, 'interpolation') + TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS, + 'fixed_image') + + # Now warp all the input_images images + wimtdeformed = pe.MapNode( + interface=ApplyTransforms(), + iterfield=['transforms', 'invert_transform_flags', 'input_image'], + name='wimtdeformed') + wimtdeformed.inputs.interpolation = 'Linear' + wimtdeformed.default_value = 0 + TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', + wimtdeformed, 'transforms') + TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', + wimtdeformed, + 'invert_transform_flags') + TemplateBuildSingleIterationWF.connect( + GetMovingImagesNode, 'moving_images', wimtdeformed, 'input_image') + TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', + wimtdeformed, 'reference_image') + + # Shape Update Next ===== + # Now Average All input_images deformed images together to create an updated template average + AvgDeformedImages = pe.Node( + interface=AverageImages(), name='AvgDeformedImages') + AvgDeformedImages.inputs.dimension = 3 + AvgDeformedImages.inputs.output_average_image = str( + iterationPhasePrefix) + '.nii.gz' + AvgDeformedImages.inputs.normalize = True + TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image", + AvgDeformedImages, 'images') + + # Now average all affine transforms together + AvgAffineTransform = pe.Node( + interface=AverageAffineTransform(), name='AvgAffineTransform') + AvgAffineTransform.inputs.dimension = 3 + AvgAffineTransform.inputs.output_affine_transform = 'Avererage_' + str( + iterationPhasePrefix) + '_Affine.mat' + + SplitAffineAndWarpsNode = pe.Node( + interface=util.Function( + 
function=SplitAffineAndWarpComponents, + input_names=['list_of_transforms_lists'], + output_names=['affine_component_list', 'warp_component_list']), + run_without_submitting=True, + name='99_SplitAffineAndWarpsNode') + TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', + SplitAffineAndWarpsNode, + 'list_of_transforms_lists') + TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, + 'affine_component_list', + AvgAffineTransform, 'transforms') + + # Now average the warp fields togther + AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages') + AvgWarpImages.inputs.dimension = 3 + AvgWarpImages.inputs.output_average_image = str( + iterationPhasePrefix) + 'warp.nii.gz' + AvgWarpImages.inputs.normalize = True + TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode, + 'warp_component_list', + AvgWarpImages, 'images') + + # Now average the images together + # TODO: For now GradientStep is set to 0.25 as a hard coded default value. + GradientStep = 0.25 + GradientStepWarpImage = pe.Node( + interface=MultiplyImages(), name='GradientStepWarpImage') + GradientStepWarpImage.inputs.dimension = 3 + GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep + GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str( + iterationPhasePrefix) + '_warp.nii.gz' + TemplateBuildSingleIterationWF.connect( + AvgWarpImages, 'output_average_image', GradientStepWarpImage, + 'first_input') + + # Now create the new template shape based on the average of all deformed images + UpdateTemplateShape = pe.Node( + interface=ApplyTransforms(), name='UpdateTemplateShape') + UpdateTemplateShape.inputs.invert_transform_flags = [True] + UpdateTemplateShape.inputs.interpolation = 'Linear' + UpdateTemplateShape.default_value = 0 + + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', UpdateTemplateShape, + 'reference_image') + TemplateBuildSingleIterationWF.connect([ + (AvgAffineTransform, UpdateTemplateShape, + [(('affine_transform', makeListOfOneElement), 'transforms')]), + ]) + TemplateBuildSingleIterationWF.connect(GradientStepWarpImage, + 'output_product_image', + UpdateTemplateShape, 'input_image') + + ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node( + interface=util.Function( + function=MakeTransformListWithGradientWarps, + input_names=['averageAffineTranform', 'gradientStepWarp'], + output_names=['TransformListWithGradientWarps']), + run_without_submitting=True, + name='99_MakeTransformListWithGradientWarps') + ApplyInvAverageAndFourTimesGradientStepWarpImage.interface.ignore_exception = True + + TemplateBuildSingleIterationWF.connect( + AvgAffineTransform, 'affine_transform', + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'averageAffineTranform') + TemplateBuildSingleIterationWF.connect( + UpdateTemplateShape, 'output_image', + ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp') + + ReshapeAverageImageWithShapeUpdate = pe.Node( + interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate') + ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [ + True, False, False, False, False + ] + ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear' + ReshapeAverageImageWithShapeUpdate.default_value = 0 + ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz' + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', + ReshapeAverageImageWithShapeUpdate, 'input_image') + 
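+    # Note: the transform list wired into 'transforms' below is
+    # [average_affine, gradient_warp x4] with invert flags
+    # [True, False, False, False, False]; because gradient_warp is
+    # -0.25 * the mean warp, applying it four times moves the average image by
+    # (approximately) the full negated mean deformation, mirroring the shape
+    # update in build_template_parallel.sh.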
TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', + ReshapeAverageImageWithShapeUpdate, 'reference_image') + TemplateBuildSingleIterationWF.connect( + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate, + 'transforms') + TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate, + 'output_image', outputSpec, + 'template') + + ###### + ###### + # Process all the passive deformed images in a way similar to the main image used for registration + ###### + ###### + ###### + ############################################## + # Now warp all the ListOfPassiveImagesDictionaries images + FlattenTransformAndImagesListNode = pe.Node( + Function( + function=FlattenTransformAndImagesList, + input_names=[ + 'ListOfPassiveImagesDictionaries', 'transforms', + 'invert_transform_flags', 'interpolationMapping' + ], + output_names=[ + 'flattened_images', 'flattened_transforms', + 'flattened_invert_transform_flags', + 'flattened_image_nametypes', 'flattened_interpolation_type' + ]), + run_without_submitting=True, + name="99_FlattenTransformAndImagesList") + + GetPassiveImagesNode = pe.Node( + interface=util.Function( + function=GetPassiveImages, + input_names=['ListOfImagesDictionaries', 'registrationImageTypes'], + output_names=['ListOfPassiveImagesDictionaries']), + run_without_submitting=True, + name='99_GetPassiveImagesNode') + TemplateBuildSingleIterationWF.connect( + inputSpec, 'ListOfImagesDictionaries', GetPassiveImagesNode, + 'ListOfImagesDictionaries') + TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes', + GetPassiveImagesNode, + 'registrationImageTypes') + + TemplateBuildSingleIterationWF.connect( + GetPassiveImagesNode, 'ListOfPassiveImagesDictionaries', + FlattenTransformAndImagesListNode, 'ListOfPassiveImagesDictionaries') + TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', + FlattenTransformAndImagesListNode, + 'interpolationMapping') + TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms', + FlattenTransformAndImagesListNode, + 'transforms') + TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags', + FlattenTransformAndImagesListNode, + 'invert_transform_flags') + wimtPassivedeformed = pe.MapNode( + interface=ApplyTransforms(), + iterfield=[ + 'transforms', 'invert_transform_flags', 'input_image', + 'interpolation' + ], + name='wimtPassivedeformed') + wimtPassivedeformed.default_value = 0 + TemplateBuildSingleIterationWF.connect( + AvgDeformedImages, 'output_average_image', wimtPassivedeformed, + 'reference_image') + TemplateBuildSingleIterationWF.connect( + FlattenTransformAndImagesListNode, 'flattened_interpolation_type', + wimtPassivedeformed, 'interpolation') + TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, + 'flattened_images', + wimtPassivedeformed, 'input_image') + TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode, + 'flattened_transforms', + wimtPassivedeformed, 'transforms') + TemplateBuildSingleIterationWF.connect( + FlattenTransformAndImagesListNode, 'flattened_invert_transform_flags', + wimtPassivedeformed, 'invert_transform_flags') + + RenestDeformedPassiveImagesNode = pe.Node( + Function( + function=RenestDeformedPassiveImages, + input_names=[ + 'deformedPassiveImages', 'flattened_image_nametypes', + 'interpolationMapping' + ], + output_names=[ + 'nested_imagetype_list', 'outputAverageImageName_list', + 'image_type_list', 
'nested_interpolation_type' + ]), + run_without_submitting=True, + name="99_RenestDeformedPassiveImages") + TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping', + RenestDeformedPassiveImagesNode, + 'interpolationMapping') + TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image', + RenestDeformedPassiveImagesNode, + 'deformedPassiveImages') + TemplateBuildSingleIterationWF.connect( + FlattenTransformAndImagesListNode, 'flattened_image_nametypes', + RenestDeformedPassiveImagesNode, 'flattened_image_nametypes') + # Now Average All passive input_images deformed images together to create an updated template average + AvgDeformedPassiveImages = pe.MapNode( + interface=AverageImages(), + iterfield=['images', 'output_average_image'], + name='AvgDeformedPassiveImages') + AvgDeformedPassiveImages.inputs.dimension = 3 + AvgDeformedPassiveImages.inputs.normalize = False + TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode, + "nested_imagetype_list", + AvgDeformedPassiveImages, 'images') + TemplateBuildSingleIterationWF.connect( + RenestDeformedPassiveImagesNode, "outputAverageImageName_list", + AvgDeformedPassiveImages, 'output_average_image') + + # -- TODO: Now neeed to reshape all the passive images as well + ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode( + interface=ApplyTransforms(), + iterfield=[ + 'input_image', 'reference_image', 'output_image', 'interpolation' + ], + name='ReshapeAveragePassiveImageWithShapeUpdate') + ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [ + True, False, False, False, False + ] + ReshapeAveragePassiveImageWithShapeUpdate.default_value = 0 + TemplateBuildSingleIterationWF.connect( + RenestDeformedPassiveImagesNode, 'nested_interpolation_type', + ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation') + TemplateBuildSingleIterationWF.connect( + RenestDeformedPassiveImagesNode, 'outputAverageImageName_list', + ReshapeAveragePassiveImageWithShapeUpdate, 'output_image') + TemplateBuildSingleIterationWF.connect( + AvgDeformedPassiveImages, 'output_average_image', + ReshapeAveragePassiveImageWithShapeUpdate, 'input_image') + TemplateBuildSingleIterationWF.connect( + AvgDeformedPassiveImages, 'output_average_image', + ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image') + TemplateBuildSingleIterationWF.connect( + ApplyInvAverageAndFourTimesGradientStepWarpImage, + 'TransformListWithGradientWarps', + ReshapeAveragePassiveImageWithShapeUpdate, 'transforms') + TemplateBuildSingleIterationWF.connect( + ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec, + 'passive_deformed_templates') + + return TemplateBuildSingleIterationWF diff --git a/nipype/workflows/smri/freesurfer/__init__.py b/nipype/workflows/smri/freesurfer/__init__.py new file mode 100644 index 0000000000..caa854f9c9 --- /dev/null +++ b/nipype/workflows/smri/freesurfer/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +from .utils import (create_getmask_flow, create_get_stats_flow, + create_tessellation_flow) +from .bem import create_bem_flow +from .recon import create_skullstripped_recon_flow, create_reconall_workflow diff --git a/nipype/workflows/smri/freesurfer/autorecon1.py b/nipype/workflows/smri/freesurfer/autorecon1.py new file mode 100644 index 0000000000..0973e210a7 --- /dev/null +++ b/nipype/workflows/smri/freesurfer/autorecon1.py @@ -0,0 +1,512 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from ....utils 
import NUMPY_MMAP +from ....pipeline import engine as pe +from ....interfaces.utility import Function, IdentityInterface +from ....interfaces.freesurfer import * +from .utils import copy_file + + +def checkT1s(T1_files, cw256=False): + """Verifying size of inputs and setting workflow parameters""" + import sys + import nibabel as nb + from nipype.utils.filemanip import ensure_list + + T1_files = ensure_list(T1_files) + if len(T1_files) == 0: + print("ERROR: No T1's Given") + sys.exit(-1) + + shape = nb.load(T1_files[0]).shape + for t1 in T1_files[1:]: + if nb.load(t1, mmap=NUMPY_MMAP).shape != shape: + print("ERROR: T1s not the same size. Cannot process {0} and {1} " + "together".format(T1_files[0], t1)) + sys.exit(-1) + + origvol_names = ["{0:03d}.mgz".format(i + 1) for i in range(len(T1_files))] + + # check if cw256 is set to crop the images if size is larger than 256 + if not cw256 and any(dim > 256 for dim in shape): + print("Setting MRI Convert to crop images to 256 FOV") + cw256 = True + + resample_type = 'cubic' if len(T1_files) > 1 else 'interpolate' + return T1_files, cw256, resample_type, origvol_names + + +def create_AutoRecon1(name="AutoRecon1", + longitudinal=False, + distance=None, + custom_atlas=None, + plugin_args=None, + shrink=None, + stop=None, + fsvernum=5.3): + """Creates the AutoRecon1 workflow in nipype. + + Inputs:: + inputspec.T1_files : T1 files (mandatory) + inputspec.T2_file : T2 file (optional) + inputspec.FLAIR_file : FLAIR file (optional) + inputspec.cw256 : Conform inputs to 256 FOV (optional) + inputspec.num_threads: Number of threads to use with EM Register (default=1) + Outpus:: + + """ + ar1_wf = pe.Workflow(name=name) + inputspec = pe.Node( + interface=IdentityInterface(fields=[ + 'T1_files', 'T2_file', 'FLAIR_file', 'cw256', 'num_threads', + 'reg_template_withskull', 'awk_file' + ]), + run_without_submitting=True, + name='inputspec') + + if not longitudinal: + # single session processing + verify_inputs = pe.Node( + Function(["T1_files", "cw256"], + ["T1_files", "cw256", "resample_type", "origvol_names"], + checkT1s), + name="Check_T1s") + ar1_wf.connect([(inputspec, verify_inputs, [('T1_files', 'T1_files'), + ('cw256', 'cw256')])]) + + # T1 image preparation + # For all T1's mri_convert ${InputVol} ${out_file} + T1_image_preparation = pe.MapNode( + MRIConvert(), iterfield=['in_file', 'out_file'], name="T1_prep") + + ar1_wf.connect([ + (verify_inputs, T1_image_preparation, + [('T1_files', 'in_file'), ('origvol_names', 'out_file')]), + ]) + + def convert_modalities(in_file=None, out_file=None): + """Returns an undefined output if the in_file is not defined""" + from nipype.interfaces.freesurfer import MRIConvert + import os + if in_file: + convert = MRIConvert() + convert.inputs.in_file = in_file + convert.inputs.out_file = out_file + convert.inputs.no_scale = True + out = convert.run() + out_file = os.path.abspath(out.outputs.out_file) + return out_file + + T2_convert = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], + convert_modalities), + name="T2_Convert") + T2_convert.inputs.out_file = 'T2raw.mgz' + ar1_wf.connect([(inputspec, T2_convert, [('T2_file', 'in_file')])]) + + FLAIR_convert = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], + convert_modalities), + name="FLAIR_Convert") + FLAIR_convert.inputs.out_file = 'FLAIRraw.mgz' + ar1_wf.connect([(inputspec, FLAIR_convert, [('FLAIR_file', + 'in_file')])]) + else: + # longitudinal inputs + inputspec = pe.Node( + interface=IdentityInterface(fields=[ + 'T1_files', 'iscales', 
'ltas', 'subj_to_template_lta', + 'template_talairach_xfm', 'template_brainmask' + ]), + run_without_submitting=True, + name='inputspec') + + def output_names(T1_files): + """Create file names that are dependent on the number of T1 inputs""" + iscale_names = list() + lta_names = list() + for i, t1 in enumerate(T1_files): + # assign an input number + file_num = str(i + 1) + while len(file_num) < 3: + file_num = '0' + file_num + iscale_names.append("{0}-iscale.txt".format(file_num)) + lta_names.append("{0}.lta".format(file_num)) + return iscale_names, lta_names + + filenames = pe.Node( + Function(['T1_files'], ['iscale_names', 'lta_names'], + output_names), + name="Longitudinal_Filenames") + ar1_wf.connect([(inputspec, filenames, [('T1_files', 'T1_files')])]) + + copy_ltas = pe.MapNode( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + iterfield=['in_file', 'out_file'], + name='Copy_ltas') + ar1_wf.connect([(inputspec, copy_ltas, [('ltas', 'in_file')]), + (filenames, copy_ltas, [('lta_names', 'out_file')])]) + + copy_iscales = pe.MapNode( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + iterfield=['in_file', 'out_file'], + name='Copy_iscales') + ar1_wf.connect([(inputspec, copy_iscales, [('iscales', 'in_file')]), + (filenames, copy_iscales, [('iscale_names', + 'out_file')])]) + + concatenate_lta = pe.MapNode( + ConcatenateLTA(), iterfield=['in_file'], name="Concatenate_ltas") + ar1_wf.connect([(copy_ltas, concatenate_lta, [('out_file', + 'in_file')]), + (inputspec, concatenate_lta, [('subj_to_template_lta', + 'subj_to_base')])]) + + # Motion Correction + """ + When there are multiple source volumes, this step will correct for small + motions between them and then average them together. The output of the + motion corrected average is mri/rawavg.mgz which is then conformed to + 255 cubed char images (1mm isotropic voxels) in mri/orig.mgz. + """ + + def createTemplate(in_files, out_file): + import os + import shutil + if len(in_files) == 1: + # if only 1 T1 scan given, no need to run RobustTemplate + print( + "WARNING: only one run found. 
This is OK, but motion correction "
+                +
+                "cannot be performed on one run, so I'll copy the run to rawavg "
+                + "and continue.")
+            shutil.copyfile(in_files[0], out_file)
+            intensity_scales = None
+            transforms = None
+        else:
+            from nipype.interfaces.freesurfer import RobustTemplate
+            # if multiple T1 scans are given run RobustTemplate
+            intensity_scales = [
+                os.path.basename(f.replace('.mgz', '-iscale.txt'))
+                for f in in_files
+            ]
+            transforms = [
+                os.path.basename(f.replace('.mgz', '.lta')) for f in in_files
+            ]
+            robtemp = RobustTemplate()
+            robtemp.inputs.in_files = in_files
+            robtemp.inputs.average_metric = 'median'
+            robtemp.inputs.out_file = out_file
+            robtemp.inputs.no_iteration = True
+            robtemp.inputs.fixed_timepoint = True
+            robtemp.inputs.auto_detect_sensitivity = True
+            robtemp.inputs.initial_timepoint = 1
+            robtemp.inputs.scaled_intensity_outputs = intensity_scales
+            robtemp.inputs.transform_outputs = transforms
+            robtemp.inputs.subsample_threshold = 200
+            robtemp.inputs.intensity_scaling = True
+            robtemp_result = robtemp.run()
+            # collect the outputs from RobustTemplate
+            out_file = robtemp_result.outputs.out_file
+            intensity_scales = [
+                os.path.abspath(f)
+                for f in robtemp_result.outputs.scaled_intensity_outputs
+            ]
+            transforms = [
+                os.path.abspath(f)
+                for f in robtemp_result.outputs.transform_outputs
+            ]
+        out_file = os.path.abspath(out_file)
+        return out_file, intensity_scales, transforms
+
+    if not longitudinal:
+        create_template = pe.Node(
+            Function(['in_files', 'out_file'],
+                     ['out_file', 'intensity_scales', 'transforms'],
+                     createTemplate),
+            name="Robust_Template")
+        create_template.inputs.out_file = 'rawavg.mgz'
+        ar1_wf.connect([(T1_image_preparation, create_template,
+                         [('out_file', 'in_files')])])
+    else:
+        create_template = pe.Node(RobustTemplate(), name="Robust_Template")
+        create_template.inputs.average_metric = 'median'
+        create_template.inputs.out_file = 'rawavg.mgz'
+        create_template.inputs.no_iteration = True
+        ar1_wf.connect([(concatenate_lta, create_template,
+                         [('out_file', 'initial_transforms')]),
+                        (inputspec, create_template, [('T1_files',
+                                                       'in_files')]),
+                        (copy_iscales, create_template,
+                         [('out_file', 'in_intensity_scales')])])
+
+    # mri_convert
+    conform_template = pe.Node(MRIConvert(), name='Conform_Template')
+    conform_template.inputs.out_file = 'orig.mgz'
+    if not longitudinal:
+        conform_template.inputs.conform = True
+        ar1_wf.connect([(verify_inputs, conform_template,
+                         [('cw256', 'cw256'), ('resample_type',
+                                               'resample_type')])])
+    else:
+        conform_template.inputs.out_datatype = 'uchar'
+
+    ar1_wf.connect([(create_template, conform_template, [('out_file',
+                                                          'in_file')])])
+
+    # Talairach
+    """
+    This computes the affine transform from the orig volume to the MNI305
+    atlas using Avi Snyder's 4dfp suite of image registration tools, through
+    a FreeSurfer script called talairach_avi.
+    Several of the downstream programs use talairach coordinates as seed points.
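+
+    Roughly the command this stage issues (a sketch)::
+
+        talairach_avi --i orig_nu.mgz --xfm talairach.auto.xfm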
+ """ + + bias_correction = pe.Node(MNIBiasCorrection(), name="Bias_correction") + bias_correction.inputs.iterations = 1 + bias_correction.inputs.protocol_iterations = 1000 + bias_correction.inputs.distance = distance + if stop: + bias_correction.inputs.stop = stop + if shrink: + bias_correction.inputs.shrink = shrink + bias_correction.inputs.no_rescale = True + bias_correction.inputs.out_file = 'orig_nu.mgz' + + ar1_wf.connect([ + (conform_template, bias_correction, [('out_file', 'in_file')]), + ]) + + if not longitudinal: + # single session processing + talairach_avi = pe.Node(TalairachAVI(), name="Compute_Transform") + if custom_atlas is not None: + # allows to specify a custom atlas + talairach_avi.inputs.atlas = custom_atlas + talairach_avi.inputs.out_file = 'talairach.auto.xfm' + ar1_wf.connect([(bias_correction, talairach_avi, [('out_file', + 'in_file')])]) + else: + # longitudinal processing + # Just copy the template xfm + talairach_avi = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_Transform') + talairach_avi.inputs.out_file = 'talairach.auto.xfm' + + ar1_wf.connect([(inputspec, talairach_avi, [('template_talairach_xfm', + 'in_file')])]) + + copy_transform = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Transform') + copy_transform.inputs.out_file = 'talairach.xfm' + + ar1_wf.connect([(talairach_avi, copy_transform, [('out_file', + 'in_file')])]) + + # In recon-all the talairach.xfm is added to orig.mgz, even though + # it does not exist yet. This is a compromise to keep from + # having to change the time stamp of the orig volume after talairaching. + # Here we are going to add xfm to the header after the xfm has been created. + # This may mess up the timestamp. + + add_xform_to_orig = pe.Node( + AddXFormToHeader(), name="Add_Transform_to_Orig") + add_xform_to_orig.inputs.copy_name = True + add_xform_to_orig.inputs.out_file = conform_template.inputs.out_file + + ar1_wf.connect( + [(conform_template, add_xform_to_orig, [('out_file', 'in_file')]), + (copy_transform, add_xform_to_orig, [('out_file', 'transform')])]) + + # This node adds the transform to the orig_nu.mgz file. This step does not + # exist in the recon-all workflow, because that workflow adds the talairach + # to the orig.mgz file header before the talairach actually exists. + add_xform_to_orig_nu = pe.Node( + AddXFormToHeader(), name="Add_Transform_to_Orig_Nu") + add_xform_to_orig_nu.inputs.copy_name = True + add_xform_to_orig_nu.inputs.out_file = bias_correction.inputs.out_file + + ar1_wf.connect( + [(bias_correction, add_xform_to_orig_nu, [('out_file', 'in_file')]), + (copy_transform, add_xform_to_orig_nu, [('out_file', 'transform')])]) + + # check the alignment of the talairach + # TODO: Figure out how to read output from this node. + check_alignment = pe.Node( + CheckTalairachAlignment(), name="Check_Talairach_Alignment") + check_alignment.inputs.threshold = 0.005 + ar1_wf.connect([ + (copy_transform, check_alignment, [('out_file', 'in_file')]), + ]) + + if not longitudinal: + + def awkfile(in_file, log_file): + """ + This method uses 'awk' which must be installed prior to running the workflow and is not a + part of nipype or freesurfer. 
+ """ + import subprocess + import os + command = ['awk', '-f', in_file, log_file] + print(''.join(command)) + subprocess.call(command) + log_file = os.path.abspath(log_file) + return log_file + + awk_logfile = pe.Node( + Function(['in_file', 'log_file'], ['log_file'], awkfile), + name='Awk') + + ar1_wf.connect([(talairach_avi, awk_logfile, [('out_log', + 'log_file')]), + (inputspec, awk_logfile, [('awk_file', 'in_file')])]) + + # TODO datasink the output from TalirachQC...not sure how to do this + tal_qc = pe.Node(TalairachQC(), name="Detect_Aligment_Failures") + ar1_wf.connect([(awk_logfile, tal_qc, [('log_file', 'log_file')])]) + + if fsvernum < 6: + # intensity correction is performed before normalization + intensity_correction = pe.Node( + MNIBiasCorrection(), name="Intensity_Correction") + intensity_correction.inputs.out_file = 'nu.mgz' + intensity_correction.inputs.iterations = 2 + ar1_wf.connect([(add_xform_to_orig, intensity_correction, + [('out_file', 'in_file')]), + (copy_transform, intensity_correction, + [('out_file', 'transform')])]) + + add_to_header_nu = pe.Node(AddXFormToHeader(), name="Add_XForm_to_NU") + add_to_header_nu.inputs.copy_name = True + add_to_header_nu.inputs.out_file = 'nu.mgz' + ar1_wf.connect([(intensity_correction, add_to_header_nu, [ + ('out_file', 'in_file'), + ]), (copy_transform, add_to_header_nu, [('out_file', 'transform')])]) + + # Intensity Normalization + # Performs intensity normalization of the orig volume and places the result in mri/T1.mgz. + # Attempts to correct for fluctuations in intensity that would otherwise make intensity-based + # segmentation much more difficult. Intensities for all voxels are scaled so that the mean + # intensity of the white matter is 110. + + mri_normalize = pe.Node(Normalize(), name="Normalize_T1") + mri_normalize.inputs.gradient = 1 + mri_normalize.inputs.out_file = 'T1.mgz' + + if fsvernum < 6: + ar1_wf.connect([(add_to_header_nu, mri_normalize, [('out_file', + 'in_file')])]) + else: + ar1_wf.connect([(add_xform_to_orig_nu, mri_normalize, [('out_file', + 'in_file')])]) + + ar1_wf.connect([(copy_transform, mri_normalize, [('out_file', + 'transform')])]) + + # Skull Strip + """ + Removes the skull from mri/T1.mgz and stores the result in + mri/brainmask.auto.mgz and mri/brainmask.mgz. Runs the mri_watershed program. 
+ """ + if not longitudinal: + mri_em_register = pe.Node(EMRegister(), name="EM_Register") + mri_em_register.inputs.out_file = 'talairach_with_skull.lta' + mri_em_register.inputs.skull = True + if plugin_args: + mri_em_register.plugin_args = plugin_args + + if fsvernum < 6: + ar1_wf.connect(add_to_header_nu, 'out_file', mri_em_register, + 'in_file') + else: + ar1_wf.connect(add_xform_to_orig_nu, 'out_file', mri_em_register, + 'in_file') + + ar1_wf.connect([(inputspec, mri_em_register, + [('num_threads', 'num_threads'), + ('reg_template_withskull', 'template')])]) + + brainmask = pe.Node( + WatershedSkullStrip(), name='Watershed_Skull_Strip') + brainmask.inputs.t1 = True + brainmask.inputs.out_file = 'brainmask.auto.mgz' + ar1_wf.connect([(mri_normalize, brainmask, [('out_file', 'in_file')]), + (mri_em_register, brainmask, [('out_file', + 'transform')]), + (inputspec, brainmask, [('reg_template_withskull', + 'brain_atlas')])]) + else: + copy_template_brainmask = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_Brainmask') + copy_template_brainmask.inputs.out_file = 'brainmask_{0}.mgz'.format( + config['long_template']) + + ar1_wf.connect([(inputspec, copy_template_brainmask, + [('template_brainmask', 'in_file')])]) + + mask1 = pe.Node(ApplyMask(), name="ApplyMask1") + mask1.inputs.keep_mask_deletion_edits = True + mask1.inputs.out_file = 'brainmask.auto.mgz' + + ar1_wf.connect([(mri_normalize, mask1, [('out_file', 'in_file')]), + (copy_template_brainmask, mask1, [('out_file', + 'mask_file')])]) + + brainmask = pe.Node(ApplyMask(), name="ApplyMask2") + brainmask.inputs.keep_mask_deletion_edits = True + brainmask.inputs.transfer = 255 + brainmask.inputs.out_file = mask1.inputs.out_file + + ar1_wf.connect([(mask1, brainmask, [('out_file', 'in_file')]), + (copy_template_brainmask, brainmask, [('out_file', + 'mask_file')])]) + + copy_brainmask = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Brainmask') + copy_brainmask.inputs.out_file = 'brainmask.mgz' + + ar1_wf.connect([(brainmask, copy_brainmask, [('out_file', 'in_file')])]) + + outputs = [ + 'origvols', 't2_raw', 'flair', 'rawavg', 'orig_nu', 'orig', + 'talairach_auto', 'talairach', 't1', 'talskull', 'brainmask_auto', + 'brainmask', 'braintemplate' + ] + + if fsvernum < 6: + outputspec = pe.Node( + IdentityInterface(fields=outputs + ['nu']), name="outputspec") + ar1_wf.connect([(add_to_header_nu, outputspec, [('out_file', 'nu')])]) + else: + outputspec = pe.Node( + IdentityInterface(fields=outputs), name="outputspec") + + ar1_wf.connect([ + (T1_image_preparation, outputspec, [('out_file', 'origvols')]), + (T2_convert, outputspec, [('out_file', 't2_raw')]), + (FLAIR_convert, outputspec, [('out_file', 'flair')]), + (create_template, outputspec, [('out_file', 'rawavg')]), + (add_xform_to_orig, outputspec, [('out_file', 'orig')]), + (add_xform_to_orig_nu, outputspec, [('out_file', 'orig_nu')]), + (talairach_avi, outputspec, [('out_file', 'talairach_auto')]), + (copy_transform, outputspec, [('out_file', 'talairach')]), + (mri_normalize, outputspec, [('out_file', 't1')]), + (brainmask, outputspec, [('out_file', 'brainmask_auto')]), + (copy_brainmask, outputspec, [('out_file', 'brainmask')]), + ]) + + if not longitudinal: + ar1_wf.connect([ + (mri_em_register, outputspec, [('out_file', 'talskull')]), + ]) + else: + ar1_wf.connect([ + (copy_template_brainmask, outputspec, [('out_file', + 'braintemplate')]), + ]) + + return ar1_wf, outputs diff --git 
a/nipype/workflows/smri/freesurfer/autorecon2.py b/nipype/workflows/smri/freesurfer/autorecon2.py new file mode 100644 index 0000000000..a11587412d --- /dev/null +++ b/nipype/workflows/smri/freesurfer/autorecon2.py @@ -0,0 +1,720 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from ....interfaces.utility import Function, IdentityInterface, Merge +from ....pipeline import engine as pe +from ....interfaces.freesurfer import * +from .utils import copy_file + + +def copy_ltas(in_file, subjects_dir, subject_id, long_template): + import os + out_file = copy_file(in_file, + os.path.basename(in_file).replace( + long_template, subject_id)) + return out_file + + +def create_AutoRecon2(name="AutoRecon2", + longitudinal=False, + plugin_args=None, + fsvernum=5.3, + stop=None, + shrink=None, + distance=None): + # AutoRecon2 + # Workflow + ar2_wf = pe.Workflow(name=name) + + inputspec = pe.Node( + IdentityInterface(fields=[ + 'orig', + 'nu', # version < 6 + 'brainmask', + 'transform', + 'subject_id', + 'template_talairach_lta', + 'template_talairach_m3z', + 'template_label_intensities', + 'template_aseg', + 'subj_to_template_lta', + 'alltps_to_template_ltas', + 'template_lh_white', + 'template_rh_white', + 'template_lh_pial', + 'template_rh_pial', + 'init_wm', + 'timepoints', + 'alltps_segs', + 'alltps_segs_noCC', + 'alltps_norms', + 'num_threads', + 'reg_template', + 'reg_template_withskull' + ]), + run_without_submitting=True, + name='inputspec') + + # Input node + if longitudinal: + # TODO: Work on longitudinal workflow + inputspec.inputs.timepoints = config['timepoints'] + + if fsvernum >= 6: + # NU Intensity Correction + """ + Non-parametric Non-uniform intensity Normalization (N3), corrects for + intensity non-uniformity in MR data, making relatively few assumptions about + the data. This runs the MINC tool 'nu_correct'. + """ + intensity_correction = pe.Node( + MNIBiasCorrection(), name="Intensity_Correction") + intensity_correction.inputs.out_file = 'nu.mgz' + ar2_wf.connect([(inputspec, intensity_correction, + [('orig', 'in_file'), ('brainmask', 'mask'), + ('transform', 'transform')])]) + + # intensity correction parameters are more specific in 6+ + intensity_correction.inputs.iterations = 1 + intensity_correction.inputs.protocol_iterations = 1000 + if stop: + intensity_correction.inputs.stop = stop + if shrink: + intensity_correction.inputs.shrink = shrink + intensity_correction.inputs.distance = distance + + add_to_header_nu = pe.Node(AddXFormToHeader(), name="Add_XForm_to_NU") + add_to_header_nu.inputs.copy_name = True + add_to_header_nu.inputs.out_file = 'nu.mgz' + ar2_wf.connect([(intensity_correction, add_to_header_nu, [ + ('out_file', 'in_file'), + ]), (inputspec, add_to_header_nu, [('transform', 'transform')])]) + + # EM Registration + """ + Computes the transform to align the mri/nu.mgz volume to the default GCA + atlas found in FREESURFER_HOME/average (see -gca flag for more info). 
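+
+ A minimal sketch of the underlying interface, mirroring the
+ non-longitudinal node configured below (the GCA atlas path is
+ illustrative)::
+
+     from nipype.interfaces.freesurfer import EMRegister
+     reg = EMRegister(out_file='talairach.lta', nbrspacing=3)
+     reg.inputs.in_file = 'nu.mgz'
+     reg.inputs.mask = 'brainmask.mgz'
+     reg.inputs.template = 'RB_all.gca'  # hypothetical GCA atlas
+     reg.run()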
+ """ + if longitudinal: + align_transform = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Talairach_lta') + align_transform.inputs.out_file = 'talairach.lta' + + ar2_wf.connect([(inputspec, align_transform, + [('template_talairach_lta', 'in_file')])]) + else: + align_transform = pe.Node(EMRegister(), name="Align_Transform") + align_transform.inputs.out_file = 'talairach.lta' + align_transform.inputs.nbrspacing = 3 + if plugin_args: + align_transform.plugin_args = plugin_args + ar2_wf.connect([(inputspec, align_transform, + [('brainmask', 'mask'), ('reg_template', 'template'), + ('num_threads', 'num_threads')])]) + if fsvernum >= 6: + ar2_wf.connect([(add_to_header_nu, align_transform, + [('out_file', 'in_file')])]) + else: + ar2_wf.connect([(inputspec, align_transform, [('nu', 'in_file')])]) + + # CA Normalize + """ + Further normalization, based on GCA model. The normalization is based on an + estimate of the most certain segmentation voxels, which it then uses to + estimate the bias field/scalings. Creates mri/norm.mgz. + """ + ca_normalize = pe.Node(CANormalize(), name='CA_Normalize') + ca_normalize.inputs.out_file = 'norm.mgz' + if not longitudinal: + ca_normalize.inputs.control_points = 'ctrl_pts.mgz' + else: + copy_template_aseg = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_Aseg') + copy_template_aseg.inputs.out_file = 'aseg_{0}.mgz'.format( + config['long_template']) + + ar1_wf.connect( + [(inputspec, copy_template, [('template_aseg', 'in_file')]), + (copy_template, ca_normalize, [('out_file', 'long_file')])]) + + ar2_wf.connect([(align_transform, ca_normalize, [('out_file', + 'transform')]), + (inputspec, ca_normalize, [('brainmask', 'mask'), + ('reg_template', 'atlas')])]) + if fsvernum >= 6: + ar2_wf.connect([(add_to_header_nu, ca_normalize, [('out_file', + 'in_file')])]) + else: + ar2_wf.connect([(inputspec, ca_normalize, [('nu', 'in_file')])]) + + # CA Register + # Computes a nonlinear transform to align with GCA atlas. + ca_register = pe.Node(CARegister(), name='CA_Register') + ca_register.inputs.align = 'after' + ca_register.inputs.no_big_ventricles = True + ca_register.inputs.out_file = 'talairach.m3z' + if plugin_args: + ca_register.plugin_args = plugin_args + ar2_wf.connect([(ca_normalize, ca_register, [('out_file', 'in_file')]), + (inputspec, ca_register, + [('brainmask', 'mask'), ('num_threads', 'num_threads'), + ('reg_template', 'template')])]) + if not longitudinal: + ar2_wf.connect([(align_transform, ca_register, [('out_file', + 'transform')])]) + else: + ca_register.inputs.levels = 2 + ca_register.inputs.A = 1 + ar2_wf.connect([(inputspec, ca_register, [('template_talairach_m3z', + 'l_files')])]) + + # Remove Neck + """ + The neck region is removed from the NU-corrected volume mri/nu.mgz. Makes use + of transform computed from prior CA Register stage. + """ + remove_neck = pe.Node(RemoveNeck(), name='Remove_Neck') + remove_neck.inputs.radius = 25 + remove_neck.inputs.out_file = 'nu_noneck.mgz' + ar2_wf.connect([(ca_register, remove_neck, [('out_file', 'transform')]), + (inputspec, remove_neck, [('reg_template', 'template')])]) + if fsvernum >= 6: + ar2_wf.connect([(add_to_header_nu, remove_neck, [('out_file', + 'in_file')])]) + else: + ar2_wf.connect([(inputspec, remove_neck, [('nu', 'in_file')])]) + + # SkullLTA (EM Registration, with Skull) + # Computes transform to align volume mri/nu_noneck.mgz with GCA volume + # possessing the skull. 
+ em_reg_withskull = pe.Node(EMRegister(), name='EM_Register_withSkull')
+ em_reg_withskull.inputs.skull = True
+ em_reg_withskull.inputs.out_file = 'talairach_with_skull_2.lta'
+ if plugin_args:
+ em_reg_withskull.plugin_args = plugin_args
+ ar2_wf.connect([(align_transform, em_reg_withskull, [('out_file',
+ 'transform')]),
+ (remove_neck, em_reg_withskull, [('out_file', 'in_file')]),
+ (inputspec, em_reg_withskull,
+ [('num_threads', 'num_threads'),
+ ('reg_template_withskull', 'template')])])
+
+ # SubCort Seg (CA Label)
+ # Labels subcortical structures, based on the GCA model.
+ if longitudinal:
+ copy_long_ltas = pe.MapNode(
+ Function(
+ ['in_file', 'subjects_dir', 'subject_id', 'long_template'],
+ ['out_file'], copy_ltas),
+ iterfield=['in_file'],
+ name='Copy_long_ltas')
+ ar2_wf.connect([(inputspec, copy_long_ltas,
+ [('alltps_to_template_ltas', 'in_file'),
+ ('subjects_dir', 'subjects_dir'), ('subject_id',
+ 'subject_id')])])
+ copy_long_ltas.inputs.long_template = config['long_template']
+
+ merge_norms = pe.Node(Merge(2), name="Merge_Norms")
+
+ ar2_wf.connect([(inputspec, merge_norms, [('alltps_norms', 'in1')]),
+ (ca_normalize, merge_norms, [('out_file', 'in2')])])
+
+ fuse_segmentations = pe.Node(
+ FuseSegmentations(), name="Fuse_Segmentations")
+
+ ar2_wf.connect([(inputspec, fuse_segmentations, [
+ ('timepoints', 'timepoints'), ('alltps_segs', 'in_segmentations'),
+ ('alltps_segs_noCC', 'in_segmentations_noCC'), ('subject_id',
+ 'subject_id')
+ ]), (merge_norms, fuse_segmentations, [('out', 'in_norms')])])
+ fuse_segmentations.inputs.out_file = 'aseg.fused.mgz'
+
+ ca_label = pe.Node(CALabel(), name='CA_Label')
+ if fsvernum >= 6:
+ ca_label.inputs.relabel_unlikely = (9, .3)
+ ca_label.inputs.prior = 0.5
+ ca_label.inputs.align = True
+ ca_label.inputs.out_file = 'aseg.auto_noCCseg.mgz'
+ if plugin_args:
+ ca_label.plugin_args = plugin_args
+ ar2_wf.connect([(ca_normalize, ca_label, [('out_file', 'in_file')]),
+ (ca_register, ca_label, [('out_file', 'transform')]),
+ (inputspec, ca_label, [('num_threads', 'num_threads'),
+ ('reg_template', 'template')])])
+
+ if longitudinal:
+ ar2_wf.connect([(fuse_segmentations, ca_label, [('out_file',
+ 'in_vol')]),
+ (inputspec, ca_label, [('template_label_intensities',
+ 'intensities')])])
+
+ # mri_cc - segments the corpus callosum into five separate labels in the
+ # subcortical segmentation volume 'aseg.mgz'
+ segment_cc = pe.Node(SegmentCC(), name="Segment_CorpusCallosum")
+ segment_cc.inputs.out_rotation = 'cc_up.lta'
+ segment_cc.inputs.out_file = 'aseg.auto.mgz'
+ segment_cc.inputs.copy_inputs = True
+ ar2_wf.connect([
+ (ca_label, segment_cc, [('out_file', 'in_file')]),
+ (ca_normalize, segment_cc, [('out_file', 'in_norm')]),
+ ])
+
+ copy_cc = pe.Node(
+ Function(['in_file', 'out_file'], ['out_file'], copy_file),
+ name='Copy_CCSegmentation')
+ copy_cc.inputs.out_file = 'aseg.presurf.mgz'
+
+ ar2_wf.connect([(segment_cc, copy_cc, [('out_file', 'in_file')])])
+
+ # Normalization2
+ """
+ Performs a second (major) intensity correction using only the brain volume as
+ the input (so that it has to be done after the skull strip). Intensity
+ normalization works better when the skull has been removed. Creates a new
+ brain.mgz volume. The -autorecon2-cp stage begins here.
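+
+ A hedged sketch of the equivalent standalone call (file names mirror the
+ connections below)::
+
+     from nipype.interfaces.freesurfer import Normalize
+     norm2 = Normalize(out_file='brain.mgz')
+     norm2.inputs.in_file = 'norm.mgz'
+     norm2.inputs.segmentation = 'aseg.presurf.mgz'
+     norm2.inputs.mask = 'brainmask.mgz'
+     norm2.run()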
+ """ + normalization2 = pe.Node(Normalize(), name="Normalization2") + normalization2.inputs.out_file = 'brain.mgz' + ar2_wf.connect([(copy_cc, normalization2, [('out_file', 'segmentation')]), + (inputspec, normalization2, [('brainmask', 'mask')]), + (ca_normalize, normalization2, [('out_file', 'in_file')])]) + + # Mask Brain Final Surface + + # Applies brainmask.mgz to brain.mgz to create brain.finalsurfs.mgz. + mri_mask = pe.Node(ApplyMask(), name="Mask_Brain_Final_Surface") + mri_mask.inputs.mask_thresh = 5 + mri_mask.inputs.out_file = 'brain.finalsurfs.mgz' + + ar2_wf.connect([(normalization2, mri_mask, [('out_file', 'in_file')]), + (inputspec, mri_mask, [('brainmask', 'mask_file')])]) + + # WM Segmentation + """ + Attempts to separate white matter from everything else. The input is + mri/brain.mgz, and the output is mri/wm.mgz. Uses intensity, neighborhood, + and smoothness constraints. This is the volume that is edited when manually + fixing defects. Calls mri_segment, mri_edit_wm_with_aseg, and mri_pretess. + """ + + wm_seg = pe.Node(SegmentWM(), name="Segment_WM") + wm_seg.inputs.out_file = 'wm.seg.mgz' + ar2_wf.connect([(normalization2, wm_seg, [('out_file', 'in_file')])]) + + edit_wm = pe.Node(EditWMwithAseg(), name='Edit_WhiteMatter') + edit_wm.inputs.out_file = 'wm.asegedit.mgz' + edit_wm.inputs.keep_in = True + ar2_wf.connect([(wm_seg, edit_wm, [('out_file', 'in_file')]), + (copy_cc, edit_wm, [('out_file', 'seg_file')]), + (normalization2, edit_wm, [('out_file', 'brain_file')])]) + + pretess = pe.Node(MRIPretess(), name="MRI_Pretess") + pretess.inputs.out_file = 'wm.mgz' + pretess.inputs.label = 'wm' + ar2_wf.connect([(edit_wm, pretess, [('out_file', 'in_filled')]), + (ca_normalize, pretess, [('out_file', 'in_norm')])]) + + if longitudinal: + transfer_init_wm = pe.Node(ApplyMask(), name="Transfer_Initial_WM") + transfer_init_wm.inputs.transfer = 255 + transfer_init_wm.inputs.keep_mask_deletion_edits = True + transfer_init_wm.inputs.out_file = 'wm.mgz' + ar2_wf.connect([(pretess, transfer_init_wm, [('out_file', 'in_file')]), + (inputspec, transfer_init_wm, + [('init_wm', 'mask_file'), ('subj_to_template_lta', + 'xfm_file')])]) + # changing the pretess variable so that the rest of the connections still work!!! + pretess = transfer_init_wm + + # Fill + """ This creates the subcortical mass from which the orig surface is created. + The mid brain is cut from the cerebrum, and the hemispheres are cut from each + other. The left hemisphere is binarized to 255. The right hemisphere is binarized + to 127. The input is mri/wm.mgz and the output is mri/filled.mgz. Calls mri_fill. 
+ """ + + fill = pe.Node(MRIFill(), name="Fill") + fill.inputs.log_file = 'ponscc.cut.log' + fill.inputs.out_file = 'filled.mgz' + + ar2_wf.connect([ + (pretess, fill, [('out_file', 'in_file')]), + (align_transform, fill, [('out_file', 'transform')]), + (ca_label, fill, [('out_file', 'segmentation')]), + ]) + + ar2_lh = pe.Workflow("AutoRecon2_Left") + ar2_rh = pe.Workflow("AutoRecon2_Right") + + # iterate by hemisphere + for hemisphere in ['lh', 'rh']: + if hemisphere == 'lh': + label = 255 + hemi_wf = ar2_lh + else: + label = 127 + hemi_wf = ar2_rh + + hemi_inputspec = pe.Node( + IdentityInterface(fields=[ + 'norm', 'filled', 'aseg', 't1', 'wm', 'brain', 'num_threads' + ]), + name="inputspec") + + if longitudinal: + # Make White Surf + # Copy files from longitudinal base + copy_template_white = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_White') + copy_template_white.inputs.out_file = '{0}.orig'.format(hemisphere) + + copy_template_orig_white = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_Orig_White') + copy_template_orig_white.inputs.out_file = '{0}.orig_white'.format( + hemisphere) + + copy_template_orig_pial = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Template_Orig_Pial') + copy_template_orig_pial.inputs.out_file = '{0}.orig_pial'.format( + hemisphere) + + # White + + # This function implicitly calls other inputs based on the subject_id + # wf attempts to make sure files are data sinked to the correct + # folders before calling + make_surfaces = pe.Node(MakeSurfaces(), name="Make_Surfaces") + make_surfaces.inputs.noaparc = True + make_surfaces.inputs.mgz = True + make_surfaces.inputs.white_only = True + make_surfaces.inputs.hemisphere = hemisphere + make_surfaces.inputs.maximum = 3.5 + make_surfaces.inputs.longitudinal = True + make_surfaces.inputs.copy_inputs = True + + hemi_wf.connect([(copy_template_orig_white, make_surfaces, + [('out_file', 'orig_white')]), + (copy_template_white, make_surfaces, + [('out_file', 'in_orig')])]) + + else: + # If running single session + # Tessellate by hemisphere + """ + This is the step where the orig surface (ie, surf/?h.orig.nofix) is created. + The surface is created by covering the filled hemisphere with triangles. + Runs mri_pretess to create a connected WM volume (neighboring voxels must + have faces in common) and then mri_tessellate to create the surface. The + places where the points of the triangles meet are called vertices. Creates + the file surf/?h.orig.nofix Note: the topology fixer will create the surface + ?h.orig. Finally mris_extract_main_component will remove small surface + components, not connected to the main body. 
+ """ + pretess2 = pe.Node(MRIPretess(), name='Pretess2') + pretess2.inputs.out_file = 'filled-pretess{0}.mgz'.format(label) + pretess2.inputs.label = label + + hemi_wf.connect([(hemi_inputspec, pretess2, + [('norm', 'in_norm'), ('filled', 'in_filled')])]) + + tesselate = pe.Node(MRITessellate(), name="Tesselation") + tesselate.inputs.out_file = "{0}.orig.nofix".format(hemisphere) + tesselate.inputs.label_value = label + hemi_wf.connect([(pretess2, tesselate, [('out_file', 'in_file')])]) + + extract_main_component = pe.Node( + ExtractMainComponent(), name="Extract_Main_Component") + extract_main_component.inputs.out_file = "{0}.orig.nofix".format( + hemisphere) + hemi_wf.connect([(tesselate, extract_main_component, + [('surface', 'in_file')])]) + + copy_orig = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Orig') + copy_orig.inputs.out_file = '{0}.orig'.format(hemisphere) + hemi_wf.connect([(extract_main_component, copy_orig, + [('out_file', 'in_file')])]) + + # Orig Surface Smoothing 1 + """ + After tesselation, the orig surface is very jagged because each triangle is + on the edge of a voxel face and so are at right angles to each other. The + vertex positions are adjusted slightly here to reduce the angle. This is + only necessary for the inflation processes. Creates surf/?h.smoothwm(.nofix). + Calls mris_smooth. Smooth1 is the step just after tessellation. + """ + + smooth1 = pe.Node(SmoothTessellation(), name="Smooth1") + smooth1.inputs.disable_estimates = True + smooth1.inputs.seed = 1234 + smooth1.inputs.out_file = '{0}.smoothwm.nofix'.format(hemisphere) + hemi_wf.connect([(extract_main_component, smooth1, [('out_file', + 'in_file')])]) + + # Inflation 1 + """ + Inflation of the surf/?h.smoothwm(.nofix) surface to create surf/?h.inflated. + The inflation attempts to minimize metric distortion so that distances and + areas are preserved (ie, the surface is not stretched). In this sense, it is + like inflating a paper bag and not a balloon. Inflate1 is the step just after + tessellation. + """ + + inflate1 = pe.Node(MRIsInflate(), name="inflate1") + inflate1.inputs.no_save_sulc = True + inflate1.inputs.out_file = '{0}.inflated.nofix'.format(hemisphere) + + copy_inflate1 = pe.Node( + Function(['in_file', 'out_file'], ['out_file'], copy_file), + name='Copy_Inflate1') + copy_inflate1.inputs.out_file = '{0}.inflated'.format(hemisphere) + hemi_wf.connect([ + (smooth1, inflate1, [('surface', 'in_file')]), + (inflate1, copy_inflate1, [('out_file', 'in_file')]), + ]) + + # Sphere + """ + This is the initial step of automatic topology fixing. It is a + quasi-homeomorphic spherical transformation of the inflated surface designed + to localize topological defects for the subsequent automatic topology fixer. + Calls mris_sphere. + """ + + qsphere = pe.Node(Sphere(), name="Sphere") + qsphere.inputs.seed = 1234 + qsphere.inputs.magic = True + qsphere.inputs.out_file = '{0}.qsphere.nofix'.format(hemisphere) + if plugin_args: + qsphere.plugin_args = plugin_args + hemi_wf.connect([(inflate1, qsphere, [('out_file', 'in_file')]), + (hemi_inputspec, qsphere, [('num_threads', + 'num_threads')])]) + + # Automatic Topology Fixer + """ + Finds topological defects (ie, holes in a filled hemisphere) using + surf/?h.qsphere.nofix, and changes the orig surface (surf/?h.orig.nofix) to + remove the defects. Changes the number of vertices. All the defects will be + removed, but the user should check the orig surface in the volume to make + sure that it looks appropriate. 
+ + This mris_fix_topology does not take in the {lh,rh}.orig file, but instead takes in the + subject ID and hemisphere and tries to find it from the subjects + directory. + """ + fix_topology = pe.Node(FixTopology(), name="Fix_Topology") + fix_topology.inputs.mgz = True + fix_topology.inputs.ga = True + fix_topology.inputs.seed = 1234 + fix_topology.inputs.hemisphere = hemisphere + fix_topology.inputs.copy_inputs = True + hemi_wf.connect([(copy_orig, fix_topology, + [('out_file', + 'in_orig')]), (copy_inflate1, fix_topology, + [('out_file', 'in_inflated')]), + (qsphere, fix_topology, [('out_file', 'sphere')]), + (hemi_inputspec, fix_topology, + [('wm', 'in_wm'), ('brain', 'in_brain')])]) + + # TODO: halt workflow for bad euler number + euler_number = pe.Node(EulerNumber(), name="Euler_Number") + + hemi_wf.connect([ + (fix_topology, euler_number, [('out_file', 'in_file')]), + ]) + + remove_intersection = pe.Node( + RemoveIntersection(), name="Remove_Intersection") + remove_intersection.inputs.out_file = "{0}.orig".format(hemisphere) + + hemi_wf.connect([(euler_number, remove_intersection, + [('out_file', 'in_file')])]) + + # White + + # This function implicitly calls other inputs based on the subject_id + # need to make sure files are data sinked to the correct folders before + # calling + make_surfaces = pe.Node(MakeSurfaces(), name="Make_Surfaces") + make_surfaces.inputs.noaparc = True + make_surfaces.inputs.mgz = True + make_surfaces.inputs.white_only = True + make_surfaces.inputs.hemisphere = hemisphere + make_surfaces.inputs.copy_inputs = True + hemi_wf.connect([(remove_intersection, make_surfaces, + [('out_file', 'in_orig')]), + (hemi_inputspec, make_surfaces, + [('aseg', 'in_aseg'), ('t1', 'in_T1'), + ('filled', 'in_filled'), ('wm', 'in_wm')])]) + # end of non-longitudinal specific steps + + # Orig Surface Smoothing 2 + """ + After tesselation, the orig surface is very jagged because each triangle is on + the edge of a voxel face and so are at right angles to each other. The vertex + positions are adjusted slightly here to reduce the angle. This is only necessary + for the inflation processes. Smooth2 is the step just after topology + fixing. + """ + smooth2 = pe.Node(SmoothTessellation(), name="Smooth2") + smooth2.inputs.disable_estimates = True + smooth2.inputs.smoothing_iterations = 3 + smooth2.inputs.seed = 1234 + smooth2.inputs.out_file = '{0}.smoothwm'.format(hemisphere) + hemi_wf.connect([(make_surfaces, smooth2, [('out_white', 'in_file')])]) + + # Inflation 2 + """ + Inflation of the surf/?h.smoothwm(.nofix) surface to create surf/?h.inflated. + The inflation attempts to minimize metric distortion so that distances and areas + are preserved (ie, the surface is not stretched). In this sense, it is like + inflating a paper bag and not a balloon. Inflate2 is the step just after + topology fixing. 
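+
+ A hedged sketch of the direct call (surface names mirror the node
+ configuration below)::
+
+     from nipype.interfaces.freesurfer import MRIsInflate
+     inflate = MRIsInflate(out_file='lh.inflated', out_sulc='lh.sulc')
+     inflate.inputs.in_file = 'lh.smoothwm'
+     inflate.run()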
+ """ + inflate2 = pe.Node(MRIsInflate(), name="inflate2") + inflate2.inputs.out_sulc = '{0}.sulc'.format(hemisphere) + inflate2.inputs.out_file = '{0}.inflated'.format(hemisphere) + hemi_wf.connect([ + (smooth2, inflate2, [('surface', 'in_file')]), + ]) + + # Compute Curvature + """No documentation on this step""" + + curvature1 = pe.Node(Curvature(), name="Curvature1") + curvature1.inputs.save = True + curvature1.inputs.copy_input = True + hemi_wf.connect([ + (make_surfaces, curvature1, [('out_white', 'in_file')]), + ]) + + curvature2 = pe.Node(Curvature(), name="Curvature2") + curvature2.inputs.threshold = .999 + curvature2.inputs.n = True + curvature2.inputs.averages = 5 + curvature2.inputs.save = True + curvature2.inputs.distances = (10, 10) + curvature1.inputs.copy_input = True + hemi_wf.connect([ + (inflate2, curvature2, [('out_file', 'in_file')]), + ]) + + curvature_stats = pe.Node(CurvatureStats(), name="Curvature_Stats") + curvature_stats.inputs.min_max = True + curvature_stats.inputs.write = True + curvature_stats.inputs.values = True + curvature_stats.inputs.hemisphere = hemisphere + curvature_stats.inputs.copy_inputs = True + curvature_stats.inputs.out_file = '{0}.curv.stats'.format(hemisphere) + hemi_wf.connect([ + (smooth2, curvature_stats, [('surface', 'surface')]), + (make_surfaces, curvature_stats, [('out_curv', 'curvfile1')]), + (inflate2, curvature_stats, [('out_sulc', 'curvfile2')]), + ]) + + if longitudinal: + ar2_wf.connect([(inputspec, hemi_wf, + [('template_{0}_white'.format(hemisphere), + 'Copy_Template_White.in_file'), + ('template_{0}_white'.format(hemisphere), + 'Copy_Template_Orig_White.in_file'), + ('template_{0}_pial'.format(hemisphere), + 'Copy_Template_Pial.in_file')])]) + + # Connect inputs for the hemisphere workflows + ar2_wf.connect( + [(ca_normalize, hemi_wf, + [('out_file', 'inputspec.norm')]), (fill, hemi_wf, [ + ('out_file', 'inputspec.filled') + ]), (copy_cc, hemi_wf, [('out_file', 'inputspec.aseg')]), + (mri_mask, hemi_wf, [('out_file', 'inputspec.t1')]), + (pretess, hemi_wf, [('out_file', + 'inputspec.wm')]), (normalization2, hemi_wf, + [('out_file', + 'inputspec.brain')]), + (inputspec, hemi_wf, [('num_threads', 'inputspec.num_threads')])]) + + # Outputs for hemisphere workflow + hemi_outputs = [ + 'orig_nofix', 'orig', 'smoothwm_nofix', 'inflated_nofix', + 'qsphere_nofix', 'white', 'curv', 'area', 'cortex', 'pial_auto', + 'thickness', 'smoothwm', 'sulc', 'inflated', 'white_H', 'white_K', + 'inflated_H', 'inflated_K', 'curv_stats' + ] + + hemi_outputspec = pe.Node( + IdentityInterface(fields=hemi_outputs), name="outputspec") + + hemi_wf.connect( + [(extract_main_component, hemi_outputspec, + [('out_file', 'orig_nofix')]), (inflate1, hemi_outputspec, [ + ('out_file', 'inflated_nofix') + ]), (smooth1, hemi_outputspec, [('surface', 'smoothwm_nofix')]), + (qsphere, hemi_outputspec, [('out_file', 'qsphere_nofix')]), + (remove_intersection, hemi_outputspec, + [('out_file', 'orig')]), (make_surfaces, hemi_outputspec, [ + ('out_white', 'white'), ('out_curv', 'curv'), + ('out_area', 'area'), ('out_cortex', 'cortex'), ('out_pial', + 'pial_auto') + ]), (smooth2, hemi_outputspec, + [('surface', 'smoothwm')]), (inflate2, hemi_outputspec, + [('out_sulc', 'sulc'), + ('out_file', 'inflated')]), + (curvature1, hemi_outputspec, + [('out_mean', 'white_H'), + ('out_gauss', 'white_K')]), (curvature2, hemi_outputspec, [ + ('out_mean', 'inflated_H'), ('out_gauss', 'inflated_K') + ]), (curvature_stats, hemi_outputspec, [('out_file', + 'curv_stats')])]) + + outputs = [ 
+ 'nu', 'tal_lta', 'norm', 'ctrl_pts', 'tal_m3z', 'nu_noneck',
+ 'talskull2', 'aseg_noCC', 'cc_up', 'aseg_auto', 'aseg_presurf',
+ 'brain', 'brain_finalsurfs', 'wm_seg', 'wm_aseg', 'wm', 'ponscc_log',
+ 'filled'
+ ]
+ for hemi in ('lh', 'rh'):
+ for field in hemi_outputs:
+ outputs.append("{0}_".format(hemi) + field)
+ outputspec = pe.Node(IdentityInterface(fields=outputs), name="outputspec")
+
+ if fsvernum >= 6:
+ ar2_wf.connect([(add_to_header_nu, outputspec, [('out_file', 'nu')])])
+ else:
+ # add to outputspec to preserve datasinking
+ ar2_wf.connect([(inputspec, outputspec, [('nu', 'nu')])])
+
+ ar2_wf.connect([
+ (align_transform, outputspec, [('out_file', 'tal_lta')]),
+ (ca_normalize, outputspec, [('out_file', 'norm')]),
+ (ca_normalize, outputspec, [('control_points', 'ctrl_pts')]),
+ (ca_register, outputspec, [('out_file', 'tal_m3z')]),
+ (remove_neck, outputspec, [('out_file', 'nu_noneck')]),
+ (em_reg_withskull, outputspec, [('out_file', 'talskull2')]),
+ (ca_label, outputspec, [('out_file', 'aseg_noCC')]),
+ (segment_cc, outputspec, [('out_rotation', 'cc_up'), ('out_file',
+ 'aseg_auto')]),
+ (copy_cc, outputspec, [('out_file', 'aseg_presurf')]),
+ (normalization2, outputspec, [('out_file', 'brain')]),
+ (mri_mask, outputspec, [('out_file', 'brain_finalsurfs')]),
+ (wm_seg, outputspec, [('out_file', 'wm_seg')]),
+ (edit_wm, outputspec, [('out_file', 'wm_aseg')]),
+ (pretess, outputspec, [('out_file', 'wm')]),
+ (fill, outputspec, [('out_file', 'filled'), ('log_file',
+ 'ponscc_log')]),
+ ])
+
+ for hemi, hemi_wf in [('lh', ar2_lh), ('rh', ar2_rh)]:
+ for field in hemi_outputs:
+ output = "{0}_".format(hemi) + field
+ ar2_wf.connect([(hemi_wf, outputspec, [("outputspec." + field,
+ output)])])
+
+ return ar2_wf, outputs
diff --git a/nipype/workflows/smri/freesurfer/autorecon3.py b/nipype/workflows/smri/freesurfer/autorecon3.py
new file mode 100644
index 0000000000..477198d2da
--- /dev/null
+++ b/nipype/workflows/smri/freesurfer/autorecon3.py
@@ -0,0 +1,959 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+ absolute_import)
+from ....interfaces.utility import IdentityInterface, Merge, Function
+from ....pipeline import engine as pe
+from ....interfaces.freesurfer import *
+from .ba_maps import create_ba_maps_wf
+from ....interfaces.io import DataGrabber
+
+
+def create_AutoRecon3(name="AutoRecon3",
+ qcache=False,
+ plugin_args=None,
+ th3=True,
+ exvivo=True,
+ entorhinal=True,
+ fsvernum=5.3):
+
+ # AutoRecon3
+ # Workflow
+ ar3_wf = pe.Workflow(name=name)
+
+ # Input Node
+ inputspec = pe.Node(
+ IdentityInterface(fields=[
+ 'lh_inflated', 'rh_inflated', 'lh_smoothwm', 'rh_smoothwm',
+ 'lh_white', 'rh_white', 'lh_white_H', 'rh_white_H', 'lh_white_K',
+ 'rh_white_K', 'lh_cortex_label', 'rh_cortex_label', 'lh_orig',
+ 'rh_orig', 'lh_sulc', 'rh_sulc', 'lh_area', 'rh_area', 'lh_curv',
+ 'rh_curv', 'lh_orig_nofix', 'rh_orig_nofix', 'aseg_presurf',
+ 'brain_finalsurfs', 'wm', 'filled', 'brainmask', 'transform',
+ 'orig_mgz', 'rawavg', 'norm', 'lh_atlas', 'rh_atlas',
+ 'lh_classifier1', 'rh_classifier1', 'lh_classifier2',
+ 'rh_classifier2', 'lh_classifier3', 'rh_classifier3',
+ 'lookup_table', 'wm_lookup_table', 'src_subject_id',
+ 'src_subject_dir', 'color_table', 'num_threads'
+ ]),
+ name='inputspec')
+
+ ar3_lh_wf1 = pe.Workflow(name="AutoRecon3_Left_1")
+ ar3_rh_wf1 = pe.Workflow(name="AutoRecon3_Right_1")
+ for hemisphere, hemi_wf in [('lh', ar3_lh_wf1), ('rh', ar3_rh_wf1)]:
+ hemi_inputspec1 = pe.Node(
+ IdentityInterface(fields=[
+ 'inflated', 'smoothwm', 'white', 'cortex_label', 'orig',
+ 'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled', 'sphere',
+ 'sulc', 'area', 'curv', 'classifier', 'atlas', 'num_threads'
+ ]),
+ name="inputspec")
+
+ # Spherical Inflation
+
+ # Inflates the orig surface into a sphere while minimizing metric distortion.
+ # This step is necessary in order to register the surface to the spherical
+ # atlas (also known as the spherical morph). Calls mris_sphere. Creates
+ # surf/?h.sphere. The -autorecon3 stage begins here.
+
+ ar3_sphere = pe.Node(Sphere(), name="Spherical_Inflation")
+ ar3_sphere.inputs.seed = 1234
+ ar3_sphere.inputs.out_file = '{0}.sphere'.format(hemisphere)
+ if plugin_args:
+ ar3_sphere.plugin_args = plugin_args
+ hemi_wf.connect([(hemi_inputspec1, ar3_sphere,
+ [('inflated', 'in_file'),
+ ('smoothwm', 'in_smoothwm'), ('num_threads',
+ 'num_threads')])])
+
+ # Ipsilateral Surface Registration (Spherical Morph)
+
+ # Registers the orig surface to the spherical atlas through surf/?h.sphere.
+ # The surfaces are first coarsely registered by aligning the large scale
+ # folding patterns found in ?h.sulc and then fine-tuned using the small-scale
+ # patterns as in ?h.curv. Calls mris_register. Creates surf/?h.sphere.reg.
+
+ ar3_surfreg = pe.Node(Register(), name="Surface_Registration")
+ ar3_surfreg.inputs.out_file = '{0}.sphere.reg'.format(hemisphere)
+ ar3_surfreg.inputs.curv = True
+ hemi_wf.connect([(ar3_sphere, ar3_surfreg, [('out_file', 'in_surf')]),
+ (hemi_inputspec1, ar3_surfreg,
+ [('smoothwm', 'in_smoothwm'), ('sulc', 'in_sulc'),
+ ('atlas', 'target')])])
+
+ # Jacobian
+
+ # Computes how much the white surface was distorted in order to register to
+ # the spherical atlas during the -surfreg step.
+
+ ar3_jacobian = pe.Node(Jacobian(), name="Jacobian")
+ ar3_jacobian.inputs.out_file = '{0}.jacobian_white'.format(hemisphere)
+ hemi_wf.connect(
+ [(hemi_inputspec1, ar3_jacobian, [('white', 'in_origsurf')]),
+ (ar3_surfreg, ar3_jacobian, [('out_file', 'in_mappedsurf')])])
+
+ # Average Curvature
+
+ # Resamples the average curvature from the atlas to that of the subject.
+ # Allows the user to display activity on the surface of an individual
+ # with the folding pattern (ie, anatomy) of a group.
+
+ ar3_paint = pe.Node(Paint(), name="Average_Curvature")
+ ar3_paint.inputs.averages = 5
+ ar3_paint.inputs.template_param = 6
+ ar3_paint.inputs.out_file = "{0}.avg_curv".format(hemisphere)
+ hemi_wf.connect([(ar3_surfreg, ar3_paint, [('out_file', 'in_surf')]),
+ (hemi_inputspec1, ar3_paint, [('atlas',
+ 'template')])])
+
+ # Cortical Parcellation
+
+ # Assigns a neuroanatomical label to each location on the cortical
+ # surface. Incorporates both geometric information derived from the
+ # cortical model (sulcus and curvature), and neuroanatomical convention.
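+ #
+ # A hedged sketch of the standalone call (file names are illustrative;
+ # in this workflow they arrive via the inputspec connections below)::
+ #
+ #     from nipype.interfaces.freesurfer import MRIsCALabel
+ #     calabel = MRIsCALabel(hemisphere='lh', seed=1234,
+ #                           out_file='lh.aparc.annot')
+ #     calabel.inputs.canonsurf = 'lh.sphere.reg'
+ #     calabel.inputs.classifier = 'lh.classifier.gcs'  # hypothetical
+ #     calabel.run()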
+ + ar3_parcellation = pe.Node(MRIsCALabel(), "Cortical_Parcellation") + ar3_parcellation.inputs.seed = 1234 + ar3_parcellation.inputs.hemisphere = hemisphere + ar3_parcellation.inputs.copy_inputs = True + ar3_parcellation.inputs.out_file = "{0}.aparc.annot".format(hemisphere) + if plugin_args: + ar3_parcellation.plugin_args = plugin_args + hemi_wf.connect( + [(hemi_inputspec1, ar3_parcellation, + [('smoothwm', 'smoothwm'), ('cortex_label', 'label'), + ('aseg_presurf', 'aseg'), ('classifier', 'classifier'), + ('curv', 'curv'), ('sulc', 'sulc'), ('num_threads', + 'num_threads')]), + (ar3_surfreg, ar3_parcellation, [('out_file', 'canonsurf')])]) + + # Pial Surface + + ar3_pial = pe.Node(MakeSurfaces(), name="Make_Pial_Surface") + ar3_pial.inputs.mgz = True + ar3_pial.inputs.hemisphere = hemisphere + ar3_pial.inputs.copy_inputs = True + + if fsvernum < 6: + ar3_pial.inputs.white = 'NOWRITE' + hemi_wf.connect(hemi_inputspec1, 'white', ar3_pial, 'in_white') + else: + ar3_pial.inputs.no_white = True + hemi_wf.connect([(hemi_inputspec1, ar3_pial, + [('white', 'orig_pial'), ('white', + 'orig_white')])]) + + hemi_wf.connect( + [(hemi_inputspec1, ar3_pial, + [('wm', 'in_wm'), ('orig', 'in_orig'), ('filled', 'in_filled'), + ('brain_finalsurfs', 'in_T1'), ('aseg_presurf', 'in_aseg')]), + (ar3_parcellation, ar3_pial, [('out_file', 'in_label')])]) + + # Surface Volume + """ + Creates the ?h.volume file by first creating the ?h.mid.area file by + adding ?h.area(.white) to ?h.area.pial, then dividing by two. Then ?h.volume + is created by multiplying ?.mid.area with ?h.thickness. + """ + + ar3_add = pe.Node(MRIsCalc(), name="Add_Pial_Area") + ar3_add.inputs.action = "add" + ar3_add.inputs.out_file = '{0}.area.mid'.format(hemisphere) + hemi_wf.connect([ + (ar3_pial, ar3_add, [('out_area', 'in_file2')]), + (hemi_inputspec1, ar3_add, [('area', 'in_file1')]), + ]) + + ar3_divide = pe.Node(MRIsCalc(), name="Mid_Pial") + ar3_divide.inputs.action = "div" + ar3_divide.inputs.in_int = 2 + ar3_divide.inputs.out_file = '{0}.area.mid'.format(hemisphere) + hemi_wf.connect([ + (ar3_add, ar3_divide, [('out_file', 'in_file1')]), + ]) + + ar3_volume = pe.Node(MRIsCalc(), name="Calculate_Volume") + ar3_volume.inputs.action = "mul" + ar3_volume.inputs.out_file = '{0}.volume'.format(hemisphere) + hemi_wf.connect([ + (ar3_divide, ar3_volume, [('out_file', 'in_file1')]), + (ar3_pial, ar3_volume, [('out_thickness', 'in_file2')]), + ]) + + # Connect the inputs + ar3_wf.connect( + [(inputspec, hemi_wf, + [('{0}_inflated'.format(hemisphere), 'inputspec.inflated'), + ('{0}_smoothwm'.format(hemisphere), + 'inputspec.smoothwm'), ('{0}_white'.format(hemisphere), + 'inputspec.white'), + ('{0}_cortex_label'.format(hemisphere), + 'inputspec.cortex_label'), ('{0}_orig'.format(hemisphere), + 'inputspec.orig'), + ('{0}_sulc'.format(hemisphere), + 'inputspec.sulc'), ('{0}_area'.format(hemisphere), + 'inputspec.area'), + ('{0}_curv'.format(hemisphere), + 'inputspec.curv'), ('aseg_presurf', 'inputspec.aseg_presurf'), + ('brain_finalsurfs', + 'inputspec.brain_finalsurfs'), ('wm', 'inputspec.wm'), + ('filled', 'inputspec.filled'), ('{0}_atlas'.format(hemisphere), + 'inputspec.atlas'), + ('{0}_classifier1'.format(hemisphere), + 'inputspec.classifier'), ('num_threads', + 'inputspec.num_threads')])]) + + # Workflow1 Outputs + hemi_outputs1 = [ + 'sphere', 'sphere_reg', 'jacobian_white', 'avg_curv', + 'aparc_annot', 'area_pial', 'curv_pial', 'pial', 'thickness_pial', + 'area_mid', 'volume' + ] + hemi_outputspec1 = pe.Node( + 
IdentityInterface(fields=hemi_outputs1), name="outputspec")
+ hemi_wf.connect([(ar3_pial, hemi_outputspec1, [
+ ('out_pial', 'pial'), ('out_curv', 'curv_pial'),
+ ('out_area', 'area_pial'), ('out_thickness', 'thickness_pial')
+ ]), (ar3_divide, hemi_outputspec1,
+ [('out_file', 'area_mid')]), (ar3_volume, hemi_outputspec1,
+ [('out_file', 'volume')]),
+ (ar3_parcellation, hemi_outputspec1,
+ [('out_file', 'aparc_annot')]),
+ (ar3_jacobian, hemi_outputspec1,
+ [('out_file',
+ 'jacobian_white')]), (ar3_paint, hemi_outputspec1,
+ [('out_file', 'avg_curv')]),
+ (ar3_surfreg, hemi_outputspec1,
+ [('out_file',
+ 'sphere_reg')]), (ar3_sphere, hemi_outputspec1,
+ [('out_file', 'sphere')])])
+
+ # Cortical Ribbon Mask
+ """
+ Creates binary volume masks of the cortical ribbon; ie, each voxel is
+ either a 1 or 0 depending upon whether it falls in the ribbon or not.
+ """
+ volume_mask = pe.Node(VolumeMask(), name="Mask_Ribbon")
+ volume_mask.inputs.left_whitelabel = 2
+ volume_mask.inputs.left_ribbonlabel = 3
+ volume_mask.inputs.right_whitelabel = 41
+ volume_mask.inputs.right_ribbonlabel = 42
+ volume_mask.inputs.save_ribbon = True
+ volume_mask.inputs.copy_inputs = True
+
+ ar3_wf.connect([
+ (inputspec, volume_mask, [('lh_white', 'lh_white'), ('rh_white',
+ 'rh_white')]),
+ (ar3_lh_wf1, volume_mask, [('outputspec.pial', 'lh_pial')]),
+ (ar3_rh_wf1, volume_mask, [('outputspec.pial', 'rh_pial')]),
+ ])
+
+ if fsvernum >= 6:
+ ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf',
+ 'in_aseg')])])
+ else:
+ ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'aseg')])])
+
+ ar3_lh_wf2 = pe.Workflow(name="AutoRecon3_Left_2")
+ ar3_rh_wf2 = pe.Workflow(name="AutoRecon3_Right_2")
+
+ for hemisphere, hemiwf2 in [('lh', ar3_lh_wf2), ('rh', ar3_rh_wf2)]:
+ if hemisphere == 'lh':
+ hemiwf1 = ar3_lh_wf1
+ else:
+ hemiwf1 = ar3_rh_wf1
+
+ hemi_inputs2 = [
+ 'wm',
+ 'lh_white',
+ 'rh_white',
+ 'transform',
+ 'brainmask',
+ 'aseg_presurf',
+ 'cortex_label',
+ 'lh_pial',
+ 'rh_pial',
+ 'thickness',
+ 'aparc_annot',
+ 'ribbon',
+ 'smoothwm',
+ 'sphere_reg',
+ 'orig_mgz',
+ 'rawavg',
+ 'curv',
+ 'sulc',
+ 'classifier2',
+ 'classifier3',
+ ]
+
+ hemi_inputspec2 = pe.Node(
+ IdentityInterface(fields=hemi_inputs2), name="inputspec")
+
+ # Parcellation Statistics
+ """
+ Runs mris_anatomical_stats to create a summary table of cortical parcellation statistics for each structure, including
+ structure name
+ number of vertices
+ total surface area (mm^2)
+ total gray matter volume (mm^3)
+ average cortical thickness (mm)
+ standard error of cortical thickness (mm)
+ integrated rectified mean curvature
+ integrated rectified Gaussian curvature
+ folding index
+ intrinsic curvature index.
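+
+ A minimal sketch of the direct call (assumes a populated subjects
+ directory; the subject id is hypothetical)::
+
+     from nipype.interfaces.freesurfer import ParcellationStats
+     stats = ParcellationStats(mgz=True, tabular_output=True)
+     stats.inputs.subject_id = 'subj001'  # hypothetical
+     stats.inputs.hemisphere = 'lh'
+     stats.inputs.surface = 'white'
+     stats.run()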
+ """ + parcellation_stats_white = pe.Node( + ParcellationStats(), + name="Parcellation_Stats_{0}_White".format(hemisphere)) + parcellation_stats_white.inputs.mgz = True + parcellation_stats_white.inputs.th3 = th3 + parcellation_stats_white.inputs.tabular_output = True + parcellation_stats_white.inputs.surface = 'white' + parcellation_stats_white.inputs.hemisphere = hemisphere + parcellation_stats_white.inputs.out_color = 'aparc.annot.ctab' + parcellation_stats_white.inputs.out_table = '{0}.aparc.stats'.format( + hemisphere) + parcellation_stats_white.inputs.copy_inputs = True + + hemiwf2.connect([ + (hemi_inputspec2, parcellation_stats_white, [ + ('wm', 'wm'), + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('brainmask', 'brainmask'), + ('aseg_presurf', 'aseg'), + ('cortex_label', 'in_cortex'), + ('cortex_label', 'cortex_label'), + ('lh_pial', 'lh_pial'), + ('rh_pial', 'rh_pial'), + ('thickness', 'thickness'), + ('aparc_annot', 'in_annotation'), + ('ribbon', 'ribbon'), + ]), + ]) + + parcellation_stats_pial = pe.Node( + ParcellationStats(), + name="Parcellation_Stats_{0}_Pial".format(hemisphere)) + parcellation_stats_pial.inputs.mgz = True + parcellation_stats_pial.inputs.th3 = th3 + parcellation_stats_pial.inputs.tabular_output = True + parcellation_stats_pial.inputs.surface = 'pial' + parcellation_stats_pial.inputs.hemisphere = hemisphere + parcellation_stats_pial.inputs.copy_inputs = True + parcellation_stats_pial.inputs.out_color = 'aparc.annot.ctab' + parcellation_stats_pial.inputs.out_table = '{0}.aparc.pial.stats'.format( + hemisphere) + + hemiwf2.connect([ + (hemi_inputspec2, parcellation_stats_pial, [ + ('wm', 'wm'), + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('brainmask', 'brainmask'), + ('aseg_presurf', 'aseg'), + ('cortex_label', 'cortex_label'), + ('cortex_label', 'in_cortex'), + ('lh_pial', 'lh_pial'), + ('rh_pial', 'rh_pial'), + ('thickness', 'thickness'), + ('aparc_annot', 'in_annotation'), + ('ribbon', 'ribbon'), + ]), + ]) + + # Cortical Parcellation 2 + cortical_parcellation_2 = pe.Node( + MRIsCALabel(), + name="Cortical_Parcellation_{0}_2".format(hemisphere)) + cortical_parcellation_2.inputs.out_file = '{0}.aparc.a2009s.annot'.format( + hemisphere) + cortical_parcellation_2.inputs.seed = 1234 + cortical_parcellation_2.inputs.copy_inputs = True + cortical_parcellation_2.inputs.hemisphere = hemisphere + + hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_2, + [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'), + ('cortex_label', 'label'), ('sphere_reg', + 'canonsurf'), ('curv', + 'curv'), + ('sulc', 'sulc'), ('classifier2', 'classifier')])]) + + # Parcellation Statistics 2 + parcellation_stats_white_2 = parcellation_stats_white.clone( + name="Parcellation_Statistics_{0}_2".format(hemisphere)) + parcellation_stats_white_2.inputs.hemisphere = hemisphere + parcellation_stats_white_2.inputs.out_color = 'aparc.annot.a2009s.ctab' + parcellation_stats_white_2.inputs.out_table = '{0}.aparc.a2009s.stats'.format( + hemisphere) + hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_2, [ + ('wm', 'wm'), + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('brainmask', 'brainmask'), + ('aseg_presurf', 'aseg'), + ('cortex_label', 'cortex_label'), + ('cortex_label', 'in_cortex'), + ('lh_pial', 'lh_pial'), + ('rh_pial', 'rh_pial'), + ('thickness', 'thickness'), + ('ribbon', 'ribbon'), + ]), (cortical_parcellation_2, parcellation_stats_white_2, + 
[('out_file', 'in_annotation')])]) + + # Cortical Parcellation 3 + cortical_parcellation_3 = pe.Node( + MRIsCALabel(), + name="Cortical_Parcellation_{0}_3".format(hemisphere)) + cortical_parcellation_3.inputs.out_file = '{0}.aparc.DKTatlas40.annot'.format( + hemisphere) + cortical_parcellation_3.inputs.hemisphere = hemisphere + cortical_parcellation_3.inputs.seed = 1234 + cortical_parcellation_3.inputs.copy_inputs = True + hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_3, + [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'), + ('cortex_label', 'label'), ('sphere_reg', + 'canonsurf'), ('curv', + 'curv'), + ('sulc', 'sulc'), ('classifier3', 'classifier')])]) + + # Parcellation Statistics 3 + parcellation_stats_white_3 = parcellation_stats_white.clone( + name="Parcellation_Statistics_{0}_3".format(hemisphere)) + parcellation_stats_white_3.inputs.out_color = 'aparc.annot.DKTatlas40.ctab' + parcellation_stats_white_3.inputs.out_table = '{0}.aparc.DKTatlas40.stats'.format( + hemisphere) + parcellation_stats_white_3.inputs.hemisphere = hemisphere + + hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_3, [ + ('wm', 'wm'), + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('brainmask', 'brainmask'), + ('aseg_presurf', 'aseg'), + ('cortex_label', 'cortex_label'), + ('cortex_label', 'in_cortex'), + ('lh_pial', 'lh_pial'), + ('rh_pial', 'rh_pial'), + ('thickness', 'thickness'), + ('ribbon', 'ribbon'), + ]), (cortical_parcellation_3, parcellation_stats_white_3, + [('out_file', 'in_annotation')])]) + + # WM/GM Contrast + contrast = pe.Node( + Contrast(), name="WM_GM_Contrast_{0}".format(hemisphere)) + contrast.inputs.hemisphere = hemisphere + contrast.inputs.copy_inputs = True + + hemiwf2.connect([ + (hemi_inputspec2, contrast, [ + ('orig_mgz', 'orig'), + ('rawavg', 'rawavg'), + ('{0}_white'.format(hemisphere), 'white'), + ('cortex_label', 'cortex'), + ('aparc_annot', 'annotation'), + ('thickness', 'thickness'), + ]), + ]) + + hemi_outputs2 = [ + 'aparc_annot_ctab', + 'aparc_stats', + 'aparc_pial_stats', + 'aparc_a2009s_annot', + 'aparc_a2009s_annot_ctab', + 'aparc_a2009s_annot_stats', + 'aparc_DKTatlas40_annot', + 'aparc_DKTatlas40_annot_ctab', + 'aparc_DKTatlas40_annot_stats', + 'wg_pct_mgh', + 'wg_pct_stats', + 'pctsurfcon_log', + ] + hemi_outputspec2 = pe.Node( + IdentityInterface(fields=hemi_outputs2), name="outputspec") + + hemiwf2.connect([ + (contrast, hemi_outputspec2, + [('out_contrast', 'wg_pct_mgh'), ('out_stats', 'wg_pct_stats'), + ('out_log', 'pctsurfcon_log')]), + (parcellation_stats_white_3, hemi_outputspec2, + [('out_color', 'aparc_DKTatlas40_annot_ctab'), + ('out_table', 'aparc_DKTatlas40_annot_stats')]), + (cortical_parcellation_3, hemi_outputspec2, + [('out_file', 'aparc_DKTatlas40_annot')]), + (parcellation_stats_white_2, hemi_outputspec2, + [('out_color', 'aparc_a2009s_annot_ctab'), + ('out_table', 'aparc_a2009s_annot_stats')]), + (cortical_parcellation_2, hemi_outputspec2, + [('out_file', 'aparc_a2009s_annot')]), + (parcellation_stats_white, hemi_outputspec2, + [('out_color', 'aparc_annot_ctab'), ('out_table', + 'aparc_stats')]), + (parcellation_stats_pial, hemi_outputspec2, + [('out_table', 'aparc_pial_stats')]), + ]) + # connect inputs to hemisphere2 workflow + ar3_wf.connect([ + (inputspec, hemiwf2, [ + ('wm', 'inputspec.wm'), + ('lh_white', 'inputspec.lh_white'), + ('rh_white', 'inputspec.rh_white'), + ('transform', 'inputspec.transform'), + ('brainmask', 'inputspec.brainmask'), + ('aseg_presurf', 
'inputspec.aseg_presurf'), + ('{0}_cortex_label'.format(hemisphere), + 'inputspec.cortex_label'), + ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'), + ('orig_mgz', 'inputspec.orig_mgz'), + ('rawavg', 'inputspec.rawavg'), + ('{0}_curv'.format(hemisphere), 'inputspec.curv'), + ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'), + ('{0}_classifier2'.format(hemisphere), + 'inputspec.classifier2'), + ('{0}_classifier3'.format(hemisphere), + 'inputspec.classifier3'), + ]), + (ar3_lh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.lh_pial')]), + (ar3_rh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.rh_pial')]), + (hemiwf1, hemiwf2, + [('outputspec.thickness_pial', 'inputspec.thickness'), + ('outputspec.aparc_annot', 'inputspec.aparc_annot'), + ('outputspec.sphere_reg', 'inputspec.sphere_reg')]), + (volume_mask, hemiwf2, [('out_ribbon', 'inputspec.ribbon')]), + ]) + # End hemisphere2 workflow + + # APARC to ASEG + # Adds information from the ribbon into the aseg.mgz (volume parcellation). + aparc_2_aseg = pe.Node(Aparc2Aseg(), name="Aparc2Aseg") + aparc_2_aseg.inputs.volmask = True + aparc_2_aseg.inputs.copy_inputs = True + aparc_2_aseg.inputs.out_file = "aparc+aseg.mgz" + ar3_wf.connect([(inputspec, aparc_2_aseg, [ + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ]), (ar3_lh_wf1, aparc_2_aseg, [ + ('outputspec.pial', 'lh_pial'), + ('outputspec.aparc_annot', 'lh_annotation'), + ]), (ar3_rh_wf1, aparc_2_aseg, [ + ('outputspec.pial', 'rh_pial'), + ('outputspec.aparc_annot', 'rh_annotation'), + ]), (volume_mask, aparc_2_aseg, [ + ('rh_ribbon', 'rh_ribbon'), + ('lh_ribbon', 'lh_ribbon'), + ('out_ribbon', 'ribbon'), + ])]) + if fsvernum < 6: + ar3_wf.connect([(inputspec, aparc_2_aseg, [('aseg_presurf', 'aseg')])]) + else: + # Relabel Hypointensities + relabel_hypos = pe.Node( + RelabelHypointensities(), name="Relabel_Hypointensities") + relabel_hypos.inputs.out_file = 'aseg.presurf.hypos.mgz' + ar3_wf.connect([(inputspec, relabel_hypos, + [('aseg_presurf', 'aseg'), ('lh_white', 'lh_white'), + ('rh_white', 'rh_white')])]) + ar3_wf.connect([(relabel_hypos, aparc_2_aseg, [('out_file', 'aseg')])]) + + aparc_2_aseg_2009 = pe.Node(Aparc2Aseg(), name="Aparc2Aseg_2009") + aparc_2_aseg_2009.inputs.volmask = True + aparc_2_aseg_2009.inputs.a2009s = True + aparc_2_aseg_2009.inputs.copy_inputs = True + aparc_2_aseg_2009.inputs.out_file = "aparc.a2009s+aseg.mgz" + ar3_wf.connect([(inputspec, aparc_2_aseg_2009, [ + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ]), (ar3_lh_wf1, aparc_2_aseg_2009, [ + ('outputspec.pial', 'lh_pial'), + ]), (ar3_lh_wf2, aparc_2_aseg_2009, [('outputspec.aparc_a2009s_annot', + 'lh_annotation')]), + (ar3_rh_wf2, aparc_2_aseg_2009, + [('outputspec.aparc_a2009s_annot', + 'rh_annotation')]), (ar3_rh_wf1, aparc_2_aseg_2009, [ + ('outputspec.pial', 'rh_pial'), + ]), (volume_mask, aparc_2_aseg_2009, + [('rh_ribbon', 'rh_ribbon'), + ('lh_ribbon', 'lh_ribbon'), ('out_ribbon', + 'ribbon')])]) + + if fsvernum >= 6: + apas_2_aseg = pe.Node(Apas2Aseg(), name="Apas_2_Aseg") + ar3_wf.connect([(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_file')]), + (relabel_hypos, aparc_2_aseg_2009, [('out_file', + 'aseg')])]) + else: + # aseg.mgz gets edited in place, so we'll copy and pass it to the + # outputspec once aparc_2_aseg has completed + def out_aseg(in_aparcaseg, in_aseg, out_file): + import shutil + import os + out_file = os.path.abspath(out_file) + shutil.copy(in_aseg, out_file) + return out_file + + apas_2_aseg = pe.Node( + Function(['in_aparcaseg', 'in_aseg', 'out_file'], 
['out_file'], + out_aseg), + name="Aseg") + ar3_wf.connect( + [(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_aparcaseg')]), + (inputspec, apas_2_aseg, [('aseg_presurf', 'in_aseg')]), + (inputspec, aparc_2_aseg_2009, [('aseg_presurf', 'aseg')])]) + + apas_2_aseg.inputs.out_file = "aseg.mgz" + + # Segmentation Stats + """ + Computes statistics on the segmented subcortical structures found in + mri/aseg.mgz. Writes output to file stats/aseg.stats. + """ + + segstats = pe.Node(SegStatsReconAll(), name="Segmentation_Statistics") + segstats.inputs.empty = True + segstats.inputs.brain_vol = 'brain-vol-from-seg' + segstats.inputs.exclude_ctx_gm_wm = True + segstats.inputs.supratent = True + segstats.inputs.subcort_gm = True + segstats.inputs.etiv = True + segstats.inputs.wm_vol_from_surf = True + segstats.inputs.cortex_vol_from_surf = True + segstats.inputs.total_gray = True + segstats.inputs.euler = True + segstats.inputs.exclude_id = 0 + segstats.inputs.intensity_units = "MR" + segstats.inputs.summary_file = 'aseg.stats' + segstats.inputs.copy_inputs = True + + ar3_wf.connect([ + (apas_2_aseg, segstats, [('out_file', 'segmentation_file')]), + (inputspec, segstats, [ + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('norm', 'in_intensity'), + ('norm', 'partial_volume_file'), + ('brainmask', 'brainmask_file'), + ('lh_orig_nofix', 'lh_orig_nofix'), + ('rh_orig_nofix', 'rh_orig_nofix'), + ('lookup_table', 'color_table_file'), + ]), + (volume_mask, segstats, [('out_ribbon', 'ribbon')]), + (ar3_lh_wf1, segstats, [ + ('outputspec.pial', 'lh_pial'), + ]), + (ar3_rh_wf1, segstats, [ + ('outputspec.pial', 'rh_pial'), + ]), + ]) + + if fsvernum >= 6: + ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'presurf_seg') + else: + ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'aseg') + + # White Matter Parcellation + + # Adds WM Parcellation info into the aseg and computes stat. 
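+ #
+ # A hedged sketch of the equivalent standalone call (flags mirror the
+ # node configuration below; the remaining inputs come from the workflow)::
+ #
+ #     from nipype.interfaces.freesurfer import Aparc2Aseg
+ #     wmparc = Aparc2Aseg(volmask=True, label_wm=True, hypo_wm=True,
+ #                         rip_unknown=True, out_file='wmparc.mgz')
+ #     wmparc.run()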
+ + wm_parcellation = pe.Node(Aparc2Aseg(), name="WM_Parcellation") + wm_parcellation.inputs.volmask = True + wm_parcellation.inputs.label_wm = True + wm_parcellation.inputs.hypo_wm = True + wm_parcellation.inputs.rip_unknown = True + wm_parcellation.inputs.copy_inputs = True + wm_parcellation.inputs.out_file = "wmparc.mgz" + + ar3_wf.connect([(inputspec, wm_parcellation, [ + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ]), (ar3_lh_wf1, wm_parcellation, [ + ('outputspec.pial', 'lh_pial'), + ('outputspec.aparc_annot', 'lh_annotation'), + ]), (ar3_rh_wf1, wm_parcellation, [ + ('outputspec.pial', 'rh_pial'), + ('outputspec.aparc_annot', 'rh_annotation'), + ]), (volume_mask, wm_parcellation, [ + ('rh_ribbon', 'rh_ribbon'), + ('lh_ribbon', 'lh_ribbon'), + ('out_ribbon', 'ribbon'), + ]), (apas_2_aseg, wm_parcellation, [('out_file', 'aseg')]), + (aparc_2_aseg, wm_parcellation, [('out_file', 'ctxseg')])]) + + if fsvernum < 6: + ar3_wf.connect([(inputspec, wm_parcellation, [('filled', 'filled')])]) + + # White Matter Segmentation Stats + + wm_segstats = pe.Node( + SegStatsReconAll(), name="WM_Segmentation_Statistics") + wm_segstats.inputs.intensity_units = "MR" + wm_segstats.inputs.wm_vol_from_surf = True + wm_segstats.inputs.etiv = True + wm_segstats.inputs.copy_inputs = True + wm_segstats.inputs.exclude_id = 0 + wm_segstats.inputs.summary_file = "wmparc.stats" + + ar3_wf.connect([ + (wm_parcellation, wm_segstats, [('out_file', 'segmentation_file')]), + (inputspec, wm_segstats, [ + ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), + ('transform', 'transform'), + ('norm', 'in_intensity'), + ('norm', 'partial_volume_file'), + ('brainmask', 'brainmask_file'), + ('lh_orig_nofix', 'lh_orig_nofix'), + ('rh_orig_nofix', 'rh_orig_nofix'), + ('wm_lookup_table', 'color_table_file'), + ]), + (volume_mask, wm_segstats, [('out_ribbon', 'ribbon')]), + (ar3_lh_wf1, wm_segstats, [ + ('outputspec.pial', 'lh_pial'), + ]), + (ar3_rh_wf1, wm_segstats, [ + ('outputspec.pial', 'rh_pial'), + ]), + ]) + + if fsvernum >= 6: + ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'presurf_seg') + else: + ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'aseg') + + # add brodman area maps to the workflow + ba_WF, ba_outputs = create_ba_maps_wf( + th3=th3, exvivo=exvivo, entorhinal=entorhinal) + + ar3_wf.connect([(ar3_lh_wf1, ba_WF, [ + ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg'), + ('outputspec.thickness_pial', 'inputspec.lh_thickness'), + ('outputspec.pial', 'inputspec.lh_pial'), + ]), (ar3_rh_wf1, ba_WF, [ + ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg'), + ('outputspec.thickness_pial', 'inputspec.rh_thickness'), + ('outputspec.pial', 'inputspec.rh_pial'), + ]), (inputspec, ba_WF, [ + ('lh_white', 'inputspec.lh_white'), + ('rh_white', 'inputspec.rh_white'), + ('transform', 'inputspec.transform'), + ('aseg_presurf', 'inputspec.aseg'), + ('brainmask', 'inputspec.brainmask'), + ('wm', 'inputspec.wm'), + ('lh_orig', 'inputspec.lh_orig'), + ('rh_orig', 'inputspec.rh_orig'), + ('lh_cortex_label', 'inputspec.lh_cortex_label'), + ('rh_cortex_label', 'inputspec.rh_cortex_label'), + ('src_subject_dir', 'inputspec.src_subject_dir'), + ('src_subject_id', 'inputspec.src_subject_id'), + ('color_table', 'inputspec.color_table'), + ]), (volume_mask, ba_WF, [('out_ribbon', 'inputspec.ribbon')])]) + + if qcache: + source_inputs = ['lh_sphere_reg', 'rh_sphere_reg'] + source_subject = pe.Node( + DataGrabber(outfields=source_inputs), + name="{0}_srcsubject".format(hemisphere)) + 
source_subject.inputs.template = '*'
+ source_subject.inputs.sort_filelist = False
+ source_subject.inputs.field_template = dict(
+ lh_sphere_reg='surf/lh.sphere.reg',
+ rh_sphere_reg='surf/rh.sphere.reg')
+
+ qcache_wf = pe.Workflow("QCache")
+
+ measurements = [
+ 'thickness', 'area', 'area.pial', 'volume', 'curv', 'sulc',
+ 'white.K', 'white.H', 'jacobian_white', 'w-g.pct.mgh'
+ ]
+
+ qcache_inputs = list()
+ for source_file in source_inputs:
+ qcache_inputs.append('source_' + source_file)
+ qcache_config = dict()
+ qcache_outputs = list()
+ for hemisphere in ['lh', 'rh']:
+ qcache_config[hemisphere] = dict()
+ for meas_name in measurements:
+ qcache_config[hemisphere][meas_name] = dict()
+
+ if meas_name == 'thickness':
+ meas_file = hemisphere + '_' + meas_name + '_pial'
+ else:
+ meas_file = hemisphere + '_' + meas_name.replace(
+ '.', '_').replace('-', '')
+ qcache_inputs.append(meas_file)
+
+ preproc_name = "Preproc_{0}".format(meas_file)
+ preproc_out = '{0}.{1}.{2}.mgh'.format(
+ hemisphere, meas_name, config['src_subject_id'])
+ preproc_out_name = preproc_out.replace('.', '_')
+ qcache_config[hemisphere][meas_name]['preproc'] = dict(
+ infile=meas_file,
+ name=preproc_name,
+ out=preproc_out,
+ out_name=preproc_out_name)
+ qcache_outputs.append(preproc_out_name)
+
+ qcache_config[hemisphere][meas_name]['smooth'] = dict()
+ for value in range(0, 26, 5):
+ smooth_name = "Smooth_{0}_{1}".format(meas_file, value)
+ smooth_out = "{0}.{1}.fwhm{2}.{3}.mgh".format(
+ hemisphere, meas_name, value, config['src_subject_id'])
+ smooth_out_name = smooth_out.replace('.', '_')
+ qcache_config[hemisphere][meas_name]['smooth'][
+ value] = dict(
+ name=smooth_name,
+ out=smooth_out,
+ out_name=smooth_out_name)
+ qcache_outputs.append(smooth_out_name)
+
+ qcache_inputs.append(hemisphere + '_sphere_reg')
+
+ qcache_inputspec = pe.Node(
+ IdentityInterface(fields=qcache_inputs), name="inputspec")
+
+ qcache_outputspec = pe.Node(
+ IdentityInterface(fields=qcache_outputs), name="outputspec")
+
+ for hemi in qcache_config.keys():
+ for meas_config in qcache_config[hemi].values():
+ preprocess = pe.Node(
+ MRISPreprocReconAll(), name=meas_config['preproc']['name'])
+ target_id = config['src_subject_id']
+ preprocess.inputs.out_file = meas_config['preproc']['out']
+ preprocess.inputs.target = target_id
+ preprocess.inputs.hemi = hemi
+ preprocess.inputs.copy_inputs = True
+
+ qcache_merge = pe.Node(
+ Merge(2),
+ name="Merge{0}".format(meas_config['preproc']['name']))
+
+ qcache_wf.connect([
+ (qcache_inputspec, qcache_merge,
+ [('lh_sphere_reg', 'in1'), ('rh_sphere_reg', 'in2')]),
+ (qcache_inputspec, preprocess,
+ [(meas_config['preproc']['infile'], 'surf_measure_file'),
+ ('source_lh_sphere_reg', 'lh_surfreg_target'),
+ ('source_rh_sphere_reg', 'rh_surfreg_target')]),
+ (qcache_merge, preprocess, [('out', 'surfreg_files')]),
+ (preprocess, qcache_outputspec,
+ [('out_file', meas_config['preproc']['out_name'])]),
+ ])
+
+ for value, val_config in meas_config['smooth'].items():
+ surf2surf = pe.Node(
+ SurfaceSmooth(), name=val_config['name'])
+ surf2surf.inputs.fwhm = value
+ surf2surf.inputs.cortex = True
+ surf2surf.inputs.subject_id = target_id
+ surf2surf.inputs.hemi = hemi
+ surf2surf.inputs.out_file = val_config['out']
+ qcache_wf.connect(
+ [(preprocess, surf2surf, [('out_file', 'in_file')]),
+ (surf2surf, qcache_outputspec,
+ [('out_file', val_config['out_name'])])])
+
+ # connect qcache inputs
+ ar3_wf.connect([
+ (inputspec, qcache_wf,
+ [('lh_curv', 'inputspec.lh_curv'),
('rh_curv', + 'inputspec.rh_curv'), + ('lh_sulc', 'inputspec.lh_sulc'), ('rh_sulc', + 'inputspec.rh_sulc'), + ('lh_white_K', 'inputspec.lh_white_K'), ('rh_white_K', + 'inputspec.rh_white_K'), + ('lh_area', 'inputspec.lh_area'), ('rh_area', + 'inputspec.rh_area')]), + (ar3_lh_wf1, qcache_wf, + [('outputspec.thickness_pial', 'inputspec.lh_thickness_pial'), + ('outputspec.area_pial', + 'inputspec.lh_area_pial'), ('outputspec.volume', + 'inputspec.lh_volume'), + ('outputspec.jacobian_white', + 'inputspec.lh_jacobian_white'), ('outputspec.sphere_reg', + 'inputspec.lh_sphere_reg')]), + (ar3_lh_wf2, qcache_wf, [('outputspec.wg_pct_mgh', + 'inputspec.lh_wg_pct_mgh')]), + (ar3_rh_wf1, qcache_wf, + [('outputspec.thickness_pial', 'inputspec.rh_thickness_pial'), + ('outputspec.area_pial', + 'inputspec.rh_area_pial'), ('outputspec.volume', + 'inputspec.rh_volume'), + ('outputspec.jacobian_white', + 'inputspec.rh_jacobian_white'), ('outputspec.sphere_reg', + 'inputspec.rh_sphere_reg')]), + (ar3_rh_wf2, qcache_wf, [('outputspec.wg_pct_mgh', + 'inputspec.rh_wg_pct_mgh')]), + ]) + for source_file in source_inputs: + ar3_wf.connect([(inputspec, source_subject, [('source_subject_dir', + 'base_directory')]), + (source_subject, qcache_wf, + [(source_file, + 'inputspec.source_' + source_file)])]) + # end qcache workflow + + # Add outputs to outputspec + ar3_outputs = [ + 'aseg', 'wmparc', 'wmparc_stats', 'aseg_stats', 'aparc_a2009s_aseg', + 'aparc_aseg', 'aseg_presurf_hypos', 'ribbon', 'rh_ribbon', 'lh_ribbon' + ] + for output in hemi_outputs1 + hemi_outputs2: + for hemi in ('lh_', 'rh_'): + ar3_outputs.append(hemi + output) + if qcache: + ar3_outputs.extend(qcache_outputs) + + ar3_outputs.extend(ba_outputs) + + outputspec = pe.Node( + IdentityInterface(fields=ar3_outputs), name="outputspec") + + ar3_wf.connect([(apas_2_aseg, outputspec, + [('out_file', 'aseg')]), (wm_parcellation, outputspec, + [('out_file', 'wmparc')]), + (wm_segstats, outputspec, + [('summary_file', + 'wmparc_stats')]), (segstats, outputspec, + [('summary_file', 'aseg_stats')]), + (aparc_2_aseg_2009, outputspec, + [('out_file', + 'aparc_a2009s_aseg')]), (aparc_2_aseg, outputspec, + [('out_file', 'aparc_aseg')]), + (volume_mask, outputspec, + [('out_ribbon', 'ribbon'), ('lh_ribbon', 'lh_ribbon'), + ('rh_ribbon', 'rh_ribbon')])]) + if fsvernum >= 6: + ar3_wf.connect([(relabel_hypos, outputspec, [('out_file', + 'aseg_presurf_hypos')])]) + + for i, outputs in enumerate([hemi_outputs1, hemi_outputs2]): + if i == 0: + lhwf = ar3_lh_wf1 + rhwf = ar3_rh_wf1 + else: + lhwf = ar3_lh_wf2 + rhwf = ar3_rh_wf2 + for output in outputs: + ar3_wf.connect([(lhwf, outputspec, [('outputspec.' + output, + 'lh_' + output)]), + (rhwf, outputspec, [('outputspec.' + output, + 'rh_' + output)])]) + + for output in ba_outputs: + ar3_wf.connect([(ba_WF, outputspec, [('outputspec.' + output, + output)])]) + + if qcache: + for output in qcache_outputs: + ar3_wf.connect([(qcache_wf, outputspec, [('outputspec.' 
+ output, + output)])]) + + return ar3_wf, ar3_outputs diff --git a/nipype/workflows/smri/freesurfer/ba_maps.py b/nipype/workflows/smri/freesurfer/ba_maps.py new file mode 100644 index 0000000000..8a4ae6caf1 --- /dev/null +++ b/nipype/workflows/smri/freesurfer/ba_maps.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +from __future__ import (print_function, division, unicode_literals, + absolute_import) +import os +from ....interfaces.utility import Function, IdentityInterface +from ....pipeline import engine as pe # pypeline engine +from ....interfaces.freesurfer import Label2Label, Label2Annot, ParcellationStats +from ....interfaces.io import DataGrabber +from ....interfaces.utility import Merge + + +def create_ba_maps_wf(name="Brodmann_Area_Maps", + th3=True, + exvivo=True, + entorhinal=True): + # Brodmann Area Maps (BA Maps) and Hinds V1 Atlas + inputs = [ + 'lh_sphere_reg', 'rh_sphere_reg', 'lh_white', 'rh_white', 'lh_pial', + 'rh_pial', 'lh_orig', 'rh_orig', 'transform', 'lh_thickness', + 'rh_thickness', 'lh_cortex_label', 'rh_cortex_label', 'brainmask', + 'aseg', 'ribbon', 'wm', 'src_subject_id', 'src_subject_dir', + 'color_table' + ] + + inputspec = pe.Node(IdentityInterface(fields=inputs), name="inputspec") + + ba_WF = pe.Workflow(name=name) + + ba_outputs = [ + 'lh_BAMaps_stats', 'lh_color', 'lh_BAMaps_labels', + 'lh_BAMaps_annotation', 'lh_thresh_BAMaps_stats', 'lh_thresh_color', + 'lh_thresh_BAMaps_labels', 'lh_thresh_BAMaps_annotation', + 'rh_BAMaps_stats', 'rh_color', 'rh_BAMaps_labels', + 'rh_BAMaps_annotation', 'rh_thresh_BAMaps_stats', 'rh_thresh_color', + 'rh_thresh_BAMaps_labels', 'rh_thresh_BAMaps_annotation' + ] + + outputspec = pe.Node( + IdentityInterface(fields=ba_outputs), name="outputspec") + + labels = [ + "BA1", "BA2", "BA3a", "BA3b", "BA4a", "BA4p", "BA6", "BA44", "BA45", + "V1", "V2", "MT", "perirhinal" + ] + if entorhinal: + labels.insert(-1, 'entorhinal') + for hemisphere in ['lh', 'rh']: + for threshold in [True, False]: + field_template = dict( + sphere_reg='surf/{0}.sphere.reg'.format(hemisphere), + white='surf/{0}.white'.format(hemisphere)) + + out_files = list() + source_fields = list() + if threshold: + for label in labels: + if label == 'perirhinal' and not entorhinal: + # versions < 6.0 do not use thresh.perirhinal + continue + if exvivo: + out_file = '{0}.{1}_exvivo.thresh.label'.format( + hemisphere, label) + else: + out_file = '{0}.{1}.thresh.label'.format( + hemisphere, label) + out_files.append(out_file) + field_template[label] = 'label/' + out_file + source_fields.append(label) + node_name = 'BA_Maps_' + hemisphere + '_Thresh' + else: + for label in labels: + if exvivo: + out_file = '{0}.{1}_exvivo.label'.format( + hemisphere, label) + else: + out_file = '{0}.{1}.label'.format(hemisphere, label) + + out_files.append(out_file) + field_template[label] = 'label/' + out_file + source_fields.append(label) + node_name = 'BA_Maps_' + hemisphere + + source_subject = pe.Node( + DataGrabber(outfields=source_fields + ['sphere_reg', 'white']), + name=node_name + "_srcsubject") + source_subject.inputs.template = '*' + source_subject.inputs.sort_filelist = False + source_subject.inputs.field_template = field_template + ba_WF.connect([(inputspec, source_subject, [('src_subject_dir', + 'base_directory')])]) + + merge_labels = pe.Node( + Merge(len(out_files)), name=node_name + "_Merge") + for i, label in enumerate(source_fields): + ba_WF.connect([(source_subject, merge_labels, + [(label, 'in{0}'.format(i + 1))])]) + + node = pe.MapNode( + Label2Label(), + 
name=node_name + '_Label2Label', + iterfield=['source_label', 'out_file']) + node.inputs.hemisphere = hemisphere + node.inputs.out_file = out_files + node.inputs.copy_inputs = True + + ba_WF.connect( + [(merge_labels, node, [('out', 'source_label')]), + (source_subject, node, [('sphere_reg', 'source_sphere_reg'), + ('white', 'source_white')]), + (inputspec, node, [('src_subject_id', 'source_subject')])]) + + label2annot = pe.Node(Label2Annot(), name=node_name + '_2_Annot') + label2annot.inputs.hemisphere = hemisphere + label2annot.inputs.verbose_off = True + label2annot.inputs.keep_max = True + label2annot.inputs.copy_inputs = True + + stats_node = pe.Node( + ParcellationStats(), name=node_name + '_Stats') + stats_node.inputs.hemisphere = hemisphere + stats_node.inputs.mgz = True + stats_node.inputs.th3 = th3 + stats_node.inputs.surface = 'white' + stats_node.inputs.tabular_output = True + stats_node.inputs.copy_inputs = True + + if threshold: + label2annot.inputs.out_annot = "BA_exvivo.thresh" + ba_WF.connect( + [(stats_node, outputspec, + [('out_color', '{0}_thresh_color'.format(hemisphere)), + ('out_table', + '{0}_thresh_BAMaps_stats'.format(hemisphere))]), + (label2annot, outputspec, + [('out_file', + '{0}_thresh_BAMaps_annotation'.format(hemisphere))]), + (node, outputspec, + [('out_file', + '{0}_thresh_BAMaps_labels'.format(hemisphere))])]) + else: + label2annot.inputs.out_annot = "BA_exvivo" + ba_WF.connect( + [(stats_node, outputspec, + [('out_color', '{0}_color'.format(hemisphere)), + ('out_table', '{0}_BAMaps_stats'.format(hemisphere))]), + (label2annot, outputspec, + [('out_file', + '{0}_BAMaps_annotation'.format(hemisphere))]), + (node, outputspec, + [('out_file', '{0}_BAMaps_labels'.format(hemisphere))])]) + + ba_WF.connect( + [(inputspec, node, [ + ('{0}_sphere_reg'.format(hemisphere), 'sphere_reg'), + ('{0}_white'.format(hemisphere), 'white'), + ]), (node, label2annot, [('out_file', 'in_labels')]), + (inputspec, label2annot, + [('{0}_orig'.format(hemisphere), 'orig'), + ('color_table', 'color_table')]), (label2annot, stats_node, + [('out_file', + 'in_annotation')]), + (inputspec, stats_node, + [('{0}_thickness'.format(hemisphere), + 'thickness'), ('{0}_cortex_label'.format(hemisphere), + 'cortex_label'), ('lh_white', 'lh_white'), + ('rh_white', 'rh_white'), ('lh_pial', 'lh_pial'), + ('rh_pial', 'rh_pial'), ('transform', + 'transform'), ('brainmask', + 'brainmask'), + ('aseg', 'aseg'), ('wm', 'wm'), ('ribbon', 'ribbon')])]) + + return ba_WF, ba_outputs diff --git a/nipype/workflows/smri/freesurfer/bem.py b/nipype/workflows/smri/freesurfer/bem.py new file mode 100644 index 0000000000..b959de4852 --- /dev/null +++ b/nipype/workflows/smri/freesurfer/bem.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from ....pipeline import engine as pe +from ....interfaces import mne as mne +from ....interfaces import freesurfer as fs +from ....interfaces import utility as niu + + +def create_bem_flow(name='bem', out_format='stl'): + """Uses MNE's Watershed algorithm to create Boundary Element Meshes (BEM) + for a subject's brain, inner/outer skull, and skin. The surfaces are + returned in the desired (by default, stereolithic .stl) format. + + Example + ------- + >>> from nipype.workflows.smri.freesurfer import create_bem_flow + >>> bemflow = create_bem_flow() + >>> bemflow.inputs.inputspec.subject_id = 'subj1' + >>> bemflow.inputs.inputspec.subjects_dir = '.' 
+    >>> bemflow.run()  # doctest: +SKIP
+
+
+    Inputs::
+
+        inputspec.subject_id : freesurfer subject id
+        inputspec.subjects_dir : freesurfer subjects directory
+
+    Outputs::
+
+        outputspec.meshes : output boundary element meshes in (by default)
+            stereolithographic (.stl) format
+    """
+    """
+    Initialize the workflow
+    """
+
+    bemflow = pe.Workflow(name=name)
+    """
+    Define the inputs to the workflow.
+    """
+
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=['subject_id', 'subjects_dir']),
+        name='inputspec')
+    """
+    Define all the nodes of the workflow:
+
+      watershed_bem : runs the MNE watershed algorithm to create the
+          brain, inner skull, outer skull and outer skin surfaces
+      surfconvert : converts the surfaces to the requested output
+          format (stereolithographic, .stl, by default)
+
+    """
+
+    watershed_bem = pe.Node(interface=mne.WatershedBEM(), name='WatershedBEM')
+
+    surfconvert = pe.MapNode(
+        fs.MRIsConvert(out_datatype=out_format),
+        iterfield=['in_file'],
+        name='surfconvert')
+    """
+    Connect the nodes
+    """
+
+    bemflow.connect([
+        (inputnode, watershed_bem, [('subject_id', 'subject_id'),
+                                    ('subjects_dir', 'subjects_dir')]),
+        (watershed_bem, surfconvert, [('mesh_files', 'in_file')]),
+    ])
+    """
+    Setup an outputnode that defines relevant outputs of the workflow.
+    """
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=["meshes"]), name="outputspec")
+    bemflow.connect([
+        (surfconvert, outputnode, [("converted", "meshes")]),
+    ])
+    return bemflow
diff --git a/nipype/workflows/smri/freesurfer/recon.py b/nipype/workflows/smri/freesurfer/recon.py
new file mode 100644
index 0000000000..f0ad4ad6fd
--- /dev/null
+++ b/nipype/workflows/smri/freesurfer/recon.py
@@ -0,0 +1,604 @@
+# -*- coding: utf-8 -*-
+from __future__ import (print_function, division, unicode_literals,
+                        absolute_import)
+from ....pipeline import engine as pe
+from ....interfaces import freesurfer as fs
+from ....interfaces import utility as niu
+from .autorecon1 import create_AutoRecon1
+from .autorecon2 import create_AutoRecon2
+from .autorecon3 import create_AutoRecon3
+from ....interfaces.freesurfer import AddXFormToHeader, Info
+from ....interfaces.io import DataSink
+from .utils import getdefaultconfig
+from .... import logging
+
+logger = logging.getLogger('nipype.workflow')
+
+
+def create_skullstripped_recon_flow(name="skullstripped_recon_all"):
+    """Performs recon-all on volumes that are already skull stripped.
+    FreeSurfer fails to perform skull stripping on some volumes (especially
+    MP2RAGE). This can be avoided by doing skull stripping before running
+    recon-all (using for example the SPECTRE algorithm).
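+    The workflow runs the autorecon1 stage with -noskullstrip, links T1.mgz
+    in place of brainmask.auto.mgz and brainmask.mgz, and then resumes
+    recon-all, so the later stages treat the input volume as already masked.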
+ + Example + ------- + >>> from nipype.workflows.smri.freesurfer import create_skullstripped_recon_flow + >>> recon_flow = create_skullstripped_recon_flow() + >>> recon_flow.inputs.inputspec.subject_id = 'subj1' + >>> recon_flow.inputs.inputspec.T1_files = 'T1.nii.gz' + >>> recon_flow.run() # doctest: +SKIP + + + Inputs:: + inputspec.T1_files : skullstripped T1_files (mandatory) + inputspec.subject_id : freesurfer subject id (optional) + inputspec.subjects_dir : freesurfer subjects directory (optional) + + Outputs:: + + outputspec.subject_id : freesurfer subject id + outputspec.subjects_dir : freesurfer subjects directory + """ + wf = pe.Workflow(name=name) + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['subject_id', 'subjects_dir', 'T1_files']), + name='inputspec') + + autorecon1 = pe.Node(fs.ReconAll(), name="autorecon1") + autorecon1.plugin_args = {'submit_specs': 'request_memory = 2500'} + autorecon1.inputs.directive = "autorecon1" + autorecon1.inputs.args = "-noskullstrip" + autorecon1._interface._can_resume = False + + wf.connect(inputnode, "T1_files", autorecon1, "T1_files") + wf.connect(inputnode, "subjects_dir", autorecon1, "subjects_dir") + wf.connect(inputnode, "subject_id", autorecon1, "subject_id") + + def link_masks(subjects_dir, subject_id): + import os + os.symlink( + os.path.join(subjects_dir, subject_id, "mri", "T1.mgz"), + os.path.join(subjects_dir, subject_id, "mri", + "brainmask.auto.mgz")) + os.symlink( + os.path.join(subjects_dir, subject_id, "mri", + "brainmask.auto.mgz"), + os.path.join(subjects_dir, subject_id, "mri", "brainmask.mgz")) + return subjects_dir, subject_id + + masks = pe.Node( + niu.Function( + input_names=['subjects_dir', 'subject_id'], + output_names=['subjects_dir', 'subject_id'], + function=link_masks), + name="link_masks") + + wf.connect(autorecon1, "subjects_dir", masks, "subjects_dir") + wf.connect(autorecon1, "subject_id", masks, "subject_id") + + autorecon_resume = pe.Node(fs.ReconAll(), name="autorecon_resume") + autorecon_resume.plugin_args = {'submit_specs': 'request_memory = 2500'} + autorecon_resume.inputs.args = "-no-isrunning" + wf.connect(masks, "subjects_dir", autorecon_resume, "subjects_dir") + wf.connect(masks, "subject_id", autorecon_resume, "subject_id") + + outputnode = pe.Node( + niu.IdentityInterface(fields=['subject_id', 'subjects_dir']), + name='outputspec') + + wf.connect(autorecon_resume, "subjects_dir", outputnode, "subjects_dir") + wf.connect(autorecon_resume, "subject_id", outputnode, "subject_id") + return wf + + +def create_reconall_workflow(name="ReconAll", plugin_args=None): + """Creates the ReconAll workflow in Nipype. This workflow is designed to + run the same commands as FreeSurfer's reconall script but with the added + features that a Nipype workflow provides. Before running this workflow, it + is necessary to have the FREESURFER_HOME environmental variable set to the + directory containing the version of FreeSurfer to be used in this workflow. + + Example + ------- + >>> from nipype.workflows.smri.freesurfer import create_reconall_workflow + >>> recon_all = create_reconall_workflow() + >>> recon_all.inputs.inputspec.subject_id = 'subj1' + >>> recon_all.inputs.inputspec.subjects_dir = '.' 
+    >>> recon_all.inputs.inputspec.T1_files = 'T1.nii.gz'
+    >>> recon_all.run()  # doctest: +SKIP
+
+
+    Inputs::
+
+        inputspec.subjects_dir : subjects directory (mandatory)
+        inputspec.subject_id : name of subject (mandatory)
+        inputspec.T1_files : T1 files (mandatory)
+        inputspec.T2_file : T2 file (optional)
+        inputspec.FLAIR_file : FLAIR file (optional)
+        inputspec.cw256 : Conform inputs to 256 FOV (optional)
+        inputspec.num_threads: Number of threads on nodes that utilize OpenMP (default=1)
+        plugin_args : Dictionary of plugin args to set to nodes that utilize OpenMP (optional)
+
+    Outputs::
+
+        postdatasink_outputspec.subject_id : name of the datasinked output folder in the subjects directory
+
+    Note:
+        The input subject_id is not passed to the commands in the workflow. Commands
+        that require subject_id are reading implicit inputs from
+        {SUBJECTS_DIR}/{subject_id}. For those commands the subject_id is set to the
+        default value and SUBJECTS_DIR is set to the node directory. The implicit
+        inputs are then copied to the node directory in order to mimic a SUBJECTS_DIR
+        structure. For example, if the command implicitly reads in brainmask.mgz, the
+        interface would copy that input file to
+        {node_dir}/{subject_id}/mri/brainmask.mgz and set SUBJECTS_DIR to node_dir.
+        The workflow only uses the input subject_id to datasink the outputs to
+        {subjects_dir}/{subject_id}.
+    """
+    reconall = pe.Workflow(name=name)
+
+    inputspec = pe.Node(
+        niu.IdentityInterface(fields=[
+            'subject_id', 'subjects_dir', 'T1_files', 'T2_file', 'FLAIR_file',
+            'num_threads', 'cw256', 'reg_template', 'reg_template_withskull',
+            'lh_atlas', 'rh_atlas', 'lh_classifier1', 'rh_classifier1',
+            'lh_classifier2', 'rh_classifier2', 'lh_classifier3',
+            'rh_classifier3', 'lookup_table', 'wm_lookup_table',
+            'src_subject_id', 'src_subject_dir', 'color_table', 'awk_file'
+        ]),
+        run_without_submitting=True,
+        name='inputspec')
+
+    # check freesurfer version and set parameters
+    fs_version_full = Info.version()
+    if fs_version_full and ('v6.0' in fs_version_full
+                            or 'dev' in fs_version_full):
+        # assuming that dev is 6.0
+        fsvernum = 6.0
+        fs_version = 'v6.0'
+        th3 = True
+        shrink = 2
+        distance = 200  # 3T should be 50
+        stop = 0.0001
+        exvivo = True
+        entorhinal = True
+        rb_date = "2014-08-21"
+    else:
+        # 5.3 is default
+        fsvernum = 5.3
+        if fs_version_full:
+            if 'v5.3' in fs_version_full:
+                fs_version = 'v5.3'
+            else:
+                fs_version = fs_version_full.split('-')[-1]
+                logger.info(("Warning: Workflow may not work properly if "
+                             "FREESURFER_HOME environmental variable is not "
+                             "set or if you are using an older version of "
+                             "FreeSurfer"))
+        else:
+            fs_version = 5.3  # assume version 5.3
+        th3 = False
+        shrink = None
+        distance = 50
+        stop = None
+        exvivo = False
+        entorhinal = False
+        rb_date = "2008-03-26"
+
+    logger.info("FreeSurfer Version: {0}".format(fs_version))
+
+    def setconfig(reg_template=None,
+                  reg_template_withskull=None,
+                  lh_atlas=None,
+                  rh_atlas=None,
+                  lh_classifier1=None,
+                  rh_classifier1=None,
+                  lh_classifier2=None,
+                  rh_classifier2=None,
+                  lh_classifier3=None,
+                  rh_classifier3=None,
+                  src_subject_id=None,
+                  src_subject_dir=None,
+                  color_table=None,
+                  lookup_table=None,
+                  wm_lookup_table=None,
+                  awk_file=None,
+                  rb_date=None):
+        """Set optional configurations to the default"""
+
+        def checkarg(arg, default):
+            """Returns the value if defined; otherwise default"""
+            if arg:
+                return arg
+            else:
+                return default
+
+        defaultconfig = getdefaultconfig(exitonfail=True, rb_date=rb_date)
+        # set the default template and
classifier files + reg_template = checkarg(reg_template, + defaultconfig['registration_template']) + reg_template_withskull = checkarg( + reg_template_withskull, + defaultconfig['registration_template_withskull']) + lh_atlas = checkarg(lh_atlas, defaultconfig['lh_atlas']) + rh_atlas = checkarg(rh_atlas, defaultconfig['rh_atlas']) + lh_classifier1 = checkarg(lh_classifier1, + defaultconfig['lh_classifier']) + rh_classifier1 = checkarg(rh_classifier1, + defaultconfig['rh_classifier']) + lh_classifier2 = checkarg(lh_classifier2, + defaultconfig['lh_classifier2']) + rh_classifier2 = checkarg(rh_classifier2, + defaultconfig['rh_classifier2']) + lh_classifier3 = checkarg(lh_classifier3, + defaultconfig['lh_classifier3']) + rh_classifier3 = checkarg(rh_classifier3, + defaultconfig['rh_classifier3']) + src_subject_id = checkarg(src_subject_id, + defaultconfig['src_subject_id']) + src_subject_dir = checkarg(src_subject_dir, + defaultconfig['src_subject_dir']) + color_table = checkarg(color_table, defaultconfig['AvgColorTable']) + lookup_table = checkarg(lookup_table, defaultconfig['LookUpTable']) + wm_lookup_table = checkarg(wm_lookup_table, + defaultconfig['WMLookUpTable']) + awk_file = checkarg(awk_file, defaultconfig['awk_file']) + return reg_template, reg_template_withskull, lh_atlas, rh_atlas, \ + lh_classifier1, rh_classifier1, lh_classifier2, rh_classifier2, \ + lh_classifier3, rh_classifier3, src_subject_id, src_subject_dir, \ + color_table, lookup_table, wm_lookup_table, awk_file + + # list of params to check + params = [ + 'reg_template', 'reg_template_withskull', 'lh_atlas', 'rh_atlas', + 'lh_classifier1', 'rh_classifier1', 'lh_classifier2', 'rh_classifier2', + 'lh_classifier3', 'rh_classifier3', 'src_subject_id', + 'src_subject_dir', 'color_table', 'lookup_table', 'wm_lookup_table', + 'awk_file' + ] + + config_node = pe.Node( + niu.Function(params + ['rb_date'], params, setconfig), name="config") + + config_node.inputs.rb_date = rb_date + + for param in params: + reconall.connect(inputspec, param, config_node, param) + + # create AutoRecon1 + ar1_wf, ar1_outputs = create_AutoRecon1( + plugin_args=plugin_args, + stop=stop, + distance=distance, + shrink=shrink, + fsvernum=fsvernum) + # connect inputs for AutoRecon1 + reconall.connect([(inputspec, ar1_wf, [ + ('T1_files', 'inputspec.T1_files'), ('T2_file', 'inputspec.T2_file'), + ('FLAIR_file', 'inputspec.FLAIR_file'), + ('num_threads', 'inputspec.num_threads'), ('cw256', 'inputspec.cw256') + ]), (config_node, ar1_wf, [('reg_template_withskull', + 'inputspec.reg_template_withskull'), + ('awk_file', 'inputspec.awk_file')])]) + # create AutoRecon2 + ar2_wf, ar2_outputs = create_AutoRecon2( + plugin_args=plugin_args, + fsvernum=fsvernum, + stop=stop, + shrink=shrink, + distance=distance) + # connect inputs for AutoRecon2 + reconall.connect( + [(inputspec, ar2_wf, [('num_threads', 'inputspec.num_threads')]), + (config_node, ar2_wf, [('reg_template_withskull', + 'inputspec.reg_template_withskull'), + ('reg_template', 'inputspec.reg_template')]), + (ar1_wf, ar2_wf, [('outputspec.brainmask', 'inputspec.brainmask'), + ('outputspec.talairach', 'inputspec.transform'), + ('outputspec.orig', 'inputspec.orig')])]) + + if fsvernum < 6: + reconall.connect([(ar1_wf, ar2_wf, [('outputspec.nu', + 'inputspec.nu')])]) + + # create AutoRecon3 + ar3_wf, ar3_outputs = create_AutoRecon3( + plugin_args=plugin_args, + th3=th3, + exvivo=exvivo, + entorhinal=entorhinal, + fsvernum=fsvernum) + # connect inputs for AutoRecon3 + reconall.connect( + [(config_node, 
ar3_wf, + [('lh_atlas', 'inputspec.lh_atlas'), + ('rh_atlas', 'inputspec.rh_atlas'), ('lh_classifier1', + 'inputspec.lh_classifier1'), + ('rh_classifier1', + 'inputspec.rh_classifier1'), ('lh_classifier2', + 'inputspec.lh_classifier2'), + ('rh_classifier2', + 'inputspec.rh_classifier2'), ('lh_classifier3', + 'inputspec.lh_classifier3'), + ('rh_classifier3', + 'inputspec.rh_classifier3'), ('lookup_table', + 'inputspec.lookup_table'), + ('wm_lookup_table', + 'inputspec.wm_lookup_table'), ('src_subject_dir', + 'inputspec.src_subject_dir'), + ('src_subject_id', + 'inputspec.src_subject_id'), ('color_table', + 'inputspec.color_table')]), + (ar1_wf, ar3_wf, [('outputspec.brainmask', 'inputspec.brainmask'), + ('outputspec.talairach', 'inputspec.transform'), + ('outputspec.orig', + 'inputspec.orig_mgz'), ('outputspec.rawavg', + 'inputspec.rawavg')]), + (ar2_wf, ar3_wf, + [('outputspec.aseg_presurf', 'inputspec.aseg_presurf'), + ('outputspec.brain_finalsurfs', + 'inputspec.brain_finalsurfs'), ('outputspec.wm', 'inputspec.wm'), + ('outputspec.filled', 'inputspec.filled'), ('outputspec.norm', + 'inputspec.norm')])]) + for hemi in ('lh', 'rh'): + reconall.connect([(ar2_wf, ar3_wf, + [('outputspec.{0}_inflated'.format(hemi), + 'inputspec.{0}_inflated'.format(hemi)), + ('outputspec.{0}_smoothwm'.format(hemi), + 'inputspec.{0}_smoothwm'.format(hemi)), + ('outputspec.{0}_white'.format(hemi), + 'inputspec.{0}_white'.format(hemi)), + ('outputspec.{0}_cortex'.format(hemi), + 'inputspec.{0}_cortex_label'.format(hemi)), + ('outputspec.{0}_area'.format(hemi), + 'inputspec.{0}_area'.format(hemi)), + ('outputspec.{0}_curv'.format(hemi), + 'inputspec.{0}_curv'.format(hemi)), + ('outputspec.{0}_sulc'.format(hemi), + 'inputspec.{0}_sulc'.format(hemi)), + ('outputspec.{0}_orig_nofix'.format(hemi), + 'inputspec.{0}_orig_nofix'.format(hemi)), + ('outputspec.{0}_orig'.format(hemi), + 'inputspec.{0}_orig'.format(hemi)), + ('outputspec.{0}_white_H'.format(hemi), + 'inputspec.{0}_white_H'.format(hemi)), + ('outputspec.{0}_white_K'.format(hemi), + 'inputspec.{0}_white_K'.format(hemi))])]) + + # Add more outputs to outputspec + outputs = ar1_outputs + ar2_outputs + ar3_outputs + outputspec = pe.Node( + niu.IdentityInterface(fields=outputs, mandatory_inputs=True), + name="outputspec") + + for outfields, wf in [(ar1_outputs, ar1_wf), (ar2_outputs, ar2_wf), + (ar3_outputs, ar3_wf)]: + for field in outfields: + reconall.connect([(wf, outputspec, [('outputspec.' + field, + field)])]) + + # PreDataSink: Switch Transforms to datasinked transfrom + # The transforms in the header files of orig.mgz, orig_nu.mgz, and nu.mgz + # are all reference a transform in the cache directory. 
We need to rewrite the + # headers to reference the datasinked transform + + # get the filepath to where the transform will be datasinked + def getDSTransformPath(subjects_dir, subject_id): + import os + transform = os.path.join(subjects_dir, subject_id, 'mri', 'transforms', + 'talairach.xfm') + return transform + + dstransform = pe.Node( + niu.Function(['subjects_dir', 'subject_id'], ['transform'], + getDSTransformPath), + name="PreDataSink_GetTransformPath") + reconall.connect([(inputspec, dstransform, + [('subjects_dir', 'subjects_dir'), ('subject_id', + 'subject_id')])]) + # add the data sink transfrom location to the headers + predatasink_orig = pe.Node(AddXFormToHeader(), name="PreDataSink_Orig") + predatasink_orig.inputs.copy_name = True + predatasink_orig.inputs.out_file = 'orig.mgz' + reconall.connect([(outputspec, predatasink_orig, [('orig', 'in_file')]), + (dstransform, predatasink_orig, [('transform', + 'transform')])]) + predatasink_orig_nu = pe.Node( + AddXFormToHeader(), name="PreDataSink_Orig_Nu") + predatasink_orig_nu.inputs.copy_name = True + predatasink_orig_nu.inputs.out_file = 'orig_nu.mgz' + reconall.connect( + [(outputspec, predatasink_orig_nu, [('orig_nu', 'in_file')]), + (dstransform, predatasink_orig_nu, [('transform', 'transform')])]) + predatasink_nu = pe.Node(AddXFormToHeader(), name="PreDataSink_Nu") + predatasink_nu.inputs.copy_name = True + predatasink_nu.inputs.out_file = 'nu.mgz' + reconall.connect([(outputspec, predatasink_nu, [('nu', 'in_file')]), + (dstransform, predatasink_nu, [('transform', + 'transform')])]) + + # Datasink outputs + datasink = pe.Node(DataSink(), name="DataSink") + datasink.inputs.parameterization = False + + reconall.connect([(inputspec, datasink, + [('subjects_dir', 'base_directory'), ('subject_id', + 'container')])]) + + # assign datasink inputs + reconall.connect([ + (predatasink_orig, datasink, [('out_file', 'mri.@orig')]), + (predatasink_orig_nu, datasink, [('out_file', 'mri.@orig_nu')]), + (predatasink_nu, datasink, [('out_file', 'mri.@nu')]), + (outputspec, datasink, [ + ('origvols', 'mri.orig'), + ('t2_raw', 'mri.orig.@t2raw'), + ('flair', 'mri.orig.@flair'), + ('rawavg', 'mri.@rawavg'), + ('talairach_auto', 'mri.transforms.@tal_auto'), + ('talairach', 'mri.transforms.@tal'), + ('t1', 'mri.@t1'), + ('brainmask_auto', 'mri.@brainmask_auto'), + ('brainmask', 'mri.@brainmask'), + ('braintemplate', 'mri.@braintemplate'), + ('tal_lta', 'mri.transforms.@tal_lta'), + ('norm', 'mri.@norm'), + ('ctrl_pts', 'mri.@ctrl_pts'), + ('tal_m3z', 'mri.transforms.@tal_m3z'), + ('nu_noneck', 'mri.@nu_noneck'), + ('talskull2', 'mri.transforms.@talskull2'), + ('aseg_noCC', 'mri.@aseg_noCC'), + ('cc_up', 'mri.transforms.@cc_up'), + ('aseg_auto', 'mri.@aseg_auto'), + ('aseg_presurf', 'mri.@aseg_presuf'), + ('brain', 'mri.@brain'), + ('brain_finalsurfs', 'mri.@brain_finalsurfs'), + ('wm_seg', 'mri.@wm_seg'), + ('wm_aseg', 'mri.@wm_aseg'), + ('wm', 'mri.@wm'), + ('filled', 'mri.@filled'), + ('ponscc_log', 'mri.@ponscc_log'), + ('lh_orig_nofix', 'surf.@lh_orig_nofix'), + ('lh_orig', 'surf.@lh_orig'), + ('lh_smoothwm_nofix', 'surf.@lh_smoothwm_nofix'), + ('lh_inflated_nofix', 'surf.@lh_inflated_nofix'), + ('lh_qsphere_nofix', 'surf.@lh_qsphere_nofix'), + ('lh_white', 'surf.@lh_white'), + ('lh_curv', 'surf.@lh_curv'), + ('lh_area', 'surf.@lh_area'), + ('lh_cortex', 'label.@lh_cortex'), + ('lh_smoothwm', 'surf.@lh_smoothwm'), + ('lh_sulc', 'surf.@lh_sulc'), + ('lh_inflated', 'surf.@lh_inflated'), + ('lh_white_H', 'surf.@lh_white_H'), + ('lh_white_K', 
'surf.@lh_white_K'), + ('lh_inflated_H', 'surf.@lh_inflated_H'), + ('lh_inflated_K', 'surf.@lh_inflated_K'), + ('lh_curv_stats', 'stats.@lh_curv_stats'), + ('rh_orig_nofix', 'surf.@rh_orig_nofix'), + ('rh_orig', 'surf.@rh_orig'), + ('rh_smoothwm_nofix', 'surf.@rh_smoothwm_nofix'), + ('rh_inflated_nofix', 'surf.@rh_inflated_nofix'), + ('rh_qsphere_nofix', 'surf.@rh_qsphere_nofix'), + ('rh_white', 'surf.@rh_white'), + ('rh_curv', 'surf.@rh_curv'), + ('rh_area', 'surf.@rh_area'), + ('rh_cortex', 'label.@rh_cortex'), + ('rh_smoothwm', 'surf.@rh_smoothwm'), + ('rh_sulc', 'surf.@rh_sulc'), + ('rh_inflated', 'surf.@rh_inflated'), + ('rh_white_H', 'surf.@rh_white_H'), + ('rh_white_K', 'surf.@rh_white_K'), + ('rh_inflated_H', 'surf.@rh_inflated_H'), + ('rh_inflated_K', 'surf.@rh_inflated_K'), + ('rh_curv_stats', 'stats.@rh_curv_stats'), + ('lh_aparc_annot_ctab', 'label.@aparc_annot_ctab'), + ('aseg', 'mri.@aseg'), + ('wmparc', 'mri.@wmparc'), + ('wmparc_stats', 'stats.@wmparc_stats'), + ('aseg_stats', 'stats.@aseg_stats'), + ('aparc_a2009s_aseg', 'mri.@aparc_a2009s_aseg'), + ('aparc_aseg', 'mri.@aparc_aseg'), + ('aseg_presurf_hypos', 'mri.@aseg_presurf_hypos'), + ('ribbon', 'mri.@ribbon'), + ('rh_ribbon', 'mri.@rh_ribbon'), + ('lh_ribbon', 'mri.@lh_ribbon'), + ('lh_sphere', 'surf.@lh_sphere'), + ('rh_sphere', 'surf.@rh_sphere'), + ('lh_sphere_reg', 'surf.@lh_sphere_reg'), + ('rh_sphere_reg', 'surf.@rh_sphere_reg'), + ('lh_jacobian_white', 'surf.@lh_jacobian_white'), + ('rh_jacobian_white', 'surf.@rh_jacobian_white'), + ('lh_avg_curv', 'surf.@lh_avg_curv'), + ('rh_avg_curv', 'surf.@rh_avg_curv'), + ('lh_aparc_annot', 'label.@lh_aparc_annot'), + ('rh_aparc_annot', 'label.@rh_aparc_annot'), + ('lh_area_pial', 'surf.@lh_area_pial'), + ('rh_area_pial', 'surf.@rh_area_pial'), + ('lh_curv_pial', 'surf.@lh_curv_pial'), + ('rh_curv_pial', 'surf.@rh_curv_pial'), + ('lh_pial', 'surf.@lh_pial'), + ('rh_pial', 'surf.@rh_pial'), + ('lh_thickness_pial', 'surf.@lh_thickness_pial'), + ('rh_thickness_pial', 'surf.@rh_thickness_pial'), + ('lh_area_mid', 'surf.@lh_area_mid'), + ('rh_area_mid', 'surf.@rh_area_mid'), + ('lh_volume', 'surf.@lh_volume'), + ('rh_volume', 'surf.@rh_volume'), + ('lh_aparc_annot_ctab', 'label.@lh_aparc_annot_ctab'), + ('rh_aparc_annot_ctab', 'label.@rh_aparc_annot_ctab'), + ('lh_aparc_stats', 'stats.@lh_aparc_stats'), + ('rh_aparc_stats', 'stats.@rh_aparc_stats'), + ('lh_aparc_pial_stats', 'stats.@lh_aparc_pial_stats'), + ('rh_aparc_pial_stats', 'stats.@rh_aparc_pial_stats'), + ('lh_aparc_a2009s_annot', 'label.@lh_aparc_a2009s_annot'), + ('rh_aparc_a2009s_annot', 'label.@rh_aparc_a2009s_annot'), + ('lh_aparc_a2009s_annot_ctab', + 'label.@lh_aparc_a2009s_annot_ctab'), + ('rh_aparc_a2009s_annot_ctab', + 'label.@rh_aparc_a2009s_annot_ctab'), + ('lh_aparc_a2009s_annot_stats', + 'stats.@lh_aparc_a2009s_annot_stats'), + ('rh_aparc_a2009s_annot_stats', + 'stats.@rh_aparc_a2009s_annot_stats'), + ('lh_aparc_DKTatlas40_annot', 'label.@lh_aparc_DKTatlas40_annot'), + ('rh_aparc_DKTatlas40_annot', 'label.@rh_aparc_DKTatlas40_annot'), + ('lh_aparc_DKTatlas40_annot_ctab', + 'label.@lh_aparc_DKTatlas40_annot_ctab'), + ('rh_aparc_DKTatlas40_annot_ctab', + 'label.@rh_aparc_DKTatlas40_annot_ctab'), + ('lh_aparc_DKTatlas40_annot_stats', + 'stats.@lh_aparc_DKTatlas40_annot_stats'), + ('rh_aparc_DKTatlas40_annot_stats', + 'stats.@rh_aparc_DKTatlas40_annot_stats'), + ('lh_wg_pct_mgh', 'surf.@lh_wg_pct_mgh'), + ('rh_wg_pct_mgh', 'surf.@rh_wg_pct_mgh'), + ('lh_wg_pct_stats', 'stats.@lh_wg_pct_stats'), + 
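# note: the '.@name' DataSink syntax stores each file directly in
+            # the named folder (mri, surf, label, stats, ...) rather than in
+            # a per-connection sub-folder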
('rh_wg_pct_stats', 'stats.@rh_wg_pct_stats'),
+            ('lh_pctsurfcon_log', 'log.@lh_pctsurfcon_log'),
+            ('rh_pctsurfcon_log', 'log.@rh_pctsurfcon_log'),
+            ('lh_BAMaps_stats', 'stats.@lh_BAMaps_stats'),
+            ('lh_color', 'label.@lh_color'),
+            ('lh_thresh_BAMaps_stats', 'stats.@lh_thresh_BAMaps_stats'),
+            ('lh_thresh_color', 'label.@lh_thresh_color'),
+            ('rh_BAMaps_stats', 'stats.@rh_BAMaps_stats'),
+            ('rh_color', 'label.@rh_color'),
+            ('rh_thresh_BAMaps_stats', 'stats.@rh_thresh_BAMaps_stats'),
+            ('rh_thresh_color', 'label.@rh_thresh_color'),
+            ('lh_BAMaps_labels', 'label.@lh_BAMaps_labels'),
+            ('lh_thresh_BAMaps_labels', 'label.@lh_thresh_BAMaps_labels'),
+            ('rh_BAMaps_labels', 'label.@rh_BAMaps_labels'),
+            ('rh_thresh_BAMaps_labels', 'label.@rh_thresh_BAMaps_labels'),
+            ('lh_BAMaps_annotation', 'label.@lh_BAMaps_annotation'),
+            ('lh_thresh_BAMaps_annotation',
+             'label.@lh_thresh_BAMaps_annotation'),
+            ('rh_BAMaps_annotation', 'label.@rh_BAMaps_annotation'),
+            ('rh_thresh_BAMaps_annotation',
+             'label.@rh_thresh_BAMaps_annotation'),
+        ]),
+    ])
+
+    # completion node
+    # since recon-all outputs so many files a completion node is added
+    # that will output the subject_id once the workflow has completed
+    def completemethod(datasinked_files, subject_id):
+        print("recon-all has finished executing for subject: {0}".format(
+            subject_id))
+        return subject_id
+
+    completion = pe.Node(
+        niu.Function(['datasinked_files', 'subject_id'], ['subject_id'],
+                     completemethod),
+        name="Completion")
+
+    # create a special identity interface for outputting the subject_id
+
+    postds_outputspec = pe.Node(
+        niu.IdentityInterface(['subject_id']), name="postdatasink_outputspec")
+
+    reconall.connect(
+        [(datasink, completion, [('out_file', 'datasinked_files')]),
+         (inputspec, completion, [('subject_id', 'subject_id')]),
+         (completion, postds_outputspec, [('subject_id', 'subject_id')])])
+
+    return reconall
diff --git a/nipype/workflows/smri/freesurfer/utils.py b/nipype/workflows/smri/freesurfer/utils.py
new file mode 100644
index 0000000000..40f1f205b6
--- /dev/null
+++ b/nipype/workflows/smri/freesurfer/utils.py
@@ -0,0 +1,498 @@
+# -*- coding: utf-8 -*-
+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+
+from ....pipeline import engine as pe
+from ....interfaces import fsl as fsl
+from ....interfaces import freesurfer as fs
+from ....interfaces import meshfix as mf
+from ....interfaces import io as nio
+from ....interfaces import utility as niu
+from ....algorithms import misc as misc
+from ....interfaces.utility import Function
+from ....workflows.misc.utils import region_list_from_volume, id_list_from_lookup_table
+import os
+
+
+def get_aparc_aseg(files):
+    """Return the aparc+aseg.mgz file"""
+    for name in files:
+        if 'aparc+aseg' in name:
+            return name
+    raise ValueError('aparc+aseg.mgz not found')
+
+
+def create_getmask_flow(name='getmask', dilate_mask=True):
+    """Registers a source file to freesurfer space and creates a brain mask
+    in source space
+
+    Requires fsl tools for initializing registration
+
+    Parameters
+    ----------
+
+    name : string
+        name of workflow
+    dilate_mask : boolean
+        indicates whether to dilate mask or not
+
+    Example
+    -------
+
+    >>> getmask = create_getmask_flow()
+    >>> getmask.inputs.inputspec.source_file = 'mean.nii'
+    >>> getmask.inputs.inputspec.subject_id = 's1'
+    >>> getmask.inputs.inputspec.subjects_dir = '.'
+ >>> getmask.inputs.inputspec.contrast_type = 't2' + + + Inputs:: + + inputspec.source_file : reference image for mask generation + inputspec.subject_id : freesurfer subject id + inputspec.subjects_dir : freesurfer subjects directory + inputspec.contrast_type : MR contrast of reference image + + Outputs:: + + outputspec.mask_file : binary mask file in reference image space + outputspec.reg_file : registration file that maps reference image to + freesurfer space + outputspec.reg_cost : cost of registration (useful for detecting misalignment) + """ + """ + Initialize the workflow + """ + + getmask = pe.Workflow(name=name) + """ + Define the inputs to the workflow. + """ + + inputnode = pe.Node( + niu.IdentityInterface(fields=[ + 'source_file', 'subject_id', 'subjects_dir', 'contrast_type' + ]), + name='inputspec') + """ + Define all the nodes of the workflow: + + fssource: used to retrieve aseg.mgz + threshold : binarize aseg + register : coregister source file to freesurfer space + voltransform: convert binarized aseg to source file space + """ + + fssource = pe.Node(nio.FreeSurferSource(), name='fssource') + threshold = pe.Node(fs.Binarize(min=0.5, out_type='nii'), name='threshold') + register = pe.MapNode( + fs.BBRegister(init='fsl'), iterfield=['source_file'], name='register') + voltransform = pe.MapNode( + fs.ApplyVolTransform(inverse=True), + iterfield=['source_file', 'reg_file'], + name='transform') + """ + Connect the nodes + """ + + getmask.connect([(inputnode, fssource, [ + ('subject_id', 'subject_id'), ('subjects_dir', 'subjects_dir') + ]), (inputnode, register, + [('source_file', 'source_file'), ('subject_id', 'subject_id'), + ('subjects_dir', 'subjects_dir'), + ('contrast_type', 'contrast_type')]), (inputnode, voltransform, [ + ('subjects_dir', 'subjects_dir'), ('source_file', 'source_file') + ]), (fssource, threshold, [(('aparc_aseg', get_aparc_aseg), + 'in_file')]), + (register, voltransform, [('out_reg_file', 'reg_file')]), + (threshold, voltransform, [('binary_file', + 'target_file')])]) + """ + Add remaining nodes and connections + + dilate : dilate the transformed file in source space + threshold2 : binarize transformed file + """ + + threshold2 = pe.MapNode( + fs.Binarize(min=0.5, out_type='nii'), + iterfield=['in_file'], + name='threshold2') + if dilate_mask: + threshold2.inputs.dilate = 1 + getmask.connect([(voltransform, threshold2, [('transformed_file', + 'in_file')])]) + """ + Setup an outputnode that defines relevant inputs of the workflow. 
+ """ + + outputnode = pe.Node( + niu.IdentityInterface(fields=["mask_file", "reg_file", "reg_cost"]), + name="outputspec") + getmask.connect([ + (register, outputnode, [("out_reg_file", "reg_file")]), + (register, outputnode, [("min_cost_file", "reg_cost")]), + (threshold2, outputnode, [("binary_file", "mask_file")]), + ]) + return getmask + + +def create_get_stats_flow(name='getstats', withreg=False): + """Retrieves stats from labels + + Parameters + ---------- + + name : string + name of workflow + withreg : boolean + indicates whether to register source to label + + Example + ------- + + + Inputs:: + + inputspec.source_file : reference image for mask generation + inputspec.label_file : label file from which to get ROIs + + (optionally with registration) + inputspec.reg_file : bbreg file (assumes reg from source to label + inputspec.inverse : boolean whether to invert the registration + inputspec.subjects_dir : freesurfer subjects directory + + Outputs:: + + outputspec.stats_file : stats file + """ + """ + Initialize the workflow + """ + + getstats = pe.Workflow(name=name) + """ + Define the inputs to the workflow. + """ + + if withreg: + inputnode = pe.Node( + niu.IdentityInterface(fields=[ + 'source_file', 'label_file', 'reg_file', 'subjects_dir' + ]), + name='inputspec') + else: + inputnode = pe.Node( + niu.IdentityInterface(fields=['source_file', 'label_file']), + name='inputspec') + + statnode = pe.MapNode( + fs.SegStats(), + iterfield=['segmentation_file', 'in_file'], + name='segstats') + """ + Convert between source and label spaces if registration info is provided + + """ + if withreg: + voltransform = pe.MapNode( + fs.ApplyVolTransform(inverse=True), + iterfield=['source_file', 'reg_file'], + name='transform') + getstats.connect(inputnode, 'reg_file', voltransform, 'reg_file') + getstats.connect(inputnode, 'source_file', voltransform, 'source_file') + getstats.connect(inputnode, 'label_file', voltransform, 'target_file') + getstats.connect(inputnode, 'subjects_dir', voltransform, + 'subjects_dir') + + def switch_labels(inverse, transform_output, source_file, label_file): + if inverse: + return transform_output, source_file + else: + return label_file, transform_output + + chooser = pe.MapNode( + niu.Function( + input_names=[ + 'inverse', 'transform_output', 'source_file', 'label_file' + ], + output_names=['label_file', 'source_file'], + function=switch_labels), + iterfield=['transform_output', 'source_file'], + name='chooser') + getstats.connect(inputnode, 'source_file', chooser, 'source_file') + getstats.connect(inputnode, 'label_file', chooser, 'label_file') + getstats.connect(inputnode, 'inverse', chooser, 'inverse') + getstats.connect(voltransform, 'transformed_file', chooser, + 'transform_output') + getstats.connect(chooser, 'label_file', statnode, 'segmentation_file') + getstats.connect(chooser, 'source_file', statnode, 'in_file') + else: + getstats.connect(inputnode, 'label_file', statnode, + 'segmentation_file') + getstats.connect(inputnode, 'source_file', statnode, 'in_file') + """ + Setup an outputnode that defines relevant inputs of the workflow. 
+ """ + + outputnode = pe.Node( + niu.IdentityInterface(fields=["stats_file"]), name="outputspec") + getstats.connect([ + (statnode, outputnode, [("summary_file", "stats_file")]), + ]) + return getstats + + +def create_tessellation_flow(name='tessellate', out_format='stl'): + """Tessellates the input subject's aseg.mgz volume and returns + the surfaces for each region in stereolithic (.stl) format + + Example + ------- + >>> from nipype.workflows.smri.freesurfer import create_tessellation_flow + >>> tessflow = create_tessellation_flow() + >>> tessflow.inputs.inputspec.subject_id = 'subj1' + >>> tessflow.inputs.inputspec.subjects_dir = '.' + >>> tessflow.inputs.inputspec.lookup_file = 'FreeSurferColorLUT.txt' # doctest: +SKIP + >>> tessflow.run() # doctest: +SKIP + + + Inputs:: + + inputspec.subject_id : freesurfer subject id + inputspec.subjects_dir : freesurfer subjects directory + inputspec.lookup_file : lookup file from freesurfer directory + + Outputs:: + + outputspec.meshes : output region meshes in (by default) stereolithographic (.stl) format + """ + """ + Initialize the workflow + """ + + tessflow = pe.Workflow(name=name) + """ + Define the inputs to the workflow. + """ + + inputnode = pe.Node( + niu.IdentityInterface( + fields=['subject_id', 'subjects_dir', 'lookup_file']), + name='inputspec') + """ + Define all the nodes of the workflow: + + fssource: used to retrieve aseg.mgz + mri_convert : converts aseg.mgz to aseg.nii + tessellate : tessellates regions in aseg.mgz + surfconvert : converts regions to stereolithographic (.stl) format + smoother: smooths the tessellated regions + + """ + + fssource = pe.Node(nio.FreeSurferSource(), name='fssource') + volconvert = pe.Node(fs.MRIConvert(out_type='nii'), name='volconvert') + tessellate = pe.MapNode( + fs.MRIMarchingCubes(), + iterfield=['label_value', 'out_file'], + name='tessellate') + surfconvert = pe.MapNode( + fs.MRIsConvert(out_datatype='stl'), + iterfield=['in_file'], + name='surfconvert') + smoother = pe.MapNode( + mf.MeshFix(), iterfield=['in_file1'], name='smoother') + if out_format == 'gii': + stl_to_gifti = pe.MapNode( + fs.MRIsConvert(out_datatype=out_format), + iterfield=['in_file'], + name='stl_to_gifti') + smoother.inputs.save_as_stl = True + smoother.inputs.laplacian_smoothing_steps = 1 + + region_list_from_volume_interface = Function( + input_names=["in_file"], + output_names=["region_list"], + function=region_list_from_volume) + + id_list_from_lookup_table_interface = Function( + input_names=["lookup_file", "region_list"], + output_names=["id_list"], + function=id_list_from_lookup_table) + + region_list_from_volume_node = pe.Node( + interface=region_list_from_volume_interface, + name='region_list_from_volume_node') + id_list_from_lookup_table_node = pe.Node( + interface=id_list_from_lookup_table_interface, + name='id_list_from_lookup_table_node') + """ + Connect the nodes + """ + + tessflow.connect([ + (inputnode, fssource, [('subject_id', 'subject_id'), + ('subjects_dir', 'subjects_dir')]), + (fssource, volconvert, [('aseg', 'in_file')]), + (volconvert, region_list_from_volume_node, [('out_file', 'in_file')]), + (region_list_from_volume_node, tessellate, [('region_list', + 'label_value')]), + (region_list_from_volume_node, id_list_from_lookup_table_node, + [('region_list', 'region_list')]), + (inputnode, id_list_from_lookup_table_node, [('lookup_file', + 'lookup_file')]), + (id_list_from_lookup_table_node, tessellate, [('id_list', + 'out_file')]), + (fssource, tessellate, [('aseg', 'in_file')]), + (tessellate, 
surfconvert, [('surface', 'in_file')]), + (surfconvert, smoother, [('converted', 'in_file1')]), + ]) + """ + Setup an outputnode that defines relevant inputs of the workflow. + """ + + outputnode = pe.Node( + niu.IdentityInterface(fields=["meshes"]), name="outputspec") + + if out_format == 'gii': + tessflow.connect([ + (smoother, stl_to_gifti, [("mesh_file", "in_file")]), + ]) + tessflow.connect([ + (stl_to_gifti, outputnode, [("converted", "meshes")]), + ]) + else: + tessflow.connect([ + (smoother, outputnode, [("mesh_file", "meshes")]), + ]) + return tessflow + + +def copy_files(in_files, out_files): + """ + Create a function to copy a file that can be modified by a following node + without changing the original file + """ + import shutil + import sys + if len(in_files) != len(out_files): + print( + "ERROR: Length of input files must be identical to the length of " + + "outrput files to be copied") + sys.exit(-1) + for i, in_file in enumerate(in_files): + out_file = out_files[i] + print("copying {0} to {1}".format(in_file, out_file)) + shutil.copy(in_file, out_file) + return out_files + + +def copy_file(in_file, out_file=None): + """ + Create a function to copy a file that can be modified by a following node + without changing the original file. + """ + import os + import shutil + if out_file is None: + out_file = os.path.join(os.getcwd(), os.path.basename(in_file)) + if type(in_file) is list and len(in_file) == 1: + in_file = in_file[0] + out_file = os.path.abspath(out_file) + in_file = os.path.abspath(in_file) + print("copying {0} to {1}".format(in_file, out_file)) + shutil.copy(in_file, out_file) + return out_file + + +def mkdir_p(path): + import errno + import os + try: + os.makedirs(path) + except OSError as exc: # Python >2.5 + if exc.errno == errno.EEXIST and os.path.isdir(path): + pass + else: + raise + + +def getdefaultconfig(exitonfail=False, rb_date="2014-08-21"): + config = { + 'custom_atlas': None, + 'cw256': False, + 'field_strength': '1.5T', + 'fs_home': checkenv(exitonfail), + 'longitudinal': False, + 'long_base': None, + 'openmp': None, + 'plugin_args': None, + 'qcache': False, + 'queue': None, + 'recoding_file': None, + 'src_subject_id': 'fsaverage', + 'th3': True + } + + config['src_subject_dir'] = os.path.join(config['fs_home'], 'subjects', + config['src_subject_id']) + config['awk_file'] = os.path.join(config['fs_home'], 'bin', + 'extract_talairach_avi_QA.awk') + config['registration_template'] = os.path.join( + config['fs_home'], 'average', 'RB_all_{0}.gca'.format(rb_date)) + config['registration_template_withskull'] = os.path.join( + config['fs_home'], 'average', + 'RB_all_withskull_{0}.gca'.format(rb_date)) + for hemi in ('lh', 'rh'): + config['{0}_atlas'.format(hemi)] = os.path.join( + config['fs_home'], 'average', + '{0}.average.curvature.filled.buckner40.tif'.format(hemi)) + config['{0}_classifier'.format(hemi)] = os.path.join( + config['fs_home'], 'average', + '{0}.curvature.buckner40.filled.desikan_killiany.2010-03-25.gcs'. 
+ format(hemi)) + config['{0}_classifier2'.format(hemi)] = os.path.join( + config['fs_home'], 'average', + '{0}.destrieux.simple.2009-07-29.gcs'.format(hemi)) + config['{0}_classifier3'.format(hemi)] = os.path.join( + config['fs_home'], 'average', '{0}.DKTatlas40.gcs'.format(hemi)) + config['LookUpTable'] = os.path.join(config['fs_home'], 'ASegStatsLUT.txt') + config['WMLookUpTable'] = os.path.join(config['fs_home'], + 'WMParcStatsLUT.txt') + config['AvgColorTable'] = os.path.join(config['fs_home'], 'average', + 'colortable_BA.txt') + + return config + + +def checkenv(exitonfail=False): + """Check for the necessary FS environment variables""" + import sys + fs_home = os.environ.get('FREESURFER_HOME') + path = os.environ.get('PATH') + print("FREESURFER_HOME: {0}".format(fs_home)) + if fs_home is None: + msg = "please set FREESURFER_HOME before running the workflow" + elif not os.path.isdir(fs_home): + msg = "FREESURFER_HOME must be set to a valid directory before running this workflow" + elif os.path.join(fs_home, 'bin') not in path.replace('//', '/'): + print(path) + msg = "Could not find necessary executable in path" + setupscript = os.path.join(fs_home, 'SetUpFreeSurfer.sh') + if os.path.isfile(setupscript): + print("Please source the setup script before running the workflow:" + + "\nsource {0}".format(setupscript)) + else: + print( + "Please ensure that FREESURFER_HOME is set to a valid fs " + + "directory and source the necessary SetUpFreeSurfer.sh script before running " + + "this workflow") + else: + return fs_home + + if exitonfail: + print("ERROR: " + msg) + sys.exit(2) + else: + print("Warning: " + msg) diff --git a/nipype/workflows/smri/niftyreg/__init__.py b/nipype/workflows/smri/niftyreg/__init__.py new file mode 100644 index 0000000000..b9d0c9c85b --- /dev/null +++ b/nipype/workflows/smri/niftyreg/__init__.py @@ -0,0 +1,5 @@ +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: + +from .groupwise import (create_groupwise_average, create_nonlinear_gw_step, + create_linear_gw_step) diff --git a/nipype/workflows/smri/niftyreg/groupwise.py b/nipype/workflows/smri/niftyreg/groupwise.py new file mode 100644 index 0000000000..fd8d25541b --- /dev/null +++ b/nipype/workflows/smri/niftyreg/groupwise.py @@ -0,0 +1,384 @@ +# -*- coding: utf-8 -*- +# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- +# vi: set ft=python sts=4 ts=4 sw=4 et: +""" +Example of registration workflows using niftyreg, useful for a variety of +pipelines. Including linear and non-linear image co-registration +""" + +from __future__ import (print_function, division, unicode_literals, + absolute_import) +from builtins import str, range +from ....interfaces import utility as niu +from ....interfaces import niftyreg as niftyreg +from ....pipeline import engine as pe + + +def create_linear_gw_step(name="linear_gw_niftyreg", + demean=True, + linear_options_hash=None, + use_mask=False, + verbose=False): + """ + Creates a workflow that performs linear co-registration of a set of images + using RegAladin, producing an average image and a set of affine + transformation matrices linking each of the floating images to the average. 
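+    Each floating image is registered to the reference with a RegAladin
+    MapNode, and the resulting images and transforms are then combined into
+    a new average with RegAverage.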
+
+    Inputs::
+
+        inputspec.in_files - The input files to be registered
+        inputspec.ref_file - The initial reference image that the input files
+                             are registered to
+        inputspec.rmask_file - Mask of the reference image
+
+    Outputs::
+
+        outputspec.average_image - The average image
+        outputspec.trans_files - The affine transformation files
+
+    Optional arguments::
+
+        linear_options_hash - An options dictionary containing a list of
+                              parameters for RegAladin that take the same
+                              form as given in the interface (default None)
+        demean - Selects whether to demean the transformation matrices when
+                 performing the averaging (default True)
+
+    Example
+    -------
+
+    >>> from nipype.workflows.smri.niftyreg import create_linear_gw_step
+    >>> lgw = create_linear_gw_step('my_linear_coreg')  # doctest: +SKIP
+    >>> lgw.inputs.inputspec.in_files = [
+    ...     'file1.nii.gz', 'file2.nii.gz']  # doctest: +SKIP
+    >>> lgw.inputs.inputspec.ref_file = ['ref.nii.gz']  # doctest: +SKIP
+    >>> lgw.run()  # doctest: +SKIP
+
+    """
+    # Create the sub workflow
+    workflow = pe.Workflow(name=name)
+    workflow.base_output_dir = name
+
+    # We need to create an input node for the workflow
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=['in_files', 'ref_file', 'rmask_file']),
+        name='inputspec')
+
+    if linear_options_hash is None:
+        linear_options_hash = dict()
+
+    # Linearly register each of the images to the reference
+    lin_reg = pe.MapNode(
+        interface=niftyreg.RegAladin(**linear_options_hash),
+        name="lin_reg",
+        iterfield=['flo_file'])
+
+    if verbose is False:
+        lin_reg.inputs.verbosity_off_flag = True
+
+    # Average the images
+    ave_ims = pe.Node(interface=niftyreg.RegAverage(), name="ave_ims")
+
+    # We have a new average image and the affine
+    # transformations, which are returned as an output node.
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['average_image', 'trans_files']),
+        name='outputspec')
+
+    # Connect the inputs to the lin_reg node
+    workflow.connect([(inputnode, lin_reg, [('ref_file', 'ref_file')]),
+                      (inputnode, lin_reg, [('in_files', 'flo_file')])])
+    if use_mask:
+        workflow.connect(inputnode, 'rmask_file', lin_reg, 'rmask_file')
+
+    if demean:
+        workflow.connect([(inputnode, ave_ims, [('ref_file',
+                                                 'demean1_ref_file')]),
+                          (lin_reg, ave_ims, [('avg_output', 'warp_files')])])
+    else:
+        workflow.connect(lin_reg, 'res_file', ave_ims, 'avg_files')
+
+    # Connect up the output node
+    workflow.connect([(lin_reg, outputnode, [('aff_file', 'trans_files')]),
+                      (ave_ims, outputnode, [('out_file', 'average_image')])])
+
+    return workflow
+
+
+def create_nonlinear_gw_step(name="nonlinear_gw_niftyreg",
+                             demean=True,
+                             nonlinear_options_hash=None,
+                             initial_affines=False,
+                             use_mask=False,
+                             verbose=False):
+    """
+    Creates a workflow that performs non-linear co-registration of a set of
+    images using RegF3D, producing a non-linear average image and a set of
+    cpp transformations linking each of the floating images to the average.
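+    When initial_affines is True, the affine transform of each floating
+    image (typically produced by a preceding linear step) is passed to
+    RegF3D to initialise the corresponding non-linear registration.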
+
+    Inputs::
+
+        inputspec.in_files - The input files to be registered
+        inputspec.ref_file - The initial reference image that the input files
+                             are registered to
+        inputspec.rmask_file - Mask of the reference image
+        inputspec.input_aff_files - Initial affine transformation files
+
+    Outputs::
+
+        outputspec.average_image - The average image
+        outputspec.trans_files - The bspline transformation files
+
+    Optional arguments::
+
+        nonlinear_options_hash - An options dictionary containing a list of
+                                 parameters for RegF3D that take the same
+                                 form as given in the interface (default None)
+        initial_affines - Selects whether to iterate over initial affine
+                          images, which we generally won't have (default False)
+
+    Example
+    -------
+    >>> from nipype.workflows.smri.niftyreg import create_nonlinear_gw_step
+    >>> nlc = create_nonlinear_gw_step('nonlinear_coreg')  # doctest: +SKIP
+    >>> nlc.inputs.inputspec.in_files = [
+    ...     'file1.nii.gz', 'file2.nii.gz']  # doctest: +SKIP
+    >>> nlc.inputs.inputspec.ref_file = ['ref.nii.gz']  # doctest: +SKIP
+    >>> nlc.run()  # doctest: +SKIP
+
+    """
+
+    # Create the workflow
+    workflow = pe.Workflow(name=name)
+    workflow.base_output_dir = name
+
+    # We need to create an input node for the workflow
+    inputnode = pe.Node(
+        niu.IdentityInterface(
+            fields=['in_files', 'ref_file', 'rmask_file', 'input_aff_files']),
+        name='inputspec')
+
+    if nonlinear_options_hash is None:
+        nonlinear_options_hash = dict()
+
+    # Non-rigidly register each of the images to the average.
+    # flo_file can take a list of files.
+    # We need to be able to iterate over input affine files, but what about
+    # the cases where we have no input affine files? Empty strings are not
+    # valid filenames, and undefined fields can not be iterated over.
+    # The current simple solution, as this is not generally required, is to
+    # use a flag which specifies whether to iterate
+    if initial_affines:
+        nonlin_reg = pe.MapNode(
+            interface=niftyreg.RegF3D(**nonlinear_options_hash),
+            name="nonlin_reg",
+            iterfield=['flo_file', 'aff_file'])
+    else:
+        nonlin_reg = pe.MapNode(
+            interface=niftyreg.RegF3D(**nonlinear_options_hash),
+            name="nonlin_reg",
+            iterfield=['flo_file'])
+
+    if verbose is False:
+        nonlin_reg.inputs.verbosity_off_flag = True
+
+    # Average the images
+    ave_ims = pe.Node(interface=niftyreg.RegAverage(), name="ave_ims")
+
+    # We have a new centered average image and the cpp transformations,
+    # which are returned as an output node.
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['average_image', 'trans_files']),
+        name='outputspec')
+
+    # Connect the inputs to the nonlin_reg node, which is split over in_files
+    workflow.connect([(inputnode, nonlin_reg, [('in_files', 'flo_file')]),
+                      (inputnode, nonlin_reg, [('ref_file', 'ref_file')])])
+
+    if use_mask:
+        workflow.connect(inputnode, 'rmask_file', nonlin_reg, 'rmask_file')
+
+    # If we have initial affine transforms, we need to connect them in
+    if initial_affines:
+        workflow.connect(inputnode, 'input_aff_files', nonlin_reg, 'aff_file')
+
+    if demean:
+        if 'vel_flag' in list(nonlinear_options_hash.keys()) and \
+           nonlinear_options_hash['vel_flag'] is True and \
+           initial_affines:
+            workflow.connect(inputnode, 'ref_file', ave_ims,
+                             'demean3_ref_file')
+        else:
+            workflow.connect(inputnode, 'ref_file', ave_ims,
+                             'demean2_ref_file')
+        workflow.connect(nonlin_reg, 'avg_output', ave_ims, 'warp_files')
+    else:
+        workflow.connect(nonlin_reg, 'res_file', ave_ims, 'avg_files')
+
+    # Connect up the output node
+    workflow.connect([(nonlin_reg, outputnode, [('cpp_file', 'trans_files')]),
+                      (ave_ims, outputnode, [('out_file', 'average_image')])])
+
+    return workflow
+
+
+# Creates an atlas image by iterative registration. An initial reference image
+# can be provided, otherwise one will be made.
+def create_groupwise_average(name="atlas_creation",
+                             itr_rigid=3,
+                             itr_affine=3,
+                             itr_non_lin=5,
+                             linear_options_hash=None,
+                             nonlinear_options_hash=None,
+                             use_mask=False,
+                             verbose=False):
+    """
+    Create the overall workflow that embeds all the rigid, affine and
+    non-linear components.
+
+    Inputs::
+
+        inputspec.in_files - The input files to be registered
+        inputspec.ref_file - The initial reference image that the input files
+                             are registered to
+        inputspec.rmask_file - Mask of the reference image
+
+    Outputs::
+
+        outputspec.average_image - The average image
+        outputspec.trans_files - The transformation files
+
+    Example
+    -------
+
+    >>> from nipype.workflows.smri.niftyreg import create_groupwise_average
+    >>> node = create_groupwise_average('groupwise_av')  # doctest: +SKIP
+    >>> node.inputs.inputspec.in_files = [
+    ...     'file1.nii.gz', 'file2.nii.gz']  # doctest: +SKIP
+    >>> node.inputs.inputspec.ref_file = ['ref.nii.gz']  # doctest: +SKIP
+    >>> node.inputs.inputspec.rmask_file = ['mask.nii.gz']  # doctest: +SKIP
+    >>> node.run()  # doctest: +SKIP
+
+    """
+    # Create workflow
+    workflow = pe.Workflow(name=name)
+
+    if linear_options_hash is None:
+        linear_options_hash = dict()
+
+    if nonlinear_options_hash is None:
+        nonlinear_options_hash = dict()
+
+    # Create the input and output node
+    inputnode = pe.Node(
+        niu.IdentityInterface(fields=['in_files', 'ref_file', 'rmask_file']),
+        name='inputspec')
+
+    outputnode = pe.Node(
+        niu.IdentityInterface(fields=['average_image', 'trans_files']),
+        name='outputspec')
+
+    # Create lists to store the rigid, affine and non-linear sub-workflows
+    lin_workflows = []
+    nonlin_workflows = []
+
+    # Create the linear groupwise registration sub-workflows
+    for i in range(itr_rigid + itr_affine):
+        # Define whether the sub-workflow is rigid or affine
+        if i >= itr_rigid:
+            linear_options_hash['rig_only_flag'] = False
+        else:
+            linear_options_hash['rig_only_flag'] = True
+
+        # Define whether the average image should be demeaned to ensure we
+        # have a barycenter
+        if (i < itr_rigid) or (i == (itr_rigid + itr_affine - 1)):
+            demean_arg = False
+        else:
+            demean_arg = True
+
+        # Create the rigid or affine sub-workflow and add it to the relevant
+        # list
+        wf = create_linear_gw_step(
+            name='lin_reg' + str(i),
+            linear_options_hash=linear_options_hash,
+            demean=demean_arg,
+            verbose=verbose)
+        lin_workflows.append(wf)
+
+        # Connect up the input data to the workflow
+        workflow.connect(inputnode, 'in_files', wf, 'inputspec.in_files')
+        if use_mask:
+            workflow.connect(inputnode, 'rmask_file', wf,
+                             'inputspec.rmask_file')
+        # If it exists, connect the previous workflow to the current one
+        if i == 0:
+            workflow.connect(inputnode, 'ref_file', wf, 'inputspec.ref_file')
+        else:
+            workflow.connect(lin_workflows[i - 1], 'outputspec.average_image',
+                             wf, 'inputspec.ref_file')
+
+    demean_arg = True
+    # Initialise here so the flag is defined even when no linear
+    # sub-workflows were requested
+    initial_affines_arg = False
+
+    # Create the nonlinear groupwise registration sub-workflows
+    for i in range(itr_non_lin):
+
+        if len(lin_workflows) > 0:
+            initial_affines_arg = True
+        if i == (itr_non_lin - 1):
+            demean_arg = False
+
+        wf = create_nonlinear_gw_step(
+            name='nonlin' + str(i),
+            demean=demean_arg,
+            initial_affines=initial_affines_arg,
+            nonlinear_options_hash=nonlinear_options_hash,
+            verbose=verbose)
+
+        # Connect up the input data to the workflows
+        workflow.connect(inputnode, 'in_files', wf, 'inputspec.in_files')
+        if use_mask:
+            workflow.connect(inputnode, 'rmask_file', wf,
+                             'inputspec.rmask_file')
+
+        if initial_affines_arg:
+            # Take the final linear registration results and use them to
+            # initialise the non-rigid registration
+            workflow.connect(lin_workflows[-1], 'outputspec.trans_files', wf,
+                             'inputspec.input_aff_files')
+
+        if i == 0:
+            if len(lin_workflows) > 0:
+                workflow.connect(lin_workflows[-1],
+                                 'outputspec.average_image',
+                                 wf, 'inputspec.ref_file')
+            else:
+                workflow.connect(inputnode, 'ref_file', wf,
+                                 'inputspec.ref_file')
+        else:
+            workflow.connect(nonlin_workflows[i - 1],
+                             'outputspec.average_image', wf,
+                             'inputspec.ref_file')
+
+        nonlin_workflows.append(wf)
+
+    # Set up the last workflow
+    lw = None
+    if len(nonlin_workflows) > 0:
+        lw = nonlin_workflows[-1]
+    elif len(lin_workflows) > 0:
+        lw = lin_workflows[-1]
+
+    # Connect the data to return
+    workflow.connect(
+        [(lw, outputnode, [('outputspec.average_image', 'average_image')]),
+         (lw, outputnode, [('outputspec.trans_files', 'trans_files')])])
+
+    return workflow
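A minimal usage sketch of the groupwise atlas workflow added above, assuming
NiftyReg is installed and on the path. The subject filenames, the working
directory, and the 'vel_flag' setting are illustrative placeholders, not part
of this patch; the factory function, its parameters, and the inputspec /
outputspec field names are those defined in the code above.

    # A sketch under assumed inputs -- 'sub01_T1.nii.gz', 'sub02_T1.nii.gz'
    # and 'initial_ref.nii.gz' are placeholder filenames, not shipped data.
    from nipype.workflows.smri.niftyreg import create_groupwise_average

    gw = create_groupwise_average(name='atlas_creation',
                                  itr_rigid=3, itr_affine=3, itr_non_lin=5,
                                  # assumption: velocity-field F3D, which
                                  # (with initial affines) selects the
                                  # demean3_ref_file averaging branch
                                  nonlinear_options_hash={'vel_flag': True})
    gw.inputs.inputspec.in_files = ['sub01_T1.nii.gz', 'sub02_T1.nii.gz']
    gw.inputs.inputspec.ref_file = 'initial_ref.nii.gz'
    gw.base_dir = '/tmp/groupwise_atlas'  # scratch space for sub-workflows
    gw.run()
    # The atlas is exposed as outputspec.average_image, and the per-image
    # transforms as outputspec.trans_files.

Because demean_arg is switched off on the final linear and non-linear
iterations, the last averaging steps resample onto the current reference
rather than demeaning, so the returned average is the converged atlas.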
diff --git a/nipype/workflows/warp/__init__.py b/nipype/workflows/warp/__init__.py new file mode 100644 index 0000000000..40a96afc6f --- /dev/null +++ b/nipype/workflows/warp/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- From a3a498bd6c086b44d4dc83aa534fb4226d72facb Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 14:07:18 +0200 Subject: [PATCH 04/21] Changed Epi_Reg --- nipype/interfaces/fsl/epi.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 5b36f2cb5e..0d1b36e8f9 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -955,8 +955,6 @@ class EpiRegOutputSpec(TraitedSpec): structural space') wmseg = File( exists=True, desc='white matter segmentation used in flirt bbr') - seg = File( - exists=True, desc='wm/gm/csf segmentation used in flirt bbr') wmedge = File(exists=True, desc='white matter edges for visualization') From 1b183deddfc51f5fbe6976f4fe2da6d1787c1954 Mon Sep 17 00:00:00 2001 From: Leon Weninger <18707626+weningerleon@users.noreply.github.com> Date: Thu, 26 Jul 2018 14:19:23 +0200 Subject: [PATCH 05/21] Back to original .gitignore --- .gitignore | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index bd743815d3..081bb9303b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,31 @@ -* -!./nipype/* +-/build +-/dist +-/nipype.egg-info +-/MANIFEST +-/nipype/build +-/nipype/nipype.egg-info +-/doc/_build +-/doc/preproc +-/doc/users/examples +-/doc/api/generated +-*.pyc +-*.so +-.project +-.settings +-.pydevproject +-.eggs +-.idea/ +-/documentation.zip +-.DS_Store +-nipype/testing/data/von-ray_errmap.nii.gz +-nipype/testing/data/von_errmap.nii.gz +-nipype/testing/data/.proc* +-crash*.pklz +-.coverage +-htmlcov/ +-__pycache__/ +-*~ +-.*.swp +-.ipynb_checkpoints/ +-.ruby-version +-.pytest_cache From 7b0b2dc9b6c61eb646f807c65103cc8fc9c8f625 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 14:54:47 +0200 Subject: [PATCH 06/21] Change Epi_Reg --- nipype/interfaces/fsl/epi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 0d1b36e8f9..08607e455e 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -955,6 +955,8 @@ class EpiRegOutputSpec(TraitedSpec): structural space') wmseg = File( exists=True, desc='white matter segmentation used in flirt bbr') + seg = File( + exists=True, desc='white matter, gray matter, csf segmentation') wmedge = File(exists=True, desc='white matter edges for visualization') From 25ce976a95bc4aba8ddbb224ea93f97c7dddf02e Mon Sep 17 00:00:00 2001 From: Leon Weninger <18707626+weningerleon@users.noreply.github.com> Date: Thu, 26 Jul 2018 19:10:22 +0200 Subject: [PATCH 07/21] Add segmentation output EpiReg The command EpiReg also produces a segmentation in white matter, gray matter, and csf which can now be accessed in nipype --- nipype/interfaces/fsl/epi.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index e45a94ed2c..97acba45dc 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -955,6 +955,8 @@ class EpiRegOutputSpec(TraitedSpec): structural space') wmseg = File( exists=True, desc='white matter segmentation used in flirt bbr') + seg = File( + exists=True, desc='white matter, gray matter, csf segmentation') wmedge = File(exists=True, desc='white matter 
edges for visualization') @@ -1021,7 +1023,9 @@ def _list_outputs(self): os.getcwd(), self.inputs.out_base + '_fast_wmedge.nii.gz') outputs['wmseg'] = os.path.join( os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz') - + outputs['seg'] = os.path.join( + os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz') + return outputs From bdc66d99363a2e219021fa234cb427b01909483a Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 19:50:52 +0200 Subject: [PATCH 08/21] ? --- .gitignore | 62 ++++++++++++++++++------------------ nipype/interfaces/fsl/epi.py | 16 ++++++---- 2 files changed, 41 insertions(+), 37 deletions(-) diff --git a/.gitignore b/.gitignore index 4213d07a68..081bb9303b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,31 +1,31 @@ -/build -/dist -/nipype.egg-info -/MANIFEST -/nipype/build -/nipype/nipype.egg-info -/doc/_build -/doc/preproc -/doc/users/examples -/doc/api/generated -*.pyc -*.so -.project -.settings -.pydevproject -.eggs -.idea/ -/documentation.zip -.DS_Store -nipype/testing/data/von-ray_errmap.nii.gz -nipype/testing/data/von_errmap.nii.gz -nipype/testing/data/.proc* -crash*.pklz -.coverage -htmlcov/ -__pycache__/ -*~ -.*.swp -.ipynb_checkpoints/ -.ruby-version -.pytest_cache +-/build +-/dist +-/nipype.egg-info +-/MANIFEST +-/nipype/build +-/nipype/nipype.egg-info +-/doc/_build +-/doc/preproc +-/doc/users/examples +-/doc/api/generated +-*.pyc +-*.so +-.project +-.settings +-.pydevproject +-.eggs +-.idea/ +-/documentation.zip +-.DS_Store +-nipype/testing/data/von-ray_errmap.nii.gz +-nipype/testing/data/von_errmap.nii.gz +-nipype/testing/data/.proc* +-crash*.pklz +-.coverage +-htmlcov/ +-__pycache__/ +-*~ +-.*.swp +-.ipynb_checkpoints/ +-.ruby-version +-.pytest_cache diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 97acba45dc..3d14195a70 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -199,16 +199,16 @@ class TOPUPInputSpec(FSLCommandInputSpec): # TODO: the following traits admit values separated by commas, one value # per registration level inside topup. 
warp_res = traits.Float( - 10.0, usedefault=True, + 10.0, usedefault=False, argstr='--warpres=%f', desc=('(approximate) resolution (in mm) of warp ' 'basis for the different sub-sampling levels' '.')) - subsamp = traits.Int(1, usedefault=True, + subsamp = traits.Int(1, usedefault=False, argstr='--subsamp=%d', desc='sub-sampling scheme') fwhm = traits.Float( 8.0, - usedefault=True, + usedefault=False, argstr='--fwhm=%f', desc='FWHM (in mm) of gaussian smoothing kernel') config = traits.String( @@ -218,10 +218,10 @@ class TOPUPInputSpec(FSLCommandInputSpec): desc=('Name of config file specifying command line ' 'arguments')) max_iter = traits.Int( - 5, usedefault=True, + 5, usedefault=False, argstr='--miter=%d', desc='max # of non-linear iterations') reg_lambda = traits.Float( - 1.0, usedefault=True, + 1.0, usedefault=False, argstr='--miter=%0.f', desc=('lambda weighting value of the ' 'regularisation term')) @@ -259,7 +259,7 @@ class TOPUPInputSpec(FSLCommandInputSpec): desc=('Minimisation method 0=Levenberg-Marquardt, ' '1=Scaled Conjugate Gradient')) splineorder = traits.Int( - 3, usedefault=True, + 3, usedefault=False, argstr='--splineorder=%d', desc=('order of spline, 2->Qadratic spline, ' '3->Cubic spline')) @@ -1025,7 +1025,11 @@ def _list_outputs(self): os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz') outputs['seg'] = os.path.join( os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz') +<<<<<<< HEAD +======= + +>>>>>>> 7b0b2dc9b6c61eb646f807c65103cc8fc9c8f625 return outputs From 39dc14d2ec28949297e47df76222c731d65630f0 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 20:02:10 +0200 Subject: [PATCH 09/21] verified? --- nipype/interfaces/fsl/epi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 196071f197..a35910daa4 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -3,7 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This -was written to work with FSL version 5.0.4. +was written to work with FSL version 5.0.4.* """ from __future__ import print_function, division, unicode_literals, \ absolute_import From e344dcdc5599c1dd2e7435b14511a3311b27f529 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 20:05:45 +0200 Subject: [PATCH 10/21] signed commit --- nipype/interfaces/fsl/epi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index a35910daa4..196071f197 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -3,7 +3,7 @@ # vi: set ft=python sts=4 ts=4 sw=4 et: """The fsl module provides classes for interfacing with the `FSL `_ command line tools. This -was written to work with FSL version 5.0.4.* +was written to work with FSL version 5.0.4. 
""" from __future__ import print_function, division, unicode_literals, \ absolute_import From 475b74d546d3dd449ca534a049ed50186661b010 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 20:19:09 +0200 Subject: [PATCH 11/21] verify test --- foo | 1 + 1 file changed, 1 insertion(+) create mode 100644 foo diff --git a/foo b/foo new file mode 100644 index 0000000000..257cc5642c --- /dev/null +++ b/foo @@ -0,0 +1 @@ +foo From 8844d0a3fccae5aa2427986fe10d92527d76280b Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Thu, 26 Jul 2018 20:22:07 +0200 Subject: [PATCH 12/21] email --- foo | 1 - 1 file changed, 1 deletion(-) delete mode 100644 foo diff --git a/foo b/foo deleted file mode 100644 index 257cc5642c..0000000000 --- a/foo +++ /dev/null @@ -1 +0,0 @@ -foo From 0dd2771e05fca819b2b15aec1028989303d7fc95 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Fri, 27 Jul 2018 11:43:51 +0200 Subject: [PATCH 13/21] tested --- nipype/interfaces/ants/registration.py | 2 +- nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py | 4 ++-- nipype/interfaces/fsl/epi.py | 2 +- nipype/interfaces/fsl/tests/test_auto_EpiReg.py | 1 + nipype/interfaces/io.py | 2 +- nipype/testing/data/reg_average_cmd | 1 + nipype/utils/filemanip.py | 2 +- nipype/utils/tests/test_filemanip.py | 4 ++-- 8 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 nipype/testing/data/reg_average_cmd diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 7190816cf3..9d3b07e96a 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -125,7 +125,7 @@ class ANTSOutputSpec(TraitedSpec): class ANTS(ANTSCommand): - """ANTS wrapper for registration of images + """ANTS wrapper for registration of images (old, use Registration instead) Examples diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 292e6b398b..341f250a2c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -52,8 +52,8 @@ def test_AntsJointFusion_inputs(): hash_files=False, ), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', 'out_intensity_fusion_name_format'], - ), + requires=['out_label_fusion', + 'out_intensity_fusion_name_format'], ), patch_metric=dict(argstr='-m %s', ), patch_radius=dict( argstr='-p %s', diff --git a/nipype/interfaces/fsl/epi.py b/nipype/interfaces/fsl/epi.py index 97acba45dc..29901d4e32 100644 --- a/nipype/interfaces/fsl/epi.py +++ b/nipype/interfaces/fsl/epi.py @@ -1025,7 +1025,7 @@ def _list_outputs(self): os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz') outputs['seg'] = os.path.join( os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz') - + return outputs diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 29a935333e..634a75e376 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -61,6 +61,7 @@ def test_EpiReg_outputs(): fullwarp=dict(), out_1vol=dict(), out_file=dict(), + seg=dict(), shiftmap=dict(), wmedge=dict(), wmseg=dict(), diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 7a89675e8d..5a806a67be 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -124,7 +124,7 @@ def add_traits(base, names, trait_type=None): def _get_head_bucket(s3_resource, bucket_name): - """ Try to get 
the header info of a bucket, in order to + """ Try to get the header info of a bucket, in order to check if it exists and its permissions """ diff --git a/nipype/testing/data/reg_average_cmd b/nipype/testing/data/reg_average_cmd new file mode 100644 index 0000000000..0ac4151ef5 --- /dev/null +++ b/nipype/testing/data/reg_average_cmd @@ -0,0 +1 @@ +reg_average /filesrv/weninger/code/nipype/nipype/testing/data/avg_out.nii.gz -avg im1.nii im2.nii im3.nii -omp 1 \ No newline at end of file diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 80fc262f03..8bf4c456d2 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -662,7 +662,7 @@ def loadpkl(infile, versioning=False): return unpkl - # Unpickling problems + # Unpickling problems except Exception as e: if not versioning: raise e diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index ae5316c7d7..b9a413d557 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -547,7 +547,7 @@ class PickledBreaker: def __setstate__(self, d): raise Exception() - + def test_versioned_pklization(tmpdir): tmpdir.chdir() @@ -560,7 +560,7 @@ def test_versioned_pklization(tmpdir): loadpkl('./pickled.pkz', versioning=True) - + def test_unversioned_pklization(tmpdir): tmpdir.chdir() From 156b4e631d9c2adeaf42766b112836633299957a Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Sun, 29 Jul 2018 23:31:28 +0200 Subject: [PATCH 14/21] ?! --- nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 292e6b398b..341f250a2c 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -52,8 +52,8 @@ def test_AntsJointFusion_inputs(): hash_files=False, ), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', 'out_intensity_fusion_name_format'], - ), + requires=['out_label_fusion', + 'out_intensity_fusion_name_format'], ), patch_metric=dict(argstr='-m %s', ), patch_radius=dict( argstr='-p %s', From ed35207beaf732ee971c03bda0815bfc2e92d743 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Mon, 30 Jul 2018 00:00:06 +0200 Subject: [PATCH 15/21] testing done --- nipype/interfaces/ants/registration.py | 2 +- nipype/interfaces/fsl/tests/test_auto_EpiReg.py | 1 + nipype/interfaces/io.py | 2 +- nipype/utils/filemanip.py | 2 +- nipype/utils/tests/test_filemanip.py | 4 ++-- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/nipype/interfaces/ants/registration.py b/nipype/interfaces/ants/registration.py index 7190816cf3..9d3b07e96a 100644 --- a/nipype/interfaces/ants/registration.py +++ b/nipype/interfaces/ants/registration.py @@ -125,7 +125,7 @@ class ANTSOutputSpec(TraitedSpec): class ANTS(ANTSCommand): - """ANTS wrapper for registration of images + """ANTS wrapper for registration of images (old, use Registration instead) Examples diff --git a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py index 29a935333e..634a75e376 100644 --- a/nipype/interfaces/fsl/tests/test_auto_EpiReg.py +++ b/nipype/interfaces/fsl/tests/test_auto_EpiReg.py @@ -61,6 +61,7 @@ def test_EpiReg_outputs(): fullwarp=dict(), out_1vol=dict(), out_file=dict(), + seg=dict(), shiftmap=dict(), wmedge=dict(), wmseg=dict(), diff --git a/nipype/interfaces/io.py 
b/nipype/interfaces/io.py index 7a89675e8d..5a806a67be 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -124,7 +124,7 @@ def add_traits(base, names, trait_type=None): def _get_head_bucket(s3_resource, bucket_name): - """ Try to get the header info of a bucket, in order to + """ Try to get the header info of a bucket, in order to check if it exists and its permissions """ diff --git a/nipype/utils/filemanip.py b/nipype/utils/filemanip.py index 80fc262f03..8bf4c456d2 100644 --- a/nipype/utils/filemanip.py +++ b/nipype/utils/filemanip.py @@ -662,7 +662,7 @@ def loadpkl(infile, versioning=False): return unpkl - # Unpickling problems + # Unpickling problems except Exception as e: if not versioning: raise e diff --git a/nipype/utils/tests/test_filemanip.py b/nipype/utils/tests/test_filemanip.py index ae5316c7d7..b9a413d557 100644 --- a/nipype/utils/tests/test_filemanip.py +++ b/nipype/utils/tests/test_filemanip.py @@ -547,7 +547,7 @@ class PickledBreaker: def __setstate__(self, d): raise Exception() - + def test_versioned_pklization(tmpdir): tmpdir.chdir() @@ -560,7 +560,7 @@ def test_versioned_pklization(tmpdir): loadpkl('./pickled.pkz', versioning=True) - + def test_unversioned_pklization(tmpdir): tmpdir.chdir() From 475ac7b83701dd38f3e02f999e441d033b6d6803 Mon Sep 17 00:00:00 2001 From: Leon Weninger Date: Mon, 30 Jul 2018 00:03:07 +0200 Subject: [PATCH 16/21] delete .orig file --- nipype/interfaces/fsl/epi.py.orig | 1242 ----------------------------- 1 file changed, 1242 deletions(-) delete mode 100644 nipype/interfaces/fsl/epi.py.orig diff --git a/nipype/interfaces/fsl/epi.py.orig b/nipype/interfaces/fsl/epi.py.orig deleted file mode 100644 index 7e7aba0ceb..0000000000 --- a/nipype/interfaces/fsl/epi.py.orig +++ /dev/null @@ -1,1242 +0,0 @@ -# -*- coding: utf-8 -*- -# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- -# vi: set ft=python sts=4 ts=4 sw=4 et: -"""The fsl module provides classes for interfacing with the `FSL -`_ command line tools. This -was written to work with FSL version 5.0.4. -""" -from __future__ import print_function, division, unicode_literals, \ - absolute_import -from builtins import str - -import os -import numpy as np -import nibabel as nb -import warnings - -from ...utils.filemanip import split_filename -from ...utils import NUMPY_MMAP - -from ..base import (traits, TraitedSpec, InputMultiPath, File, isdefined) -from .base import FSLCommand, FSLCommandInputSpec, Info - - -class PrepareFieldmapInputSpec(FSLCommandInputSpec): - scanner = traits.String( - 'SIEMENS', - argstr='%s', - position=1, - desc='must be SIEMENS', - usedefault=True) - in_phase = File( - exists=True, - argstr='%s', - position=2, - mandatory=True, - desc=('Phase difference map, in SIEMENS format range from ' - '0-4096 or 0-8192)')) - in_magnitude = File( - exists=True, - argstr='%s', - position=3, - mandatory=True, - desc='Magnitude difference map, brain extracted') - delta_TE = traits.Float( - 2.46, - usedefault=True, - mandatory=True, - argstr='%f', - position=-2, - desc=('echo time difference of the ' - 'fieldmap sequence in ms. 
(usually 2.46ms in' - ' Siemens)')) - nocheck = traits.Bool( - False, - position=-1, - argstr='--nocheck', - usedefault=True, - desc=('do not perform sanity checks for image ' - 'size/range/dimensions')) - out_fieldmap = File( - argstr='%s', position=4, desc='output name for prepared fieldmap') - - -class PrepareFieldmapOutputSpec(TraitedSpec): - out_fieldmap = File(exists=True, desc='output name for prepared fieldmap') - - -class PrepareFieldmap(FSLCommand): - """ - Interface for the fsl_prepare_fieldmap script (FSL 5.0) - - Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in - rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS - images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```). - - - Examples - -------- - - >>> from nipype.interfaces.fsl import PrepareFieldmap - >>> prepare = PrepareFieldmap() - >>> prepare.inputs.in_phase = "phase.nii" - >>> prepare.inputs.in_magnitude = "magnitude.nii" - >>> prepare.inputs.output_type = "NIFTI_GZ" - >>> prepare.cmdline # doctest: +ELLIPSIS - 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii \ -.../phase_fslprepared.nii.gz 2.460000' - >>> res = prepare.run() # doctest: +SKIP - - - """ - _cmd = 'fsl_prepare_fieldmap' - input_spec = PrepareFieldmapInputSpec - output_spec = PrepareFieldmapOutputSpec - - def _parse_inputs(self, skip=None): - if skip is None: - skip = [] - - if not isdefined(self.inputs.out_fieldmap): - self.inputs.out_fieldmap = self._gen_fname( - self.inputs.in_phase, suffix='_fslprepared') - - if not isdefined(self.inputs.nocheck) or not self.inputs.nocheck: - skip += ['nocheck'] - - return super(PrepareFieldmap, self)._parse_inputs(skip=skip) - - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_fieldmap'] = self.inputs.out_fieldmap - return outputs - - def _run_interface(self, runtime): - runtime = super(PrepareFieldmap, self)._run_interface(runtime) - - if runtime.returncode == 0: - out_file = self.inputs.out_fieldmap - im = nb.load(out_file, mmap=NUMPY_MMAP) - dumb_img = nb.Nifti1Image(np.zeros(im.shape), im.affine, im.header) - out_nii = nb.funcs.concat_images((im, dumb_img)) - nb.save(out_nii, out_file) - - return runtime - - -class TOPUPInputSpec(FSLCommandInputSpec): - in_file = File( - exists=True, - mandatory=True, - desc='name of 4D file with images', - argstr='--imain=%s') - encoding_file = File( - exists=True, - mandatory=True, - xor=['encoding_direction'], - desc='name of text file with PE directions/times', - argstr='--datain=%s') - encoding_direction = traits.List( - traits.Enum('y', 'x', 'z', 'x-', 'y-', 'z-'), - mandatory=True, - xor=['encoding_file'], - requires=['readout_times'], - argstr='--datain=%s', - desc=('encoding direction for automatic ' - 'generation of encoding_file')) - readout_times = InputMultiPath( - traits.Float, - requires=['encoding_direction'], - xor=['encoding_file'], - mandatory=True, - desc=('readout times (dwell times by # ' - 'phase-encode steps minus 1)')) - out_base = File( - desc=('base-name of output files (spline ' - 'coefficients (Hz) and movement parameters)'), - name_source=['in_file'], - name_template='%s_base', - argstr='--out=%s', - hash_files=False) - out_field = File( - argstr='--fout=%s', - hash_files=False, - name_source=['in_file'], - name_template='%s_field', - desc='name of image file with field (Hz)') - out_warp_prefix = traits.Str( - "warpfield", - argstr='--dfout=%s', - hash_files=False, - desc='prefix for the warpfield images (in mm)', - usedefault=True) - out_mat_prefix = traits.Str( - "xfm", - 
argstr='--rbmout=%s', - hash_files=False, - desc='prefix for the realignment matrices', - usedefault=True) - out_jac_prefix = traits.Str( - "jac", - argstr='--jacout=%s', - hash_files=False, - desc='prefix for the warpfield images', - usedefault=True) - out_corrected = File( - argstr='--iout=%s', - hash_files=False, - name_source=['in_file'], - name_template='%s_corrected', - desc='name of 4D image file with unwarped images') - out_logfile = File( - argstr='--logout=%s', - desc='name of log-file', - name_source=['in_file'], - name_template='%s_topup.log', - keep_extension=True, - hash_files=False) - - # TODO: the following traits admit values separated by commas, one value - # per registration level inside topup. - warp_res = traits.Float( -<<<<<<< HEAD - 10.0, usedefault=False, - argstr='--warpres=%f', - desc=('(approximate) resolution (in mm) of warp ' - 'basis for the different sub-sampling levels' - '.')) - subsamp = traits.Int(1, usedefault=False, - argstr='--subsamp=%d', desc='sub-sampling scheme') - fwhm = traits.Float( - 8.0, - usedefault=False, -======= - argstr='--warpres=%f', - desc=('(approximate) resolution (in mm) of warp ' - 'basis for the different sub-sampling levels')) - subsamp = traits.Int(argstr='--subsamp=%d', - desc='sub-sampling scheme') - fwhm = traits.Float( ->>>>>>> 715ad0ab06da09aba8956ba922a24fd058b749c0 - argstr='--fwhm=%f', - desc='FWHM (in mm) of gaussian smoothing kernel') - config = traits.String( - 'b02b0.cnf', - argstr='--config=%s', - usedefault=True, - desc=('Name of config file specifying command line ' - 'arguments')) - max_iter = traits.Int( -<<<<<<< HEAD - 5, usedefault=False, - argstr='--miter=%d', desc='max # of non-linear iterations') - reg_lambda = traits.Float( - 1.0, usedefault=False, - argstr='--miter=%0.f', - desc=('lambda weighting value of the ' - 'regularisation term')) -======= - argstr='--miter=%d', - desc='max # of non-linear iterations') - reg_lambda = traits.Float( - argstr='--lambda=%0.f', - desc=('Weight of regularisation, default ' - 'depending on --ssqlambda and --regmod switches.')) ->>>>>>> 715ad0ab06da09aba8956ba922a24fd058b749c0 - ssqlambda = traits.Enum( - 1, - 0, - argstr='--ssqlambda=%d', - desc=('Weight lambda by the current value of the ' - 'ssd. If used (=1), the effective weight of ' - 'regularisation term becomes higher for the ' - 'initial iterations, therefore initial steps' - ' are a little smoother than they would ' - 'without weighting. This reduces the ' - 'risk of finding a local minimum.')) - regmod = traits.Enum( - 'bending_energy', - 'membrane_energy', - argstr='--regmod=%s', - desc=('Regularisation term implementation. Defaults ' - 'to bending_energy. Note that the two functions' - ' have vastly different scales. The membrane ' - 'energy is based on the first derivatives and ' - 'the bending energy on the second derivatives. 
' - 'The second derivatives will typically be much ' - 'smaller than the first derivatives, so input ' - 'lambda will have to be larger for ' - 'bending_energy to yield approximately the same' - ' level of regularisation.')) - estmov = traits.Enum( - 1, 0, argstr='--estmov=%d', desc='estimate movements if set') - minmet = traits.Enum( - 0, - 1, - argstr='--minmet=%d', - desc=('Minimisation method 0=Levenberg-Marquardt, ' - '1=Scaled Conjugate Gradient')) - splineorder = traits.Int( -<<<<<<< HEAD - 3, usedefault=False, -======= ->>>>>>> 715ad0ab06da09aba8956ba922a24fd058b749c0 - argstr='--splineorder=%d', - desc=('order of spline, 2->Qadratic spline, ' - '3->Cubic spline')) - numprec = traits.Enum( - 'double', - 'float', - argstr='--numprec=%s', - desc=('Precision for representing Hessian, double ' - 'or float.')) - interp = traits.Enum( - 'spline', - 'linear', - argstr='--interp=%s', - desc='Image interpolation model, linear or spline.') - scale = traits.Enum( - 0, - 1, - argstr='--scale=%d', - desc=('If set (=1), the images are individually scaled' - ' to a common mean')) - regrid = traits.Enum( - 1, - 0, - argstr='--regrid=%d', - desc=('If set (=1), the calculations are done in a ' - 'different grid')) - - -class TOPUPOutputSpec(TraitedSpec): - out_fieldcoef = File( - exists=True, desc='file containing the field coefficients') - out_movpar = File(exists=True, desc='movpar.txt output file') - out_enc_file = File(desc='encoding directions file output for applytopup') - out_field = File(desc='name of image file with field (Hz)') - out_warps = traits.List(File(exists=True), desc='warpfield images') - out_jacs = traits.List(File(exists=True), desc='Jacobian images') - out_mats = traits.List(File(exists=True), desc='realignment matrices') - out_corrected = File(desc='name of 4D image file with unwarped images') - out_logfile = File(desc='name of log-file') - - -class TOPUP(FSLCommand): - """ - Interface for FSL topup, a tool for estimating and correcting - susceptibility induced distortions. See FSL documentation for - `reference `_, - `usage examples - `_, - and `exemplary config files - `_. 
- - Examples - -------- - - >>> from nipype.interfaces.fsl import TOPUP - >>> topup = TOPUP() - >>> topup.inputs.in_file = "b0_b0rev.nii" - >>> topup.inputs.encoding_file = "topup_encoding.txt" - >>> topup.inputs.output_type = "NIFTI_GZ" - >>> topup.cmdline # doctest: +ELLIPSIS - 'topup --config=b02b0.cnf --datain=topup_encoding.txt \ ---imain=b0_b0rev.nii --out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz \ ---fout=b0_b0rev_field.nii.gz --jacout=jac --logout=b0_b0rev_topup.log \ ---rbmout=xfm --dfout=warpfield' - >>> res = topup.run() # doctest: +SKIP - - """ - _cmd = 'topup' - input_spec = TOPUPInputSpec - output_spec = TOPUPOutputSpec - - def _format_arg(self, name, trait_spec, value): - if name == 'encoding_direction': - return trait_spec.argstr % self._generate_encfile() - if name == 'out_base': - path, name, ext = split_filename(value) - if path != '': - if not os.path.exists(path): - raise ValueError('out_base path must exist if provided') - return super(TOPUP, self)._format_arg(name, trait_spec, value) - - def _list_outputs(self): - outputs = super(TOPUP, self)._list_outputs() - del outputs['out_base'] - base_path = None - if isdefined(self.inputs.out_base): - base_path, base, _ = split_filename(self.inputs.out_base) - if base_path == '': - base_path = None - else: - base = split_filename(self.inputs.in_file)[1] + '_base' - outputs['out_fieldcoef'] = self._gen_fname( - base, suffix='_fieldcoef', cwd=base_path) - outputs['out_movpar'] = self._gen_fname( - base, suffix='_movpar', ext='.txt', cwd=base_path) - - n_vols = nb.load(self.inputs.in_file).shape[-1] - ext = Info.output_type_to_ext(self.inputs.output_type) - fmt = os.path.abspath('{prefix}_{i:02d}{ext}').format - outputs['out_warps'] = [ - fmt(prefix=self.inputs.out_warp_prefix, i=i, ext=ext) - for i in range(1, n_vols + 1) - ] - outputs['out_jacs'] = [ - fmt(prefix=self.inputs.out_jac_prefix, i=i, ext=ext) - for i in range(1, n_vols + 1) - ] - outputs['out_mats'] = [ - fmt(prefix=self.inputs.out_mat_prefix, i=i, ext=".mat") - for i in range(1, n_vols + 1) - ] - - if isdefined(self.inputs.encoding_direction): - outputs['out_enc_file'] = self._get_encfilename() - return outputs - - def _get_encfilename(self): - out_file = os.path.join( - os.getcwd(), - ('%s_encfile.txt' % split_filename(self.inputs.in_file)[1])) - return out_file - - def _generate_encfile(self): - """Generate a topup compatible encoding file based on given directions - """ - out_file = self._get_encfilename() - durations = self.inputs.readout_times - if len(self.inputs.encoding_direction) != len(durations): - if len(self.inputs.readout_times) != 1: - raise ValueError(('Readout time must be a float or match the' - 'length of encoding directions')) - durations = durations * len(self.inputs.encoding_direction) - - lines = [] - for idx, encdir in enumerate(self.inputs.encoding_direction): - direction = 1.0 - if encdir.endswith('-'): - direction = -1.0 - line = [ - float(val[0] == encdir[0]) * direction - for val in ['x', 'y', 'z'] - ] + [durations[idx]] - lines.append(line) - np.savetxt(out_file, np.array(lines), fmt=b'%d %d %d %.8f') - return out_file - - def _overload_extension(self, value, name=None): - if name == 'out_base': - return value - return super(TOPUP, self)._overload_extension(value, name) - - -class ApplyTOPUPInputSpec(FSLCommandInputSpec): - in_files = InputMultiPath( - File(exists=True), - mandatory=True, - desc='name of file with images', - argstr='--imain=%s', - sep=',') - encoding_file = File( - exists=True, - mandatory=True, - desc='name of 
text file with PE directions/times', - argstr='--datain=%s') - in_index = traits.List( - traits.Int, - argstr='--inindex=%s', - sep=',', - desc='comma separated list of indices corresponding to --datain') - in_topup_fieldcoef = File( - exists=True, - argstr="--topup=%s", - copyfile=False, - requires=['in_topup_movpar'], - desc=('topup file containing the field ' - 'coefficients')) - in_topup_movpar = File( - exists=True, - requires=['in_topup_fieldcoef'], - copyfile=False, - desc='topup movpar.txt file') - out_corrected = File( - desc='output (warped) image', - name_source=['in_files'], - name_template='%s_corrected', - argstr='--out=%s') - method = traits.Enum( - 'jac', - 'lsr', - argstr='--method=%s', - desc=('use jacobian modulation (jac) or least-squares' - ' resampling (lsr)')) - interp = traits.Enum( - 'trilinear', - 'spline', - argstr='--interp=%s', - desc='interpolation method') - datatype = traits.Enum( - 'char', - 'short', - 'int', - 'float', - 'double', - argstr='-d=%s', - desc='force output data type') - - -class ApplyTOPUPOutputSpec(TraitedSpec): - out_corrected = File( - exists=True, desc=('name of 4D image file with ' - 'unwarped images')) - - -class ApplyTOPUP(FSLCommand): - """ - Interface for FSL topup, a tool for estimating and correcting - susceptibility induced distortions. - `General reference - `_ - and `use example - `_. - - - Examples - -------- - - >>> from nipype.interfaces.fsl import ApplyTOPUP - >>> applytopup = ApplyTOPUP() - >>> applytopup.inputs.in_files = ["epi.nii", "epi_rev.nii"] - >>> applytopup.inputs.encoding_file = "topup_encoding.txt" - >>> applytopup.inputs.in_topup_fieldcoef = "topup_fieldcoef.nii.gz" - >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt" - >>> applytopup.inputs.output_type = "NIFTI_GZ" - >>> applytopup.cmdline # doctest: +ELLIPSIS - 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii \ ---inindex=1,2 --topup=topup --out=epi_corrected.nii.gz' - >>> res = applytopup.run() # doctest: +SKIP - - """ - _cmd = 'applytopup' - input_spec = ApplyTOPUPInputSpec - output_spec = ApplyTOPUPOutputSpec - - def _parse_inputs(self, skip=None): - if skip is None: - skip = [] - - # If not defined, assume index are the first N entries in the - # parameters file, for N input images. 
- if not isdefined(self.inputs.in_index): - self.inputs.in_index = list( - range(1, - len(self.inputs.in_files) + 1)) - - return super(ApplyTOPUP, self)._parse_inputs(skip=skip) - - def _format_arg(self, name, spec, value): - if name == 'in_topup_fieldcoef': - return spec.argstr % value.split('_fieldcoef')[0] - return super(ApplyTOPUP, self)._format_arg(name, spec, value) - - -class EddyInputSpec(FSLCommandInputSpec): - in_file = File( - exists=True, - mandatory=True, - argstr='--imain=%s', - desc=('File containing all the images to estimate ' - 'distortions for')) - in_mask = File( - exists=True, - mandatory=True, - argstr='--mask=%s', - desc='Mask to indicate brain') - in_index = File( - exists=True, - mandatory=True, - argstr='--index=%s', - desc=('File containing indices for all volumes in --imain ' - 'into --acqp and --topup')) - in_acqp = File( - exists=True, - mandatory=True, - argstr='--acqp=%s', - desc='File containing acquisition parameters') - in_bvec = File( - exists=True, - mandatory=True, - argstr='--bvecs=%s', - desc=('File containing the b-vectors for all volumes in ' - '--imain')) - in_bval = File( - exists=True, - mandatory=True, - argstr='--bvals=%s', - desc=('File containing the b-values for all volumes in ' - '--imain')) - out_base = traits.Str( - 'eddy_corrected', - argstr='--out=%s', - usedefault=True, - desc=('basename for output (warped) image')) - session = File( - exists=True, - argstr='--session=%s', - desc=('File containing session indices for all volumes in ' - '--imain')) - in_topup_fieldcoef = File( - exists=True, - argstr="--topup=%s", - requires=['in_topup_movpar'], - desc=('topup file containing the field ' - 'coefficients')) - in_topup_movpar = File( - exists=True, - requires=['in_topup_fieldcoef'], - desc='topup movpar.txt file') - - flm = traits.Enum( - 'linear', - 'quadratic', - 'cubic', - argstr='--flm=%s', - desc='First level EC model') - - slm = traits.Enum( - 'none', - 'linear', - 'quadratic', - argstr='--slm=%s', - desc='Second level EC model') - - fep = traits.Bool( - False, argstr='--fep', desc='Fill empty planes in x- or y-directions') - - interp = traits.Enum( - 'spline', - 'trilinear', - argstr='--interp=%s', - desc='Interpolation model for estimation step') - - nvoxhp = traits.Int( - 1000, usedefault=True, - argstr='--nvoxhp=%s', - desc=('# of voxels used to estimate the ' - 'hyperparameters')) - - fudge_factor = traits.Float( - 10.0, usedefault=True, - argstr='--ff=%s', - desc=('Fudge factor for hyperparameter ' - 'error variance')) - - dont_sep_offs_move = traits.Bool( - False, - argstr='--dont_sep_offs_move', - desc=('Do NOT attempt to separate ' - 'field offset from subject ' - 'movement')) - - dont_peas = traits.Bool( - False, - argstr='--dont_peas', - desc="Do NOT perform a post-eddy alignment of " - "shells") - - fwhm = traits.Float( - desc=('FWHM for conditioning filter when estimating ' - 'the parameters'), - argstr='--fwhm=%s') - - niter = traits.Int(5, usedefault=True, - argstr='--niter=%s', desc='Number of iterations') - - method = traits.Enum( - 'jac', - 'lsr', - argstr='--resamp=%s', - desc=('Final resampling method (jacobian/least ' - 'squares)')) - repol = traits.Bool( - False, argstr='--repol', desc='Detect and replace outlier slices') - num_threads = traits.Int( - 1, - usedefault=True, - nohash=True, - desc="Number of openmp threads to use") - is_shelled = traits.Bool( - False, - argstr='--data_is_shelled', - desc="Override internal check to ensure that " - "date are acquired on a set of b-value " - "shells") - field = 
traits.Str( - argstr='--field=%s', - desc="NonTOPUP fieldmap scaled in Hz - filename has " - "to be provided without an extension. TOPUP is " - "strongly recommended") - field_mat = File( - exists=True, - argstr='--field_mat=%s', - desc="Matrix that specifies the relative locations of " - "the field specified by --field and first volume " - "in file --imain") - use_cuda = traits.Bool(False, desc="Run eddy using cuda gpu") - - -class EddyOutputSpec(TraitedSpec): - out_corrected = File( - exists=True, desc='4D image file containing all the corrected volumes') - out_parameter = File( - exists=True, - desc=('text file with parameters definining the field and' - 'movement for each scan')) - out_rotated_bvecs = File( - exists=True, desc='File containing rotated b-values for all volumes') - out_movement_rms = File( - exists=True, desc='Summary of the "total movement" in each volume') - out_restricted_movement_rms = File( - exists=True, - desc=('Summary of the "total movement" in each volume ' - 'disregarding translation in the PE direction')) - out_shell_alignment_parameters = File( - exists=True, - desc=('File containing rigid body movement parameters ' - 'between the different shells as estimated by a ' - 'post-hoc mutual information based registration')) - out_outlier_report = File( - exists=True, - desc=('Text-file with a plain language report on what ' - 'outlier slices eddy has found')) - - -class Eddy(FSLCommand): - """ - Interface for FSL eddy, a tool for estimating and correcting eddy - currents induced distortions. `User guide - `_ and - `more info regarding acqp file - `_. - - Examples - -------- - - >>> from nipype.interfaces.fsl import Eddy - >>> eddy = Eddy() - >>> eddy.inputs.in_file = 'epi.nii' - >>> eddy.inputs.in_mask = 'epi_mask.nii' - >>> eddy.inputs.in_index = 'epi_index.txt' - >>> eddy.inputs.in_acqp = 'epi_acqp.txt' - >>> eddy.inputs.in_bvec = 'bvecs.scheme' - >>> eddy.inputs.in_bval = 'bvals.scheme' - >>> eddy.inputs.use_cuda = True - >>> eddy.cmdline # doctest: +ELLIPSIS - 'eddy_cuda --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ ---bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ ---mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' - >>> eddy.inputs.use_cuda = False - >>> eddy.cmdline # doctest: +ELLIPSIS - 'eddy_openmp --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme \ ---bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt \ ---mask=epi_mask.nii --niter=5 --nvoxhp=1000 --out=.../eddy_corrected' - >>> res = eddy.run() # doctest: +SKIP - - """ - _cmd = 'eddy_openmp' - input_spec = EddyInputSpec - output_spec = EddyOutputSpec - - _num_threads = 1 - - def __init__(self, **inputs): - super(Eddy, self).__init__(**inputs) - self.inputs.on_trait_change(self._num_threads_update, 'num_threads') - if not isdefined(self.inputs.num_threads): - self.inputs.num_threads = self._num_threads - else: - self._num_threads_update() - self.inputs.on_trait_change(self._use_cuda, 'use_cuda') - if isdefined(self.inputs.use_cuda): - self._use_cuda() - - def _num_threads_update(self): - self._num_threads = self.inputs.num_threads - if not isdefined(self.inputs.num_threads): - if 'OMP_NUM_THREADS' in self.inputs.environ: - del self.inputs.environ['OMP_NUM_THREADS'] - else: - self.inputs.environ['OMP_NUM_THREADS'] = str( - self.inputs.num_threads) - - def _use_cuda(self): - self._cmd = 'eddy_cuda' if self.inputs.use_cuda else 'eddy_openmp' - - def _run_interface(self, runtime): - # If 'eddy_openmp' is missing, use 'eddy' - FSLDIR = os.getenv('FSLDIR', '') - 
cmd = self._cmd - if all((FSLDIR != '', cmd == 'eddy_openmp', - not os.path.exists(os.path.join(FSLDIR, 'bin', cmd)))): - self._cmd = 'eddy' - runtime = super(Eddy, self)._run_interface(runtime) - - # Restore command to avoid side-effects - self._cmd = cmd - return runtime - - def _format_arg(self, name, spec, value): - if name == 'in_topup_fieldcoef': - return spec.argstr % value.split('_fieldcoef')[0] - if name == 'out_base': - return spec.argstr % os.path.abspath(value) - return super(Eddy, self)._format_arg(name, spec, value) - - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_corrected'] = os.path.abspath( - '%s.nii.gz' % self.inputs.out_base) - outputs['out_parameter'] = os.path.abspath( - '%s.eddy_parameters' % self.inputs.out_base) - - # File generation might depend on the version of EDDY - out_rotated_bvecs = os.path.abspath( - '%s.eddy_rotated_bvecs' % self.inputs.out_base) - out_movement_rms = os.path.abspath( - '%s.eddy_movement_rms' % self.inputs.out_base) - out_restricted_movement_rms = os.path.abspath( - '%s.eddy_restricted_movement_rms' % self.inputs.out_base) - out_shell_alignment_parameters = os.path.abspath( - '%s.eddy_post_eddy_shell_alignment_parameters' % - self.inputs.out_base) - out_outlier_report = os.path.abspath( - '%s.eddy_outlier_report' % self.inputs.out_base) - - if os.path.exists(out_rotated_bvecs): - outputs['out_rotated_bvecs'] = out_rotated_bvecs - if os.path.exists(out_movement_rms): - outputs['out_movement_rms'] = out_movement_rms - if os.path.exists(out_restricted_movement_rms): - outputs['out_restricted_movement_rms'] = \ - out_restricted_movement_rms - if os.path.exists(out_shell_alignment_parameters): - outputs['out_shell_alignment_parameters'] = \ - out_shell_alignment_parameters - if os.path.exists(out_outlier_report): - outputs['out_outlier_report'] = out_outlier_report - - return outputs - - -class SigLossInputSpec(FSLCommandInputSpec): - in_file = File( - mandatory=True, exists=True, argstr='-i %s', desc='b0 fieldmap file') - out_file = File( - argstr='-s %s', desc='output signal loss estimate file', genfile=True) - - mask_file = File(exists=True, argstr='-m %s', desc='brain mask file') - echo_time = traits.Float(argstr='--te=%f', desc='echo time in seconds') - slice_direction = traits.Enum( - 'x', 'y', 'z', argstr='-d %s', desc='slicing direction') - - -class SigLossOuputSpec(TraitedSpec): - out_file = File(exists=True, desc='signal loss estimate file') - - -class SigLoss(FSLCommand): - """ - Estimates signal loss from a field map (in rad/s) - - Examples - -------- - - >>> from nipype.interfaces.fsl import SigLoss - >>> sigloss = SigLoss() - >>> sigloss.inputs.in_file = "phase.nii" - >>> sigloss.inputs.echo_time = 0.03 - >>> sigloss.inputs.output_type = "NIFTI_GZ" - >>> sigloss.cmdline # doctest: +ELLIPSIS - 'sigloss --te=0.030000 -i phase.nii -s .../phase_sigloss.nii.gz' - >>> res = sigloss.run() # doctest: +SKIP - - - """ - input_spec = SigLossInputSpec - output_spec = SigLossOuputSpec - _cmd = 'sigloss' - - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = self.inputs.out_file - if ((not isdefined(outputs['out_file'])) - and (isdefined(self.inputs.in_file))): - outputs['out_file'] = self._gen_fname( - self.inputs.in_file, suffix='_sigloss') - return outputs - - def _gen_filename(self, name): - if name == 'out_file': - return self._list_outputs()['out_file'] - return None - - -class EpiRegInputSpec(FSLCommandInputSpec): - epi = File( - exists=True, - argstr='--epi=%s', - 
mandatory=True, - position=-4, - desc='EPI image') - t1_head = File( - exists=True, - argstr='--t1=%s', - mandatory=True, - position=-3, - desc='wholehead T1 image') - t1_brain = File( - exists=True, - argstr='--t1brain=%s', - mandatory=True, - position=-2, - desc='brain extracted T1 image') - out_base = traits.String( - "epi2struct", - desc='output base name', - argstr='--out=%s', - position=-1, - usedefault=True) - fmap = File( - exists=True, argstr='--fmap=%s', desc='fieldmap image (in rad/s)') - fmapmag = File( - exists=True, - argstr='--fmapmag=%s', - desc='fieldmap magnitude image - wholehead') - fmapmagbrain = File( - exists=True, - argstr='--fmapmagbrain=%s', - desc='fieldmap magnitude image - brain extracted') - wmseg = File( - exists=True, - argstr='--wmseg=%s', - desc='white matter segmentation of T1 image, has to be named \ - like the t1brain and end on _wmseg') - echospacing = traits.Float( - argstr='--echospacing=%f', - desc='Effective EPI echo spacing \ - (sometimes called dwell time) - in seconds') - pedir = traits.Enum( - 'x', - 'y', - 'z', - '-x', - '-y', - '-z', - argstr='--pedir=%s', - desc='phase encoding direction, dir = x/y/z/-x/-y/-z') - - weight_image = File( - exists=True, - argstr='--weight=%s', - desc='weighting image (in T1 space)') - no_fmapreg = traits.Bool( - False, - argstr='--nofmapreg', - desc='do not perform registration of fmap to T1 \ - (use if fmap already registered)') - no_clean = traits.Bool( - True, - argstr='--noclean', - usedefault=True, - desc='do not clean up intermediate files') - - -class EpiRegOutputSpec(TraitedSpec): - out_file = File(exists=True, desc='unwarped and coregistered epi input') - out_1vol = File( - exists=True, desc='unwarped and coregistered single volume') - fmap2str_mat = File( - exists=True, desc='rigid fieldmap-to-structural transform') - fmap2epi_mat = File(exists=True, desc='rigid fieldmap-to-epi transform') - fmap_epi = File(exists=True, desc='fieldmap in epi space') - fmap_str = File(exists=True, desc='fieldmap in structural space') - fmapmag_str = File( - exists=True, desc='fieldmap magnitude image in structural space') - epi2str_inv = File(exists=True, desc='rigid structural-to-epi transform') - epi2str_mat = File(exists=True, desc='rigid epi-to-structural transform') - shiftmap = File(exists=True, desc='shiftmap in epi space') - fullwarp = File( - exists=True, - desc='warpfield to unwarp epi and transform into \ - structural space') - wmseg = File( - exists=True, desc='white matter segmentation used in flirt bbr') - seg = File( - exists=True, desc='white matter, gray matter, csf segmentation') - wmedge = File(exists=True, desc='white matter edges for visualization') - - -class EpiReg(FSLCommand): - """ - - Runs FSL epi_reg script for simultaneous coregistration and fieldmap - unwarping. 
- - Examples - -------- - - >>> from nipype.interfaces.fsl import EpiReg - >>> epireg = EpiReg() - >>> epireg.inputs.epi='epi.nii' - >>> epireg.inputs.t1_head='T1.nii' - >>> epireg.inputs.t1_brain='T1_brain.nii' - >>> epireg.inputs.out_base='epi2struct' - >>> epireg.inputs.fmap='fieldmap_phase_fslprepared.nii' - >>> epireg.inputs.fmapmag='fieldmap_mag.nii' - >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii' - >>> epireg.inputs.echospacing=0.00067 - >>> epireg.inputs.pedir='y' - >>> epireg.cmdline # doctest: +ELLIPSIS - 'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii \ ---fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --noclean \ ---pedir=y --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct' - >>> epireg.run() # doctest: +SKIP - - """ - _cmd = 'epi_reg' - input_spec = EpiRegInputSpec - output_spec = EpiRegOutputSpec - - def _list_outputs(self): - outputs = self.output_spec().get() - outputs['out_file'] = os.path.join(os.getcwd(), - self.inputs.out_base + '.nii.gz') - if (not (isdefined(self.inputs.no_fmapreg) and self.inputs.no_fmapreg) - and isdefined(self.inputs.fmap)): - outputs['out_1vol'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_1vol.nii.gz') - outputs['fmap2str_mat'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmap2str.mat') - outputs['fmap2epi_mat'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.mat') - outputs['fmap_epi'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2epi.nii.gz') - outputs['fmap_str'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmaprads2str.nii.gz') - outputs['fmapmag_str'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fieldmap2str.nii.gz') - outputs['shiftmap'] = os.path.join( - os.getcwd(), - self.inputs.out_base + '_fieldmaprads2epi_shift.nii.gz') - outputs['fullwarp'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_warp.nii.gz') - outputs['epi2str_inv'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_inv.mat') - - outputs['epi2str_mat'] = os.path.join(os.getcwd(), - self.inputs.out_base + '.mat') - outputs['wmedge'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_wmedge.nii.gz') - outputs['wmseg'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_wmseg.nii.gz') - outputs['seg'] = os.path.join( - os.getcwd(), self.inputs.out_base + '_fast_seg.nii.gz') - return outputs - - -####################################### -# deprecated interfaces -####################################### - - -class EPIDeWarpInputSpec(FSLCommandInputSpec): - mag_file = File( - exists=True, - desc='Magnitude file', - argstr='--mag %s', - position=0, - mandatory=True) - dph_file = File( - exists=True, - desc='Phase file assumed to be scaled from 0 to 4095', - argstr='--dph %s', - mandatory=True) - exf_file = File( - exists=True, - desc='example func volume (or use epi)', - argstr='--exf %s') - epi_file = File( - exists=True, desc='EPI volume to unwarp', argstr='--epi %s') - tediff = traits.Float( - 2.46, - usedefault=True, - desc='difference in B0 field map TEs', - argstr='--tediff %s') - esp = traits.Float( - 0.58, desc='EPI echo spacing', argstr='--esp %s', usedefault=True) - sigma = traits.Int( - 2, - usedefault=True, - argstr='--sigma %s', - desc="2D spatial gaussing smoothing \ - stdev (default = 2mm)") - vsm = traits.String( - genfile=True, desc='voxel shift map', argstr='--vsm %s') - exfdw = traits.String( - desc='dewarped example func volume', genfile=True, argstr='--exfdw %s') - 
epidw = traits.String( - desc='dewarped epi volume', genfile=False, argstr='--epidw %s') - tmpdir = traits.String(genfile=True, desc='tmpdir', argstr='--tmpdir %s') - nocleanup = traits.Bool( - True, usedefault=True, desc='no cleanup', argstr='--nocleanup') - cleanup = traits.Bool(desc='cleanup', argstr='--cleanup') - - -class EPIDeWarpOutputSpec(TraitedSpec): - unwarped_file = File(desc="unwarped epi file") - vsm_file = File(desc="voxel shift map") - exfdw = File(desc="dewarped functional volume example") - exf_mask = File(desc="Mask from example functional volume") - - -class EPIDeWarp(FSLCommand): - """ - Wraps the unwarping script `epidewarp.fsl - `_. - - .. warning:: deprecated in FSL, please use - :func:`nipype.workflows.dmri.preprocess.epi.sdc_fmb` instead. - - Examples - -------- - - >>> from nipype.interfaces.fsl import EPIDeWarp - >>> dewarp = EPIDeWarp() - >>> dewarp.inputs.epi_file = "functional.nii" - >>> dewarp.inputs.mag_file = "magnitude.nii" - >>> dewarp.inputs.dph_file = "phase.nii" - >>> dewarp.inputs.output_type = "NIFTI_GZ" - >>> dewarp.cmdline # doctest: +ELLIPSIS - 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii \ ---esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 \ ---tmpdir .../temp --vsm .../vsm.nii.gz' - >>> res = dewarp.run() # doctest: +SKIP - - - """ - _cmd = 'epidewarp.fsl' - input_spec = EPIDeWarpInputSpec - output_spec = EPIDeWarpOutputSpec - - def __init__(self, **inputs): - warnings.warn(("Deprecated: Please use " - "nipype.workflows.dmri.preprocess.epi.sdc_fmb instead"), - DeprecationWarning) - return super(EPIDeWarp, self).__init__(**inputs) - - def _run_interface(self, runtime): - runtime = super(EPIDeWarp, self)._run_interface(runtime) - if runtime.stderr: - self.raise_exception(runtime) - return runtime - - def _gen_filename(self, name): - if name == 'exfdw': - if isdefined(self.inputs.exf_file): - return self._gen_fname(self.inputs.exf_file, suffix="_exfdw") - else: - return self._gen_fname("exfdw") - if name == 'epidw': - if isdefined(self.inputs.epi_file): - return self._gen_fname(self.inputs.epi_file, suffix="_epidw") - if name == 'vsm': - return self._gen_fname('vsm') - if name == 'tmpdir': - return os.path.join(os.getcwd(), 'temp') - return None - - def _list_outputs(self): - outputs = self.output_spec().get() - if not isdefined(self.inputs.exfdw): - outputs['exfdw'] = self._gen_filename('exfdw') - else: - outputs['exfdw'] = self.inputs.exfdw - if isdefined(self.inputs.epi_file): - if isdefined(self.inputs.epidw): - outputs['unwarped_file'] = self.inputs.epidw - else: - outputs['unwarped_file'] = self._gen_filename('epidw') - if not isdefined(self.inputs.vsm): - outputs['vsm_file'] = self._gen_filename('vsm') - else: - outputs['vsm_file'] = self._gen_fname(self.inputs.vsm) - if not isdefined(self.inputs.tmpdir): - outputs['exf_mask'] = self._gen_fname( - cwd=self._gen_filename('tmpdir'), basename='maskexf') - else: - outputs['exf_mask'] = self._gen_fname( - cwd=self.inputs.tmpdir, basename='maskexf') - return outputs - - -class EddyCorrectInputSpec(FSLCommandInputSpec): - in_file = File( - exists=True, - desc='4D input file', - argstr='%s', - position=0, - mandatory=True) - out_file = File( - desc='4D output file', - argstr='%s', - position=1, - name_source=['in_file'], - name_template='%s_edc', - output_name='eddy_corrected') - ref_num = traits.Int( - 0, - argstr='%d', - position=2, - desc='reference number', - mandatory=True, - usedefault=True) - - -class EddyCorrectOutputSpec(TraitedSpec): - 
eddy_corrected = File( - exists=True, desc='path/name of 4D eddy corrected output file') - - -class EddyCorrect(FSLCommand): - """ - - .. warning:: Deprecated in FSL. Please use - :class:`nipype.interfaces.fsl.epi.Eddy` instead - - Example - ------- - - >>> from nipype.interfaces.fsl import EddyCorrect - >>> eddyc = EddyCorrect(in_file='diffusion.nii', - ... out_file="diffusion_edc.nii", ref_num=0) - >>> eddyc.cmdline - 'eddy_correct diffusion.nii diffusion_edc.nii 0' - - """ - _cmd = 'eddy_correct' - input_spec = EddyCorrectInputSpec - output_spec = EddyCorrectOutputSpec - - def __init__(self, **inputs): - warnings.warn(("Deprecated: Please use nipype.interfaces.fsl.epi.Eddy " - "instead"), DeprecationWarning) - return super(EddyCorrect, self).__init__(**inputs) - - def _run_interface(self, runtime): - runtime = super(EddyCorrect, self)._run_interface(runtime) - if runtime.stderr: - self.raise_exception(runtime) - return runtime From 729c912ff9e22345c026bee40c7a5b40307a529e Mon Sep 17 00:00:00 2001 From: Leon Weninger <18707626+weningerleon@users.noreply.github.com> Date: Mon, 30 Jul 2018 00:53:06 +0200 Subject: [PATCH 17/21] Update .zenodo.json --- .zenodo.json | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.zenodo.json b/.zenodo.json index bd6d39d56a..eb9e8c66fb 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -578,6 +578,10 @@ "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" + }, + { + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" } ], "keywords": [ @@ -587,4 +591,4 @@ ], "license": "Apache-2.0", "upload_type": "software" -} \ No newline at end of file +} From 51f9b0f4b5442fb498c396ad5b3c9155c8398a20 Mon Sep 17 00:00:00 2001 From: Leon Weninger <18707626+weningerleon@users.noreply.github.com> Date: Mon, 30 Jul 2018 07:42:02 +0200 Subject: [PATCH 18/21] Fix .gitignore --- .gitignore | 62 +++++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/.gitignore b/.gitignore index 081bb9303b..4213d07a68 100644 --- a/.gitignore +++ b/.gitignore @@ -1,31 +1,31 @@ --/build --/dist --/nipype.egg-info --/MANIFEST --/nipype/build --/nipype/nipype.egg-info --/doc/_build --/doc/preproc --/doc/users/examples --/doc/api/generated --*.pyc --*.so --.project --.settings --.pydevproject --.eggs --.idea/ --/documentation.zip --.DS_Store --nipype/testing/data/von-ray_errmap.nii.gz --nipype/testing/data/von_errmap.nii.gz --nipype/testing/data/.proc* --crash*.pklz --.coverage --htmlcov/ --__pycache__/ --*~ --.*.swp --.ipynb_checkpoints/ --.ruby-version --.pytest_cache +/build +/dist +/nipype.egg-info +/MANIFEST +/nipype/build +/nipype/nipype.egg-info +/doc/_build +/doc/preproc +/doc/users/examples +/doc/api/generated +*.pyc +*.so +.project +.settings +.pydevproject +.eggs +.idea/ +/documentation.zip +.DS_Store +nipype/testing/data/von-ray_errmap.nii.gz +nipype/testing/data/von_errmap.nii.gz +nipype/testing/data/.proc* +crash*.pklz +.coverage +htmlcov/ +__pycache__/ +*~ +.*.swp +.ipynb_checkpoints/ +.ruby-version +.pytest_cache From b78fba767ef4d15d18bd9686b347cb805fc6b66e Mon Sep 17 00:00:00 2001 From: Chris Markiewicz Date: Mon, 30 Jul 2018 08:28:54 -0400 Subject: [PATCH 19/21] FIX: Zenodo --- .zenodo.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.zenodo.json b/.zenodo.json index eb9e8c66fb..ed490763ff 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -574,14 +574,14 @@ { "name": 
"McDermottroe, Conor" }, + { + "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", + "name": "Weninger, Leon" + }, { "affiliation": "MIT, HMS", "name": "Ghosh, Satrajit", "orcid": "0000-0002-5312-6729" - }, - { - "affiliation": "Institute of Imaging & Computer Vision, RWTH Aachen University, Germany", - "name": "Weninger, Leon" } ], "keywords": [ From 353d00f5be1211142e8817edf50b739b020231d1 Mon Sep 17 00:00:00 2001 From: Leon Weninger <18707626+weningerleon@users.noreply.github.com> Date: Mon, 30 Jul 2018 14:56:20 +0200 Subject: [PATCH 20/21] Delete unnecessary file --- nipype/testing/data/reg_average_cmd | 1 - 1 file changed, 1 deletion(-) delete mode 100644 nipype/testing/data/reg_average_cmd diff --git a/nipype/testing/data/reg_average_cmd b/nipype/testing/data/reg_average_cmd deleted file mode 100644 index 0ac4151ef5..0000000000 --- a/nipype/testing/data/reg_average_cmd +++ /dev/null @@ -1 +0,0 @@ -reg_average /filesrv/weninger/code/nipype/nipype/testing/data/avg_out.nii.gz -avg im1.nii im2.nii im3.nii -omp 1 \ No newline at end of file From 249a5b45846fdbd2fd27e9c1acedcee1b18d0505 Mon Sep 17 00:00:00 2001 From: "Christopher J. Markiewicz" Date: Mon, 30 Jul 2018 13:09:23 -0400 Subject: [PATCH 21/21] STY: make specs --- nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py index 341f250a2c..292e6b398b 100644 --- a/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py +++ b/nipype/interfaces/ants/tests/test_auto_AntsJointFusion.py @@ -52,8 +52,8 @@ def test_AntsJointFusion_inputs(): hash_files=False, ), out_label_post_prob_name_format=dict( - requires=['out_label_fusion', - 'out_intensity_fusion_name_format'], ), + requires=['out_label_fusion', 'out_intensity_fusion_name_format'], + ), patch_metric=dict(argstr='-m %s', ), patch_radius=dict( argstr='-p %s',

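Both deprecated interfaces above (EPIDeWarp and EddyCorrect) only emit a
DeprecationWarning from __init__, so existing pipelines keep running while
being pointed at the replacements. For EddyCorrect the docstring names
nipype.interfaces.fsl.epi.Eddy; a minimal sketch of the replacement call, with
placeholder filenames (Eddy additionally needs a brain mask plus
acquisition-parameter, index and b-table files that eddy_correct never took):

>>> from nipype.interfaces.fsl import Eddy
>>> eddy = Eddy()
>>> eddy.inputs.in_file = 'diffusion.nii'
>>> eddy.inputs.in_mask = 'mask.nii'    # placeholder
>>> eddy.inputs.in_index = 'index.txt'  # placeholder
>>> eddy.inputs.in_acqp = 'acqp.txt'    # placeholder
>>> eddy.inputs.in_bvec = 'bvecs'       # placeholder
>>> eddy.inputs.in_bval = 'bvals'       # placeholder
>>> res = eddy.run()  # doctest: +SKIP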
zCt>BG?k5%0C9TyCZP$WJYac?+*l3})7?*#(`b<`P`dn~KCz&B0`MDl&8q4&IC`{Qf z4AdUWXr1hIl_me^kPA1sbIp^be_L7ncW>BiL4o|c$%5n}dxI&U-TMSwqYCAmiP|JR zZ+_htSHj+>D0z5saXddaI?lLf1pe~&u7g+F#NE6Tu&S6&jM&k6g8w#dX$yr_TQzMn z!rPsA*=S*zKI~IqCR+wb(7+4pft?KW_@oMJgPa=JZ1^$)bd0=Rn+Yiszlf#W66?|= z+8e;ZZz1ly>5}Nr$L(6MiI%X{p2-YJE|h$at6!dN8g1W|CATha?I~qY>Gs4!(W2qS z{8jb9sW$GRzz}^|*X6hL7s~9wGP3#>{-!(B)2V24+F?qylDtPwCrGrcu3*_P>PgQr ztxUNwA7e>F#TL4Ak-;;<6?zri;jux2!J_+irc>;gD|u_Jag}rw(COYH?anD7N@H}+ zOHZIHCweg`BvKffY#|nIygNf`wM^$@0W`#lba`gqXW-eI&3^A)>Z(H z`Ds&2`7MbrM}Jg96`yWd&6Xd?h@K&30ZwPGl6=B8%R_rX67o)u8??`lX#{d1n-Z>8 zSyPN8k!d!fzqT~8lqZ3nsCkp{BbI!+^{s=Pmi!8nKyj|Vkh^O?ExIWZx#HuAOJ@&n zE-I95KcgfRmuIHNT6?zt?KmHwe^>qAlKWNvJ3E(49}xWqS!L#WC-NucCof_}TxL9` z%oF*l|5RS&*ZPFcBD70LAM?(vAHbU6bRyQ|A*04E?R(6DMAiC-spb944Rw{V?g&Y; z&n^%z)xzdt@+yrrTDg9MM;-LAIj$yents8lOYTYvl*d2w9+PYK)S&!TR>?^Jz@>Dz zz_Xu(+4mo4Z-v_!c>Lgv&RwcU!*{Ydhgf+88)R1rto6R&FGvA8K<^6Pc*D;xi%0wQ zCR(p-ycYFytZ2D(>5kP)^&x3q?S60_y*=a4`J*wYL%_4G6%$6)n7B**=+?#5uM|3g z%1z40C_!k@b$)GmYcUc5uz)oO+~evFa!TI#NKh`LelA-_ zjPIyG!ztRO_CVL$6wINU#cZNcF1a0-CF^A(h+_<}`VVSfUfI8w50Fpn@ZXGaEu;VD z7LBd_-v*_ui0I~ax`<{lc0yN_H{R(^a6298#|NI(dkUdNu0YM{k(;J#rHosBL9Qb3 z_i@!%A1jIe*;j7RtIEKtsL&1j%MileQQ-{{BlwfT&p9~;-%+eg$U}1+W7A(W9(K8- z=DBeZe86ViYQ%W{U#;LO$9^9@^iz*lWBji{=3<30!mF>o%Et^@guu?&I3#DG{IfbV zW->fqz95oUQaNG!P4T27VsQbre!NY#q$fmsB)XFP2sAOH)O~$h)o?XFPy54|FiN0Ed^ZZUM@G{5wuxnE#B2$&x zkmvR!9m5Tmyj2b}Uj6jggIh1`8vO;kcJ14hsmp~3kHa00qmVyC2~l4ECQi641V$zO zU7b4tKgIlnjy<+hxV!La7Y$S^2c~}QPmyfV;VVjKXHEWTEc0sTHw@uFZ&EOLP>dfK z8-wgqsL1XxqdtY)Q;zF_iTi2 zV5S9@?q%zNKs^1huR4>#&wUH#pH*z=Y@jto0XDSx@B-8}Rx4%v6xR0YTkV=&&M#H| zJ6R&q8- znpUCK?cy{ain7YXdqxHz#{7>+J1Qk?(GN|6d%8!m|_P)n)`!nV#tt zkybk=*;}L}pWbc=R^EZw6=hU+Xv`vQ-VcVkL--ya&khC1aN1`=e2}AXJp>uk;wtq9 zIe#v$P&%4n0ro5(%%J!qNM|4<6MivX6mVZ(5oa{o8^XP|Qhan(d+JF$MyH6wr+MY! 
z*uT2XfY=Lh0D5dduZVxP+Cq*-U}I&8xkpW1ktZ@ouU}bIZTehHj zog3;9dQmH0ST{`TTJ|x=>_#LzcvQsjXJG>DRL67!E&G}w|4jR{6YP^1a?v6ZHI7CO z5l08(Kg+$W3K|Q}h}wr%MtTXIYpcw2EXI02J|1bzDcplUKIx|in}OCM_S9w;W4hCy zb+6A1&1IBga6-5kK_p*+{ARCpegOHI?!^qzum<(lX}%GAcUyTJbu2(Y|AZcU(ML3} zY(a9vDMI8%Vf09Hx@Lu2+vcvbJz!i~w)M@O24F_fLW($$o6egdZJ0pS>O!2y^y`BD z9Xz&QPN6AZHWeQ;tDWyI9m~=4g0(csXLUbtE8x;P!3>JSpO*mDWEam$tu23Y-mtSt&xzHl-us;#^n2Uxb4LdFxgxAL@^6$p~ z+Wbm}`Z=AoIbqSQ(rwVJ{(G*$^)Sdg;>U&0;#n-@5txE-_FJY40)g;EY}@)qHLx(a z^cWfsX=jNx5ly>9#Yl^JyDASy)Kfh1XPd~%2K3>fPjP!%L1r0#X?V%7BGa+10;ZofJj?N7XphS$fEb&OX!grdJqUL zK!89(5(qo{W{dfpmM0E8P9jLEhsjIRqq4-NIkHRqw^r& zrxC*!sde>X19NplV&`0n^t3Ql;a9FqV4ieJm3p_giR3B21(txL-AfP(SG-?XYy)uS+i4qF&(( zsp0L)xvo}_yud|5bV3X)JlZ?y5||6gOql|b?nE_N?G#>a)perXyb9g?2x#9h6|QS* znXM}Bdpl|6d|mrFNHf1N=d3@;a|1!=DP6aE&eSBKeE$J>?Bz!^SD@9qpkS!f|!fWgoLDk8iv zjBy2;7*gwtleUx5tUTRkFO62@r3w2Ph?2{CuVb$TaqQPV;zCpQnL`DYA~9%Q{RfMj zCt10wOl05an;JCFY0?Es0+N!tV%U6Z~B>(2k*#l$h zU;)PqFGC02@nN02M=m0o`bn~)VzN&14@9VG!vKRA2c1Yz6vrk0S0bR*cRM&eik9#&W&%vs+Ktc2z8_}*~JF%3H zwDmc`(>eawW(Qxg|LdEs#X2hQkq>ir*@l(JeCqAt4UNN%?{5N2Q6+Z*@txtYq^)sD z%#e?7Oe;!NT`a1+sH*4;e+P#O_^s=JyYQW`mhoQK#iR%G$IlC5A-Gfl+*Nd6C%fpi z12jGaNaI-CJ~)!n8dhg$xgW7}yL~6ztxswU~3?K zu~qTr2=bO^dwX)D=PT}Ge2jCX_86D$b;49!Ji`)_>+QzY)Bi32al?&y6C%uf11w>t z@on}_0T|TD)!rMUR!6Gkq?~x}-^|-Bm^b4BS3jEIdX9Cb_eHzT0*24K^Ol-~>Q1r3 z9&g`6e~=YxV3LdkUQWNGB{Gq>wr50c2_1FYp$Z5BFT3JGTXK6J&9kMWZFiqOp&eVS zbPYRH@F<@4S?b&tIel|e!twh39ci&;0a*_{?Bfm9UNZa9dGNu34T_oi$GP1JrJAek z<;72?r*KPk6aAxt&)uJ~wC5xT+CAT^>~Y9>o$d7%^Pk*;`^RW?IC*jhw%NS^*ii$2 z@fP-%4gBm*kUQd2xrJ$psXl8%7mwt@!irid(SZ)}$yz*fMJ02plRY&m$M6j0*)w?i%nz;u_**|%`)-#$ss%5FJr?$Uc zr`gkfj^3gHbf3o%RA*xB1}1AlzMWCsDD_$UBEtU!U+i&sNB}C$FnO^Gr6_@*&50!p zdQJr$!GGC6cDt?~6_O6oU~mGj2RYgTKH-Fd$>cm~EgG^>nB*|~u>$|mYo)7lTg@F~ z0Bh}ti8Mh_Ck|BsV93*TBB#k++=?dlqAIzuy3AG%T%Xr-*ikFCQpmMXelJEnhSw6( zuNFv?IT}V~^0*>rNPIf-Iv~S`nEKo*jg-J$>jpUYq04GO|EQ*UY5Wj%$Ojbg?ZhV{ zq+xd43tfo=Jw-T9;}9XfR=5Ds#jTr%(HN&xJsoLK4Q#9KqZ5+A%wVM ze@&ouXvp3Fn2#)mQ8rjiPt_$4Voc)I7ko|bywI^p!wgiftHIqo>N%uGz_X)!jo6d- zg9DCRMlQRg_V#kI)qNq6tA#fO+;@DpZH_rVJCXOFj#;k*gE^6x_SY03#V4 zVCq_tloWVUK&)Zbgh#xKvE$fROz1^feD*DIu&EEYp? 
z&;fXE=55X$K6|5Xx$3DD^Z*G_&UI%=gDrh@(ULXN*26qBE(DsAyF6AFes)b$skHIl zQTkx0947@yXq-bDx%w0SfK*^hNTTWHHIDwPr(gQ)yR!TUxY{+-w{rWzasG{|n-?vD zf4Yxalzg%62VV=kYI>VI1;?TuQ0!^4gp9=RjwZhUTok?}S>hVLjG9deZmv1Fq!}K| zq!wok19xbNiAg4OiD6U7osNDpqtRW2DFq6O3JVOl-PHQ#O4Nk2915F8bUjLS-RzzL zuoU!Td!8icqWWVWgtX_6UO1UogATN|1}B;EWV(M}Pk(8x>Z|1?IhwlW=Fhcjw=qDa z(+2G$ZU|{D6jwQDqzy>mO3(udEBk+<(ih*5a#;59!5TIZPWkdN`ZY*i)I)p%t12R( zos_W^q)G^Xkf}dy(nc}FFHHnW>-5W>y~<7Dz)DIn289Y*K5eK4&U+*HHi9H+x-?=k z2dkgYgiCXmxfEq;zV7^OPiR6u8J70OHRiM_RO2Z|^|H+xGpoDk^c%abtj0IG>McxY zoDr`F9<(q#&&Lt^wJ-dtW`Nh4dm@alQtxT-1zh{KNa)qd7SB6~zsA8ykpp?wQ-K;f zW8UdUS?|VdvzMQ(AhX5nMR881n7xzihc^%7*PpFCa({@n9g6_=*KNQg0!=9yLh@P9 zau4pW{gp=Dx2sCwA`bVHy}A`1C7b&eO!zca`~~pa&J_icK9&4<;kIvg#vd4GwK`fG zZL-A+m5ZthNq&6rU`zG@s1YJ-Iy_w`T+=?a;Bc9BX{LT$bHHh-aenL9B2vs73-!G?8!FyiGOzW1kuS?gy+^l zGGjTp|DZjuIVch1%$Vs0J_=qUrQ5MYlWfs`{Z<)(!MVBmY>vziK`CAAQfA1B>CiwO zl-7*L>{C>M9Dp*@u{Gw=QwJANv+bWVdJ&|oUd(CmwDl{B&GZQ)Xa0F#XZC?|9Vjq= zJ*rN*tNzqxU(?)1l39O%ruJ6n#P`L#JUUxt+3{O^%a|bAFSOQ|-mxx(iY&)&CCFAN zV!>d652!WYmsTZPEDg1w$3p9u@}O@n4YvZoVDmOiDl#kN%_||4;^?uVKj9^aDL>G- zUbd=FR+LeiQOkFewGFpjpE@}ALHyVrIZsE>P?A(aYkKAkqd?7e{B?v1VLMb``a; z7FkS=RKgFDhk|D+1*O<0dMx-}z;(gV%+jsRzMUl_1o(nZG3l{RECDZSpFtp#BBKe2 zP_fNuA?v%IYhQH0hIO8G%8l}8u;12*#*84vLlv4UeJ?>=#5DB95{67G?WEpet2L^e zlAE<0o6idr>TUIN9F3MRYu!f$)pIj-Cw+Ydh}xB}xT2n2c)-yIK2@C@wnRQW2SqiOx}|-uJ0t_H1fQ zLwZmp0_tY{TNcc(R@25IvlRN21Gq50AP9u=adGEH+zHOvZ5xrwSg|1`dYy_;`d9;I z5A*IV_7u`Q#M3rTLmxPYM}UV-MEH~y7qWhOQ+u4h6Hl?Xr)JS_`)<#1O7{M*kSjP7=DA>dK(vecFe#H$RQo2?P2eb~Soq4A{@h1@ zbPT*`LEc|GDfs3)%)n2{jSHFJ1bdPDLDvd7iO4)T>VgDoN?&kS9<-@>(R4?g-ISk@n|z!C)Mb|g1V+5d z5)TXdbmX)Pofu{yz>G)nnC~RwHWyT!>5d%!ptuEtj0T?GG{Xu>JZl2#h;Fg)BY`(Ma9WyIKe|^kNyxjeG*;x$*WB*9Kr4-42!CpSYI`DR3kn2CJ5i} zu~7sjFf!5>8XZ=j#VN1ZM4WXyhyP^D7Rhy>M92wlDigBs%85{-dQ#0466aWIVrj zyJySKzwulo#teUUS6-!LVNv>uwIl6vD4mBCK1_62@Pz(qusV*iHMmA}G@X<3c)}AH zvCg|nB4_LkmO6sS7D3rb3xmPmbr%nNW=55CXM61DRO-a+X7SP0W~J<1!@t#rs3I7< zAA(s$r+7AC2{rq*&-MxtDKeVhjJzx_=~lP5FI2vBjblg)NOL9MQZt|JF;^Nw7)U;< z?dbh|*l)GCdRaDGd%E?!Id-9ZYh1xrO6r)6C18PT1-tne4`|eXMkX9zrWbW_6x!Vx z(7AN!y5|K~bN@+?n0fszA&;yKTfp%##9Ej?SS&$Cr1gU0Z&44+$UoV{B}RU=ZH3?R zy$o$IymQHi%^Y4@dNd)o8a%;?|0s&8ZtnMa0qe?y$|`?J36MF`B7z2=SUEkEo7 zQS0Nr*mzmc+b#I+15^H)e!Qe!4!iJ0(VUW+6rlvtU`P5^H5eEAs-Ew-ah6%{D}}A;J?u53%q1Kgx~nTFYtn3V`~xh{XJFoxkA#?Z6anyL{{hbe!9Z@TX7CLwx0h)~zd3Li4na+}* zfV%?&`Fl@Tj$>Mk_rvmxZ$SNT)H|zA+jY7gpv&c%8R?6^dnTC&eXg_Uw6+lNFSLO& zkC``h9b*!7BrKwh4sPfl0Tp5)#1IRQ2-SPAx@mKMdTlklO7B^16d|omb^pF8siT)Z z{L9paK&$cC)o{u$2@A{tUlhB`OVc097p%u9$GmwHm^={%T*i?oTXs8v%`U;8i3q_~ zooT9|DV3bZ6&Yi4j9{yCYT6&H>WVmw|0TKaBQTJBkslAqGUmvTwZ1Mq}`Hd-v4w zQZ4AFj2D_IYom^-^gosN>Y~SNs}Cf*FodJyu?b`A!ZOXTXY7_!wPB!$Yf;;#lHu)+ z41aaF|DC9?m9>+f?~YozDB{)}`5fVLr9=08)E~xW{CV8ioO6|;zc%phm^aA!izkVT z*=fdB#{pFn-YUBT7wGNLmoge;gfv5nQ(ZLbUmKw1wT8|XzaylOoO6d}Y7}tm82QBg zJ~iLSixTqq6Rv{RUQ8$c;VfRk=cOsd)NECKva!JLiEH3yv&w8O)@f6JHp+EN!Kl?K zbxKX*p78TUKUlq)_aFEvN@9`#nZ6WJ?a4Jaw!H|F)c2Ut_P^*4kCsgc=WI!BXaGHq zKV4Tg%%}*;=Kh>evR9_KF}4@cBDInZBaZ9lKM$tSn%^$IUNH}-1XgTIWo61yOpRYB z;0)0YCt1^~|HOFpIS%~YYH+GlQ*oddL+p;T> zQl+Sa!J7~wWV}9G0b6d|r+7{?speqc*HC?t2qMBSY>J-F7#iUJ;Ho@KJJI~l_JQ#D z*k-p^gUr9%I{4Q+jrG)Krq{OX@#%pSy8^~+;wEpvUN?WJY0t2&ik9dhJ>9*-o@g|v zhC9oYSqm6A#s%t%!QYsC4G`H<=%QR&#pf?WE0H(C4n%tZ<3rmvX(xYmeUCB`{$)Zb z*0VWIG=cs?y4CITMHpMuI29Dt*U{1^Mj~{ILXh7({St`Kz4$IE77q_8XjdkHz{LmJ z+M-4+m~5aH>pK9i%oybbPLBl(A_EkWQvg)t`q`6xH#7QV-^P@})n_W|wWC3un{+~4 zIl<-IaSB;Gu2ePr6@J4!UH>wpSTQGj!h;e-O^GjjG(Ts~Dg)W{;l&$T88kLbA6iyJ zS#(kd*YdrbqA%4$G@*EGV{km{kV456V zl__Vh0Xmv1unmpFXfp`kegap+gkL0N>{+mcNlRpb%C~Bd2P=)5uD+Otkn5fUWqGJs 
zKf?-Jx4885)BE6o5BGuQsN~)FzliNUts#(ZOBONblPsie-~srGjge77G(&>Fi@NEW z^SnyMGj}=MGh$F!N=9L!Ra;x-FWlC3R|1 zk1#43T9tlxd49|z$+TZ7M0@!hnFch)a-I*4T2K)hur?Rh!!I=?a2eSQvfU|#-f z^RO&(`-)+}51x{$$AYcG!27|y4c{yTTH1PkPCkC?iu|Y1djChtv_56UQgLrX^7_G9 zvGC|*`@yY|MSl+HK)LatVHbDme2RC^p1J$$b^BBrg--eGB-vbxU{N&Awj@-SIsD$Xe5 zTCt7erY}HK7)4wX1z1a{KD&9C=?sBDnHs$Gj&TJvCN%$Jlcp-oH%h7PxYkn~$d`WS zbh1Y0FkU5xho}+XC(@o+W6hiH;+(v*ShkgdszKcASWw7${f+aypeZJzCBJ>bGm-2} zZQedV>?==%Sl22Bx|A(Wd)HG-)?Gzo^@}rM{`P{RXf=-Qzdo&NCE+JT&_4pUO+zBu zKOd@{`f&xBj2ip|E|cg-i$jWz-@5TeTFRTbmUO|spYjES#<u4us~cHsFhzp^!RG4i&vcCM)fGuF3@RqyxluAE+XdantT4}eyDQ; z-Q?HSo7pB1*lyx8fcP#G8Fp=S(o)BgY7t?qc|I_%bBE%ZU4Kkc$#i>RiL5~)TSh4Q zEuN%_Y>wG!27znS!cyZOuNQlS>@h&a3z12w)F8si;+)mklZ?8j16Q3Rxf+wWe`444 z$CRaGlchSc9--BkZ#T_#(J)gwj|7?(AUFnU|qlqi3Qr&sexvu5A zn|M>@a?y<6qdjYm3I-c&-RfOM?7q!QSu+){i1Q`<%*3|5U6g-9&S~eo#*JLBAtog( zoM3DJn6#UUtS<9dkZj2XL3j`y7VSJsoorROZnoB6)9n)3wdhRSRw?fA=!^N2m4|PQ zMsabHpL=K07Obh7e#IU!#6yP`C8m@2lBYu0aIC~r`(jdKGYTemS0_|4Irc`-@pgGn zBn*G8zhyWy4%~AEtI+U(E#i>Vb5HoSY^lvd-xqs+BO;34AC3Uzq8Xt|>wvfC(@inP z!aB50w`dazS|+GYS5e78C;WkGjPSJjV#;BwthtD#p}n`to_E&I;7PSVh5oFU`}~HS zs%5D_wF|!5Do?+x7vLy|m??{haA+{9y6f4J3`{9KiFkP?T&~G8&NMAx0@tXk$Qn#z zcj%D+sPwT5L>=Q-IQgJ&=#RXwxIRpWwz}(|8IvQa46g)eqcVE$Y|~5~U;viu{dqv( zIRIng!obe)4y~E``p4`Ig~r*>ZH<32Yzd$20DL#%)_WbX(_Vq^)u`lrFfTw9F5&GK zJ9I!>+&mQ7ds`f;h@&sB?z`Cz$eQ=jRz06?dQL)`m@KNYkaqEqJ3a-lY%5bq*m#pp z-XG4y_205^c_ z0b#u-`Lm;k_tK5N&W>J~NZhE3iOy+A$=}V~YMEa>O++K5tYcz7)M7fF2exZ_M(_vX zA1Ns@l}6y_O|K8!JzEhaL-4Q1m1g0lPdeeT8q)=`-NWI91e0B#$419SFO;5vXElYO z!9Oc0!om7W_m0R58x*Mr*^uvq4l7%|46K%)#V{LdED>fO+ZgRO zyRv+X4YS@=o6d%0dlcJvNg+!&MzdaI{?R5wB_piLeer|2A?olEIGZKF6%9X6!-W+V z+pe@#5pv5tRi&fLiRCr8ujilFaGpP1{MjMZ_9Lw?5pgBgtm10N-Z%?oM-}Af8#;1#1UBUUcSxp)`sbh!HR&q2T@APEnd&LDzsoI`GcXLnpP25zuC7V$9vM#}3j_sJb*epQ~B=TmOtqcqgd_Faei zq^M)n%sk`Enw6+zz+TZr1hVpj_|r`}9u8P(8WgO@{F)i2b)B$a`Z1ZHkvw<3M|6uv zC=l+Ww0FX9e4GQpPnNVY##@U3eiM=r1Xabw6-jmVnU|uGF%Y17>&Eb8HCV{jp6<9B zPvE;*Y9j6ZUeiXqU4ktf>vE=~pzDKK-isei#h?LM2&ToIqO9U}4Ovr8g^bUP3=zDV zJlVNaY6I{!qNTj#wDRrHI8Ls*+ggEZw@J~(#c}d&;M+BZC%y{C4QXEx0Rk3)TE!C< zyi@p{pzJzr!9F7-$f|{afmW_S>1r5+reQgc0>#py?Tvtwsa@s2Dl2CWbZX}q!l;x| z{MItE&AgpnkO9vh@kuvqkt3>EL-rR(uou;}Rs-yoGYfJ4& z=Rjj;ky`3hW75HqA3!r6PK!?Ng8YL`1J}(5`pUY2;HG%Z+%dE_y>pz>Na-Jp^Nzg6(9MwZQd!3W3j#Vmb+7T2FU03g- zw(d(j%LpL^)C_lh^oOf;KghCfQ*;=!wn56w}g*hd&@Op|?EtM1L&SQWBS=e8C^*-B^a zpVWOiwbcH|_*X0{P+Lu)K<}Oda&+g;6o?Oq)tO6%=T!C#nti_44X$hOxdFWED)`Rvu!#twzlI zSYrBI`D$-+q}z=vnF5EhSeO8=?qR0DLw4?`8Bg;r_|Lt)*ZM2k0AzaWxY8B2tI#)C zS0`3^xVR;%z_+nmOQs^~Nwc)<83;^Nt7hgzb#{SVTk0IY_lm0dlwOLk48eGTN5w~M zvlK&?V#1`KAjra1q$W2ey5e9+4?Nc$C9vpBr{3S20OXL`w$9eKhMMU~ovbI{B0t$I z@qS#}Q|UN(O_Vvg)ALRR`UxJ^ZxDjO;thcBkgS)YFLZyh`cM1^`@5U`SO4GZ|N9#M z>t?9B`^=KHP-6FsRc-H{CT4JWywG9T$xUT0xVz4I*$+(E7RrVL5)Qt4m>HZ6@{tsq zG0H^S!hCHI(De>`Ag?E%(7w~I9r4>lhQiR}wfWfwx%xigVWJ9R+r<|#gkCWeJNu{< zQ(fH$tC*x-ObAeJimI6$oOr;df-8skK2g{w&?U>X&f35no>MOnK6ZvH_Triw)-PRr z1KyZH2(IH1M?V8*LgG4t;_^&mw7*f=dY*KouBFr{<*=;+gpk_Pxqi}L+_N$eD9e-a zr*qDKB#S#U^VCT4an6mK%@I2g-{GVUzKg?uo)J`h5}S+V4YKIvf4#LjzBRMjGYww`Tp9QxYz{ek{Uq6N-xxdzr$ zd)))%4M#ONi`&*z%x^s!Xc_d^4H84t;l0siD7RzD1Y7RkFV+$NCAieiZ}bF%3GY%) z*Od$c0`6F(nf@4jMVAMb*ui|TIVC%4zx(XWg7`kcb#zToM>9X|2eP zv{>2i=c?1$1bDda)II_5%)>gHY4cY`Moxu;xYEjVaziU?|V`3;3dlI@J$0b>LKoRZ=$A1$xhtvpR>LUVaEyrE2_~) zk;B>t7+ioJgd+ZADE#;nbiCh5Jxly%Ex)s=R+btB@Ni=0oH%~9dzkP=gV7uinaic` zI}+I(alD?hnnQm*?eDtwuo#`Qis)GN)~K#UNKoJU59SO6wMo*SB$$0JO&FizpWE^% z8p*~EiliKkF7z5(91WB5DfBk4Vxt zZ`QF#4nJ)sPV(Fl+PT!)2=PLDTN-M)YzYt?WJA>5u zX8(m-v44)uVm}1V4o5b;Zm}2VUOVpJ*Olw}S7T^6{oOPM=IfUIPQ(3Qi%VkfTX)_L 
zd8u)ln6@x+Sl^xeZdxcjjhh;jU^i=q@%x8vy0cIz#1M$WvrF{XWyj`veVBRoxb!*R8)nwDvE(JEITgwN~c74N83J zY&pV35LlyQTH{ur7QaQCZ6$W~ezhGJt2VlXWB z(HP_pGtXjM>;*qOqp_U>ONb9YItN-;GtA9p#`f zVY!W-476XJ{j56RXO!hElgo=Vb04(b5xU~N@_OYth_>_RIO-5yRZu;bnbET?FqP=# z!8XvU8#ufpweh-5Db>jdOfQ7WwmOTu{G4MhRgmc35~BTeiuLf6-|z?j6|SyRp#{Zs z8c)s()k$R zkY)%uGD4sRpF0Sn)MH2ZO)JTd^he=!lhmg>+%c!YJ;h~6H^17VdCDDY^V}1@0agDu zCDL2I(A)%wZ4tcomy|YhLLuAgKfvw*$B|cTp!3Qo5;9i=&HEQSZQ?o<>l;tGzyNsG z-K)M}!ey*U1}}ck%&Xcd>1dHaxH2bHseSETMFg1CZHq!-+>Tq;vi6TMGK>~s%XzxMnVo(n~_u$5=XC-*ANRhF~gAdo2~6P24CW=TvAx}9&f-t$G~hb zDLdaiMVnQ>bN&~+WPWxDBkv^o6eAfAKfeqrhotu9o5M#U}oel3J2E%72Phg*vxx2E@WsI4u=9 z5yK&KTzrH1CvJCV4Omh+kQkVOA6u8fA-l5m^tZt9-##gItFtYBUt$iE8y%xH$sNBJ z&Af)vnin$g&Ke}QM$eX9fOB~~Z0oqNmwO@&g9EcZ6P+~Ah<9!|vO0@9O_=552t57v zC;jEvh%E4}$O8|kn+(z9|3-tBa(-q7i(0D)Ca}PP%#keWe4Zfwf5VStzI2XwFyD+$ zNfW1cnn^A6TwDES%B2t$v{PGc7a3Pjk_=zO|1E=V-X2~ZWxJ_|^-OoR&)NU$eOIJ# z6t)H_vP^;^ybhfl2DJBTg*>%Z9yMr0hbK?#6zjr3nD_V8U(3jU z9f@P`Dk&5o<5xJr5z zJqTYg%cKmKxgDd7%$gKOb{Sv&5(zJJZ!seo*~zTm+Axj{J-JFScXN)}@wI7XNPw9B zlsg@~C3nv-fLHW+*ItW3cQjG1B8ke1t%}2pBuS3Lj0e-l`%VQ47fbJ%Aty{?Kc}tu zK+DJEQ1@J0Ja4QHMH&m7olTR8Iw|ZeIy7zONTxvs>?VFX#;4Crm-eNymhcG|LWWx{ zb$gSniW+pYUNrBsr?Gbqk4LEVgLF}p>+#p~@!gmt>7s`bzS^r!)&~3VdP>oKAojls1mL}6IrSde6&CBJ$GM#i#~GV}<{iBLl>*@5 zbcT)4XP_9UFskU9R{i39zCwTXsrObs6vn|kF2z}Kgi8s>n)jX5V3kjPg<|Rp=YgxshjL|pnY+8lH?$=(w4TO?@sm5^-jfPu&E6N4kEI}dHnf`$6vNvcr{q}qxfcANJwO!XXo zzzy+{4z^sjPR#L3eq#wBeE97y-9b~E4&5A%aN1>W2Gn?{XJp7@B&%)=Rym(Yg3?A? z57oW2+>P!T+~vT&+iLcR$uiEDQft{Y_KFe(2Bd_*uaG&P?WaQ6Kcba(V7;myiIzz& z)Y5fk%IMHS_pUqBlX0ce9~2eFKVn+V13NKH()LS1HUH4`zr~lFu)v*@KZ`2g z-ubhp-s{VdBI5d_kaQRKI)cupBF?5Ot3agt(Uqi@|X z>?brve1a=hkv}(a_vW*ZclQBZF(Axmi%k(~)>$a;b_;vuj`+y&%0_suVc*tcWG zswyQ|GURseBC=TSDC6EU_@AzE^ZMt)DCLJ*%EYK>3-!+yj!?0gWwTu9>z3_-OYW7h zf+A^BgvZ$5AxXAg>Fj8AXXigakJd?4mtBOLzLAR%S%5*qXbLGsy0Tx)5}B}M$P-W* z7$n>D^)NAes!E`yVV_3IdN4E?&Zy!?eLUzv*&^^0~niOyN|JPa;FnlL7;(o z4*xgQGKYHTlWDeCsd_nbX>=Q4 z?)`2)AG?L@EBfAWoJjQJB*Rq_&c7EfMS2v^C!gkS#<=V){$|2i67yr(<1Grt`fDFZ{ir zGt=CxcPI?Ntv^zIq!Mu4uB)4WziYeC_FywKDAc#FXK#E7Zj)8+@yL+0 z4`6+X-#VkA=HONaR*^FIY$K=4H~*kezgV^RYrs<*;pQi_u{2_h2C|NPyfR&{GLpyn zB+bK!FhBKM)kDDTZ$a)6aq?97P&6!XMg}3=6Y8p8~0M z=#2W6B}g<};2;W00Y&m9@8Yi%eLZ=u?aSI5Q4j6m&9+9SH;gi<-9oi*Id< z$(^mBmHly!?ZM@AaT7@^tHhvC4vs+HmN5l1`1v9=>aWa!Rs4l4)Y;L`PrIeh$$71b z0Fd2$v7UGGhVJk{&!quU17%>ESmCpgahT)o=p%qB)1HolClCCH#XBvi)Y~0P#|^~6 z3S3{iG{D=UjP^BAmuEVbIqXfTJQQ+@mtrdM>Yz-rHBK3<)nspoOioZES@=@NVJ}D8 z^>cF`;!^Ng2b1V^fbLe6Tz&lE0eF)2dc>LnI#W3}jn?R5M;lcT-?1;Yb+z|hc;pPr zAy9F>l z^&dX|jX}QqS=z^bVW2?d+MnR56upsKQKANt>)GPk?#z6End+_u4kHN=r+GgTa&&GK z$yaFbm2>``_ARRXCcosXoAt<(W0m}h=9Ri&Wu|>r`xM5Mnza5KWIrq{!xJJ*-s@iH zInBrkWLb7_gWcVO3kLmt&NETi%QMjpZb&Xl+mq4`Fr;qKJvoI=p*Ay7jf-sZnR_*h z?pi4!5oPnmtI`|842dn0Nc-18z+X9Ijkadn*J}onpnS?m;T|;0gr=62mO_cyqg**? 
zdb7%rF$kpF1FUP)NKE3BUto#WFe~~$9>xxG|BH6mP@+nrUur%7)htuA zFj!V<>HTy*}V?5sGmENLCpe=<6;H!iR;9@ zVcLBQFrgAPbV2D*I}Q9Mb*KOIhbGAN+eW}07CrZ8eG%Cr3A$VyOCG!AfIFoH;Ev_o zz~~ASid6S^wYT4*R|J0zJqTBghzn!Ieeo%BJreQE`9XXy?+$)$v9np_F63lkd8G|V zD|0s+F)J?pYh+|ie8TKP>AAq#OF?%Y+Z!C@n=&B+1}B-&uTaO zgRDZr5(ilsL}l`(8JrtJvsqS;92hfe5=e56{1$(#S@{~}u+y71_lIK3`^H8kAdc=?K;7dzkieo;)LgT=VKaymg+3 zAsT-Q`$>7ur(l)$u8 zcjw%I8z275a{dz|E%^fEy6UMA<=YnI8ay(Ad#wh%a#Bw*D?ngiqW(42tBoh84^T2zqO7Aw0S7qwb2_)9&C!=nQ|9n&H;@+7DzNoJ(5H)JOn zq`n>RVZ7;13k4tcT8{MemTU7KK zaak+B8kTvQ-(_kPqT#Nv!`|~`MwUI{04{yiDB<~yPSn3YL!yJTSd{OFJa#b^r5-EY z^SQ+gRF+hP!WwNKDcTmspyo%aPm&oE;NSZAZxVX=YWRAypW51nAP*EJO(NOgI*70r zK+{DdM;DYN=UH0a2iCyN9@U|{CqHu+Rc)LaC}(_D7Tk9g45Fj6ACmH7F!xS(qtYiB zC`&!rMvA#Dq|Cmf&-gM#iTrRLb95lKTG~ZXe+AHJobMdmzmp%eI4Xn9bQEc3Mhmq{ za#ZOIbIUa7^-CDjMj~%&OmocMI=xAgUa*2j(E|30K~18W8H;`3ok4RwiO3s~XsL`0 z>$;CFF+NQAFxi99;Gp$;veLEj(@r6e%OIc@$`hod8dcWar3^kQINSP-YWt+xm@4@* z1ay>(Sa>~3hHmEG3A8IV+fHBk<&W(4OBk`cW5EZk|FrXhD>=boaL*6I`M%;`Zb8Q* z(aq%i&xbd-hMpO0bD>dtAs^+Sq0sv%-c3f*r(kA|sXo42wSisetO_PF1=Bhla^$(u#A%Z=bxF!)0z2YbwKV*tou3*+B}=$pW-)Q3Fa(e}HOWi1HTjZ=`oWUob=mLh#QCsk>5XBSYh& zr+;9*D7PE)dn91%J_qGf+a_X1?v9Cc>$_n!RSq<55~ms1X4XXByg#{0fVPZqz{!#- z^}g(I+njd=nDWW`?#UjXahzX_e?G%{t9Oy>A$zzW-#=5#{uY8_qespbx-p(1v5tV7 z_@TVHh6ad7%zeK8cV{*sCI{B4fM+CdC&`lg|S@y^1E5D zoyxUl4b7v&ApsMKh2(L?cl9FLtqe_kAH-M$-vAD0(X-I6Q{om++`Uspe(kIuJy=rw zl4I{EF0V&?`)Dww<{dYzMB=wu)3_MKjGi>Ll`~-2$Gy${c0uab&F+^IH4D$t%91bf z9Hsmlp|$;NPcd8rr;S1Hb;i8g(yV49>&<#sKxj~5MYcBl-}9e8eMRJf9Y>Km%~hn* zgsp}n+=w+~Yk4}`C7MZ-@6v;Ej>412Waed6Q&1+xY1jL{);o{48{WTR1@i>vXv(kF ztJuZTUX<_8#ypeE!=IiU=(YV~lccO6cAaARooU!vV%?6DQ*Ma_HRTX~W%Zdz@!_%I zKBJk9HM)Jh?i@TUSnWPtmA=l0`Dk)*dvU>r`6)L4SR2rG6tuo_z~N#y6-`G`8iV3< zR|N0pwl$isTfd0?&~kLym^X;nXelc5o{2K56gOW?4oP#iXtxYZaBTY<64(6l-}eT& zU}VK2J{h81JTss4RhOuh@7AJOZFC1v6&!l#`R`ea@Uf1+JUcySu5!&ohy>_Lqss2u z`AgIqSlkNv?hImwue?UA%7=Pu$awleM7h;*zv-oRjwGi2k(UU?FXqCl3!Nc6L?(Am z{I6d6hwdZaL8aIb-|fmf5EHkYIzjg#dshLG_aUDMpVDnXNxvO|__klx>z_Oz4)v56 zcBzle?fxBa12$?ISAAW(v?bV-B^0e+p70+636Bsc;zI=TnP}JcT%0L+r{akb&VIST z{cDhI{0d?0Dt2rI`mlW=1)~)y{IDWJtop61tTAz+x3g7I0umq)93d`SoBJE*d$S9$ zt^`EaRPRV6GJaY(s^A3-SXh|ICM<*wi`P}T+L8F)kJ!_rJG?45X!%5=lI5VDsUp!T z<47$1@hVQ-RM0`bOEiydtUIk%Z#w*Kezn55v{`)IsDRV0nz>B1<5y+Dt{g>PIvS0ya*m$N_RK^o4;S5jSgJ#y4i4z)+N(x5rve< zw3ZPdt6bWO7T-`xyBp8gl=}~`B3y&9bs9kYo6`)`K)YaNjq}OWnre&qs5sH<;9hH& zu@oysUg-*x{O z{~H5ZXWLjjL^F3GF@_PwSQrk@~%za>r@kTBTa4aIl>=u z1kjn=I;zofYQy4Xl30wI_5<#?pf)w>9jc@~OzEaohVn4iQbcg5$!kYe(s-JqK9a4+ zCv|VB5eeO#_(j&L9UdCQ2FI$83c9Uku@q1^=X1qHgF-bQu;3si0t@r+(!e?kko}v- z`LnXkFqg_i+@v>;&{giak?TdnLpD&$6yiLw8+D!l+U0(Gyn1{rt z4PquLTx{2Bh0nK7j$K9u@-TJ9KPV{FRwnlcXGSY-@JwwWvaIp6alW6XHgb8Bf-8R( zIYO#a)v}e>arA>Z4+p4>*dohwLkYBZsv8E`g zc00NP_tt*!aZgOy!2VyL-+BLvSsFy_N#01>KKu!vo0*O|5n78f)Z$gpad>uvR_j_S zkfSF;e{{qOANfXhUOq5Va+2qOeBbDKU%5|!D+Gnk=sD}S;89bXaqkmID||a)c>3>t zo(W`0n7j5FDk5w`$JE5s;@2Ga^=ONb3VCj$)|;pmiIL9JYjC(MS$O@oM6-n+Ev)kX z)*`H+ZS7boIEAQsq}-oa_gCH8Q%4$_Bab^ff$25!s6z+}Qdm1n4iOpV{0|U%bHyCD z-pIukou1k~j&P9`ve<)4y2uWvgS^~(3%kC4MZpTA^=#E99quEMNtc7}J$l{=BU&<< zQIGmf2q4D6f~&+KXTuqfYz z^o$wO+?y+>bv%oTzRzuO9ZEO@3bO`%nWr{!S7PrUdCWyx4qv@#*2L|KT&;OZd!XTz z;ytq9TghX{50JEm141;&!5o5qzgrK_6>8h%g-_ZC`+9SO)0^DgG|EBfosH=6e5Uzg z2Kn-_EJZgCoM<;G?G_~yPLE!G6P!tT75unAbkMNHgQJ&^G#<#Gy^sY;kf@U@dV8(4 zQ;q&1UfZ0f5+wI6vQ5rgF9mrl=B-751m0<(JWS4po$afIY-F7z{>8mQhkn=k-=euy z!dGKEyaM=KSSlUzBw9S;Sh^_z%u!l-EAX5^X8OHKi5_?baKWj4i+GqNc7@ zI&3qx3RYGM$!U{dGsUqN5DxaV;yaeYp|N*R%)n&TK2rxGym+eU)@c@VLq`%29RP zn5>&5cZ$4+WqSthQn6~=aNCPRi+Au6FW;wLL|2#f!o~r|3VS2ug7*@2X91$FEwv%n 
z%%c}KhzO3m8&w*r^)1H%ORsL;K|I{QiOGW~-bcRNA(c)wCfs9CS+;E&z6WvT5%l6E zgvnoinQAkv(B#^nqE%Vs7o2ulwz{LA{1BUdIZK%1Xsa8fPr0oXMI12fiSd(jf*j4C z^#?G9vX;}##_B{S6~3sBE!evVJo&o1{3UiY&d@j3N1!1qwPg5EXZB*bpj~(IXr_-8 zb|*bWGiRqlf%xfZ5b=@A2IsZ*xOA(sc{cIXi;-VJY{(uf^>dZ7+0WH=$|A}4@uGfL z)C)doms*8g-y(8SbA^CQA(>+l)sQ$C1lyB!kt(Qowb;$(%`;`vrKl>ARtGVb8^l%C z0Y(UDR@Vo27Z;L_+xR#eRt&=M{sBC0EIpE!L{|<4iKfM2=fpcIVd}B^DJt9dTZrOU zYMUAuC(9(s)cQ5ChfTIJHdh(8wN{xv;B{!NhmT0vi~D;vatE0f#O~c7*V5#?6b`L& z?iGTvSe=HSox8lRwk{0SB!+S_FWpWTfT!nrsJ^Qx2ENHop7_|y4p-RQwa<99<=0<- z#ODej+{b|uRhu+6FE%jPO?srP3= zzX;B@ru?z60bkk;Q~$#yb1mwA4OxGBB;IN!=P)P~_`Dz~#!w}IrJkJdQ_dgQ{3(YA z2xa-mimz0NO8OYgU-zO~X}1z>>*D--XoNs|6x@e@+&f@JjJd#NV8sCFKq)UIHJkS^BA#A z(oSP%;DKiyE|d+GJvONU;pC!i0tZw-t4e7MD+;^AuOv*V$4B-R>F->X?ExHzGEieM zu9k8*#@2Rl?mEOYLGJVi5MeRhx76I_8e@>f#QDeWzje1`G^-N*Te4lCJM^0eA;)`W zxMBD9A#<;ERRdA>>XQF=3kjZDkwVna=`o!r6V{JQLHT?$WRG`Av0 zEye)viVk9yn31Btnxb76eh?oqAMtsL_Cr)2oQu#6O^ZS2N3hWF}q8C&UDzn z-q)Rn9uXAd&f)3!R~?*&4-?F`rt1!S{tMNU^pPIIoH2$#Fc7b(C2*|>=!ZF%zbXx3|Nvp!MSeuMSv-}N?%3l9W zxl<|b$bT~5+g6U;HuSL7=l?`KjIHzngcYXY3qwB{(vj`{_eXW_aCM2re{(`lg=mTZ#qfU1>l z2j*eYoRz8kBdK=#zP5Z~1H0mbhnW73rnnxIsPnGc$xLK0W2~YG*=X}VHN3)|)5pb@ zI<2U~9EOUE;%){sdFfj$DQ84J;XLd6GAV-_&ke*6^I(R%QVI( z9%}^sRi)f0xcS@j0|5iJBl)F<=iuMmPnA)WgS>z$P~ZU5ygfBZL15gmnU|TlmVe=V zl9PqgwMd++@g2_IJvoV;qHHGQFV1KdS^#e(rP+i0ZXfSS0n3*JO#sAvh}-M;Ot5e3 z@E;n?ngorcbZUFH+iqKA;4Iv-f$yMf{ge`$I|`k%jg9-K4eaM}dyt&>o65?z(<$-{j(mlb;yFPswSF}0 zYvzAzUbT!g6H`b~$fR7K^4ARvi&w}~nktyBfI3-sRL?gGjPRv7Gn(&p%j*t~m_HqH z_*XF}iynz_}gCl8g%5qM1Iqj_(KH@jda3!qkSJK zHlmG<{J!p2BMU{DBb$W#M>LzXg%y$ADN`o-53qk*Kj_jxjj#EfDYu%RqiT#r-7C*a!gzr9&D#`^m1pnaJte0FfR!$6^ z(qzc_9iF9L?c0;*ex>o8P)X ze*(He5H5dey@j0iSO!4-?*=p+f;_wz$0*L!Q=5tzqXvca@s_37n? zTMQu;f->))#eZwm8-u>G4B>t2)v!fjh*=b|+K>SaYmPZaP`QB?tZ&V?MSnVBYe;A+L@l)N;wZu29@mQN!4^n_4z0qg%jHw zb3FX*VFk7UQXBJi5#B&Vs@28x+Nn*u$66`S*fB&=ErA7hw>t96skTeXlZB9KPSriL_gA8y?wRxg6!FsgY0Fh@ zLNug|#T7B$v)*W4_?wZpCOGzAC){9TJ2k3eHlbpI#%Vz1=j%$}X)Qb!h*mOcZ%AXA zJksyF@-q0f2JU5w&%a{UE#e=r1zWyrb?0&O@-W($dmHh`rH!XTO>98%9@ZXg-}0roO{oobBu8;PXUZR`zQAYt_wSxVKs9q zMhX<~cKmXxz^3O?^XH16zC2nMOYcPhv7$tR-22!pD?#sLKey+m+zfv|%zXNN1)p!Q zJzx9)rS1;avtItMIB{l2C@A02*_p@f=h{pFur1tRt*o7`CtNg)HQ69$L*!#+oLQ5G zhA=7&E8CERydVp}^IyD<`hqrd2*5odCn6YF0KwPa z%hb-Zs#D%wDsL$=Z0r`nP+kTD;aR1?d#?X;WzRPJR+ zEp&3q`|=_aO(mOkE$^7AU)%jRT;2&7I@-Ld=+lSmuu$fZ(T`hz~7p`HK^r$<^<{yqUEy@sb9nL5Xf_ z0ra@b zl|6m~(|kz@IW>!tI1QI#pAAncdCyK9sUJ%{@VPz{j&lc^a}5+{NcGCtOV6o!iui(1 zHux+0yNukHI_ahV`aoWsymx8gOJoOOA`r6brS@PC4zla^Mj81HOz6q!#oF|kG5H$P zs|GC4CmTo&d;+{T`14_Vv@jwSSw&%a!-vf-wS!|E$`(T2;%`Jb1oL3+eYvlkIOVpB ztOLGrAlQ7aD_mJ!EUJJC?i3484;i{=dmFA_Loc=%Cm|eqI72qD!9HTVzt?hd&yTzhC<01kvXa$2e? z#(Elntu8SAn==dL!oHUOFfGrPV#C*Q#%A!@%0FZz$lZCpx^STv`(Xe24;{-+gK0@p zC~)K7mNxtD%;t&Qlx&U5Z%vXu6#L{6WM-y|5fprZMpLh`fLzs!9xKi;WQXgLNVCf_ zC0g6waeNSh_4LR;a3C=;=369Md$f3J$?#2Q3i0}9=SX>C_6eP_mEB@ln=|3bMS^5I z8L4PY+ux!}$2>~Cou?&>D7IuBUoYpa(7j!WHrtX#83k@5g7d5R6c(Fx4?)?32SnIL z>$YHX1zrShQjN`&f$hX%LfVEPk)^JI3H9u`Le_h!N0-iV;{%jw@8!j07H@}42W<6! 
zZTo~AqSqq7_l71tfy|%3w57Jovv(0oy_hCZ#}024DEN5Fx$0#AA=qnypQr810Y?RV zOSzE*f%S>O6HQ!P-45X-?wobfRbZ8B5Rz5fK0+=WOXWZ#0+T+8;j0_+pkG!z4!+88LM{G0 zMcvb?CU>0j?5Y>BCzlS?1m7go#I_{{6=>k>v*wF;puZVMVBdDwJ*Dd?`*It4aS%CU z9D^`07Fih8uw|}lV)XSH-LH_vA~ryyU#FRu$gt{LtwE6UamYLInEkW$i_FxFZU;SX zraue}cv}>xzc01)vJNC1*w0*F%lnK;Bu1RgnI8V}r8&DhcLS~$GazHN9%6Y8{e`G5 zCZo+{R%c5=6rp0S^{IuSk?kXLi_T)?w9wmg-|l2QVMn?!!$N>o!{wTB z_&VkEJ;R7mbj1h@UPt9aC5tIN{>|S9doDka9Fn7pU(XY~g^arCT3~9GduUJdNQ7)QnfrYed6mI6 z4+fSL1(vKjnCARxLU$9Z_z1ky@~3U~gYM_K>18HVIrM$h``UQguaqm8ugh%G)qDLK z`Eze@o9z$<&k}jL1{!5_W5(`$)KB}5anw3esZ#O-_&K-CZ$a!k+W$_LRu1)_$w4xT z&#Js`;aDgDH!|O9UM{(~J`i~`0JjDxnF`5UZ&>OiFfXr`PSq1te3lK_W`ma`yycz{ z83Z+Ob+$5l1Tf;a&%?H`6HenDLcEKp%eut*ji{`5DEhVXr>`Jbty*o0#@HROd4+&@ z@%q1*%qZUSKK^VuTL=dn;4ka%s3dY&UQ%CX2=~*ml`9TO5-mtXe6Sc65YI;uuo*izv3L+mT0GpK0Y5Zk}fE*P_pgH|1l`8^1>XGV)6pda76q)CH5#M zbr$cv7c!QV~W zJQffc#<0mdUb8L3DaG>aUcg`dTF}+-KJu$SfAbyeF()RyO{=y$U3W5}szKi6+}19X z1dik07PQu_qa&fb-#8$d_$%vfjbR@V5euGyycBOmbW)sx_Ht5W0!I(guv8fOQr5qK zaC=q|uvPRWL{D4wu{tl(Rn&Z^bcp_%k^RcdleLTB+cY67@NJzbBiRgRceMlO2c^J= zW^|YpiXNHZuzs((QKf5~8LljJm4NIfx+*rdKQIaK-TjS`ZX5aTJi+g^t<`0|QGG4I zFK)`1QAQBk^T4ThHpOJm;?CQbXk%I$Iv_zXp>y==u7#;!!|zLIqchj`t8Sco7vxYf z<1K^>yp={-W00k7S|zxW+yMQ_a-neQ?T?c{Im ztFCR2JxrmPx!r9178q$Ynkj5yH*>}5()kn&GOpyDUR)48Guoqo_CD*oZm&HUVv&yIT;gyx znJ(FT;GtxoI^K9**|<~Twt>bHy6~i}7)g}bnshm4=hM(U6)mbq$1^Lk|I zHV4z-X}o-Qg2vR13t{5db}=0{ISe_$FNbSC<8#wCp8Fn3vco-NpNrk#=mA5+tL((} zc~S-L#2y9K%c%z`LA&fN*}NO}Jg!@B#?7s|iU+^07IW1jx_`)Ox14G7e!PHQ08GbK zH>l82tIu}u+$6Fn1b;a=Hu#s$)}w zlzi~vA$O^tPqg>Yu@WVdw5hR^$Lin?^ICbf*!oo2yLmQ65#-s}llNNqL+xyWP11L1XKF zF7sP$MmYtU`um=#ag4AF0H|0)`*|C9a5~+=_6cCI3Fi~f!tqLu9>10W+9+~s#hO4` z3jJW{l$@3DU8}OsK^K$S!}pH906BlmEtTt$U6N~e_DTXX%j;!wdSElSKKbX%b}iEh z`C2jHr&X7D5uYzrCzKydWG%|a9SWp0Jm3ZANW-T{9h=%8cYHk-uj()7TEg2StUFRe z?`|ZlTv7DR%{ zeUTNF<8;3d-nL0)X|Eho{;G*Bh-@Iwho&SlO9Rk+X}Z`pB_Koa7&=i7(%|0QD#Gsi zjdba9G1QpQh(7)F1zM3U(&yDm^zOlN@-5Z%aa1P;|j*)v+HYABpX5 z3_FjzL@^1)fC70G5%$eZ<5OAC7(64>vK;EYFPEWu4u^XMXcij4F z>*J}8yI>LP+V#khA9nb_vD%?0<40%2gdT1zr)BvV-6Ja_fEqaILB*)L-qY`Ouw6*@ zo9JVnl8V0s+79RXU>+&?0fM`=aS+Czq*+n?ovBA<1C%ShZ|VODIC2Ru6tl3m#22y$ z0$C)oj|SWIX&2!BJiD@*HooYspQM3%LazvWwkg`7x+LXfX4#Ef02DzeMQHVSH>XF6 z&-b5e458KxLz~Ac1y3_=0((RJidI$u~Vj*x^xO6MgAEtMq@l zY~J2jb51MJ%G_(GT360)_Sbd~PK9gUB&5qlOMa4rIoZl|SMSZIaxNYaR5Ay=V>Tdu znl+Kg@0d`k3%3MU%>T#OS%x+Bfc;-o!7uyX&# z%3zw8dr9R6L!Y-7Z|CjAx#rLHwCvR+RdWyw-IW5_0x!?_}J`3lO#g`D2^J6A*o| zE}w&U(%)-lbeT?iK58CAV#3Ugmo=+ypry@RP*QojA7FC%BaxR*!m1&p~O8@TJl zvso<=6{yAVzkZUF4RLF`JL4(=e_^q`1HxV|O`RGzXT)j+nQJ*e+<7~q&^8F3aBg9< zZ9KvLe{!0fTqIo0w_ZJBb|)#s*3kGBf%bH_4g5v&h&$L#r3g-P0^xXTb8Xqo!ff0| zIKTFZk%W%}T)>^N!ZZ4dN=grZy4py9n%?LUlwV>zyf!-9nmlu9e+lsyVV=SmAs1N9 z1*i8u?J~7g|MaNidNAGfj!Ut{2%mN%x=x{XZro-^{xux1sweI8&(HViRQ0&RprA5r zFyjfR)_u7|0<;+0Pu8xjLo;iRQf~vC2^QcRb-?m$>G!{|s^I#4tc5K*A<_do1Vxpj#0k_MXs=<=sXalslA6>g&X=2BBn%$z2=<_w*p zz2hFTzE~686y}yL+`dvRUNlkCD&}={wi0#Msc}#)A`+%78`ok@j$Sa@Jg@V}YN#!& z4gn1Q)=mteK-DaaS0lxy7y!GHT}=kJ23)ILFY#|Z`%RN%=9BxJYu_oQQfpKm%Q91w zRM96Z9KC7{PZj@=x<71HjQN@cdR2-#a!UYmkFpvo0p_%&D_O@8^sf2!Y+N{`iFnrx zZ|wg(u-RUf(to~f988D%(&OGsh4@d2N(TOf?v!**z_G+rz+GiWcn~Af;x-u`@YH+J z#(W7EOAhgBP?*2$1ac9qlb}EKm9MB2z}dYm3@Fr75#ej^GU(6&}Ad^0hG4<1@1#`;&W?Rz@i8&d`Y}Y2}$PI7# zmaaDbcC%*ngHi`+*BTuTzqT54MIO30{3RyncR6S}o+BRJ%QODXX(FQ(Y3RypZ<>!K zzod84G9o;1pz!s9(Y zK`+!z0);9xxZ6%@0~d|Hy8)>wT9*lDb>mP46f8*qBe{vNot!7*CO6NP-Gp42@BLQT zV6WGYTOVfO-AoSX7SHTp!!SihimU_bSplyv$-uZaa}(Lj^<8`SoI?1Jd94EOr>F0P z6h}^3i-hHX-nYy6Va@a^Yi38?`)pLi1KUu>5-!51fHAi7(}R7^IF;`DyD?(6S$ab_ z)Pm>s>pqYprr~>Y5`g7UxI4QgN|b2jb@Al!s0*& 
zW*Z;BQZpEi`ddFGi6UD3I58Q4KE~NSR6yGdwQ0*neZPO~vOgF@7{v9c!OjQNKGv)~ z?^w;R`pU~?ojS#uiVs}^S$%GMBA^$xz||HsC;LWG>wS3AIp@jcgm#1ZME~uft#yQ) z*t)XzskSY;K=G0aYF~}LyF0y(-XHN4in2naVg6&?pf}#XYaHykZ?x34z<&=B-Sc3g zf!{X}5bdtbhoKbMI~wk&#OWnM(%iL8rtNqD^FJt0y>BkI7q-#HQ7M3_Bw&lhMH~LV zyob|cK@ISYGX_$0d8W}2?W<9}xZ+p?%NwpuLiU&}2VedT%k2+tR!dJC(Je2}KK_~% zSl4d7p)->QVeF}FuPm4`HG*}eqfHiv61(!o@!`06XyF$ zbIa@u!h2Mu$pA5W*-#MdA(}Y%jHF00cC_S7vI$O81O9eKZ>y=W!WdL&(z?R4%b+`Z zHQfN&%?$jNpg1z|Q8pyk{4#vLEOdLYV{XjmKNb7)*ZvReC-VWqpc+k}MJvC{w`XTT zq!B(1T@A{(94PJ%3HQ(8WHkjWMgv!xgn4buf-fFF6hfZ7%TaejHD8uuAWDd254i5yQ9-`gN_@U^O%Sg)%aK6xMS$$a?H5X00-z>U~Of&%np9@i@&Thcr(!52@ zA|*;|w41Y%JA8Y1nKdvg)F7$>d14Y2YP2c&(jh6eJ- zwlV&)pfjoLs;8QlkVt57R+dbwL|JGIia zY$&>(zOp)?ZT3}9&usrk7;FjpQCca}@`+V#kwTI~9=KzedWo+!e=^)zdWN~12)j@I zGYDI7J#Am!;^+g6@WKC9-#(F7Vw=;!?bbtFxK5PPleDDXmER!Jw#Z07 zV^oTqkJ)6HYU3i~dw-Vkn_^)DWcI#pjmx=4QKe{`A(QkQ}kkdDwqs9CY zZz5&vue#K<+`kEblPrFd(M3YJYVj0yEV@E-w2*kJw<*@KJikV8o00TlP!8rb|6R5> z(NUGPt5F4bT+3c}V189P%>9m)P|s~8=7;F-b3d}x2DQJ3PbgHgy%AoipBY1YaTM7? zlCYQMKffo+WDxu-fVH--+}M(7Gfdu zbnq6R45F{mR{SR{((NZWSMNYE@np2gqi4JJ6gt>MrRdi!^mW;d^HtBv%o}GJeg=H7 z_6$_tuQLbidiP>5UuNv@5p}t>!o5fzWe4uW3Lh5IUJrmFBoGkf$nRv#hC5>BpEhXf-k!cu$M|jIkf!gb`Q+Hikw&zCjjp!bcwe<(`C;9i;a^L7>Vu*Lz-=}EEm8w1O(Sq<7?B5p|edo^P#GoI}GEE z7Eb4uA4sCxw+|}+c8u-z2i{V&$#TMQoG%x!XXIbvsTc({HS#y(=t%TWUi^1jd!iI5 zjlj^LK9WTdpKsQ$M?sFfrp_!nh0RM4o0U_I*MQ20Ki>BEEmqo5B1vjSdx6CS=*uYb z!=`tNFWYIGRLN{!SGn8DA5wvAWArGD6{PYV`x-EJx%i^Y80 z;1K6nIfKsK!J*34QnqUtj8t8jHVkBj6X2K3E7wEjuT-3$4%sXago5|6w&<3Kep4qO z7wP?LT#0p|+xd$3xTsc&jBj&kn6=xFG%0OzBUL!s#gVaV0{<3=s~%z*w+7XCFyK1n zwEP7JBdyX)0=@PwR#DYS-)*g#yK=G32_z_X1G1Rjz}T~WMaD1J`wYC6At;yEIjrJn z5%^&}|D`j}S2`Vw2B3qmpJ^QZ%87&XxB;<1QT%0hlQ@*F|8ol;(OY-p=B|y1=*s7B z?4V0?R2C|Hu~&Yct9~@RS+cjn<_H^X%(~CF#Wde0Ynl(#j-$=tEV;~J`oi}<4~dYf zS1LzR4(_g&Rq8%q1Fv#<#YoNB3|7_NE%=%L84i9!yw)D+OJE2*yC$SQ2v7OKCh4#~ z?mT~}?8Ljy%fkFx-0N6#erzzN;<3$FQSO+>G#fI6+g2UzeHs`l7@z$W2;hQF8_?<)LW4gn=4}84LfDf3) z0IvLdsux1R=n>N5v_XTDv5fli9b|M}JLH~}HCto1^4ADB=j4=xUUWzJ$;Iuc=RLEj zcQYi@{n&Wx&C8wgWM3fNQfwRghTkXwguB_!^-mt{boe;x6b4FN9LDqhp2_>&Oaslf zi3zh*-eK+3WZlCgT1! z&gGp`9OtmL-N>1nd!9*H4G&)NxCo$=IDAlXnk70j|6R>h=T_0R@T_YRWH)Kn7kk3V zQk)9vD0`csP^VU=XCY$dMg}`;(A>3D-FX3j#L=XGZ_yj@=g7_l+zGr?yH|Y7Y%KOL zX)b2oKL;a+Q}I z=4Ig?vXR@+0=c--ic1u0XZ&0l^Q_wAemlbhp|x(d)Oe?xgb5$42qh^r{E=1JsQ-JT z*6YAlsSU=mQJ_i1E6UIq)g7_H(0|!Q_2a(10k2O-7v58lVb)~e0{;%%f!%(`hy?s6 zuaAP?B@M$Fk&$1oy?Zb-EQC(I%CuS+ID)js`E$4 z?pxitL5D3LC-2ql{wK|wmu)aqX!fU)N|OU>o+Y+b4sbs2YIM~Mk1hGtocP>#v{#W# zfD^&qK-7ABc5LeNy^_^LqY^>hxfINfaG7m}#LcU`JEk*F53eiA(m3a#SL*MLyYpbG zp(KYm&&mmx4N(SjuM0rWsxHl!O^2&IEx6`%gAYA$nUKIie! 
z!RuN9klM5qoOM+r5CSnaw|;S+?qA1mGaBy1i1`5uq3Vqk@e-;8XrHET%5^9u`ttWl zXT6o*^!@iGGj6_2QOto+`3~VjOzaZ$63jci+R=CLGHz7jRyi|>#l-|hG)+5ZsA>v8 z4R^1Tu$SK_Aenc2ze&bMtxw3wBXzAY0ddc7dRBush@!qYK3RPjyo}{ft}6 zd01`Q6dAwoqEavN-z{E`F7IT+1J4N3lOD$Utn{q49*F`kkNZX%^6cFDyn3Fo_*ADs zHcgtbem+%?@K*I;(-RR6`vS_wtiF;`K98YjJ3qvrInB-egnQlV*Y0I0G*l5t=+87I zCV}@HPh)@$Al}9v&Qt8SeWBKC3d}IO>XsomHK>0K`k2v!v$d{OuU29B^MKlP=|8e- z?crWTbrzxkL6Y4tX-GmBC>+HWe%@1CpGs@V*kyHTJN^j!w9BV5Kb|lT(Dkb=1;OqJQw0kK}>V%IAWGG-*r-+C0CEol(?&qDu!22dr$q$PpFgpd?^1bTY z>6?^DO7gMo#e-Q*=WU0Q%eeF#+c~RCln-rv?W4Q{bdz#JgvYF+_zK0;dQTW*GPSaO z(Rc4t$JF_+0zmJh?g{}LUI4d2d3KnW2eigw=3DC(@DI1SSI6xc4>Qs|;zOR*C7=H3 z^ppHH9gux9D3}=PoZk?dnRkgm;b*xst3&Xf)q`~GPttC5{a{#2`d(&3v3BRHzf(}Q z``kYBXo^+w2i7;QWf``is$E<1C-%d%xEROZ@k;5d*6mzXUMp^RX`p#-Nw8+ zTft23QzyBz(TWEtbo4P>;#i$wz7TS%nb+x;A}2ARy8!>`fg-0-hTQKU_^70!!6O_X zh+K!C`e_4Z=2^{Ayj)%ef*;|?0J9>Zi9~pt*aQr0TiA8np$K$p#f?1F_v9EG#N* zr%xk@WD9JQT^`6jflG^Laxw2-T9j4h*FNXhG^~^!K|K7keRu9> zqyK{z{6gi~v*Oh?nkcbf4U=`Da-8!MEHU#>_ErL-US1z>f*Za)LF|$rB>p|iFms{! zn?^SXmGyv!`A`Wr24-heN>KKpCvHp{F@!{P@U+Pxb-)vb)o(84GOtb#$TbR(H9x&l ze^nGs@2lu51?8EuB>(f*3t%->X`hBmP-dzsEsYOjdZ$x4;#k?pW;3?*-%2%p!EZ;S zHoxC#esA?S%EbMen1OI$)D-kBV29-x>p~i6C4Cmc>>$f9&yD#aq^za_v`iUUn`*z5 zF=3o?>*rh$v?W!j(HY1Ww@TD~zOJy7_~=5R<2;&KXt_zL3#&`dE~MZ4c?Q*m$T2kR zId5a4zxH~eW>)m2IVz)8bn%Jo#$u2rGP>$(=FYjHU`FdB)V!Mbt5V*mX^gB@>8Kj} zM*39q*>e_jFxsL4oq{%ruwqeRuC;NghDNWYzd0i{55#|4;4$ojULc+m@0F?QE$<@^ zuXRDiE`xKnL-0G~=m?*8!zbER8%fUxt9wZHMCMi7v^v^tl^%|RNAvxMd!5zkdFeSG zoj;_O-F~VT3mk|j{TtlZfBPtEI8wX$mHMG4GWfo?!x3=XncrOj!zuUU$kw2W=72%`CRo(s{Y$RJHu$sr9MM4|LY^0i6>|+rDOY2?e00RQl{=>0nEum_ZEry z-{<090`t0~M^fd$FR+vqeYc|eZ9xy=*BN~0xqeh~>X#aVJ0)pyamNbUjP8h#p?zwG zrM5GtCF(s&f}UNPql@VF^&Wi&Gkc=vdx&w}t@83*sb%$Lb7ip67O4vHCqZD73kV4BruzA|t-8&;%|t*W!dcE>wu{|CERReNj<+!T%!o#`X3@CBebxIGz7=kYmrkJ-P86$)eJw z+2!(1{|-y2Acyt)7oU;YA7%@$y=ou&a9LC`r!i+{Nhv;y)I9&1_B4u_CH{htsepy! zGX8VU_#;BL2P4)NaZ1<)&*8-(e^E&Duu3vFF^8>XNv=QT`LY5(G4J0OZyY>6xXmjc z36WPG2?}1T4l4J3xiaTJlW=+`o{E^5Q!5X$!6zI=ctVdFglfVQycnuZp%*W#Kzjtu z?7rv}t1(?3K{kB5(;l0y<*6v7v|sVxz*}F}aFDq(p!l2Ejdi@^x0W3WtEAl*}I-)!359gG;0{xM|)Ox4FS>er;Vz6Gr)d& zUoY{-MYrktL)6lFYf6(ql?7C~{du7)0rLDI0woPH)0Bt4d(8bcbD|ZwrjnKl6;zO~4IwGNw9=G5Ca9f~{r*m?5)p@zW7`U1jwDTZ;9jGd%0kOMY4qB$3+IrFq5U~DIX>R)Wrm>%Gh8%?&Hx4(tQ?mqWg42 za#fMdNw3yLrQfb=W3_d~Yx^$Yh;sW_FxAyLr}_9O@a-i06+mOB^7_gjzW!?hZ$Q@m z1a9Ol(9H8;{Z9QL){~DRQ+8kE6O*!srDm`BK6qBCk+ro_<{-fF_~H)%?1(rE3r~lMO-a`ZpfOk6ke`HE zoa>r9p7U8Kj#zdX=rM2UNSC|afIr8DK0xLhv@q`krp~e=;w=LwI+DypYJ#1$L+q~y~ae1pwzsgvv z_C+m;bz9zF=h(){2pl2i5=h2b%L<$%p_=nSb~P1%rSenxMT0qJj9)w^a`nRaAA5j< z4X>GmM)a-j5MbW9<(&r=b2KFug|}+xnVT$=!(p0#Y8&rq1T6`{gqiwpt8s?Q|6*(U z#m%Ma?AKh(N7OxR%4zq*l*xh})cx!z(233Do`c_MO{U_F6R2%Y|06Wnz^vSM*eOmxuBerG>*yNVqVbA*A&kpcg#6luB(+-{#}z#=ti)2>Zz0YX zI9{0Q)7e?@o|hL#uItjGqIv(reJLWS*5h4cZw5ubuGYM-gi?%Rn?sG8I{@DvZvI%q zW@PR#RZdR-kZ48QC@uMgY#OrBv)%^`<c`K$T8=)-?DD01mxS1Cc{`^6T?~9Urc}yx!W8{CbE4^`M-*4j>#d^N# zIaf;OZoWe(8jR09qzoSTl^wQ}L}PChO052*=ho^P;2KqKHI$jbQ~8L@YmV6MJ1m%+ zQ~KyG4%z)NQl}VnCL$jkVA5<$g)?d{$mpismf!9^jtQr5#jMOYp(p9U(;|Bm^2@_s$$oLVfk%vf z#3)87hxK%lB~I1qly;*o1KQWFBep&^Wx}b;{?k5r_r*nI-tZcyCssesER_*^pQ_7c zM>fHqJ1fex2|EZ>9d^1IZfe?$t{PaVk`~8>O8b_U%ow2DwmfLxnEDq#aON z$yK}m&2e;=XHY_9aPZ~IM5&%U6iHF6#)3YkA^)M63fmq&#+_?QXDi?M_?IG8mmXwp|nmv^{q#?s46c3au8l@@_~PONl5L@+I~S(yD*t zTmKr}Iy~F98F8+OGLz#8|@ch>N799Yt+5?LmEXtlt4 z3Z|miw*@bgSOf8Rw-vwJW|2_JfJma--Ik6MAMn$!jr#@;B*8OhbeCdZT_yj%qhNk| zJG8Z(^xgoy-_}~1B!4D`gB=U@D$V=oy)K;2o#d%~)a((^YOr@Ji;L68gv=uSg z?|GPko;uWVw_`rQjCAU@3WjF`|Jsu!@L2hoT#0|vT0O|+ma=TT1yC8ZW{c? 
z;Zp_`{F`cX$y&}$#j)E@EJXmKF*wF_1Mh?^mDfRos;lLrCkB?cqBI!Wn@7KY456h8 z+PN{n^j|^FDzEbWq`EFRGlNU3A^Yol+S;A+g zk6z%BUX4#0u2#VJKjnkJ8Z}H=bftO6GS_4@>{T*cOFYA@jyO4F8zBnCQ^=>BzguUlH(6& zQ#Bf8US;3S|FQYiSxUTRww_1pLTml(o2r1k{hZ@a54abp5=#* z^HH@uCug@U&(!aLe({{iEaw}zIkm6w28w}q$Ek?=b9GJs(vFUQ576$IC&^^#x(`)l zKjq!X8Oh*YutI+KdE$ReY)?5p52oPyEIDgHbP`VDLFrKWU+hPK^h`$Jpj*?crbtT& z?J=xzN!JrLG@oFVM9$pF)C_di$yO?$`?ibE+y8#<;>4sGa(?F0J2 zH(^KR#w0<(pcqb2fZ0tRoV6=;AaAo~zpKv)COxCg@VF4-Kd62Z^J&LJsGG;6 zxr^nC#5g$Rd_Q08o|E4heT>~GGRsb({sO{We8G^u&K%fYWn*g=_0^w+izJ;Xj968A z_|@WN(_BML8in80z%ENr|f(Sw(8$+a3gv&la1DhPFR0>7r%w zVEP(*?Gt$K;j-YbMf!8(?r4SL<7SVO!8~lM3RrhywHW!Y@#I5Gf>Ol;6>QYBFvQ?M z>6dXt!}sOs&q~=*R;Ui}^}h(Ng!7AorW=$O7eQZ5m#ZEINB#Ho&fhn`|Gx)*E^mJG z5#dK^(=OLB7dF$9w-J-y&J)Y5CX+>PoCNjGnQ4^jPCZe-BiE@%&Vjg{fO%zkn)S$Q zHpR4@t~lI&SqCxkZ3jGr_si2MVxO-jc1Y7H%`Ba(5+MjmW{RG0IoG+?<@HAi?>rN}v% z5#vS}n^_cRwg%SMr7Rln9OyE(6%kb4jc4|3Xwf8>v*GW}hUfZdG^WG8>@|xQoaW+Etakzx4f9FdV zh%CY7-P?J!O)Brd0t_fUjW+xsX@--f{$7&M^MtnjZv=ej^>5nt+Oztx7&mF8WI}{Sw z#q$q&3m8RK${tUJ1`_0)3wm$6+Zl?z56&K( ziYucsjO)()3HNdwUe7lO?j4PA;;&fCpk%M<_y5EoeY<$G#FkTVlE$Lzx zB*gXYPkM51zt_-2UEJojKIK5*{{3sK*=dRpot=tM?d$gh%RohfxPaK_^fTLU><%Y3 zHit;9gfv(V(%wEauG(YiNav#QRf-mP=A!26Jvt9dhxs(pr{vXvzd1$M0GnEy(c%|V z14fTrv&g^Py27+lDEw%r*;R#QJ$Ipz3$K$soT-lD{WFmSKh+?5;O`(74+WpG>a?Zv z_`{H)uPc&E>~Fgzua)Po`D@IEGs!-#)DKc4k7IAwoqg|295aLpvnzzcZe9i@BHrH> z3GCa=V~#5&<DtTpyj*JXM`(~7 zHX{mRa@e}>+5l=O2cXW@Xpo%+zusAqBEAq_*I687ybm&ykVyt?=xhzs(zS-pBG>C4QMiQ|;cfC-*$(41 zEgu_u`F45HqaJ&l6usB0)gx2UT-70L_bvD2Gr|)r1dk>1MPW&Wat0dK%TgcMR3^Up z`Sc0hohkSKodayN>aTZp zab-uG^fodd4>S3bfoFGW=znQWbA}Q#1t$q0-+2EklAIQ>2iXH4tsb4mM1%L;&SJ)P zrK;&XD+1q)0^%Y3yGHV}7I__!=hQ|2t&nMj?tQptDT6`y`|QD&1E%;!{|Va2i8PUp~1VRTly3_uYH2d!s)wXsxB_ zF!}G?gSkE1;Og4~B*uY^&uVsD3gRLw?f9AW*)IEwsM?g@+2`_KR=t>1w$MnF;x1cEm}vRWooDLGh0ph6ilU{v^ELSJF?&;q=ty zNTWIWQu>(bs_5@p-UxapdHEyl6MG!DdzF#(5O&Fqn-{N;o$wal?(u+!ZoG)g4C@N$ z_fXuh@nxO+m)Gg}MqZ(h@c5vOA31>O80q#33y$7F<)GNR>ugJ(r1*{ve(-Do*)HbD zGvbRuqBd84+T(jwY+T|E@n|L>%Zx#eGZ&+4^g&dyj1JVzFOm@wdpsk}nK83FzNs?p zx?5r&=&K4o-FUFPB4WilYg5X&Q|j_3>x|i{*}j;YQuLU>-#6dK z!L|3tQ~59t_if0)=i!yk{A2p+4b|`AD zG6QGQG6gEoZ0NULb>~TI?5|96xK8{ zw8vqKo)X%Q#LDA3noY6UvnPJ`t3UA6Qv(GGKY$D^gGTV*AJ3d-+b*|;O~bfm??H^H zNG=MWI~~|)+6HQo=+O77jT=Vx9MMLEaXy#&IW@5s32z>43t9{M-}IQd z2^%v+ja3(Z{}{5635^p~q-q~F;<3oxmAja|-sCsp(`EKK7UsR@Vu+QaJklc*VEScg zO|~(wOpcG6QlnktB!E7Rm!qa?K_?hs;_CX+XY5rRIwty?-V<+Pu9qX&&MPS^#C4EG zP2=0>clgwBXwV9QK=Q6NiZ(a&-@o-UJSl$5uiV$ZhOyS;!hh*}-|TR*lzaI|=lofU z#Rbz2(~BL^1iL!x{jT>HESw?u_UdyE+)jnnNH$r4$bMo+we>|+cUb|pP|y1c>fw zS4St&RV0&Tv+GU2L5fzKJ|c79ni@%ibr|MBC&3G)Wqxw%7Z51Joj zN31)&<>$5x^@a4AhtI)<_kB);-vy3ra>6H*#n!Ku4j4mbeD+NJ+)Va9R=@<3IwXV=v&R5L&bTx|gYlxWf9;CD zXWEUwu(9{2!i_&pBA3O!s!;nEZVq)HP*AQyp1KIp;{a_ppZ#7<3iTP!2H^nC!d(ST z{&LFdw{8sidfbs~@jm+q$Wx1O-CJDM9wwojGXY$D%>Z+2?`VR8R4C*IBS6ILYB+#b zn|Ui>9RI#0ar4cJ@676EoliEGla+4kw$HUL)e+uB%`r|!J0u1jBejh)PJ7Q#uvGhY z(UZ5%ahN7(JHyUGlvaN6RJgR|n`m`1Qj+HEV&m&TG?-@V)W~+HopgMZ%{A&j{4mh* zTkT{j_du|39q-WbiIbkPjji*ZGy8?OmHqaNU!`FsG3j|0c1>#gF>%UaeI^n14*dzJ ztB}>W27(11%_awQHaf4qKs0r$fh&ijM=Ah?f2#8bs5(~(-zFQq6E>$kGNEEqqUNR9 zB1t_)d32OVa5;wE96Tq{_N}i^e1P`+e?jbWK?R4fHT&H@svWQ=TSL*^KJB{Rv&IKimQsGz9q_6;_|`EBxOUM^khh$~lm>3b10eC8wKHO^ zII(BHu17(x1wMObk0-N^Ql1QQ0y9&By0^AoT$C;Qc0umK1GGxkBH$YNY1NA}T(ALL z*cgHd4n1bwKTx5$h^f%FV8pnwz@aKH4=oPTy0RDVJc2%=fW0yjHZpb}1;QSSB}o~N zv1+%7YpD#01P{&T<<=7qdOH|-8LSWyyPl7@t6@@R{;uU-CDAlSsleIzumVW!r!v@? 
zlx<1TuU+GQw%2v10}7(qTZ_n+|J(1YrOTZcnL7c4%a+Cg9&KN32%W^YMujul@7CRD z$F-MKYg#Od0FAS4v|EH>QLBB2vmGkFS(i>RWD{c0Z(my((O{0tkBD9f`X$!m<(L_X zqjRAZdC~j_qEe{9qod8T-f1?YL{5qc#ZhWqOJ=T_ulI5pE-%VmaF;YvaJ_tn@o5R2eJIgFyN2 zf?$Hf!wqWyL$;FW6qEFIENdSZkvqs-P_c8Bu`OtgX-6d;R;5YJQR#Z5*?&2|Pnang zN|c&YS)a@%N1jNw<23xPx+J@u#Larb(FOr`+#yUr zMrE}XzFzwgp#7(HTsuHd@Ro3Lr5hTiM9A7N5eN1#3c%({6%#BeQMKq~kKUNQtAU~D z>lZWHZ%>>C&29UyALK-^lLUyoP3jv%z9{GZu|I1UjGXI0l<(NMw8%w9#$FV#8&pSN z(5786TgDLtMvZn196$@2qnS^c5$yy_!LT$35ClEBKf>779r4M<611GTBLctIABYvp zbP8vdRw=A421|vC0V>`U!5ix{CMRFG12@(Tro^Jad^NiJY|+1p(%W0~JE{8uGS6B@ zd*a~+Ybm&rAbO%cn}4mH=npZB)LXqx$a_@lf#du0*$>TclNbXpHV|~PoH~DtQE53% z68J*%ii*H*QtR_ieKDhpGRo_dc}@_LR~7C)9*Xh`L($Q7zKl4 z{=F>LE=o`?nRA)$IltFciDKty-rJ2P;H|n6$q5WGIA>KWH^-K%FWdf<+esXHI1wK= zV;O&U-+v1C4gDiEi#pj2h2sQcTDXqpH@Ol7vYn68HeV3w^F>scl@)CGiTB*8_k+&D zM;fmwTkZBW@ipD`=~0l0;7aZ+W;;9vO^xZD;{+%BJ(cOK%hI7hA$8_a@7m7t88%1D z4zCQFajO!9O4bjIUErL}_6liY2yyG(3uG^RQyeXMuG%Cr|9*B9De zT}9~5W$nbZ)2=YAof{)N4_Z_5n8Z=KMSkQrUc*@8T6*v9Isaam;|2tCE9SY`UI{Sh znbY>sY@0&oR4JD<{8>a&DDG^bkT?%Zz;lbN7Am4naaEQbZg=HNYVwq?ygId+^ zc&gEuboLPB`s-F}AnoY;0L0N5Q*7;yh~gLX+`_*6NbLhuS_7fvV&uO2eNYFY>iJvw zev#O8X0Mr|V*;L6uxD%xM^-zqAfC02+KpetYjkt8i_RZ1@iNYmulWF_zD9~tp$34? zX)pnZXF#}HYtx=GQL96To1N7Dyj{5^nZIyRCh+=K$#q$cqAEBk(9l4(G)AK8-uXsi zjnSIXsz+6bRicNz9O3+eC^V&f-%sa^2LaHzxthmb@zraKvGTxP?8XQeHwj1E!1SQ`?L)* z;?->mdrV_Wb=F=AVE}l9jTKOXI(qtL2+Ow$Z~@+pN)2zadsb<}bBhvPr|?)(qFQ6k zBugVfD!%Vun@hf(ij8i|KP0II=zC^A;kFe&cMsO0Q_JE7RrDW(s%z|cx4Y>%J}CwD zy&+z=k}KWQeH}h#AFe5O4m@>{o^$jMb*mm$e&z;G@bZcD2s+3G%vEaabKWM{tPB|h z^Uk@QqPoeq0dD=8h@!zynLs$<1u2FYj=3Jlb1B>NY~7I6a@f0h{>ao~fo69WMLZoX z_U)Ch{v__oH^(&3GrU#mDxtEY^i=U3tPtfZ7Vya8m!yvHO~4ag>LEzmJNGK=mDM$` zk`ygM7#CCzda^LoZ)M}itr^IOU5uV_jja`iHO4huIqW*)XnN&(Yp5SH>HV(_wL<7B z_WUvYDkE}q%Ig_Pa5r}oK`+O$^xndH7iOa zy_#=JCCqDeqm>Fgse9UH{<%7KwD_6wPrGuK^lb-IzF48Vo2w5lszZQUT>tvT{AE;P zG9JU}p+Rtub%V|X8w$b4F=y8B03!Q!EyZ>S`X4SS;a5l;-&_I>dQ5R!os;Xf$+Z%O zQ!lZnl+)Gk)$d9{6Ge2W8fjv)n_mMVa9=5}qfqymt!@BSSE==aBiEYW*Xa$u%{oaS zDfQdyw~?5V;i1vwMzeyA6|M4k3{A|uI! 
zUX|{V%T-)JWXy3SW4qf2y_2{p_ZXMFh-wD(s9X2%#80`qg_=j&;}$spVcW{F8PNrL z@CbuiM2a19WM9{mLT~nObCLyqEr?upnGPwhRPzaIV;cCT?h|8Jq0CM6(vYb1~%y>!i=I98v1zQ*; zMYj>sCdbVP?kECGSixtRB-{&h!fUGH62DJ6pnY#srY4mIY(F|Qy@C(>?$O)|6-yb% zuR9G1DioF+eaCW=O8OUFY}Ya4yHsh4;bY-*GdHc{O_iaA2IX`+27|WFHRd?)1ziV3 za=7ZLuWZj}^#YZtY{mW?7!8imJ*{l$ME#!xcF=aB{F%0ESh!zRMr0JqCWqv6ly z*gY??v;IcC7lnKJgz|n1YB*cI;(NHVhw$W=ZMOB69f`vN&S$=1{@K4;Wa6clvZeFH z7C$%uISAT4x~(mnmai9X#Z?%i9Smx$RK&CBTWCbXmo8Gl*^a!4ZWtp-*PKBHpm-9N zDX>nRF;n>n99s&yPdvYW_`sHj4zu_y5njt={5+E`^{~Z}mn^T#GpHEZpm(-%U{)F^ zu-@`udbTYCI^`8L!-y~R?)kGk_R=iu#G0 zaO&Nes35@{$4%s!6VvQ>p)==kWG)U8b{Y$r5wSYsY{z!`=Fb{x?EX^*HWR+YQ*BFQ zTCvW0)3AHw(()0=OBAjkD*9kfzV=Nc1E4+hvYv;tI8o?#FKPkMSB;v|P z_y!kbGC&-t)y%7CU6hhPJnC3xJ%!v4O{IG#TeWvglksCZ*k)nnDv_Qy0z`#!*=1P2 zc%1^&qG5wd9R98W07Q;vsQ4mb8+xtCTh=BjlS<0z+mF1iM?)VSSQ&BAU`D9-Iy=kOxt4y#3S{upk(>|B6ut(tP zf*p?C$k1!DK-`}eYTlM0c6hYZi7WFMNADS%=M1ljZ#G-`ap>?0dYjDC?AVS?WGm`t zPjZuuQvr}%FYf*OY^Lx;hH{G<0nR|%_m|1ue{_se^aYFOme#Zn?ZcG+A7SVI&-4TD z@uH}Nib6tBF1dw}d!<5dNkZ;~+{>Na@|9a|x#Z50TXLQIZRRe9x!;-F%w>#iHf-$p z2hKUa?f1_fkI(1*em!5$Hy+?+tR;<7qN#lcofR(FSwU{hN}*$iO`xL7zIl&!4naGxfS` z*10f`@|Etkt&Eo`EAH7oK6O7^jhk8=F?cby-ebOm>JoJfI%XxVU_O}ugU_IijLiUB#O+GqMPF~4z;xcjtEz38GBzU-!Sz;+U|#ix-t)vsne)fC zdulVT9rG=5HyaIjf8x#Su^7|uSHCjadi3ZKVY!=O1XFE33fQz%tujVroHe6SWNP=v zOW6*8r=Y%p%<9&^`rJCSTR=Zucwh^0^?iW3mq|d&rq^Zcumv*Uemx+sdFnYua0ziE z&ty`(#cvki8Mt9xa~oiK)}W&oZd^pMcpQ+krYvUTiC3eIoiD}0xX4M$-{sdn0Y-84 zzZ8nS4(si*MR2_8T&IixcwbDCW!wk35iu%um)By*oTaDR%;MAQi|TwvmgXV=xLjhx z-46dt;8;P9I%lg~(C_SAYo3b4&{|0+!$`9&>@Y=8G z7Z*IyGX$lqjE-E=#VV1=bc+?`zuQUIM4Jx1j^#g3Ss$oYxXd-Vu&!s#)Gq}m5(;|* zfm-RzliO4Mup*Md3`-d8Gu#^V#gl>m4j$IpyGor_e~5?ALefW0U~#^!hi#Sd&%l#4 zYS+{s1-#~%`*gVe;+R@uQ|rHt=`R|djBnjs9LoCZRt9D#IWJWoOU$kpSyvKw|_n_6sH zI|TpSf56>kdv0vyMmn?V`l-n@aiiRHP*o8m*3i>`r1vdU&hyPWr4oPF-mIKGUU6#H zdnlkhls|162ESkD;_HB?s;0dm|e(pZcr`w=TA4L_k0r^HQFSW4)@q0m0 z1f2s^TX`3Mp1mXlwXc9vq*Nc9MC!*EwdHz6@woC*_^8)D$Pl84k&igBKe)3U zC@szba;}Rj0YH;Z2urDPfd}`4RKPXMLKlH@Ipi~vc?hmX}0)h*)8Qgh-@ zn$|1|$_hWbM{XaIz>9I**!r@1f^Ut^pPPHom^Ng7AL9&9av@zZ*nFF zb>03sp?Ns-Bz;8Su9F?A2k&n!;+OsnQZ z`+{-?>0nDNgkKf$7%*=v5Q$1~x?=q1S!`dUEvLa7@@30IP-KGR@YWpADY2fOGWkY%8L222EWH^TtFUjj zVW?weJw|?iSW$1bFzyRi-{FIK-J2Xh*F-UMh@3Brg zeVUaEiuqWfwGfsh@pUdY`qr2br7T^IvG5vhb9V%CA@(E-s?vST_DJM#f~8`n z9r|0X)r!OquwsdajRR{OK!o{{%n~N{Z)yUx5kN6&YeJ-B7)j?Qno)-yjU7EQ{;svVVHbwBR?xpJ*BM}C8 zLkhQUHd|KQ$fFamvb_L@ib|rk#lmxl5zsG$B@EmRIO7Y0$UH=0=uY4736tX&M*ic* zg;5HIIW9C6U?>LArS@%EGXeq6eh~xSSs?Jqcw9A%?v7_W&@VTPL%Bn;NS{kT0{isi zK%HLC++T9;P>XwF_h# zF*<<(amUi#eg&TIarFv^dqqEbl`H-j-9NVW_=HZHaA^FE|_z8gR?dMqmfg_O$!j%2+Cu18wi)9p8fYMml^ zUhw)FaU6Vwobi({rHJgOb+*K;o59t4CBgqS1dU^k9M$Eg0D|My8WZ3e9=~GC06jf{ zBaB?@WoXuA^d^PZKVN#TBZh~As4HZAEJD$hpakv_JGh9FKVe4Kq_OXyc0k18VAfy6 z70FeIC2ovy>6Uh5X~{`tetST~DAJDOYKn;cBU@c-5p)&pydQ@W`w8zVapySVHhnkh zORS#9D!2x@#-aL~zg9^XuY1{=(Kx!7J!-T!){N7+J!O9}aF1zx(6}f$K$QM%FS4p5 zjc{Wm>LaLtbtH}RVBcANT;6A%>tt5rxs$LzqYTz~-DA-TLEjKp z%uF-u`C$`&l&z@zU8R$g+vm5K(u`FejO)LV)6-D*#*5#H!nN6cjl9=&4{mjxNi{%A zigx^K3|*UQqC3ZdZ}z42W?(VJRMXBOH()v8s>R(_e^TCqp1Tf0$vLcQiwC*+OV%*f zk5&f|udT=Ud%Ms3FQ@ezO){RkKy(mdHC7Sa9b6aC&ppORr{Qd{D+4Ns0(BO|PM-iW zf#~uoM_4-LKzl`Mtsp4V8+>DxYbQ@FJ&==3L#o4#ZckGFncelGerq_7n**k8B=o1d z45F@!_e{%o9o_3WxCjM*G5nYukbL4;cYZCRg=k3~-al0NYQFzNo9>;@ssZi`qPGpbOc@sS)HFuIo`;HslDBB% zUS`NcUpck@P&0YpD_)mb-MX8|DBcIk-H(rUTWHoB)%1?h0uO$u2hIz#XK5<}*IY~S zeXk`rBakxYob#b>Arkf{&fkjS?%TJukU25b6yzVhg9~TX){<>)>%*d$UZC%8W{$P}G>9GM53^+~)0hh`?}x!^zNefIV2^EM7OYhW8ea zb#OC+_9D9KU|BjEE-Q02`m{ppT%|oVb!5Ki8&({kj2;Ra}OueTkW_+9F*sitosu&YDSa7 
zhDpln%H5~7VE;r`8O+A48n_*xXA#7tp6Fv&EPnM)TH#F&^2=SGynMr`q6q zC@qv5flN;PlZ-xfo?a5D)%N_2Ulc%I05~X~IsUtU*zPTBEC$-my&^feD_A&?gC2n> zY}w??1l6~DzsnX0&s)FVj~DSh`*>uOrP)8>$4f)J2&6h_T6{D^=*X=663cAMV|c6* z1FubBADdKiqt4df{yE4;w2y4~FGbW?c4Nvmdfn4z3eM|0I&BjGFVAF{m_DP8s>>UiY zyQG<)Bec`A6J7uB2>)UIKSc64>$?0fBql2poJnwv`)A?%TbyJvP~XDiG4Ywxp-J@f z&?DiwzV&Y3LL#vaKz%1EMM>2Tjb`tubGE;_-PL;2H_hj1hX^G?TiNG@IxM0`0uhO- zBp$TsGdQiu57y+l>N0erqOH$~cl5t^kS^-Xqga&J4{Ex%E}jkRX_t`6I~Ue9E(pEY zk$SXfTd(1Yp7091%2|>T@DR^4inj5hrni0iuMAxAFIS3k9PEs!zk;&L9qZV3Mcq6V zH9U2(=p{vRwdI|DJ^7GQaJIwFt5Z44wEZJ6=K$hUZNXiEd)NCk&_~@pxa$z;e<|qK z5|8KX-}cl>cZEr7D`Tf9tTt{)^J0Y)&Nv%rB0f-Kit1nE?5X;@RV7Ia=WthMpvEGN zT1)G(p6=}DC7O2#9B=6NA!wWqo1HJ|Pc$05pV$Rqrv*GR^h15s7t`Iklv7$l{d-$k zR2{lR)AxCgUG&GL9kf^jK%GVAd{7;*yyOu3Mk9Ynca(fj{&+7C+t-0eL2Li+A2m*; zMq`9F#~Dyd565@%vZ%oj5wxIG8u#yIlTbywlU2c^xb4&ocJk;wh01)?7hkL0moe3+ zPM9+MASuD`yGxyj+pyH4>rR{^bh+zdis7QxjNxxLzJ89wKLoY)0~sgaVxfG|78lzi z?hwb(?~S>-yp+Km>!M8u&z@GrF@Ig+9=~D0?#d++TSP<$U$9Y?EeRd~Sk(w1BRdio}{6Wr1DshLEY69xiy0TD~u41Gypcw8{$Ls_by1q(F zOpo{_A)rIu&-fc1s-B4u3$&JLijcHf*J?6@G)9E28@hiorX*pMr#!G|BgL;;G=6i6 z-JGW78*;TT?HpPV$M>W5#DaXeMuP#GIrw2xtpITuKdd%TT?j+87lk?ValA3KQu38v z$zneiXiJBeO7K>bnIOrLD$E_q;9siF@It)-)u`7={S@+r@F_B~S_iOFLTlZ~qcP>2T{1JLC)<>r)~n-)J)k zE#@Dw zG|y0jpN|+`;$Rms;5a?nZJI6d&`6%jq^6$PO!O@3C3$1Q_#`bxi*_nfC*?hGp_vC# zi#E3!BrK;F4`3lyZdvi5hj6re6c)+gY<4>})qdD+j16`gyjg}Ui+U@pgS|20V>|Jg z&OL&5c4e=tB7&<6pbmHbQwF=H?UZjlaoH8S`Axb4VeD3@wHsSY6TeEEl`LN@uP8mE z*Pg3nd@zqbAQvSX)Rt!F{P6v9hq0T>=BFdon-EBxH-fpH;fz*elm>7v_NT-DyJXn= zkoffB9$&jDR`uFcNyGd0Mvc;1p`88!Y`v2!?oKXY@w!Ev`~r+{x)dCMR9A3Bi}=wmaZYOqz(jzpA*9w`+2T@UV-&!*D-H5;gb(2P4iJkJ@Wh6 zk|#yVeQNLx-mg17`K9<5s&Zt`?s=~lK**BR^spk0*ETiTU5U(=3F=?{!ZBvpI3gmF zGW&9!rl^Q#JOP-2x@Jqq9S@DE>mCuK^IY?^rspAr^p=mnG`B1FV0Ocxbd+`4Q5lAJ zyK;GdsY8^;_JI7e$w2K)V<}L#>C%)*;Et!?R=>B)uSu*dVg(<_KgdA)PHvjL?H^o}HWbNW@F`od;S@b8u)2I33DGy^B!InmV@ zNj8Y~t++{E5X6{Y^T?~+W}9j^D|~=GAE$L;yE{c)TFor8wrLw)fsI5aDMC~TJr29w#zXhrSW~-B0k7-MZ0-9fb53r%kcixX&mCv z2KhKSMK}(pxZ)yDE+`a!^w4hV;!fRt$)0Pdb36`7$3|-G?YDz|PSrGm`+!*TDFdxm z+W$|5b@qco>-#F~vL`V(-cHrCv+D}zrg?h1^y=!K{K=O1B-gr3wg8gxOH*Ao_XgxG zZEB|Io8`4dP4SL%}xlsDLxsy=is%n_-k6@ zcAzW$R_k>iVN_2jQ|Gsua{=xBMq|q-ab`)$ZerZY@~$cJQ#(*S?$X~TZP_aTnxp#X ziI`MHD#L7%yWwpc-G3J3D^BRyW_n9*2MraoxL)FuJ`(XjT_sUX_b}f^Xs0^$ECHdL zr?ZKGoiWdHN}}td?|8$R1VYGJI}W2kg5G0GTxC~V9=qd?yqEZ0(yI)Uuh3XM-j)m( zp15~ic!mH-!Z9shJBbpsZvPZ-v$Pqj}>FB6Y-H_I?E|>>FdwuR;M$adERrOYQ%{l4H{lRpjX+rM<){33m z8xY1>yi=6Ws^S(T6eRw@ql>Vlt-Kg_FTH(b6%gaR{6!c%?q&?#?EQ3Ft$v2_!Rqa3K#Sp?HuT2V@7KPwWL-(i- zs4v`6lLf)wH>qD&9ucXJaw)^huUz?+X{vJ);+1nK?BB(oSTm6#D;Xuh~$jr4g~39okDvjxOHX$$|Erpdj*Zo&Wjh%3*@eQQF}XGlTw#S_C}G^iBDlLV<`1=BRyuvBBwf&#e+dfG)%?}KY9Bq{8nfO9LXv>@_tSPN z+SxuR#ng%_{3^t&-9`k|@i<7@TvIaYz0Tse^Q{n}-Ydd^j(XGDL-Bt|6O6XIWfUVc zn=mw-`BL#3+7JAFm{qFGRCHD} zRA)B$J5g}P?OqHBA&M_bE35(U!#p%$WPgi_!!v#_cO;jjJw6Z}NOaDco2#*oVC;ry zD_xIn66XYQYzj$A1Q1?>awUQWH7fM4ax&gXa>Ttix!L<+AQL9$2^q@_sJGSUv6z%> z3M5^~asVUlY5e3}FL>E&5<$~ae7S(i^oIuMs2qF+T_yA=og?(gt_xs#-ICaQ`xXRM zHkheJ+(ot8<_uJ$^<3J~?M3?Wlw6tTSlvM8FvPbQfRWqPHvme9_HLwZuK|l0p_&qe zd2e8Brozz!jhe7=FM?{~#w+wr=x^yYi=aZu!w+~J!4i3oP9rOJTL^2m-bPk#=%M(4 z-6K$0e(n=3;O<9gEPuA^!Df-}V0cdB7YNP!;mP_5&{?s^w9pEB;NP7iy8U41T2Ns& zzrnl+a3oGLs`mjo93{$S|A3%kZ&p=K1pd|+L)LaKN*^I9AHKstsSE(6nA@- z|5Ytbwc$Im!h2FhS%W{qPoe*eiZ8PDn02JaT{mZXZEP6zq!YqgFJhBO{(*YD7#`#s z=E-@&#;Fgy=l2ZR0QCK8^IuSAGYAv)<@-;@vGgIYQb+AkK_`F1O`R@fFXlUQziN!E zKV0?bQdNEF&RuzuvAaPRs_+xP}; zTDn9F&z47wJ{b4wB|As5!kg-O8-|po2cm)Aw!yr|ES#v!acsbt?NJ8zLk~?8f$z04 zrFQp)*AQkKmZol#0gv=WtM1m>KYYZ@9WB15w7=nYWezDv_UMz_zJW~+8q)Z8$>X10 
zkFcYbj-FWmz#HW9i`K_aU^#?ieN~ZUpy0W;|yX$W$4d?uk{GoV{aX|d_ znIhq_-f%(e@*W1U+*06H5p@&@`&ri0mG0LlZHM?8Z$Cu*eE9Z7JLUQk{zI<*jzFbp z#yX}Vl5^@7429%%SI$rY-pnrz;nAVqJDD{JeZ{mNs2_C4#-tlmJyqAjm*(nIST>eX z&(D0!x~s5#19_$4W}7HD_ukD%+&_=`;|&z<*8LUohrHW9`=E5tJ=9M9&-=v!OOaIL zUl$MELc3pdv|}r)4G@ZWo*7p%Apbe|g}?ipY@YDqsA~EV-8n3h1vmL%$>dUOT4y+- zjfyeP-SPV!J?^umGH0{(V23x}LDJ=j$w7Bxr!%rZemVu=E(ZX=Q3rYn9{4pI{PV$qIzT2k`J;6KWK+|j zVS9nY?dfgTB*Jd3;Q*S-yJ32jIj=;SeV~a~Wndsz)WG)P9+x4X0$w9M@G4aZ^Mbu* z=R=yzYIof9xsMa@PZPGzzId9aqt*Vpx(c?7?%Y2PFDIxLt=BXu3i^(7J_+~V7C{v= z&$gbE?$fNGt6_}C=34_}CyH}Pbt9>#M5Q_Y4VCJkZ#L@;_CAl;Q!ExH@STD7A`uF` zht1dAwv1OS!a5*SaRhU3BLm=n|>VvDNFh z`xA06phI9xULnRN6Zp<_AX}d(K>Wx}wTue7(tY{w-<5gR&y_Ejy$%NJgAXQ$d)4*) zzOcVyQV7lT_!d^`-a%+Ps|JN!U*T*@>dE<-1)~$pqF@() zNH&A;zQKPXCCX*VKBpP7whOyrYQlsPq}qC^%iN5J1qk9J;8!L z{yNw@NaBvM_qaKEp$X`{G8-Qd2h?5m668BQ(w9T&z{BJJ3~YL9@Mg30Sk?|W#MR9^ zIv74yI|MtNE*^Ff0C2P^t2On~xV-n|nK9QGPq4-YsEX8<;V}P9UbshCP{G+ez^_1|c^!?wc`*{n08Zs+#<81Stx z0#mPBqK>%Fx_Ynbyk=*T|nRFOZOHdJ@y% zlE4AHc+v1Xs!}9Sv&pw$-Z}lYN@euYYZnXW!+_OS#v$x>@tDq1+T-|{C*`V}*yp@s zPH>g0Zn8**h`F>Y$Ekm8pzEj;S(vkFP;4O+V3<`(baL`CoDUXnC(uv-CwCm=%uI>& zK1n&fQsex=a6BS3_?L&NAh}7$dqUOu*v$Aa?a+C|K<4za-&l2y>(+ioU9fF5DV&%c zZw`8+{OU%pV!4Zod0caq99B4o1+&6wN_Qe;G!wA)TFmVF%$+ArZ#xcWj#KU z@n&}6qEUbeV*bF+xV7-q#3VL_B5*-tue$MF|BbX;ZzGH#56YhI-)O8D7tSv7fXh(y zGz_*MlQjE(g6@#d*XXY{x0z(^l@%(>oy8igBTbe+`KZ6x-|vbD{bnF_twNbR>hCd$ ze-AXHw{j7Ef(}_07zEZ%mW194FfB%uC2186J+smldM_$nH4I;f+2t-}lV;Xk5iwiv zGz8zjx=?w%3i5J5)h@iT_`T`db-LRCa_4~P9kCarokkBIncTJ?72 zS4iUn?#~%1{4i<0E#LFBLuC4tUt`77_dST4RdpvxlLMM{_`*#+O619FzOF~+Q1=7< zZ}e}qHsgw0@J*Y{J0VcaGIjY3_g`)Ms&YOsm8r<(etW~?-Ll~F7Om3I{a?Fu zVRFZHnw})2_FnPR>HfBs#(lbqeSp0e+9rzaxrih>Ly#);L3jWPJe!T#ZI;yK?V;F< zqQ)ar6L7`LUOK(DE@1@W^>mkoP|!f?MgJOE*@FoTuMFlb^i%I+joCf&8niS{Tz^by zR6uxB)p6{j*)8$RR}ny+^GOV9!u<7GwNbK^VyE z5Ao>zKEhlI4HP85squ^`D>A7t4$1ge+X+ZT9`X8%lojb16T-By;xP zh`XpG-lqcsm4H&Vc4sb_L{#|vt_@p!R^52|pScWj*J>BoM$F8E_(;iPp8n<(xRh&U z?U`g9Wz%y-Kn!?R`MHtKXz|=#;gJRAkeU*6XN38ua$H(X&)ggAPuY1b+{DjBrX8!t z05$vT0X+xfM^!l}z)65$E=@hs{E6!ajQgiYtM#qRK4m*^0ZQ_$cNkBk$q#(Kd>byD zZe^a8F6lKCepQ;oh&fmuz!kz1DBXihRgPSJ2$7RV<%b3jsq@l>>xv#C z&w3231cIV8T1E+hkcxsO9l2I_gLgN47XD_BbH4@!4l1Asl%D-F^2G2L}PA!R&cg@0)&ISQt0 zCZ+I2@|Spcwm zI`vjhNWtgrCdGTFN7MhR+M+APjOJCIG&ND+gYHviT7RZ!#*3S)ecqEY$gIEJH*^QL z$!i9V@ENWjH!XzNz=}3Ge;F6^QU*VIDx3}>CrX;;&(UI_300Hho2#22E9!mUI`;_# zVG~-2Ij0GWQTxo!VFnDlc*>TYg&|CB|6Tz5T@f68W#QL~+*`3Vm)dJ^rm@J#lPzHPD3{`?Rz-Nf$4?&hyZ*7(=MY*GXD*03puX71Ya4Z zP*vESz%|Oa6((J)Kg5MVbEWA^@Y-5`0NGjp!7f2;x+E9=mSu}pL4@if2wvc*F|S2h9^Kgj$3^K{f2!c7SGaitsRMdBJ*~s9*e0LrOquo+XltO2akKDkTo`(^zw{d0!_NxgK#+;L!M-T8 zV@QrR^_j)XHU#6q{dO~Zr9?a|#qg3wGb75QqsKiD>e2WE{ zh+^7`N)40VlY&NpbwTS^L`HR)9>?C?g+=`>7z{cu7x=7tD`zC+@83M*)M@o>utv6n zmiyIhHZ-AtNB89I8vdi2L*QgXx`yt{TBDz+m^J>S@fYHcxXw}@Hv z1Vi+FQ(;OtfRWbML9@wij*m21zEffFoN*I#pt7M3kHg8U{={jaMS`tGXe-CDr1(eB zdykYt`B_=6LpUh*3n2ALGu<1{i z-Mj))^KM!l5^+@?Q(IRF#h7h-cKIRr)3i~??iaV^sbC2kFCN36UaiE?x2BUd4|t7Q zy%%pDU#?_^^gb>qtqv`kCH2W6Ka4#Wh)t_7N|7fv<vWKm)5W9=|CeeO7VE5LZ`+DMvqhnR<;KEj_)@MAmpORG>5?D4a`r9&rIzNVkV=zAUH)yYW;`y+a(=kTvS(Jl zGU2aoG&A(NiZ4A~i1%~MODdh4O&J+-fh1v;J+L7f9@ra)F;&0SRutEgHutoTa=GtS zctU}F9I)s=hVo%hYQIC_p`)@;l#^Iy!nbsvtt*3Cbd>GaeI?%;4b?8q20F)ZzquM` zV3He2qqnI!IxnWRcEE9OHc-e6Bw%)y&s|>Vs&IP^hE1i7&$L(y>fM+BQ;#6|0 z?0H}6$sgKl1KNGfoFixYm1U(NL*KwC+`e*R!!6&Mwz^h+pR*J7{qHs%n~Sl*_u(5p zww_hI#OJAu96O3}VgXZVZ8QjzcRW=I=5}A2S)I(kiasZ1bF8Lz+1+H(1j0WnKKr{@ zg%^3~aCaq-Ce=qp`9P|8EX2OZ zN9#l-A5mDNGQ;O+*)BbclxcHEwm5KtOpP70%ca!APIn%(74Y0o2`f8$BAi=9g1&w4 zmDJlYDw?TcTw7IfMPHRL#m=5QTRs?OGdJ7Lm?YM^P^zcl=H~?V%3z_WY3q)nO>~pl 
zbo8^sr)M-V${=l=3>SoHR9(|a*75_q#9&8`t-f)fpBysUlQoD+WX%~C6r}ot zIKe;OXVFp;V>}G&Y>z=l7Y>-Xa@QHKXX6o>2u``Qz|E`{UA6iaz1yEQIp9e&?z598 z^XuqY)!(*(8{L;$?K01R@+@k#z+7s*&$1d?WTv z5nCi&d%|l?cU}rLOp-@GM$R#1Nyp!hduUS&o?L;t?MTQp^2~kBU&kYiNg5|Ut)xP7 zju4ncYQFGquXz7z6}-34JG^>X|7(`yc^_m8u;i7kCEyR%0=cQO3|E~%cOLZ%*v!!rPQtbOnghfB$ zC1>n}ZtvQv$2f8j!1sOL(rgPEF7oLMKaB(j@E9+Tt!0_QK=HeF=Ia~bg17Z)N^yU` zc)b=E+iaC!&09tz+QwxSUB{*k)yb`GPA&5U3H2KNTAhpyyJW)oC_z)yvXn@gqV#1} zOhicRc?DLOz*|^Sdx916m^BHZP<1x+mUg9)NLc%ARGm!c#uAcZg@{ zs|;38n-K8u^RHkoHSDMX`W6eqE_7{Ne7PU?@uEUUpS#p#{hSA@Xp{~X^U7)qRI5Cq zB31!DzRDmqoY>(ZO*NkOnh$a#%%w*|MWah;k!Qh_Ur12?%G8ci1s?iwjFfK93KeHb zIsnx^h>181s=iw$7)>?w{7;gDf7Mq1ef#&5m;Y+@|5&SgY@2WEtJnUg?`^*BcmE4T C@(9iV literal 0 HcmV?d00001 diff --git a/nipype/testing/data/tpm_02.nii.gz b/nipype/testing/data/tpm_02.nii.gz new file mode 100644 index 0000000000000000000000000000000000000000..aa7adc9bbc92b5a89e67eb5498e2076059a81d9d GIT binary patch literal 139761 zcmeFZS5%Yhzx}(`vY;ZMq7(&UK@cgSw@{W1RN7BaIz+liCsawuQWT_%^u9!jNC^<> zkcdD+kF*2`5PIkV0;GpOdyM}$w|k6z_T4#G&*eMb`po(L%=rT1&zyk@MqB)Kvz-@e|Du_Oeip8F<)Yx_-_M(G|4key=$-%k&hz@WZ!!{&N_`|k z>d$95Q5_!DgE0O}V`pT9hdn!t?(g3J{!i(Pi>G(*pL66nd81ro$p5)w`NvC1VNafo zaS5SS;fIbY*=bVwPXwVOxS*~6DKC9AiCXDV)$XB-Yx6{0e=L4YXN@!(g`az|g0z|k z@1A0v;vt@Z`P}ngh~15e5=+JLNw@mgg~xKx18{^j72jCEtyVkQ!>jQWzQMN&n5qG$ z^NcwUvg2CA&&cf-_PpvRo{uKR<7%a`-)_O@wp0${kA@ry?&|#;(f5z?zvu6q`2Uyx zk6wbGH(ma{B`)QM_I%mRWq6bdWv`BQwY$yzz zIOg;6&H?J~E8CoiBjussMzAl!M!C7Ms@2EYd7L7#;n})tRDK4SHB)Bq6|rIs+rG=R zCz@!?Ha(T3cc1WRPFtYSNnCrkj$IXxfr zn^o|wN&=W5;$zgxTk%l%o}D)P^*I{`T`Vm%*yt#&p5`OyXEXjuS3xEWLV|sW37%h@ z=p1S2*zY*&lDq$Cr$~d+He+vbwEviLP_|iqxYf`4V6Jr|d?Q=EtEHbAZS(v5#(COj0f?Ye8uVwW!5ZX)y*MGt8 zE6RLo=>C*hOvG&sKD3g3K7VHr{pI_Jm;1k$yg-eUdSa|%JtB`bh+xoy4RyL@#{?!M z$xA}br23z46$%D3wrg&ypmeH;h1K9ij={PuPW&L zB5Nr(m^g`wPXhn8^wK9L`@M>oTXi9>;sOUP$_GM=GYMpg^BVTS|H*xM3Ml5vW02bW zCDj`Mzxz_%daJFXoo4DqT9lVf+-lh+|3n8hD)FpUh2KdRHOpDt$eoHHy;!BjU*kOA z`XGee00CxY3TAW6gg4;G)_bD| zQ-F@yHuGzHAt3T;u;1XlCrqX@mg%}z)(I3#7k_1gQdaTx;eH9G|0p{om1>}tdIspj z{pv|=1J}@nwpqv%$$)c3>Dh3QDrvW)lVCcoo)A1Pk5gykG7JiGbK0hE2+->OUDtNT}A~zZ>yaGCuiR-uh z*yg)-6u9s%hit9Y`6bz4&x41VVWQp^l(`EISu3fU|NS`7ZDr|He%UQjKxme@X4sH& zNFy-WdC(pTEllKaM;DVD9bbIB;`s56qekS*%Rq8JEo{qw)SAFGp;V$HZ5+5op3I@P za7wA+wbxa!3sg=`CidRK6PZ{z0^WO%_pYx9M~D71TRmh3t24Ye)73Z79t)Cao$k7) z?|0RG-PemjkYq~IPH6G#&T4MR!dAj_1{=)BWAD=ukJRc*f5uZ_3+ik63p4%0eYH|0cy+Bn#_yq4kX>?CNlFa>u zDEZ1IZ#-I})z)mSYldSZ5B)y^SLnutJZ&g(!VDy=85utsqMXSrFc;fVdzQe}$Z(mF>H$5XN zH|iT^OXrV9&%*B23J1NoWQ0*={?ygM_LD&Cj`JZwZvrVLA+vR^bvM4ZiulJR z*tO2$FY{N`<5DNzhCamcx2uuNMm#z=17#HJN6oVn>M-WYTS57i+UlTv9jPt(Qe_n` ze>_M$@>bnJZ8h%$ChPH0tqeIg>L;V#*>%m|Ey!dy`$N0O-nO;)BJNYr=+2jm{HYqi zwy)$1J5fuNOIbB9(+ds97Sf{MK?UXmjBmgstc2|R?5WgMq zr+qi{RW~=;-|D?mWfj%9vr`RjB&~+3R+uLQ+HK*rF}5$9c@5NfIM?J9NgX?~)0joy z1o?X{Z4|_P^t-wWviJ8vaDJxLah}W5&b{lriX;F=CYu|(VdJa7s{^19(t{qY){F^9 z7S3?#4Zl9rgmYV+M?iCZFH@|+diUG(|Ej{F;{Ns^Q*(Vcj_PAKxS2NH*7pA7c%QtX zmHDzq0R}D({YCU*hT+~)sMu8-^H0E}{54s3R|Ux$T-l@autgVL*Nt^5;XGqu%5248#yKZdPEU$f0@3n68$fFkRiM|bdd zBzANJNBhh2PQuk|k(XF@wk=SeJlnBgGDO&m ztGWb8<2d7Z&ld@>eBCQN1}By`*3Onv&y+AwP33^Q7GeS!R*2VB6I(A&tcsPqny^C}#?@|^$HUSGgo;y)!y ztp5ZOyB-#M3}vp{R)-H69a%m%OTzICjYK>ix{%@|mA$NUn_jM=TBllH$~}R>u%>>y zc#_h+9un%n_8Cu~b2#R@=~S1`9*6$a)QVB3UzAaE!#*=ddfUwzN-t^? 
z+1&DFc;(_nEhD2^ui8Hk^S9s4jRj>%X1*oq7e{Kfr;^ESB}?lzBJi`f1j3S=rY3!5 zLWU9a!WiSiXr9Ae6K9nLgpm>nvTehA+Dm3}Dbb1lF9SuV)!Uo}+r zXMxC^z`8H`Z=xrB$uswUvWbI(qRGOC;E*}hc)#z5)vncKH>YY3$W8Hr^ttdS`%fno zgTsidqS`)JhpQZNS6F{UsIrE`MAM*(t7L@c*lWo1-+rgOJY`-TzdA$@+~!Ze%--5= z>+LJ{pG2;P8y2;49t~$D%uwt^Li?9pZ4xw7niaqvg;m{1E4qXJ-r-HW@c)h)ounG}#`hY2|!PnSroVqQ9o9rDClmR;m7~ zs&(Ltz(CG@g~Ckq#vL$9?ZC?V{GxpJve!DI0oKsCGIfS!O`!Z5pqC8NgfPXi83GF0 z1MeCY;O`E=b^4mjs})Cyw>TH2n+qCM5Ae_*GU5{HoAOJnbf#IoviNMsQU(_xlCS@A zmtZ_Ay2dGwKA@b%HDm}|x2iLmzfOY;UCrvWUg@c#7&7@$9z|}X=-O(&X9_uJ>2_Jm z|0NCa(o9JaUoUB!iY{}_La=5@0S3?Pe;Lze*z2T;14b%eW?JG=;abZFe;ralP|AQ> zDzH)O?p0)_wQlM-1KwbEg z8x_!p2C-|)P4VOW@3#F8(-N~^Q#h}!uU=EwN|P;VXFOWpc8`po#;j0^A}i$ulB(~< zNq^6_Z!J;+UEt38Tkr$0lBgpykM`=~p@P4C0$+)+@}F29X$JZs96qmawoN)a=5rV7 zhK6{30MfYFxjn>g<9t115K%+>++xuP^+vH82eEVg^++EkNb6$*A#!kv_OM@92(-JU zGTB>u6j?ZE6|lJ#W7y_M^UXv^q6q9&=11@RL9Dr{Wv%}3Be`5^w-}Zuw7BzASXeLX zx=-&xVE)Tuc6Be}Hg+*EQc(qy(Yar>!PS;e?y;Jkbx4c2xy{?N9=3DPQ|p)Ei6t+D zlL@w)ZkX2Z7yociaoCsKW0e5Q;CWj(zSJWhrluUWm{NC?c!}TqL!@qDmUyLH%0C|sx}XY>HU_6C|s_O)>N?o1N(f%@7#AOsHp@~eNPMm1lheQ7$?py8TlBK;U@ViUU z3!6$Sy5+<*+N|at4~P&(3-_4N(Vn10cXZ&RkUW(={COAg%M>&3a7^>ygq$YhpF)_P zRN*~k6}?wvnVBssB11G(seH*C6tek|`D-5pti1K$7SPCh4XyMU06Xj65myC*TggQ@ zpdj77?fFt=y=Na$sSCEeez-0q;&DuQ*Wpy#phXHl$%pJW*WKtc>J1_W?*Wx?V!Q&q zo098Q%^F(6dk$uVdp}jW*Z19IFwJ%*`m?HSawE&7^HiUidJ7HP0x4Tm&9~VSHw-Pl z=8T0b4HhuW`Z`?gXMh_!Xnyf%x<@U(L;N6|Jn$PN+;1>5BQcmm4O}eD%%#1-`woNM zp|Rr;4qQ@(8%Dr%v+(F)YOjab@|KOoQS`^c?jj0iv)vM&8%;Izz8cA_GrU^&Ax|us zxTwLOCp0hH{X-j;&=Fzc$|@lXbGecI_(bsQ-DMD7c=@K17W6A;Xvj-2S)Gk7S|d!p z{kg6{pvrk6WL2ayVL%^d!(ivgjD*#n~Iqdpef_O%_u>n*t7z>Gpx2z zFL+2u&NL)Fkvv|Ni_4?Xa*>y4l2MuD1RICvrF*^Y!X@?~n9l)F>zlJ!MsvD^`Tv4b z`&aIS*E-N9V}n7jgJQ)Wc~N)_RZJ2mPh854AziF;bIjkiQT1v%!wal{GkGNJH%)Ti8CHb zFS`coX!aWWez{ND-rd0^FfxEfP^W-=mbO9JLGHQ>Fyo7uWm4=crDZY6b7M6w$)|RU5=mbEK<`7TFO*j#52GK{8mr77BnxKh&jy+mAyq^f>$5pe6b&`M9d!co zB(``hlaPa2S}O}xu$%TZ7KJ8)rDqL>oIx3oNCjR1kZeSoQ5)}uuE>{J4|M}$ z^Ol!(Ia@(C;_lU7;ZFhShtO%URK0ED+p#JIXJSkDCfRex?UGgkT&9j^S3eJnL7re; z8FV?Bnug*7*G6*g&L>AI1i#v7E7{Lv(BsKApDPv+j`|PuNXeWftbPOikuM=MKMy4! 
zw{^{&L|_P<)6?~Z>+SUE=C#Ly_k0M1s8xrj7+Kp1RVY-|kpVHAk72ITlu|V|QGa=e z308u^{q{j-?B2GYz4rmYtxOd6l*aPs-srSiie$)r{Tq!jtM*~V64+OrG=QN!hZ1oH zvDidT7QS*|xAahN-*i(FC!#<)e~?e=7I=bwf2(%Bwz{mOS{dR=)e#L$M4k=hTLcBf zM%+iL%uvNWbevYwecP_Ir$k_I@?G^WPu&89IgtWiQonRp30&P=TwxyTr?X8EA1qmT z#4_Zi>KhaobTG5jvP<3M_O^S(3?z5ZMZbFN?EML(lWgFyKoib^Nh%-7bZl8894Enh zc4o+}H-Yq9b?V?p>6a7_$-{F@N4feAEv(wVaZpMm)-K+(?FHW9gaj|-6$TT-1CQoki?dT?`wuS8%$K1`}3SSO5_Q=J73ntC@ zt$LPmM5xoOhfuHH3zo-XS%FVLb-BKR)1I(KRsNd~Yn`SD7YB}1On-#&Y!2Z{K+UI` zIJq`l5ah1brIV?Ah8o90)H&P%70USiv2*9w=FevjZn31I1kGyS6nfJWv_hBnI)+D$ zw{s}g0L9_G##QmMwp)pI`A1d5^1|{e4C}8tccp0(SJoDXqvc>3p?`<)r;F2l<{he2 zsia>IlBP8G&J)eBk?OB^v)j;keoV`(6K8sEmFIJ^dS&wFhtPIvav)*by3pv63vY{) z-b%r}>4V9)9Xn(DGL~=+0m?^s7z|8uYA&8jZ|S1lQQ2&VT2I_L8`4{h4H5ON7=2hUWsj^N*Hg|51@x?^ix|W6RxAjilAgQFWhTN^T-B4eD_61w6;ttRFNs5up+ zYxt(q>Od9ER0s(jQK()}7%qJ>1ga0sp;UhjpB}Qa&hzN}g0q9+$Ltl z(16!ZN-Y29TiaW-^y$*q8mif(?CCYzYB-;OjBO)0sj#Fub(2}s0(HJu#iF|D>QTkx zkw~gYt0Pq^wc>r-T>-tFzr8%j$sv7zdWCz~l6q~oT~`J{4g#C~N+e>X*CPWwptZj6 z9x*JNE!lI>DDIc+Ut`JALt>sS+%_VF_33T>nujj#Q3$iFnshsSLyH3zW{48U8*2m5?IFMl>HyuUa`1NnN#mpR@bg{|T`-RmR@aV$BL>4NKRZ zao6WLVZGFpobR(>$kvk!=*VlW0oE{!eZCScnJV-WA8(A`v3XZ}(Lk}r&E2z1In@jW z(O8A(#5OILQB*QDm@Yn=Jl`b_Y3P9&SO3xf;blgEmjYh9)ysLHhu?IqbzfsxmRJ;j zEpaQ;SzSvYe&y!Ng{$Vp|avC!!ml=n^%jsvXyK&y0~ovL?1;2RfYL+f!_0ulOqU4l(TqYZ&~rS~JCs zYm_N_dnjIzB}DUn>#+A$aQ>11Z{dYqnKRQ6>j4j#@4%2%9Cn9+wdN+74(W@EU+Ks0 zUKbJBIv0-bGUzao0UUM=)qAa*Jz=;^&=TO}H?g&D5nni*SIBc%J32O(E&8x>{{SRiZhweYzh?(GKA9VYg-qr(b zO2HAmET#kq@vUE9ck+(-xP-h6kDyp>sRW`HM=sOZN<4ssYOQF8 zf85-L2Fh64+U?r6oqcD!FO2>|KrkXpj zkdT+{-PV~$KKqViOX+uGzk;o;lX3e7l)pd8sYI^*plJ*gLeWDLZ5?#k^|H=X;UA8J z5O^`~(z4(85O6N!XS>Cjzf!Ns4)EuRbRQ9nB`zM~y^$OUHc1?hnOI&LHL&6tji1a`eEo6t7RR(d$7Dr37;waUQVe=C(yXKuyuH;HtZL@{7=DA zSXfv8LnEoFZC|iN(!QAq@(Ol`%|ZW{XHs7DFs7DlwezJMKWdog!;Oy#Ssa)BJkVrK zv8U(IGGbCy$G#>T+n@9d>xS9l80%GkqRnmRfBm72US7dP;twBeKjqi)kDN~18~Pfg z(2>>dx;J+wv%|kQJ2YF2WoS2()Yw)!x%^j%=C&eLbj5eTtv0t%7!#`D$M>#C3k_{cNq^?XOO$=&`!N=_rJB7DgP#k>a&Uz|exix>nA{qb!nB zK`g+N{&-J27paPo4{53}&(`sjIRgFV9m}}3^;qKd5!8%xF$FL=9x+fqyS`s`b3nrY zoq6FTBk5Y@`JrkD-`UA&mxZ?EtPPi;tW2udP0Be<09x@ZH-id!m*Cb^etctaUIz zyD>wO6kJL9SAMm^1aIt^yCBNKhaZ9=50n}!j#123#BKKY;ne%Vdp|yr#0U7?7!_=t z_S|T{JdMH^j#qWR|Gx+u=$+tFrX_agL0inG!A`8X0MMaPV zy=+4#1Obhsym0VVX_V;io3e9#q@l7=R+FTwBGH}O6wK(} zs)@`FZrf|6yp;FJ46Z3FCHsl1i0!n-{Q~XnrdS8VXKKe}6r%nOf7P`vI&i7+X7EoQ zU%b~>m)P2U98!LR=z)LYhWlv3RDPSTspPssTYF^Sf%BEufaP$L3S1K#UuK|;p4e6L zN^hG^$yk~GU{6r_(z{n1EkDY~D&F9o=rFR6)Y6TzsIdSqJUtl$=zrUOfoPhbb6eYx zU`_yXvaWzJPkoPymU%;Tf&&l%`X~MF4ZY}*i^NihCA-r-N$npCPVo||)nVgogD?^P z`sf`^!4-x`=eGI=rCEUO=&g5Cx<%ybr2ewE(wRta%})^XKyg6i1FNLBw7Jbc&&HZ9 zFj<@RrWOc_4kl@t?q3UQ&bXATf)YCcmVFvdiJEMNZX27auPpJgMu8}o{D`F?=#c7Y2GO+CA=}@C$Dc9mo_@omx8iwu zZmQs~BsZNhz+n}BwJ8sL#?8-Up|Z%QlDgIUmxlNgH1lHucecJ?d|jCB|f`@(cgv4vrR{4g6VmQN@g?$ZYH? 
zoo~EtS%zqYTvnX&IIF@P2|cCX;&Hy9Eu^_SWcT^6l>*VLYv3KNrzdFwvu~EpdaJni z)?TNF1YT=8N2%WJRn{9OMR*yUpM@2%`DeDodGC^kcC#1bynQ^kU&yd4vH^Bz7j{jI znn8Q};fG>kkA`6 z@3pJ}op-gEmx|2|JU8>CXiq1yVBb7@muypGn6X#vS{=POV{@-EC(->2(<{qgE{YAj zSe{84z|BNn*omxCK$kz}33PDl5A*GX_P3|TC8rdgn4~)%J5s^Vy z1Oh(U91s0_zgoDtfv{3_MAV0;^p1*=Q^;D=62WN`?wD8MfO%+3gn%T~nk~^JN|Lgb$QRa) z*D=KoVczqmq;M!Xs7!vugj@D`_X?HW;Dq@fQM%9g9Uy~7yBzOD7N!Md^Fllx(^c98 zoO{Oq>?qb0B+DkgW$s&B>%;T)mqIk}kVz!=DRM&=G6lAHP5fQ?A~h!^T>F~rYYSxA|BvksiR z#Z(wiO*UHi=n6x?ha7}Y(sCzG*#HuJE-(0PzHuv0*6p7PYGBkf;5is~w_v(!Rjop6 zkO`tE7G+%)JLK*CEEa|g5pq|g}u zG+$ahv!2*<*Gu;irDuqec?-RB{q}*I-P7i6GR7mjsq=Yi@3Tecp8P?GV|8oazBHQ- zOhk3B$bRrF4O!pAnIwjAAaSe?5Qi5a6*ww9tmFkAj+t}bs_2o&HWm16-e1xPzS({; z>L$#t`b)8Vq~PRaD42+PF=Tf``-Q{QJ*6eqI6R;?gmUx689roNM#!6@Pf@MM){!K+o7L-8%v@# zy^&l(*QF07%%KFpE5b^AkVU=U4{JBH9!;m(GhIZWS>y0x`iXGp($UR{P*s$|#t z_=tt4$~O{F!xuv|?H?k2 z1r+BtmR+FP2b`YUs0&9)`o#(;Cq4LyoQU46b0L$Cpy03LTfci|p6th0a_t8uOKKvV ztqX}MI#kb@kl92)e<-_k>u9clkga|m^>RPaU>oL!QyJSJdzg zM(KolEh+^1Bqz_KCn>oE@UOI{v)TdZ;sZJhKmI))`Rj*0f2*|3LG=*-NfWFG|DA!04ia+SLb`i@UOVIq(ZV#$ zRpDTrU9_W9D7lp{DLJncEOpC65zAF}@6J%fb&p(@A0bvRdqaQAQ?5*$)+e)DkExl& z^4;BJVDXkKuC|;II-6%4!!yZ#x;(iAHp&lehc>`2!@hEIFCk{mX zZeiD_gtF43j)s1An^Hty+u<(gO|?wd(rKUO0*%|**BBIqZd~NVlABFF z7aZ^OYH79Wnl>m`c~L&3u-4n?2qm)zYMTB-sjtEStP&Nu7~VX6qQI3)s~<%Uv(lUD z`@GMhMm$bq>@fgQNJ@OOCF)jC{mc@+|AT2R* zqBenCkzxn79n;DCP}fu(fZQ(^uxF%5R%6lHj#jjRd_{N-+;k zTs&-Ho2T;bilri#(^Cc|a>J?TvOUgawuK^T8jIOG7a%%T4_gECk+rnLgZ#Ob%;V$& zA!nnR#&Zj~-|1~a!RUF9GmF0`To^VyFe z7R-OT1P0PlS!oS6y|D1c&gr-8JKMAO@=hQW%PwWtG;h@d?$i+i_bLa1E=THjEODXZ zG6PPPGYbTV$PztJ0eotj%0~U`55^*qF@aT5Yx3t1Wa38#I4cvXO3C_t~w+m;~mhqDAlJ_ zIP{kLr0&pe3MwyhCMyXsjrBL@pbLeVxd()3zgkEwY`8uGRgkZv08Z}griCSJ)auQE zV6`xfq=9Nq^VqB^TbIhe>eGz60V?QHX$7h>`-(2>y5oo zjDTheFXJbl#M2n*2CtnOAeUvqTF4>iB3^Zg8CK5onSoe%HYxf8q?=rQtQ zG0(0dVJp@&e6Et>ej7B(dxfyfV0~U~Dpdq}%ZM3A<)>7x6f6sZwNu zguMiR4ZVR^W0(2+rz3em{M{cB*JuCM8{rz4ELP?vq=Opv>NPUhLbE~an}EPn^xK2x zmj-&VPAS@H3lv~M%Lv9>EQNzuTHhy{aN`A@hYo46ONLF?y8#7f^{;ESa!lC7`lcn=AZp#r5mHlAs;p9jmC9>Pqm~&7W_d zq#9x|5&cZB6(9=L(a~fI|3k&64>~$Q4@`P%6i9jOEv$9FCXw;eJLoFIqe&`hKrG%_ zpX3zS^Zr4E;hVpr=1$3KXJqkpY;V{)%TUDyB(W_D#&UxPlHHLdfd1fBJGKa|e72BP z<+;sE?zzY2d*eS*DR4I~Jcuc~-T5n@?mg9+6f;PXyTy9G742U)zj}ec=Fjb0+6v=Z zTBYA(GBa33_IH0Zz7!r9v(0z(2=L%BsL)4TmGyVLtW&@@ z-O@Dsp`f8&vhp2TlJbG8h|PAip)wkfeJ@m+c-AaCdcNZ?-~z$EGTsk!sYH zt@jLzjs2>FnX2vO&0;q1o~57K<(oK7U~(1|R1m_xCZdTB)F=&9KSvz?n28dBJJN0@Bog6~6XlWewW}Q|@vUpc0z$&@T z>;{B1g7cYC;U1Q63P1etW9}W(UYtx$>hH3WWOHWNhZBmOXG^G~yl2O&dq?b$jWBg* z)F<&Ksqn8Ice!U<)cixw0EY< zYBuV;Wmrd9t0sBv)yTZf;C4Q79}=;CrtWj$X-9ZSYq2VssCGv4L72opJqDC)!^?h5S@of6LX`vDq_BQ*~3~IwKt<6ci8)evN2xASa@U9@Z5-SXr9haHiVJi+E=GgzIx1Q}zD>+#$;Vs;lJRlu5R2nKsw&h>vOb zqB$ixv@g=VEA7ebzfBb0&5yi28-g zDB{03c&jIMCtlt%6-n!f%@Dr4H!SC)4btgdFQCyU>gevZva|4xFKy(bPlG7|?&k`7 z0x%8sX0Q}ampNtj7=O17KVom&{A^ojV=hElxgxKQu&LfN5o zx4p4qg9H*bq~oa!s!s<}A35aQ zgd9ylc6-Av0X8~2Ie#Suh8xpkSkIraGbr0brL^(wZT^C3ftl>xzhA(OI@fGA&x@`S z3`lrHZK2F+&er70$SPy;k-CQ-%4~o0cc3ifsWAEo?5#$#|+Z2ycdH(q18X!rj9 zby`NA(q=07U0_323XpB9{Y$WaW5;1Nz=#F)5G9naXDkbW&-yYPUtnyRB|D0tGGYc( z9Ui?+uV&L9?X?W+ltki*MFY|uM719V``b%7D(=b<^*rl+fcmz4{xW(Mdtsvu()~dgb{=$ z-$J<^aw-%5?7zqB*6i~^jU;!1Hqulh;^&P++6|BUYWRAoxXka_(2dmU!rbzF-3T2= zo|_vtt&_E9_i`?6j&3|FJg`(jIFOJ%OWXPRoWY7~SGV50c6HOOL^tFFGwLw$)KoqM z(#y$OUwcgzLsQq)ki(6**7jpw-^vUlSK zN=lL^8PbhIL;}DoO?va{!S%4>c~PO!Q);*;ml`{BH1YmXpl#`KjLcRhR<`B3={okq zgbYAK87bh1zu&J7O?twABJ8405gb31lNGvwwgwGMd7>RszT4hLWwYK2On&0kvH|Mj z4=PtURE^uWlb$Z%hc5!EoqQxwws_PiDYn%gdh$;wBdjY)>$K(eZFPkMevq$8IwxmT 
zN$-@Xz4t%yorVCh7~ZG&ZI^iHw|F+!HZ(;P9&OUe+p@>{XDmh8vES~s*Z7F?R!c-m z?!$c>Q-h$VY+zWqbnr5BA_RKAE+Ho zYX|SDn8(?tt=`|fOgb59gWD!h8+l={D*<*BT^U@L)X@DdMmM`9&@Y z{Y!RKw_o_<(Ay8K(2wJWK(Sk;(OF8OWlhm-=LO}#k4JFBx58kI5}3Y(3d-sw898J~ zw|e2*%Dv6=s9~*NUir+?zR~v7Bm;Vl_F<;TD|Y6k!aLIEjaM7$kn_IZ`VZFbsF#Ng z-6j0|Frf*V?)xG~_&f?()9EK+8>`FVQHf_S1EltboxPJkk4AqDn3m%lx6z(Z!bfIO z*QYG|$z<}zyIs9i;u3nq)B5Y`?G+tjQ0sO?^(Hn<80{AW7BiSRMDobG?~{ih|g4SwTWs7c)}Uki5|#}X|#=) z*ZXmUYd4GVFPzQ66*pzYR_$u)-ppKBIVFJe*a`DiHhzt z)BTnTzjNxtlV+XBK1F`(N{G|?sF}n^^zvj|KzH!kj;BmGjg>WjW%eYX`R1$mjg$}S zHRTWBhaGV)E2!gHr12T`lqV}L2B*D_N`JKQfV9Ecj3+n6ODo$`D@v3ip1j5;H54xq zzwpC1FcjX~8by=!KEFxLfV{re+r4Ig@l3?&M-rp&9u`K>cnbFobJV!yS0tLG^C0l0 z3kr8SX1HBzLVgaBh~H3;^sHCSLxRR#8?;ej!Uh9RE&kI4= zg;uAR`hqIi#$(Hfy2G~2VcFnr0e@Xu5GDWJAG%SbC21usWjX|AviG*hC7nx}iHexV zEj1^#?h7ydjr$eT^B!OzoxS^pmBby}Zx>oDUo-o#BKm!2R`SdaHUaI7wl@QGM4s$r zs1c5Dt*Fry!dHUeJg{&)zjr&XAg(a>UU?xBKnpRo&(@V<_Ss65Q&r-9t$#Ali2R1E z(LyQmJ5K;NdCoM)xJDZFvZPX5EACQ}YKIX@sWj}=_s(SBgf^kY7hkc_J&m4pY{<~r z(|f6H%UQRFA?=0wZo$miS9%AjgB}e9;r%l7j}1A!UWx4Oa9;67*V*YM=6}FMc+Mfh zpZeVEM3Gl*)Oy-BNG+(B(_^BnIpVj^%cGF2y4S3?8Q9Lv?4>a|tu#-S=JZP}y?|fe z0au}W?F)c&Xomj?5?K)m`!!x6F&jp>%a}Q}+r5{!;2qQ2)eJic(Pl%(QHTdp&1RmH zjA4!iN6POEOk2Vzl!FYmyZfJ!ePY_&Cd;M*OUz*hZGd)Pkmg0vgzeU zONK6u^NHmQ>u8MoFh#saC&u1()dD4b;o8Uk{6@Q8 zWWcOfj6P_>fb=XuQaMk^er{s9gX68XR#PF%m?~fX zrV1;s|EK@_vf;e3x|oEzJ3=7`KG2ZZZ3!=0o;a=xQi{S?oKxWndj&b$NwX(>*3fi{ z6nbp^2aB)Hq!XO9gZqH)4m5FRpN@w_Y%$7-Oe)+;9c3L<9C-J?`)|Va^I2+nu>QWblLaEbEvbTtfF~o8ZyC+8+3KHK%*3 zG(ta93emD@YV&y>gSR#|TxZVQ-f#gR1|M#G6T0M%Pwd{%pJ@@;sV#kiKRP3ngmip{ zH$CTwS7dGMmgwE~dThhn;;)GHvVZD=PRS$@_jb<;&7a{gH*A;Ny5xKGHt>@L-|4pO zN#(VjI?Lo>Or7Jir8ILqvoAbou;tky4D_=7idQGE7u{zzCaelPu!XPwx6&`GY9EOJ z^#5b}i7(tOzhExioT;FFyV?pL6_1LIDdDp*>I>fK(Iza^d(_QB4lhw=ukZs!I*c~; zHppw}N}F2+|1|L}1$!|aIOdbPUUN5vESpkBu-o`XjF?gWOlQr871#~$6lk{eQ$0m1 z=~lJ^A=5;6PxIqeluAbP|6%Mrznb{McCCV&bRqEYu27wv+w7+uU*c;Nb!E# z5ViIk_FLHZG5_B3#7}V4%rZ4IEAc9veh^KOr`Gg5}J3*Wnra3F&v6{#-T5&Jv!rF{=>QJ7~Ey{XXfh zPvee>QeFq=34bQO{jL5iNXjdrK;bVC_-}P``Rsqb8W>Ksp|ad+O}nyep+BD#&chc= zhnx(=1OMZ38}E)O-90Vm|EwF1`w4VE512gTi((5t%UjZ!>f4#qCf3-gM5I3VdB|OU z5f^{ck_Q%DdX%=~;yC9ys&s7RVDqtd?I- z?fE?UeKE_;AUUS?MhtDYWohx5FYs*eJJ?n14<}ZvP4mFO_nEXV$a8Dvv3x#wKgN3d z3P`KfyJ0!-bkm~=`^7Q`!lb#G{Jh9^)0Q4R>1C)}q%f@Y;?ketwuoj$G&GGa9`c5#qp$ zx$KThVfmA$+@v7b-tyQVHt#hxY1-Z5Mj}@w71WyFp4Qcu@7%L^Q z0kWGKpZ^lV=nns${t`h`aQkB>ak11|^cm^Y32t}Sx}_reO+J@dOAa!{H(p)_e}XEw zmBSv1FD?ft2)Rl<_a)yQY9hH3WH@Pn-^i*w2>YHdY0;tGS`oeMGRLEpy^)p_>h_Xs!^xNvJ{HNtf z+m8iTxdvXbN-VPO8Kwu}wWW7~G~PWck$8KQ@w7r0Y}SQJmsb zmdoV*!&o6yyTT-;$E%`00P43R_x!RaXR9d$Ezj)>Em#`);?|-E8lWkBW*|?;MuSvdcMvV zCigS9r^SYHE)_WlM9hzfXeBF5mR}mbrPM0t)cqGn1vlPoNh(BSea6i8$F_Q3I?}ro+@lKh+$l5M{W9c-ia{Jjl$=a?-YXDG(A&|Y z-&^6BI-pBkFAmjWv$d?_QXpH<^XZaEQPjb!J6u#UB*HUzUuV^O$gKk6diO0f+igX0 z1kLqxrFij>76}XjPV6l|*%BnX#k%KfDs^c&aJI`#r;wkHChj!LI$KLH{{-GBAB@5$VFlE2jHB*oCb+LV=D9$vm-E$?1u+cVhaYotvDXHg;i+XdKZ)*ET zE50&~W?GIDb^IikmrS9l4!_t1%v=78?~$FpzHUAT*`~*2$(9`$>QAYCTPU9A;_o7s z%ab1}tCgsywamQ|V%c$;A#%z&UW`wcpiR%&lXzJtZsv+FUM16J!ML{!2x2kjr8m&a z>S0I-J)a`mb&Z&qFwmMxDREUsXY!`sGqHO0@Ei&_hZI83^g>dgsXWi`$Cu7q4 zwP8NaOW51pM=!&hwgQrxO&2kfez&E&|2+@>c62UHS^-{gJLqF?{$n&}HwUAto^T