diff --git a/example-specs/interface/nipype/afni/a_boverlap.yaml b/example-specs/interface/nipype/afni/a_boverlap.yaml index 4b710470..f600834d 100644 --- a/example-specs/interface/nipype/afni/a_boverlap.yaml +++ b/example-specs/interface/nipype/afni/a_boverlap.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Output (to screen) is a count of various things about how -# the automasks of datasets A and B overlap or don't overlap. +# the automasks of datasets A and B overlap or don't overlap. # -# For complete details, see the `3dABoverlap Documentation. -# `_ +# For complete details, see the `3dABoverlap Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> aboverlap = afni.ABoverlap() +# >>> aboverlap.inputs.in_file_a = 'functional.nii' +# >>> aboverlap.inputs.in_file_b = 'structural.nii' +# >>> aboverlap.inputs.out_file = 'out.mask_ae_overlap.txt' +# >>> aboverlap.cmdline +# '3dABoverlap functional.nii structural.nii |& tee out.mask_ae_overlap.txt' +# >>> res = aboverlap.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> aboverlap = afni.ABoverlap() -# >>> aboverlap.inputs.in_file_a = 'functional.nii' -# >>> aboverlap.inputs.in_file_b = 'structural.nii' -# >>> aboverlap.inputs.out_file = 'out.mask_ae_overlap.txt' -# >>> aboverlap.cmdline -# '3dABoverlap functional.nii structural.nii |& tee out.mask_ae_overlap.txt' -# >>> res = aboverlap.run() # doctest: +SKIP # -# task_name: ABoverlap nipype_name: ABoverlap nipype_module: nipype.interfaces.afni.utils @@ -39,11 +39,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file_a: medimage/nifti1 # type=file|default=: input file A - in_file_b: medimage/nifti1 + in_file_b: generic/file # type=file|default=: input file B - out_file: Path - # type=file: output file - # type=file|default=: collect output to a file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,13 +112,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file A - in_file_b: - # type=file|default=: input file B out_file: ' "out.mask_ae_overlap.txt"' # type=file: output file # type=file|default=: collect output to a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -136,7 +131,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dABoverlap functional.nii structural.nii |& tee out.mask_ae_overlap.txt +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -144,13 +139,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file_a: '"functional.nii"' # type=file|default=: input file A - in_file_b: '"structural.nii"' - # type=file|default=: input file B out_file: ' "out.mask_ae_overlap.txt"' # type=file: output file # type=file|default=: collect output to a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/afn_ito_nifti.yaml b/example-specs/interface/nipype/afni/afn_ito_nifti.yaml index ee03b2fd..abee3813 100644 --- a/example-specs/interface/nipype/afni/afn_ito_nifti.yaml +++ b/example-specs/interface/nipype/afni/afn_ito_nifti.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Converts AFNI format files to NIFTI format. This can also convert 2D or -# 1D data, which you can numpy.squeeze() to remove extra dimensions. +# 1D data, which you can numpy.squeeze() to remove extra dimensions. # -# For complete details, see the `3dAFNItoNIFTI Documentation. -# `_ +# For complete details, see the `3dAFNItoNIFTI Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> a2n = afni.AFNItoNIFTI() +# >>> a2n.inputs.in_file = 'afni_output.3D' +# >>> a2n.inputs.out_file = 'afni_output.nii' +# >>> a2n.cmdline +# '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D' +# >>> res = a2n.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> a2n = afni.AFNItoNIFTI() -# >>> a2n.inputs.in_file = 'afni_output.3D' -# >>> a2n.inputs.out_file = 'afni_output.nii' -# >>> a2n.cmdline -# '3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D' -# >>> res = a2n.run() # doctest: +SKIP # -# task_name: AFNItoNIFTI nipype_name: AFNItoNIFTI nipype_module: nipype.interfaces.afni.utils @@ -36,11 +36,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage-afni/three-d + in_file: fileformats.medimage_afni.ThreeD # type=file|default=: input file to 3dAFNItoNIFTI - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,11 +109,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dAFNItoNIFTI - out_file: ' "afni_output.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,7 +125,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dAFNItoNIFTI -prefix afni_output.nii afni_output.3D +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -139,11 +133,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"afni_output.3D"' # type=file|default=: input file to 3dAFNItoNIFTI - out_file: ' "afni_output.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/align_epi_anat_py.yaml b/example-specs/interface/nipype/afni/align_epi_anat_py.yaml index 0bb7bb02..4f82ce85 100644 --- a/example-specs/interface/nipype/afni/align_epi_anat_py.yaml +++ b/example-specs/interface/nipype/afni/align_epi_anat_py.yaml @@ -7,55 +7,55 @@ # ---- # Align EPI to anatomical datasets or vice versa. # -# This Python script computes the alignment between two datasets, typically -# an EPI and an anatomical structural dataset, and applies the resulting -# transformation to one or the other to bring them into alignment. +# This Python script computes the alignment between two datasets, typically +# an EPI and an anatomical structural dataset, and applies the resulting +# transformation to one or the other to bring them into alignment. # -# This script computes the transforms needed to align EPI and -# anatomical datasets using a cost function designed for this purpose. 
The -# script combines multiple transformations, thereby minimizing the amount of -# interpolation applied to the data. +# This script computes the transforms needed to align EPI and +# anatomical datasets using a cost function designed for this purpose. The +# script combines multiple transformations, thereby minimizing the amount of +# interpolation applied to the data. # -# Basic Usage:: +# Basic Usage:: # -# align_epi_anat.py -anat anat+orig -epi epi+orig -epi_base 5 +# align_epi_anat.py -anat anat+orig -epi epi+orig -epi_base 5 # -# The user must provide :abbr:`EPI (echo-planar imaging)` and anatomical datasets -# and specify the EPI sub-brick to use as a base in the alignment. +# The user must provide :abbr:`EPI (echo-planar imaging)` and anatomical datasets +# and specify the EPI sub-brick to use as a base in the alignment. # -# Internally, the script always aligns the anatomical to the EPI dataset, -# and the resulting transformation is saved to a 1D file. -# As a user option, the inverse of this transformation may be applied to the -# EPI dataset in order to align it to the anatomical data instead. +# Internally, the script always aligns the anatomical to the EPI dataset, +# and the resulting transformation is saved to a 1D file. +# As a user option, the inverse of this transformation may be applied to the +# EPI dataset in order to align it to the anatomical data instead. # -# This program generates several kinds of output in the form of datasets -# and transformation matrices which can be applied to other datasets if -# needed. Time-series volume registration, oblique data transformations and -# Talairach (standard template) transformations will be combined as needed -# and requested (with options to turn on and off each of the steps) in -# order to create the aligned datasets. +# This program generates several kinds of output in the form of datasets +# and transformation matrices which can be applied to other datasets if +# needed. 
Time-series volume registration, oblique data transformations and +# Talairach (standard template) transformations will be combined as needed +# and requested (with options to turn on and off each of the steps) in +# order to create the aligned datasets. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> al_ea = afni.AlignEpiAnatPy() -# >>> al_ea.inputs.anat = "structural.nii" -# >>> al_ea.inputs.in_file = "functional.nii" -# >>> al_ea.inputs.epi_base = 0 -# >>> al_ea.inputs.epi_strip = '3dAutomask' -# >>> al_ea.inputs.volreg = 'off' -# >>> al_ea.inputs.tshift = 'off' -# >>> al_ea.inputs.save_skullstrip = True -# >>> al_ea.cmdline # doctest: +ELLIPSIS -# 'python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off' -# >>> res = allineate.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> al_ea = afni.AlignEpiAnatPy() +# >>> al_ea.inputs.anat = "structural.nii" +# >>> al_ea.inputs.in_file = "functional.nii" +# >>> al_ea.inputs.epi_base = 0 +# >>> al_ea.inputs.epi_strip = '3dAutomask' +# >>> al_ea.inputs.volreg = 'off' +# >>> al_ea.inputs.tshift = 'off' +# >>> al_ea.inputs.save_skullstrip = True +# >>> al_ea.cmdline # doctest: +ELLIPSIS +# 'python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off' +# >>> res = al_ea.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `align_epi_anat.py documentation. +# `__. # -# See Also -# -------- -# For complete details, see the `align_epi_anat.py documentation. -# `__. # -# task_name: AlignEpiAnatPy nipype_name: AlignEpiAnatPy nipype_module: nipype.interfaces.afni.preprocess @@ -72,7 +72,7 @@ inputs: # passed to the field in the automatically generated unittests. 
anat: medimage/nifti1 # type=file|default=: name of structural dataset - in_file: medimage/nifti1 + in_file: generic/file # type=file|default=: EPI dataset to align callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -114,7 +114,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -150,7 +150,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -169,20 +169,14 @@ tests: # (if not specified, will try to choose a sensible value) anat: # type=file|default=: name of structural dataset - in_file: - # type=file|default=: EPI dataset to align epi_base: '0' # type=traitcompound|default=None: the epi base used in alignmentshould be one of (0/mean/median/max/subbrick#) - epi_strip: '"3dAutomask"' - # type=enum|default='3dSkullStrip'|allowed['3dAutomask','3dSkullStrip','None']: method to mask brain in EPI datashould be one of[3dSkullStrip]/3dAutomask/None) volreg: '"off"' # type=enum|default='on'|allowed['off','on']: do volume registration on EPI dataset before alignmentshould be 'on' or 'off', defaults to 'on' - tshift: '"off"' - # type=enum|default='on'|allowed['off','on']: do time 
shifting of EPI dataset before alignmentshould be 'on' or 'off', defaults to 'on' save_skullstrip: 'True' # type=bool|default=False: save skull-stripped (not aligned) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -197,7 +191,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: python2 ...align_epi_anat.py -anat structural.nii -epi_base 0 -epi_strip 3dAutomask -epi functional.nii -save_skullstrip -suffix _al -tshift off -volreg off +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -205,20 +199,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
anat: '"structural.nii"' # type=file|default=: name of structural dataset - in_file: '"functional.nii"' - # type=file|default=: EPI dataset to align epi_base: '0' # type=traitcompound|default=None: the epi base used in alignmentshould be one of (0/mean/median/max/subbrick#) - epi_strip: '"3dAutomask"' - # type=enum|default='3dSkullStrip'|allowed['3dAutomask','3dSkullStrip','None']: method to mask brain in EPI datashould be one of[3dSkullStrip]/3dAutomask/None) volreg: '"off"' # type=enum|default='on'|allowed['off','on']: do volume registration on EPI dataset before alignmentshould be 'on' or 'off', defaults to 'on' - tshift: '"off"' - # type=enum|default='on'|allowed['off','on']: do time shifting of EPI dataset before alignmentshould be 'on' or 'off', defaults to 'on' save_skullstrip: 'True' # type=bool|default=False: save skull-stripped (not aligned) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/allineate.yaml b/example-specs/interface/nipype/afni/allineate.yaml index 8e6a34cf..253decc3 100644 --- a/example-specs/interface/nipype/afni/allineate.yaml +++ b/example-specs/interface/nipype/afni/allineate.yaml @@ -7,36 +7,36 @@ # ---- # Program to align one dataset (the 'source') to a base dataset # -# For complete details, see the `3dAllineate Documentation. -# `_ +# For complete details, see the `3dAllineate Documentation. 
+# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> allineate = afni.Allineate() -# >>> allineate.inputs.in_file = 'functional.nii' -# >>> allineate.inputs.out_file = 'functional_allineate.nii' -# >>> allineate.inputs.in_matrix = 'cmatrix.mat' -# >>> allineate.cmdline -# '3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat' -# >>> res = allineate.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> allineate = afni.Allineate() +# >>> allineate.inputs.in_file = 'functional.nii' +# >>> allineate.inputs.out_file = 'functional_allineate.nii' +# >>> allineate.inputs.in_matrix = 'cmatrix.mat' +# >>> allineate.cmdline +# '3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat' +# >>> res = allineate.run() # doctest: +SKIP # -# >>> allineate = afni.Allineate() -# >>> allineate.inputs.in_file = 'functional.nii' -# >>> allineate.inputs.reference = 'structural.nii' -# >>> allineate.inputs.allcostx = 'out.allcostX.txt' -# >>> allineate.cmdline -# '3dAllineate -source functional.nii -base structural.nii -allcostx |& tee out.allcostX.txt' -# >>> res = allineate.run() # doctest: +SKIP +# >>> allineate = afni.Allineate() +# >>> allineate.inputs.in_file = 'functional.nii' +# >>> allineate.inputs.reference = 'structural.nii' +# >>> allineate.inputs.allcostx = 'out.allcostX.txt' +# >>> allineate.cmdline +# '3dAllineate -source functional.nii -base structural.nii -allcostx |& tee out.allcostX.txt' +# >>> res = allineate.run() # doctest: +SKIP +# +# >>> allineate = afni.Allineate() +# >>> allineate.inputs.in_file = 'functional.nii' +# >>> allineate.inputs.reference = 'structural.nii' +# >>> allineate.inputs.nwarp_fixmot = ['X', 'Y'] +# >>> allineate.cmdline +# '3dAllineate -source functional.nii -nwarp_fixmotX -nwarp_fixmotY -prefix functional_allineate -base structural.nii' +# >>> res = allineate.run() # doctest: +SKIP # -# >>> 
allineate = afni.Allineate() -# >>> allineate.inputs.in_file = 'functional.nii' -# >>> allineate.inputs.reference = 'structural.nii' -# >>> allineate.inputs.nwarp_fixmot = ['X', 'Y'] -# >>> allineate.cmdline -# '3dAllineate -source functional.nii -nwarp_fixmotX -nwarp_fixmotY -prefix functional_allineate -base structural.nii' -# >>> res = allineate.run() # doctest: +SKIP -# task_name: Allineate nipype_name: Allineate nipype_module: nipype.interfaces.afni.preprocess @@ -51,9 +51,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - allcostx: Path - # type=file: Compute and print ALL available cost functionals for the un-warped inputs - # type=file|default=: Compute and print ALL available cost functionals for the un-warped inputsAND THEN QUIT. If you use this option none of the other expected outputs will be produced in_file: medimage/nifti1 # type=file|default=: input file to 3dAllineate in_matrix: datascience/text-matrix @@ -62,19 +59,7 @@ inputs: # type=file|default=: Read warp parameters from file and apply them to the source dataset, and produce a new dataset master: generic/file # type=file|default=: Write the output dataset on the same grid as this file. - out_file: Path - # type=file: output image file name - # type=file|default=: output file from 3dAllineate - out_matrix: Path - # type=file: matrix to align input file - # type=file|default=: Save the transformation matrix for each volume. - out_param_file: Path - # type=file: warp parameters - # type=file|default=: Save the warp parameters in ASCII (.1D) format. 
- out_weight_file: Path - # type=file: weight volume - # type=file|default=: Write the weight volume to disk as a dataset - reference: medimage/nifti1 + reference: generic/file # type=file|default=: file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file. source_mask: generic/file # type=file|default=: mask the input dataset @@ -99,7 +84,7 @@ outputs: allcostx: text/text-file # type=file: Compute and print ALL available cost functionals for the un-warped inputs # type=file|default=: Compute and print ALL available cost functionals for the un-warped inputsAND THEN QUIT. If you use this option none of the other expected outputs will be produced - out_file: medimage/nifti1 + out_file: generic/file # type=file: output image file name # type=file|default=: output file from 3dAllineate out_matrix: generic/file @@ -115,7 +100,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -236,7 +221,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -255,13 +240,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 
3dAllineate - out_file: '"functional_allineate.nii"' - # type=file: output image file name - # type=file|default=: output file from 3dAllineate in_matrix: # type=file|default=: matrix to align input file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -280,13 +262,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dAllineate - reference: - # type=file|default=: file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file. allcostx: '"out.allcostX.txt"' # type=file: Compute and print ALL available cost functionals for the un-warped inputs # type=file|default=: Compute and print ALL available cost functionals for the un-warped inputsAND THEN QUIT. If you use this option none of the other expected outputs will be produced imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -305,12 +285,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dAllineate - reference: - # type=file|default=: file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file. 
nwarp_fixmot: '["X", "Y"]' # type=list|default=[]: To fix motion along directions. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -325,7 +303,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dAllineate -source functional.nii -prefix functional_allineate.nii -1Dmatrix_apply cmatrix.mat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -333,13 +311,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dAllineate - out_file: '"functional_allineate.nii"' - # type=file: output image file name - # type=file|default=: output file from 3dAllineate in_matrix: '"cmatrix.mat"' # type=file|default=: matrix to align input file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -351,13 +326,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input file to 3dAllineate - reference: '"structural.nii"' - # type=file|default=: file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file. allcostx: '"out.allcostX.txt"' # type=file: Compute and print ALL available cost functionals for the un-warped inputs # type=file|default=: Compute and print ALL available cost functionals for the un-warped inputsAND THEN QUIT. If you use this option none of the other expected outputs will be produced imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -369,12 +342,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dAllineate - reference: '"structural.nii"' - # type=file|default=: file to be used as reference, the first volume will be used if not given the reference will be the first volume of in_file. nwarp_fixmot: '["X", "Y"]' # type=list|default=[]: To fix motion along directions. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/auto_tcorrelate.yaml b/example-specs/interface/nipype/afni/auto_tcorrelate.yaml index 2033fb58..f488d1ab 100644 --- a/example-specs/interface/nipype/afni/auto_tcorrelate.yaml +++ b/example-specs/interface/nipype/afni/auto_tcorrelate.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Computes the correlation coefficient between the time series of each -# pair of voxels in the input dataset, and stores the output into a -# new anatomical bucket dataset [scaled to shorts to save memory space]. +# pair of voxels in the input dataset, and stores the output into a +# new anatomical bucket dataset [scaled to shorts to save memory space]. # -# For complete details, see the `3dAutoTcorrelate Documentation. -# `_ +# For complete details, see the `3dAutoTcorrelate Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> corr = afni.AutoTcorrelate() +# >>> corr.inputs.in_file = 'functional.nii' +# >>> corr.inputs.polort = -1 +# >>> corr.inputs.eta2 = True +# >>> corr.inputs.mask = 'mask.nii' +# >>> corr.inputs.mask_only_targets = True +# >>> corr.cmdline # doctest: +ELLIPSIS +# '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii' +# >>> res = corr.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> corr = afni.AutoTcorrelate() -# >>> corr.inputs.in_file = 'functional.nii' -# >>> corr.inputs.polort = -1 -# >>> corr.inputs.eta2 = True -# >>> corr.inputs.mask = 'mask.nii' -# >>> corr.inputs.mask_only_targets = True -# >>> corr.cmdline # doctest: +ELLIPSIS -# '3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii' -# >>> res = corr.run() # doctest: +SKIP -# task_name: AutoTcorrelate nipype_name: AutoTcorrelate nipype_module: nipype.interfaces.afni.preprocess @@ -41,13 +41,10 @@ inputs: # passed to the 
field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: timeseries x space (volume or surface) file - mask: medimage/nifti1 + mask: generic/file # type=file|default=: mask of voxels mask_source: generic/file # type=file|default=: mask for source voxels - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -81,7 +78,7 @@ tests: in_file: # type=file|default=: timeseries x space (volume or surface) file polort: - # type=int|default=0: Remove polynomical trend of order m or -1 for no detrending + # type=int|default=0: Remove polynomial trend of order m or -1 for no detrending eta2: # type=bool|default=False: eta^2 similarity mask: @@ -102,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,16 +118,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: timeseries x space (volume or 
surface) file - polort: '-1' - # type=int|default=0: Remove polynomical trend of order m or -1 for no detrending eta2: 'True' # type=bool|default=False: eta^2 similarity - mask: - # type=file|default=: mask of voxels mask_only_targets: 'True' # type=bool|default=False: use mask only on targets voxels imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,7 +138,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dAutoTcorrelate -eta2 -mask mask.nii -mask_only_targets -prefix functional_similarity_matrix.1D -polort -1 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -153,16 +146,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: timeseries x space (volume or surface) file - polort: '-1' - # type=int|default=0: Remove polynomical trend of order m or -1 for no detrending eta2: 'True' # type=bool|default=False: eta^2 similarity - mask: '"mask.nii"' - # type=file|default=: mask of voxels mask_only_targets: 'True' # type=bool|default=False: use mask only on targets voxels imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/auto_tlrc.yaml b/example-specs/interface/nipype/afni/auto_tlrc.yaml index 6dec9053..9bd74b74 100644 --- a/example-specs/interface/nipype/afni/auto_tlrc.yaml +++ b/example-specs/interface/nipype/afni/auto_tlrc.yaml @@ -6,22 +6,22 @@ # Docs # ---- # A minimal wrapper for the AutoTLRC script -# The only option currently supported is no_ss. -# For complete details, see the `3dQwarp Documentation. -# `_ +# The only option currently supported is no_ss. +# For complete details, see the `3dQwarp Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> autoTLRC = afni.AutoTLRC() +# >>> autoTLRC.inputs.in_file = 'structural.nii' +# >>> autoTLRC.inputs.no_ss = True +# >>> autoTLRC.inputs.base = "TT_N27+tlrc" +# >>> autoTLRC.cmdline +# '@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss' +# >>> res = autoTLRC.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> autoTLRC = afni.AutoTLRC() -# >>> autoTLRC.inputs.in_file = 'structural.nii' -# >>> autoTLRC.inputs.no_ss = True -# >>> autoTLRC.inputs.base = "TT_N27+tlrc" -# >>> autoTLRC.cmdline -# '@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss' -# >>> res = autoTLRC.run() # doctest: +SKIP # -# task_name: AutoTLRC nipype_name: AutoTLRC nipype_module: nipype.interfaces.afni.preprocess @@ -60,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -80,7 +80,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,12 +99,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Original anatomical volume (+orig).The skull is removed by this scriptunless 
instructed otherwise (-no_ss). - no_ss: 'True' - # type=bool|default=False: Do not strip skull of input data set (because skull has already been removed or because template still has the skull) NOTE: The ``-no_ss`` option is not all that optional. Here is a table of when you should and should not use ``-no_ss`` +------------------+------------+---------------+ | Dataset | Template | +==================+============+===============+ | | w/ skull | wo/ skull | +------------------+------------+---------------+ | WITH skull | ``-no_ss`` | xxx | +------------------+------------+---------------+ | WITHOUT skull | No Cigar | ``-no_ss`` | +------------------+------------+---------------+ Template means: Your template of choice Dset. means: Your anatomical dataset ``-no_ss`` means: Skull stripping should not be attempted on Dset xxx means: Don't put anything, the script will strip Dset No Cigar means: Don't try that combination, it makes no sense. base: '"TT_N27+tlrc"' # type=str|default='': Reference anatomical volume. Usually this volume is in some standard space like TLRC or MNI space and with afni dataset view of (+tlrc). Preferably, this reference volume should have had the skull removed but that is not mandatory. AFNI's distribution contains several templates. For a longer list, use "whereami -show_templates" TT_N27+tlrc --> Single subject, skull stripped volume. This volume is also known as N27_SurfVol_NoSkull+tlrc elsewhere in AFNI and SUMA land. (www.loni.ucla.edu, www.bic.mni.mcgill.ca) This template has a full set of FreeSurfer (surfer.nmr.mgh.harvard.edu) surface models that can be used in SUMA. For details, see Talairach-related link: https://afni.nimh.nih.gov/afni/suma TT_icbm452+tlrc --> Average volume of 452 normal brains. Skull Stripped. (www.loni.ucla.edu) TT_avg152T1+tlrc --> Average volume of 152 normal brains. 
Skull Stripped.(www.bic.mni.mcgill.ca) TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1 TT_avg152 and TT_EPI volume sources are from SPM's distribution. (www.fil.ion.ucl.ac.uk/spm/) If you do not specify a path for the template, the script will attempt to locate the template AFNI's binaries directory. NOTE: These datasets have been slightly modified from their original size to match the standard TLRC dimensions (Jean Talairach and Pierre Tournoux Co-Planar Stereotaxic Atlas of the Human Brain Thieme Medical Publishers, New York, 1988). That was done for internal consistency in AFNI. You may use the original form of these volumes if you choose but your TLRC coordinates will not be consistent with AFNI's TLRC database (San Antonio Talairach Daemon database), for example. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,7 +117,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: '@auto_tlrc -base TT_N27+tlrc -input structural.nii -no_ss' +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -127,12 +125,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Original anatomical volume (+orig).The skull is removed by this scriptunless instructed otherwise (-no_ss). 
- no_ss: 'True' - # type=bool|default=False: Do not strip skull of input data set (because skull has already been removed or because template still has the skull) NOTE: The ``-no_ss`` option is not all that optional. Here is a table of when you should and should not use ``-no_ss`` +------------------+------------+---------------+ | Dataset | Template | +==================+============+===============+ | | w/ skull | wo/ skull | +------------------+------------+---------------+ | WITH skull | ``-no_ss`` | xxx | +------------------+------------+---------------+ | WITHOUT skull | No Cigar | ``-no_ss`` | +------------------+------------+---------------+ Template means: Your template of choice Dset. means: Your anatomical dataset ``-no_ss`` means: Skull stripping should not be attempted on Dset xxx means: Don't put anything, the script will strip Dset No Cigar means: Don't try that combination, it makes no sense. base: '"TT_N27+tlrc"' # type=str|default='': Reference anatomical volume. Usually this volume is in some standard space like TLRC or MNI space and with afni dataset view of (+tlrc). Preferably, this reference volume should have had the skull removed but that is not mandatory. AFNI's distribution contains several templates. For a longer list, use "whereami -show_templates" TT_N27+tlrc --> Single subject, skull stripped volume. This volume is also known as N27_SurfVol_NoSkull+tlrc elsewhere in AFNI and SUMA land. (www.loni.ucla.edu, www.bic.mni.mcgill.ca) This template has a full set of FreeSurfer (surfer.nmr.mgh.harvard.edu) surface models that can be used in SUMA. For details, see Talairach-related link: https://afni.nimh.nih.gov/afni/suma TT_icbm452+tlrc --> Average volume of 452 normal brains. Skull Stripped. (www.loni.ucla.edu) TT_avg152T1+tlrc --> Average volume of 152 normal brains. Skull Stripped.(www.bic.mni.mcgill.ca) TT_EPI+tlrc --> EPI template from spm2, masked as TT_avg152T1 TT_avg152 and TT_EPI volume sources are from SPM's distribution. 
(www.fil.ion.ucl.ac.uk/spm/) If you do not specify a path for the template, the script will attempt to locate the template AFNI's binaries directory. NOTE: These datasets have been slightly modified from their original size to match the standard TLRC dimensions (Jean Talairach and Pierre Tournoux Co-Planar Stereotaxic Atlas of the Human Brain Thieme Medical Publishers, New York, 1988). That was done for internal consistency in AFNI. You may use the original form of these volumes if you choose but your TLRC coordinates will not be consistent with AFNI's TLRC database (San Antonio Talairach Daemon database), for example. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/autobox.yaml b/example-specs/interface/nipype/afni/autobox.yaml index a290d3f1..9b1d0ed5 100644 --- a/example-specs/interface/nipype/afni/autobox.yaml +++ b/example-specs/interface/nipype/afni/autobox.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Computes size of a box that fits around the volume. -# Also can be used to crop the volume to that box. +# Also can be used to crop the volume to that box. # -# For complete details, see the `3dAutobox Documentation. -# `_ +# For complete details, see the `3dAutobox Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> abox = afni.Autobox() +# >>> abox.inputs.in_file = 'structural.nii' +# >>> abox.inputs.padding = 5 +# >>> abox.cmdline +# '3dAutobox -input structural.nii -prefix structural_autobox -npad 5' +# >>> res = abox.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> abox = afni.Autobox() -# >>> abox.inputs.in_file = 'structural.nii' -# >>> abox.inputs.padding = 5 -# >>> abox.cmdline -# '3dAutobox -input structural.nii -prefix structural_autobox -npad 5' -# >>> res = abox.run() # doctest: +SKIP # -# task_name: Autobox nipype_name: Autobox nipype_module: nipype.interfaces.afni.utils @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file - out_file: Path - # type=file: output file - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -76,7 +73,7 @@ outputs: z_min: z_min_callable # type=int: templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,7 +98,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -120,10 +117,8 @@ tests: # (if not specified, will 
try to choose a sensible value) in_file: # type=file|default=: input file - padding: '5' - # type=int|default=0: Number of extra voxels to pad on each side of box imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dAutobox -input structural.nii -prefix structural_autobox -npad 5 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,10 +141,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file - padding: '5' - # type=int|default=0: Number of extra voxels to pad on each side of box imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/automask.yaml b/example-specs/interface/nipype/afni/automask.yaml index 6c379956..0edfe381 100644 --- a/example-specs/interface/nipype/afni/automask.yaml +++ b/example-specs/interface/nipype/afni/automask.yaml @@ -7,21 +7,21 @@ # ---- # Create a brain-only mask of the image using AFNI 3dAutomask command # -# For complete details, see the `3dAutomask Documentation. -# `_ +# For complete details, see the `3dAutomask Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> automask = afni.Automask() +# >>> automask.inputs.in_file = 'functional.nii' +# >>> automask.inputs.dilate = 1 +# >>> automask.inputs.outputtype = 'NIFTI' +# >>> automask.cmdline # doctest: +ELLIPSIS +# '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii' +# >>> res = automask.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> automask = afni.Automask() -# >>> automask.inputs.in_file = 'functional.nii' -# >>> automask.inputs.dilate = 1 -# >>> automask.inputs.outputtype = 'NIFTI' -# >>> automask.cmdline # doctest: +ELLIPSIS -# '3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii' -# >>> res = automask.run() # doctest: +SKIP # -# task_name: Automask nipype_name: Automask nipype_module: nipype.interfaces.afni.preprocess @@ -36,14 +36,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- brain_file: Path - # type=file: brain file (skull stripped) - # type=file|default=: output file from 3dAutomask in_file: medimage/nifti1 # type=file|default=: input file to 3dAutomask - out_file: Path - # type=file: mask file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -70,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -100,7 +94,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,12 +113,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dAutomask - dilate: '1' - # type=int|default=0: dilate the mask outwards outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 
'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,7 +131,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dAutomask -apply_prefix functional_masked.nii -dilate 1 -prefix functional_mask.nii functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -147,12 +139,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dAutomask - dilate: '1' - # type=int|default=0: dilate the mask outwards outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/axialize.yaml b/example-specs/interface/nipype/afni/axialize.yaml index 78c9b2de..c1cd32b1 100644 --- a/example-specs/interface/nipype/afni/axialize.yaml +++ b/example-specs/interface/nipype/afni/axialize.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Read in a dataset and write it out as a new dataset -# with the data brick oriented as axial slices. +# with the data brick oriented as axial slices. # -# For complete details, see the `3dcopy Documentation. -# `__ +# For complete details, see the `3dcopy Documentation. 
+# `__ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> axial3d = afni.Axialize() +# >>> axial3d.inputs.in_file = 'functional.nii' +# >>> axial3d.inputs.out_file = 'axialized.nii' +# >>> axial3d.cmdline +# '3daxialize -prefix axialized.nii functional.nii' +# >>> res = axial3d.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> axial3d = afni.Axialize() -# >>> axial3d.inputs.in_file = 'functional.nii' -# >>> axial3d.inputs.out_file = 'axialized.nii' -# >>> axial3d.cmdline -# '3daxialize -prefix axialized.nii functional.nii' -# >>> res = axial3d.run() # doctest: +SKIP # -# task_name: Axialize nipype_name: Axialize nipype_module: nipype.interfaces.afni.utils @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3daxialize - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,11 +111,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3daxialize - out_file: '"axialized.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -133,7 +127,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3daxialize -prefix axialized.nii functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -141,11 +135,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3daxialize - out_file: '"axialized.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/bandpass.yaml b/example-specs/interface/nipype/afni/bandpass.yaml index 11c65d8c..b80e231f 100644 --- a/example-specs/interface/nipype/afni/bandpass.yaml +++ b/example-specs/interface/nipype/afni/bandpass.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Program to lowpass and/or highpass each voxel time series in a -# dataset, offering more/different options than Fourier +# dataset, offering more/different options than Fourier # -# For complete details, see the `3dBandpass Documentation. -# `_ +# For complete details, see the `3dBandpass Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> from nipype.testing import example_data +# >>> bandpass = afni.Bandpass() +# >>> bandpass.inputs.in_file = 'functional.nii' +# >>> bandpass.inputs.highpass = 0.005 +# >>> bandpass.inputs.lowpass = 0.1 +# >>> bandpass.cmdline +# '3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii' +# >>> res = bandpass.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> from nipype.testing import example_data -# >>> bandpass = afni.Bandpass() -# >>> bandpass.inputs.in_file = 'functional.nii' -# >>> bandpass.inputs.highpass = 0.005 -# >>> bandpass.inputs.lowpass = 0.1 -# >>> bandpass.cmdline -# '3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii' -# >>> res = bandpass.run() # doctest: +SKIP # -# task_name: Bandpass nipype_name: Bandpass nipype_module: nipype.interfaces.afni.preprocess @@ -46,9 +46,6 @@ inputs: # type=file|default=: Orthogonalize each voxel to the corresponding voxel time series in dataset 'fset', which must have the same spatial and temporal grid structure as the main input dataset. At present, only one '-dsort' option is allowed. orthogonalize_file: generic/file+list-of # type=inputmultiobject|default=[]: Also orthogonalize input to columns in f.1D. Multiple '-ort' options are allowed. 
- out_file: Path - # type=file: output file - # type=file|default=: output file from 3dBandpass callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -72,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -121,7 +118,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,12 +137,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dBandpass - highpass: '0.005' - # type=float|default=0.0: highpass lowpass: '0.1' # type=float|default=0.0: lowpass imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: ' example_data' @@ -163,7 +158,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dBandpass -prefix functional_bp 0.005000 0.100000 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -171,12 +166,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dBandpass - highpass: '0.005' - # type=float|default=0.0: highpass lowpass: '0.1' # type=float|default=0.0: lowpass imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/blur_in_mask.yaml b/example-specs/interface/nipype/afni/blur_in_mask.yaml index c6cf5410..94597342 100644 --- a/example-specs/interface/nipype/afni/blur_in_mask.yaml +++ b/example-specs/interface/nipype/afni/blur_in_mask.yaml @@ -7,21 +7,21 @@ # ---- # Blurs a dataset spatially inside a mask. That's all. Experimental. # -# For complete details, see the `3dBlurInMask Documentation. -# `_ +# For complete details, see the `3dBlurInMask Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> bim = afni.BlurInMask() +# >>> bim.inputs.in_file = 'functional.nii' +# >>> bim.inputs.mask = 'mask.nii' +# >>> bim.inputs.fwhm = 5.0 +# >>> bim.cmdline # doctest: +ELLIPSIS +# '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur' +# >>> res = bim.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> bim = afni.BlurInMask() -# >>> bim.inputs.in_file = 'functional.nii' -# >>> bim.inputs.mask = 'mask.nii' -# >>> bim.inputs.fwhm = 5.0 -# >>> bim.cmdline # doctest: +ELLIPSIS -# '3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur' -# >>> res = bim.run() # doctest: +SKIP # -# task_name: BlurInMask nipype_name: BlurInMask nipype_module: nipype.interfaces.afni.preprocess @@ -38,13 +38,10 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dSkullStrip - mask: medimage/nifti1 + mask: generic/file # type=file|default=: Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output. multimask: generic/file # type=file|default=: Multi-mask dataset -- each distinct nonzero value in dataset will be treated as a separate mask for blurring purposes. 
- out_file: Path - # type=file: output file - # type=file|default=: output to the file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -68,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +100,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,12 +119,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dSkullStrip - mask: - # type=file|default=: Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output. 
fwhm: '5.0' # type=float|default=0.0: fwhm kernel size imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -142,7 +137,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dBlurInMask -input functional.nii -FWHM 5.000000 -mask mask.nii -prefix functional_blur +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -150,12 +145,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dSkullStrip - mask: '"mask.nii"' - # type=file|default=: Mask dataset, if desired. Blurring will occur only within the mask. Voxels NOT in the mask will be set to zero in the output. fwhm: '5.0' # type=float|default=0.0: fwhm kernel size imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/blur_to_fwhm.yaml b/example-specs/interface/nipype/afni/blur_to_fwhm.yaml index ace8e5c3..8ee439e9 100644 --- a/example-specs/interface/nipype/afni/blur_to_fwhm.yaml +++ b/example-specs/interface/nipype/afni/blur_to_fwhm.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Blurs a 'master' dataset until it reaches a specified FWHM smoothness -# (approximately). +# (approximately). # -# For complete details, see the `3dBlurToFWHM Documentation -# `_ +# For complete details, see the `3dBlurToFWHM Documentation +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> blur = afni.preprocess.BlurToFWHM() +# >>> blur.inputs.in_file = 'epi.nii' +# >>> blur.inputs.fwhm = 2.5 +# >>> blur.cmdline # doctest: +ELLIPSIS +# '3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni' +# >>> res = blur.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> blur = afni.preprocess.BlurToFWHM() -# >>> blur.inputs.in_file = 'epi.nii' -# >>> blur.inputs.fwhm = 2.5 -# >>> blur.cmdline # doctest: +ELLIPSIS -# '3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni' -# >>> res = blur.run() # doctest: +SKIP # -# task_name: BlurToFWHM nipype_name: BlurToFWHM nipype_module: nipype.interfaces.afni.preprocess @@ -42,9 +42,6 @@ inputs: # type=file|default=: The dataset that will be smoothed mask: generic/file # type=file|default=: Mask dataset, if desired. Voxels NOT in mask will be set to zero in output. 
- out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -68,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +96,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,10 +115,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: The dataset that will be smoothed - fwhm: '2.5' - # type=float|default=0.0: Blur until the 3D FWHM reaches this value (in mm) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -136,7 +131,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dBlurToFWHM -FWHM 2.500000 -input epi.nii -prefix epi_afni +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -144,10 +139,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"epi.nii"' # type=file|default=: The dataset that will be smoothed - fwhm: '2.5' - # type=float|default=0.0: Blur until the 3D FWHM reaches this value (in mm) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/brick_stat.yaml b/example-specs/interface/nipype/afni/brick_stat.yaml index 13a936d0..13c974a7 100644 --- a/example-specs/interface/nipype/afni/brick_stat.yaml +++ b/example-specs/interface/nipype/afni/brick_stat.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Computes maximum and/or minimum voxel values of an input dataset. -# TODO Add optional arguments. +# TODO Add optional arguments. # -# For complete details, see the `3dBrickStat Documentation. -# `_ +# For complete details, see the `3dBrickStat Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> brickstat = afni.BrickStat() +# >>> brickstat.inputs.in_file = 'functional.nii' +# >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz' +# >>> brickstat.inputs.min = True +# >>> brickstat.cmdline +# '3dBrickStat -min -mask skeleton_mask.nii.gz functional.nii' +# >>> res = brickstat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> brickstat = afni.BrickStat() -# >>> brickstat.inputs.in_file = 'functional.nii' -# >>> brickstat.inputs.mask = 'skeleton_mask.nii.gz' -# >>> brickstat.inputs.min = True -# >>> brickstat.cmdline -# '3dBrickStat -min -mask skeleton_mask.nii.gz functional.nii' -# >>> res = brickstat.run() # doctest: +SKIP # -# task_name: BrickStat nipype_name: BrickStat nipype_module: nipype.interfaces.afni.utils @@ -39,7 +39,7 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dmaskave - mask: medimage/nifti-gz + mask: generic/file # type=file|default=: -mask dset = use dset as mask to include/exclude voxels callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -63,7 +63,7 @@ outputs: min_val: min_val_callable # type=float: output templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and 
optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,12 +112,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dmaskave - mask: - # type=file|default=: -mask dset = use dset as mask to include/exclude voxels min: 'True' # type=bool|default=False: print the minimum value in dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,7 +130,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dBrickStat -min -mask skeleton_mask.nii.gz functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -140,12 +138,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dmaskave - mask: '"skeleton_mask.nii.gz"' - # type=file|default=: -mask dset = use dset as mask to include/exclude voxels min: 'True' # type=bool|default=False: print the minimum value in dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/bucket.yaml b/example-specs/interface/nipype/afni/bucket.yaml index c9e9c2d2..c978eb33 100644 --- a/example-specs/interface/nipype/afni/bucket.yaml +++ b/example-specs/interface/nipype/afni/bucket.yaml @@ -6,32 +6,32 @@ # Docs # ---- # Concatenate sub-bricks from input datasets into one big -# 'bucket' dataset. +# 'bucket' dataset. # -# .. danger:: +# .. danger:: # -# Using this program, it is possible to create a dataset that -# has different basic datum types for different sub-bricks -# (e.g., shorts for brick 0, floats for brick 1). -# Do NOT do this! Very few AFNI programs will work correctly -# with such datasets! +# Using this program, it is possible to create a dataset that +# has different basic datum types for different sub-bricks +# (e.g., shorts for brick 0, floats for brick 1). +# Do NOT do this! Very few AFNI programs will work correctly +# with such datasets! # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> bucket = afni.Bucket() -# >>> bucket.inputs.in_file = [('functional.nii',"{2..$}"), ('functional.nii',"{1}")] -# >>> bucket.inputs.out_file = 'vr_base' -# >>> bucket.cmdline -# "3dbucket -prefix vr_base functional.nii'{2..$}' functional.nii'{1}'" -# >>> res = bucket.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> bucket = afni.Bucket() +# >>> bucket.inputs.in_file = [('functional.nii',"{2..$}"), ('functional.nii',"{1}")] +# >>> bucket.inputs.out_file = 'vr_base' +# >>> bucket.cmdline +# "3dbucket -prefix vr_base functional.nii'{2..$}' functional.nii'{1}'" +# >>> res = bucket.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dbucket Documentation. +# `__. # -# See Also -# -------- -# For complete details, see the `3dbucket Documentation. -# `__. 
# -# task_name: Bucket nipype_name: Bucket nipype_module: nipype.interfaces.afni.utils @@ -46,9 +46,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: Path - # type=file: output file - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -72,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,11 +109,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: '[(''functional.nii'',"{2..$}"), (''functional.nii'',"{1}")]' # type=list|default=[]: List of tuples of input datasets and subbrick selection strings as described in more detail in the following afni help string Input dataset specified using one of these forms: ``prefix+view``, ``prefix+view.HEAD``, or ``prefix+view.BRIK``. 
You can also add a sub-brick selection list after the end of the dataset name. This allows only a subset of the sub-bricks to be included into the output (by default, all of the input dataset is copied into the output). A sub-brick selection list looks like one of the following forms:: fred+orig[5] ==> use only sub-brick #5 fred+orig[5,9,17] ==> use #5, #9, and #17 fred+orig[5..8] or [5-8] ==> use #5, #6, #7, and #8 fred+orig[5..13(2)] or [5-13(2)] ==> use #5, #7, #9, #11, and #13 Sub-brick indexes start at 0. You can use the character '$' to indicate the last sub-brick in a dataset; for example, you can select every third sub-brick by using the selection list ``fred+orig[0..$(3)]`` N.B.: The sub-bricks are output in the order specified, which may not be the order in the original datasets. For example, using ``fred+orig[0..$(2),1..$(2)]`` will cause the sub-bricks in fred+orig to be output into the new dataset in an interleaved fashion. Using ``fred+orig[$..0]`` will reverse the order of the sub-bricks in the output. N.B.: Bucket datasets have multiple sub-bricks, but do NOT have a time dimension. You can input sub-bricks from a 3D+time dataset into a bucket dataset. You can use the '3dinfo' program to see how many sub-bricks a 3D+time or a bucket dataset contains. N.B.: In non-bucket functional datasets (like the 'fico' datasets output by FIM, or the 'fitt' datasets output by 3dttest), sub-brick ``[0]`` is the 'intensity' and sub-brick [1] is the statistical parameter used as a threshold. 
Thus, to create a bucket dataset using the intensity from dataset A and the threshold from dataset B, and calling the output dataset C, you would type:: 3dbucket -prefix C -fbuc 'A+orig[0]' -fbuc 'B+orig[1] - out_file: '"vr_base"' - # type=file: output file - # type=file|default=: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,7 +125,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dbucket -prefix vr_base functional.nii"{2..$}" functional.nii"{1}" +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -139,11 +133,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '[(''functional.nii'',"{2..$}"), (''functional.nii'',"{1}")]' # type=list|default=[]: List of tuples of input datasets and subbrick selection strings as described in more detail in the following afni help string Input dataset specified using one of these forms: ``prefix+view``, ``prefix+view.HEAD``, or ``prefix+view.BRIK``. You can also add a sub-brick selection list after the end of the dataset name. This allows only a subset of the sub-bricks to be included into the output (by default, all of the input dataset is copied into the output). 
A sub-brick selection list looks like one of the following forms:: fred+orig[5] ==> use only sub-brick #5 fred+orig[5,9,17] ==> use #5, #9, and #17 fred+orig[5..8] or [5-8] ==> use #5, #6, #7, and #8 fred+orig[5..13(2)] or [5-13(2)] ==> use #5, #7, #9, #11, and #13 Sub-brick indexes start at 0. You can use the character '$' to indicate the last sub-brick in a dataset; for example, you can select every third sub-brick by using the selection list ``fred+orig[0..$(3)]`` N.B.: The sub-bricks are output in the order specified, which may not be the order in the original datasets. For example, using ``fred+orig[0..$(2),1..$(2)]`` will cause the sub-bricks in fred+orig to be output into the new dataset in an interleaved fashion. Using ``fred+orig[$..0]`` will reverse the order of the sub-bricks in the output. N.B.: Bucket datasets have multiple sub-bricks, but do NOT have a time dimension. You can input sub-bricks from a 3D+time dataset into a bucket dataset. You can use the '3dinfo' program to see how many sub-bricks a 3D+time or a bucket dataset contains. N.B.: In non-bucket functional datasets (like the 'fico' datasets output by FIM, or the 'fitt' datasets output by 3dttest), sub-brick ``[0]`` is the 'intensity' and sub-brick [1] is the statistical parameter used as a threshold. Thus, to create a bucket dataset using the intensity from dataset A and the threshold from dataset B, and calling the output dataset C, you would type:: 3dbucket -prefix C -fbuc 'A+orig[0]' -fbuc 'B+orig[1] - out_file: '"vr_base"' - # type=file: output file - # type=file|default=: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/calc.yaml b/example-specs/interface/nipype/afni/calc.yaml index 7b31a119..2f8ef833 100644 --- a/example-specs/interface/nipype/afni/calc.yaml +++ b/example-specs/interface/nipype/afni/calc.yaml @@ -7,33 +7,33 @@ # ---- # This program does voxel-by-voxel arithmetic on 3D datasets. # -# For complete details, see the `3dcalc Documentation. -# `_ +# For complete details, see the `3dcalc Documentation. +# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> calc = afni.Calc() -# >>> calc.inputs.in_file_a = 'functional.nii' -# >>> calc.inputs.in_file_b = 'functional2.nii' -# >>> calc.inputs.expr='a*b' -# >>> calc.inputs.out_file = 'functional_calc.nii.gz' -# >>> calc.inputs.outputtype = 'NIFTI' -# >>> calc.cmdline # doctest: +ELLIPSIS -# '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz' -# >>> res = calc.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> calc = afni.Calc() +# >>> calc.inputs.in_file_a = 'functional.nii' +# >>> calc.inputs.in_file_b = 'functional2.nii' +# >>> calc.inputs.expr='a*b' +# >>> calc.inputs.out_file = 'functional_calc.nii.gz' +# >>> calc.inputs.outputtype = 'NIFTI' +# >>> calc.cmdline # doctest: +ELLIPSIS +# '3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz' +# >>> res = calc.run() # doctest: +SKIP +# +# >>> from nipype.interfaces import afni +# >>> calc = afni.Calc() +# >>> calc.inputs.in_file_a = 'functional.nii' +# >>> calc.inputs.expr = '1' +# >>> calc.inputs.out_file = 'rm.epi.all1' +# >>> calc.inputs.overwrite = True +# >>> calc.cmdline +# '3dcalc -a functional.nii -expr "1" -prefix rm.epi.all1 -overwrite' +# >>> res = calc.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> calc = afni.Calc() -# >>> calc.inputs.in_file_a = 'functional.nii' -# >>> calc.inputs.expr = '1' -# >>> calc.inputs.out_file = 
'rm.epi.all1' -# >>> calc.inputs.overwrite = True -# >>> calc.cmdline -# '3dcalc -a functional.nii -expr "1" -prefix rm.epi.all1 -overwrite' -# >>> res = calc.run() # doctest: +SKIP # -# task_name: Calc nipype_name: Calc nipype_module: nipype.interfaces.afni.utils @@ -50,15 +50,12 @@ inputs: # passed to the field in the automatically generated unittests. in_file_a: medimage/nifti1 # type=file|default=: input file to 3dcalc - in_file_b: medimage/nifti1 + in_file_b: generic/file # type=file|default=: operand file to 3dcalc in_file_c: generic/file # type=file|default=: operand file to 3dcalc other: generic/file # type=file|default=: other options - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -75,14 +72,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-afni/all1,medimage/nifti-gz + out_file: fileformats.medimage_afni.All1 # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -119,7 +116,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,17 +135,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file to 3dcalc - in_file_b: - # type=file|default=: operand file to 3dcalc expr: '"a*b"' # type=str|default='': expr - out_file: ' "functional_calc.nii.gz"' - # type=file: output file - # type=file|default=: output image file name outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will 
typically @@ -167,15 +159,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file to 3dcalc - expr: '"1"' - # type=str|default='': expr out_file: '"rm.epi.all1"' # type=file: output file # type=file|default=: output image file name - overwrite: 'True' - # type=bool|default=False: overwrite output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -190,7 +178,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dcalc -a functional.nii -b functional2.nii -expr "a*b" -prefix functional_calc.nii.gz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -198,17 +186,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file_a: '"functional.nii"' # type=file|default=: input file to 3dcalc - in_file_b: '"functional2.nii"' - # type=file|default=: operand file to 3dcalc expr: '"a*b"' # type=str|default='': expr - out_file: ' "functional_calc.nii.gz"' - # type=file: output file - # type=file|default=: output image file name outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -220,15 +203,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file_a: '"functional.nii"' # type=file|default=: input file to 3dcalc - expr: '"1"' - # type=str|default='': expr out_file: '"rm.epi.all1"' # type=file: output file # type=file|default=: output image file name - overwrite: 'True' - # type=bool|default=False: overwrite output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/cat.yaml b/example-specs/interface/nipype/afni/cat.yaml index 74983e72..e8b49ae3 100644 --- a/example-specs/interface/nipype/afni/cat.yaml +++ b/example-specs/interface/nipype/afni/cat.yaml @@ -6,24 +6,24 @@ # Docs # ---- # 1dcat takes as input one or more 1D files, and writes out a 1D file -# containing the side-by-side concatenation of all or a subset of the -# columns from the input files. +# containing the side-by-side concatenation of all or a subset of the +# columns from the input files. # -# For complete details, see the `1dcat Documentation. -# `_ +# For complete details, see the `1dcat Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> cat1d = afni.Cat() +# >>> cat1d.inputs.sel = "'[0,2]'" +# >>> cat1d.inputs.in_files = ['f1.1D', 'f2.1D'] +# >>> cat1d.inputs.out_file = 'catout.1d' +# >>> cat1d.cmdline +# "1dcat -sel '[0,2]' f1.1D f2.1D > catout.1d" +# >>> res = cat1d.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> cat1d = afni.Cat() -# >>> cat1d.inputs.sel = "'[0,2]'" -# >>> cat1d.inputs.in_files = ['f1.1D', 'f2.1D'] -# >>> cat1d.inputs.out_file = 'catout.1d' -# >>> cat1d.cmdline -# "1dcat -sel '[0,2]' f1.1D f2.1D > catout.1d" -# >>> res = cat1d.run() # doctest: +SKIP # -# task_name: Cat nipype_name: Cat nipype_module: nipype.interfaces.afni.utils @@ -38,11 +38,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_files: medimage-afni/one-d+list-of + in_files: generic/file+list-of # type=list|default=[]: - out_file: Path - # type=file: output file - # type=file|default='catout.1d': output (concatenated) file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -59,14 +56,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-afni/one-d + out_file: fileformats.medimage_afni.OneD # type=file: output file # type=file|default='catout.1d': output (concatenated) file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -107,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,13 +123,11 @@ tests: # (if not specified, will try to choose a sensible value) sel: '"''[0,2]''"' # type=str|default='': Apply the same column/row selection string to all filenames on the command line. 
- in_files: - # type=list|default=[]: out_file: '"catout.1d"' # type=file: output file # type=file|default='catout.1d': output (concatenated) file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -147,7 +142,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 1dcat -sel "[0,2]" f1.1D f2.1D > catout.1d +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -155,13 +150,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. sel: '"''[0,2]''"' # type=str|default='': Apply the same column/row selection string to all filenames on the command line. - in_files: '["f1.1D", "f2.1D"]' - # type=list|default=[]: out_file: '"catout.1d"' # type=file: output file # type=file|default='catout.1d': output (concatenated) file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/cat_matvec.yaml b/example-specs/interface/nipype/afni/cat_matvec.yaml index d9ad9fb9..9ab66c1e 100644 --- a/example-specs/interface/nipype/afni/cat_matvec.yaml +++ b/example-specs/interface/nipype/afni/cat_matvec.yaml @@ -7,20 +7,20 @@ # ---- # Catenates 3D rotation+shift matrix+vector transformations. # -# For complete details, see the `cat_matvec Documentation. -# `_ +# For complete details, see the `cat_matvec Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> cmv = afni.CatMatvec() +# >>> cmv.inputs.in_file = [('structural.BRIK::WARP_DATA','I')] +# >>> cmv.inputs.out_file = 'warp.anat.Xat.1D' +# >>> cmv.cmdline +# 'cat_matvec structural.BRIK::WARP_DATA -I > warp.anat.Xat.1D' +# >>> res = cmv.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> cmv = afni.CatMatvec() -# >>> cmv.inputs.in_file = [('structural.BRIK::WARP_DATA','I')] -# >>> cmv.inputs.out_file = 'warp.anat.Xat.1D' -# >>> cmv.cmdline -# 'cat_matvec structural.BRIK::WARP_DATA -I > warp.anat.Xat.1D' -# >>> res = cmv.run() # doctest: +SKIP # -# task_name: CatMatvec nipype_name: CatMatvec nipype_module: nipype.interfaces.afni.utils @@ -35,9 +35,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: Path - # type=file: output file - # type=file|default=: File to write concattenated matvecs to callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -54,14 +51,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-afni/one-d + out_file: generic/file # type=file: output file # type=file|default=: File to write concattenated matvecs to callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -88,7 +85,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -107,11 +104,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: '[("structural.BRIK::WARP_DATA","I")]' # type=list|default=[]: list of tuples of mfiles and associated opkeys - out_file: '"warp.anat.Xat.1D"' - # type=file: output file - # type=file|default=: File to write concattenated matvecs to imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,7 +120,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: cat_matvec structural.BRIK::WARP_DATA -I > warp.anat.Xat.1D +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -134,11 +128,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '[("structural.BRIK::WARP_DATA","I")]' # type=list|default=[]: list of tuples of mfiles and associated opkeys - out_file: '"warp.anat.Xat.1D"' - # type=file: output file - # type=file|default=: File to write concattenated matvecs to imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/center_mass.yaml b/example-specs/interface/nipype/afni/center_mass.yaml index 67e048ae..a4192a99 100644 --- a/example-specs/interface/nipype/afni/center_mass.yaml +++ b/example-specs/interface/nipype/afni/center_mass.yaml @@ -7,27 +7,27 @@ # ---- # Computes center of mass using 3dCM command # -# .. note:: +# .. note:: # -# By default, the output is (x,y,z) values in DICOM coordinates. 
But -# as of Dec, 2016, there are now command line switches for other options. +# By default, the output is (x,y,z) values in DICOM coordinates. But +# as of Dec, 2016, there are now command line switches for other options. # # -# For complete details, see the `3dCM Documentation. -# `_ +# For complete details, see the `3dCM Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> cm = afni.CenterMass() +# >>> cm.inputs.in_file = 'structural.nii' +# >>> cm.inputs.cm_file = 'cm.txt' +# >>> cm.inputs.roi_vals = [2, 10] +# >>> cm.cmdline +# '3dCM -roi_vals 2 10 structural.nii > cm.txt' +# >>> res = 3dcm.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> cm = afni.CenterMass() -# >>> cm.inputs.in_file = 'structural.nii' -# >>> cm.inputs.cm_file = 'cm.txt' -# >>> cm.inputs.roi_vals = [2, 10] -# >>> cm.cmdline -# '3dCM -roi_vals 2 10 structural.nii > cm.txt' -# >>> res = 3dcm.run() # doctest: +SKIP # -# task_name: CenterMass nipype_name: CenterMass nipype_module: nipype.interfaces.afni.utils @@ -42,9 +42,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - cm_file: Path - # type=file: file with the center of mass coordinates - # type=file|default=: File to write center of mass to in_file: medimage/nifti1 # type=file|default=: input file to 3dCM mask_file: generic/file @@ -65,7 +62,7 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- cm_file: text/text-file + cm_file: generic/file # type=file: file with the center of mass coordinates # type=file|default=: File to write center of mass to out_file: generic/file @@ -74,7 +71,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +100,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,13 +119,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dCM - cm_file: '"cm.txt"' - # type=file: file with the center of mass coordinates - # type=file|default=: File to write center of mass to roi_vals: '[2, 10]' # type=list|default=[]: Compute center of mass for each blob with voxel value of v0, v1, v2, etc. This option is handy for getting ROI centers of mass. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -143,7 +137,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dCM -roi_vals 2 10 structural.nii > cm.txt +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -151,13 +145,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3dCM - cm_file: '"cm.txt"' - # type=file: file with the center of mass coordinates - # type=file|default=: File to write center of mass to roi_vals: '[2, 10]' # type=list|default=[]: Compute center of mass for each blob with voxel value of v0, v1, v2, etc. This option is handy for getting ROI centers of mass. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/clip_level.yaml b/example-specs/interface/nipype/afni/clip_level.yaml index 01699314..fc6d25db 100644 --- a/example-specs/interface/nipype/afni/clip_level.yaml +++ b/example-specs/interface/nipype/afni/clip_level.yaml @@ -6,21 +6,21 @@ # Docs # ---- # Estimates the value at which to clip the anatomical dataset so -# that background regions are set to zero. +# that background regions are set to zero. # -# For complete details, see the `3dClipLevel Documentation. -# `_ +# For complete details, see the `3dClipLevel Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces.afni import preprocess +# >>> cliplevel = preprocess.ClipLevel() +# >>> cliplevel.inputs.in_file = 'anatomical.nii' +# >>> cliplevel.cmdline +# '3dClipLevel anatomical.nii' +# >>> res = cliplevel.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces.afni import preprocess -# >>> cliplevel = preprocess.ClipLevel() -# >>> cliplevel.inputs.in_file = 'anatomical.nii' -# >>> cliplevel.cmdline -# '3dClipLevel anatomical.nii' -# >>> res = cliplevel.run() # doctest: +SKIP # -# task_name: ClipLevel nipype_name: ClipLevel nipype_module: nipype.interfaces.afni.preprocess @@ -61,7 +61,7 @@ outputs: clip_val: clip_val_callable # type=float: output templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -81,7 +81,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and 
optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -101,7 +101,7 @@ tests: in_file: # type=file|default=: input file to 3dClipLevel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -116,7 +116,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dClipLevel anatomical.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -125,7 +125,7 @@ doctests: in_file: '"anatomical.nii"' # type=file|default=: input file to 3dClipLevel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/convert_dset.yaml b/example-specs/interface/nipype/afni/convert_dset.yaml index b08ff9ad..764d544b 100644 --- a/example-specs/interface/nipype/afni/convert_dset.yaml +++ b/example-specs/interface/nipype/afni/convert_dset.yaml @@ -7,21 +7,21 @@ # ---- # Converts a surface dataset from one format to another. # -# For complete details, see the `ConvertDset Documentation. -# `_ +# For complete details, see the `ConvertDset Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> convertdset = afni.ConvertDset() +# >>> convertdset.inputs.in_file = 'lh.pial_converted.gii' +# >>> convertdset.inputs.out_type = 'niml_asc' +# >>> convertdset.inputs.out_file = 'lh.pial_converted.niml.dset' +# >>> convertdset.cmdline +# 'ConvertDset -o_niml_asc -input lh.pial_converted.gii -prefix lh.pial_converted.niml.dset' +# >>> res = convertdset.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> convertdset = afni.ConvertDset() -# >>> convertdset.inputs.in_file = 'lh.pial_converted.gii' -# >>> convertdset.inputs.out_type = 'niml_asc' -# >>> convertdset.inputs.out_file = 'lh.pial_converted.niml.dset' -# >>> convertdset.cmdline -# 'ConvertDset -o_niml_asc -input lh.pial_converted.gii -prefix lh.pial_converted.niml.dset' -# >>> res = convertdset.run() # doctest: +SKIP # -# task_name: ConvertDset nipype_name: ConvertDset nipype_module: nipype.interfaces.afni.utils @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/gifti # type=file|default=: input file to ConvertDset - out_file: Path - # type=file: output file - # type=file|default=: output file for ConvertDset callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-afni/dset + out_file: fileformats.medimage_afni.Dset # type=file: output file # type=file|default=: output file for ConvertDset callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -87,7 +84,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -106,13 +103,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to ConvertDset - out_type: '"niml_asc"' - # type=enum|default='niml'|allowed['1D','1Dp','1Dpt','gii','gii_asc','gii_b64','gii_b64gz','niml','niml_asc','niml_bi']: output type out_file: '"lh.pial_converted.niml.dset"' # type=file: output file # type=file|default=: output file for ConvertDset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +122,7 @@ tests: # bool - whether the 
unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: ConvertDset -o_niml_asc -input lh.pial_converted.gii -prefix lh.pial_converted.niml.dset +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -135,13 +130,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"lh.pial_converted.gii"' # type=file|default=: input file to ConvertDset - out_type: '"niml_asc"' - # type=enum|default='niml'|allowed['1D','1Dp','1Dpt','gii','gii_asc','gii_b64','gii_b64gz','niml','niml_asc','niml_bi']: output type out_file: '"lh.pial_converted.niml.dset"' # type=file: output file # type=file|default=: output file for ConvertDset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/copy.yaml b/example-specs/interface/nipype/afni/copy.yaml index ed4e6da6..b02e286f 100644 --- a/example-specs/interface/nipype/afni/copy.yaml +++ b/example-specs/interface/nipype/afni/copy.yaml @@ -6,40 +6,40 @@ # Docs # ---- # Copies an image of one type to an image of the same -# or different type using 3dcopy command +# or different type using 3dcopy command # -# For complete details, see the `3dcopy Documentation. -# `__ +# For complete details, see the `3dcopy Documentation. 
+# `__ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> copy3d = afni.Copy() -# >>> copy3d.inputs.in_file = 'functional.nii' -# >>> copy3d.cmdline -# '3dcopy functional.nii functional_copy' -# >>> res = copy3d.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> copy3d = afni.Copy() +# >>> copy3d.inputs.in_file = 'functional.nii' +# >>> copy3d.cmdline +# '3dcopy functional.nii functional_copy' +# >>> res = copy3d.run() # doctest: +SKIP # -# >>> from copy import deepcopy -# >>> copy3d_2 = deepcopy(copy3d) -# >>> copy3d_2.inputs.outputtype = 'NIFTI' -# >>> copy3d_2.cmdline -# '3dcopy functional.nii functional_copy.nii' -# >>> res = copy3d_2.run() # doctest: +SKIP +# >>> from copy import deepcopy +# >>> copy3d_2 = deepcopy(copy3d) +# >>> copy3d_2.inputs.outputtype = 'NIFTI' +# >>> copy3d_2.cmdline +# '3dcopy functional.nii functional_copy.nii' +# >>> res = copy3d_2.run() # doctest: +SKIP # -# >>> copy3d_3 = deepcopy(copy3d) -# >>> copy3d_3.inputs.outputtype = 'NIFTI_GZ' -# >>> copy3d_3.cmdline -# '3dcopy functional.nii functional_copy.nii.gz' -# >>> res = copy3d_3.run() # doctest: +SKIP +# >>> copy3d_3 = deepcopy(copy3d) +# >>> copy3d_3.inputs.outputtype = 'NIFTI_GZ' +# >>> copy3d_3.cmdline +# '3dcopy functional.nii functional_copy.nii.gz' +# >>> res = copy3d_3.run() # doctest: +SKIP +# +# >>> copy3d_4 = deepcopy(copy3d) +# >>> copy3d_4.inputs.out_file = 'new_func.nii' +# >>> copy3d_4.cmdline +# '3dcopy functional.nii new_func.nii' +# >>> res = copy3d_4.run() # doctest: +SKIP # -# >>> copy3d_4 = deepcopy(copy3d) -# >>> copy3d_4.inputs.out_file = 'new_func.nii' -# >>> copy3d_4.cmdline -# '3dcopy functional.nii new_func.nii' -# >>> res = copy3d_4.run() # doctest: +SKIP # -# task_name: Copy nipype_name: Copy nipype_module: nipype.interfaces.afni.utils @@ -56,9 +56,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input file to 3dcopy - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -82,7 +79,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -105,7 +102,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,7 +122,7 @@ tests: in_file: # type=file|default=: input file to 3dcopy imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,7 +142,7 @@ tests: outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list 
import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy name: deepcopy @@ -168,7 +165,7 @@ tests: outputtype: '"NIFTI_GZ"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -189,7 +186,7 @@ tests: # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -204,7 +201,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dcopy functional.nii functional_copy +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -213,7 +210,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input file to 3dcopy imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -226,7 +223,7 @@ doctests: outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -239,7 +236,7 @@ doctests: outputtype: '"NIFTI_GZ"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -253,7 +250,7 @@ doctests: # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/deconvolve.yaml b/example-specs/interface/nipype/afni/deconvolve.yaml index 06fadcac..7976fc51 100644 --- a/example-specs/interface/nipype/afni/deconvolve.yaml +++ b/example-specs/interface/nipype/afni/deconvolve.yaml @@ -7,26 +7,26 @@ # ---- # Performs OLS regression given a 4D neuroimage file and stimulus timings # -# For complete details, see the `3dDeconvolve Documentation. -# `_ +# For complete details, see the `3dDeconvolve Documentation. 
+# `_ # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import afni +# >>> deconvolve = afni.Deconvolve() +# >>> deconvolve.inputs.in_files = ['functional.nii', 'functional2.nii'] +# >>> deconvolve.inputs.out_file = 'output.nii' +# >>> deconvolve.inputs.x1D = 'output.1D' +# >>> stim_times = [(1, 'timeseries.txt', 'SPMG1(4)')] +# >>> deconvolve.inputs.stim_times = stim_times +# >>> deconvolve.inputs.stim_label = [(1, 'Houses')] +# >>> deconvolve.inputs.gltsym = ['SYM: +Houses'] +# >>> deconvolve.inputs.glt_label = [(1, 'Houses')] +# >>> deconvolve.cmdline +# "3dDeconvolve -input functional.nii functional2.nii -bucket output.nii -x1D output.1D -num_stimts 1 -stim_times 1 timeseries.txt 'SPMG1(4)' -stim_label 1 Houses -num_glt 1 -gltsym 'SYM: +Houses' -glt_label 1 Houses" +# >>> res = deconvolve.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> deconvolve = afni.Deconvolve() -# >>> deconvolve.inputs.in_files = ['functional.nii', 'functional2.nii'] -# >>> deconvolve.inputs.out_file = 'output.nii' -# >>> deconvolve.inputs.x1D = 'output.1D' -# >>> stim_times = [(1, 'timeseries.txt', 'SPMG1(4)')] -# >>> deconvolve.inputs.stim_times = stim_times -# >>> deconvolve.inputs.stim_label = [(1, 'Houses')] -# >>> deconvolve.inputs.gltsym = ['SYM: +Houses'] -# >>> deconvolve.inputs.glt_label = [(1, 'Houses')] -# >>> deconvolve.cmdline -# "3dDeconvolve -input functional.nii functional2.nii -bucket output.nii -x1D output.1D -num_stimts 1 -stim_times 1 timeseries.txt 'SPMG1(4)' -stim_label 1 Houses -num_glt 1 -gltsym 'SYM: +Houses' -glt_label 1 Houses" -# >>> res = deconvolve.run() # doctest: +SKIP -# task_name: Deconvolve nipype_name: Deconvolve nipype_module: nipype.interfaces.afni.model @@ -51,12 +51,6 @@ inputs: # type=file|default=: filename of single (fMRI) .1D time series where time runs down the column. 
mask: generic/file # type=file|default=: filename of 3D mask dataset; only data time series from within the mask will be analyzed; results for voxels outside the mask will be set to zero. - out_file: Path - # type=file: output statistics file - # type=file|default=: output statistics file - x1D: Path - # type=file: save out X matrix - # type=file|default=: specify name for saved X matrix callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -76,19 +70,19 @@ outputs: cbucket: generic/file # type=file: output regression coefficients file (if generated) # type=str|default='': Name for dataset in which to save the regression coefficients (no statistics). This dataset will be used in a -xrestore run [not yet implemented] instead of the bucket dataset, if possible. - out_file: medimage/nifti1 + out_file: generic/file # type=file: output statistics file # type=file|default=: output statistics file reml_script: generic/file # type=file: automatically generated script to run 3dREMLfit - x1D: medimage-afni/one-d + x1D: fileformats.medimage_afni.OneD # type=file: save out X matrix # type=file|default=: specify name for saved X matrix callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -193,7 +187,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements 
required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -212,22 +206,15 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: filenames of 3D+time input datasets. More than one filename can be given and the datasets will be auto-catenated in time. You can input a 1D time series file here, but the time axis should run along the ROW direction, not the COLUMN direction as in the 'input1D' option. - out_file: '"output.nii"' - # type=file: output statistics file - # type=file|default=: output statistics file x1D: '"output.1D"' # type=file: save out X matrix # type=file|default=: specify name for saved X matrix stim_times: stim_times # type=list|default=[]: generate a response model from a set of stimulus times given in file. - stim_label: '[(1, "Houses")]' - # type=list|default=[]: label for kth input stimulus (e.g., Label1) gltsym: '["SYM: +Houses"]' # type=list|default=[]: general linear tests (i.e., contrasts) using symbolic conventions (e.g., '+Label1 -Label2') - glt_label: '[(1, "Houses")]' - # type=list|default=[]: general linear test (i.e., contrast) labels imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -250,22 +237,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional.nii", "functional2.nii"]' # type=inputmultiobject|default=[]: filenames of 3D+time input datasets. 
More than one filename can be given and the datasets will be auto-catenated in time. You can input a 1D time series file here, but the time axis should run along the ROW direction, not the COLUMN direction as in the 'input1D' option. - out_file: '"output.nii"' - # type=file: output statistics file - # type=file|default=: output statistics file x1D: '"output.1D"' # type=file: save out X matrix # type=file|default=: specify name for saved X matrix stim_times: stim_times # type=list|default=[]: generate a response model from a set of stimulus times given in file. - stim_label: '[(1, "Houses")]' - # type=list|default=[]: label for kth input stimulus (e.g., Label1) gltsym: '["SYM: +Houses"]' # type=list|default=[]: general linear tests (i.e., contrasts) using symbolic conventions (e.g., '+Label1 -Label2') - glt_label: '[(1, "Houses")]' - # type=list|default=[]: general linear test (i.e., contrast) labels imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/degree_centrality.yaml b/example-specs/interface/nipype/afni/degree_centrality.yaml index a72b78c1..3c499865 100644 --- a/example-specs/interface/nipype/afni/degree_centrality.yaml +++ b/example-specs/interface/nipype/afni/degree_centrality.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Performs degree centrality on a dataset using a given maskfile -# via 3dDegreeCentrality +# via 3dDegreeCentrality # -# For complete details, see the `3dDegreeCentrality Documentation. -# `_ +# For complete details, see the `3dDegreeCentrality Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> degree = afni.DegreeCentrality() +# >>> degree.inputs.in_file = 'functional.nii' +# >>> degree.inputs.mask = 'mask.nii' +# >>> degree.inputs.sparsity = 1 # keep the top one percent of connections +# >>> degree.inputs.out_file = 'out.nii' +# >>> degree.cmdline +# '3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii' +# >>> res = degree.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> degree = afni.DegreeCentrality() -# >>> degree.inputs.in_file = 'functional.nii' -# >>> degree.inputs.mask = 'mask.nii' -# >>> degree.inputs.sparsity = 1 # keep the top one percent of connections -# >>> degree.inputs.out_file = 'out.nii' -# >>> degree.cmdline -# '3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii' -# >>> res = degree.run() # doctest: +SKIP # -# task_name: DegreeCentrality nipype_name: DegreeCentrality nipype_module: nipype.interfaces.afni.preprocess @@ -40,11 +40,8 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dDegreeCentrality - mask: medimage/nifti1 + mask: generic/file # type=file|default=: mask file to mask input data - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -64,14 +61,14 @@ outputs: oned_file: generic/file # type=file: The text output of the similarity matrix computed after thresholding with one-dimensional and ijk voxel indices, correlations, image extents, and affine matrix. 
# type=str|default='': output filepath to text dump of correlation matrix - out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -107,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,15 +123,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dDegreeCentrality - mask: - # type=file|default=: mask file to mask input data sparsity: '1 # keep the top one percent of connections' # type=float|default=0.0: only take the top percent of connections - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will 
typically @@ -149,7 +141,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dDegreeCentrality -mask mask.nii -prefix out.nii -sparsity 1.000000 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -157,15 +149,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dDegreeCentrality - mask: '"mask.nii"' - # type=file|default=: mask file to mask input data sparsity: '1 # keep the top one percent of connections' # type=float|default=0.0: only take the top percent of connections - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/despike.yaml b/example-specs/interface/nipype/afni/despike.yaml index 23cec1f6..08b5bdf8 100644 --- a/example-specs/interface/nipype/afni/despike.yaml +++ b/example-specs/interface/nipype/afni/despike.yaml @@ -7,19 +7,19 @@ # ---- # Removes 'spikes' from the 3D+time input dataset # -# For complete details, see the `3dDespike Documentation. -# `_ +# For complete details, see the `3dDespike Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> despike = afni.Despike() +# >>> despike.inputs.in_file = 'functional.nii' +# >>> despike.cmdline +# '3dDespike -prefix functional_despike functional.nii' +# >>> res = despike.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> despike = afni.Despike() -# >>> despike.inputs.in_file = 'functional.nii' -# >>> despike.cmdline -# '3dDespike -prefix functional_despike functional.nii' -# >>> res = despike.run() # doctest: +SKIP # -# task_name: Despike nipype_name: Despike nipype_module: nipype.interfaces.afni.preprocess @@ -36,9 +36,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dDespike - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -62,7 +59,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -83,7 +80,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, 
noting that tests will typically @@ -103,7 +100,7 @@ tests: in_file: # type=file|default=: input file to 3dDespike imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,7 +115,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dDespike -prefix functional_despike functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -127,7 +124,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input file to 3dDespike imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/detrend.yaml b/example-specs/interface/nipype/afni/detrend.yaml index 6b023d2e..9d012182 100644 --- a/example-specs/interface/nipype/afni/detrend.yaml +++ b/example-specs/interface/nipype/afni/detrend.yaml @@ -6,23 +6,23 @@ # Docs # ---- # This program removes components from voxel time series using -# linear least squares +# linear least squares # -# For complete details, see the `3dDetrend Documentation. -# `_ +# For complete details, see the `3dDetrend Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> detrend = afni.Detrend() +# >>> detrend.inputs.in_file = 'functional.nii' +# >>> detrend.inputs.args = '-polort 2' +# >>> detrend.inputs.outputtype = 'AFNI' +# >>> detrend.cmdline +# '3dDetrend -polort 2 -prefix functional_detrend functional.nii' +# >>> res = detrend.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> detrend = afni.Detrend() -# >>> detrend.inputs.in_file = 'functional.nii' -# >>> detrend.inputs.args = '-polort 2' -# >>> detrend.inputs.outputtype = 'AFNI' -# >>> detrend.cmdline -# '3dDetrend -polort 2 -prefix functional_detrend functional.nii' -# >>> res = detrend.run() # doctest: +SKIP # -# task_name: Detrend nipype_name: Detrend nipype_module: nipype.interfaces.afni.preprocess @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dDetrend - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +83,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import 
statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -105,12 +102,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dDetrend - args: '"-polort 2"' - # type=str|default='': Additional parameters to the command outputtype: '"AFNI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,7 +120,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dDetrend -polort 2 -prefix functional_detrend functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -133,12 +128,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input file to 3dDetrend - args: '"-polort 2"' - # type=str|default='': Additional parameters to the command outputtype: '"AFNI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/dot.yaml b/example-specs/interface/nipype/afni/dot.yaml index eb94d2e9..c471ddd3 100644 --- a/example-specs/interface/nipype/afni/dot.yaml +++ b/example-specs/interface/nipype/afni/dot.yaml @@ -6,26 +6,26 @@ # Docs # ---- # Correlation coefficient between sub-brick pairs. -# All datasets in in_files list will be concatenated. -# You can use sub-brick selectors in the file specification. +# All datasets in in_files list will be concatenated. +# You can use sub-brick selectors in the file specification. # -# .. warning:: +# .. warning:: # -# This program is not efficient when more than two subbricks are input. +# This program is not efficient when more than two subbricks are input. # -# For complete details, see the `3ddot Documentation. -# `_ +# For complete details, see the `3ddot Documentation. 
+# `_ +# +# >>> from nipype.interfaces import afni +# >>> dot = afni.Dot() +# >>> dot.inputs.in_files = ['functional.nii[0]', 'structural.nii'] +# >>> dot.inputs.dodice = True +# >>> dot.inputs.out_file = 'out.mask_ae_dice.txt' +# >>> dot.cmdline +# '3dDot -dodice functional.nii[0] structural.nii |& tee out.mask_ae_dice.txt' +# >>> res = copy3d.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> dot = afni.Dot() -# >>> dot.inputs.in_files = ['functional.nii[0]', 'structural.nii'] -# >>> dot.inputs.dodice = True -# >>> dot.inputs.out_file = 'out.mask_ae_dice.txt' -# >>> dot.cmdline -# '3dDot -dodice functional.nii[0] structural.nii |& tee out.mask_ae_dice.txt' -# >>> res = copy3d.run() # doctest: +SKIP # -# task_name: Dot nipype_name: Dot nipype_module: nipype.interfaces.afni.utils @@ -44,9 +44,6 @@ inputs: # type=list|default=[]: list of input files, possibly with subbrick selectors mask: generic/file # type=file|default=: Use this dataset as a mask - out_file: Path - # type=file: output file - # type=file|default=: collect output to a file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -70,7 +67,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -115,7 +112,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import 
statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,13 +131,11 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=list|default=[]: list of input files, possibly with subbrick selectors - dodice: 'True' - # type=bool|default=False: Return the Dice coefficient (the Sorensen-Dice index). out_file: '"out.mask_ae_dice.txt"' # type=file: output file # type=file|default=: collect output to a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -163,13 +158,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional.nii[0]", "structural.nii"]' # type=list|default=[]: list of input files, possibly with subbrick selectors - dodice: 'True' - # type=bool|default=False: Return the Dice coefficient (the Sorensen-Dice index). out_file: '"out.mask_ae_dice.txt"' # type=file: output file # type=file|default=: collect output to a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/ecm.yaml b/example-specs/interface/nipype/afni/ecm.yaml index ef7a5767..75cd7656 100644 --- a/example-specs/interface/nipype/afni/ecm.yaml +++ b/example-specs/interface/nipype/afni/ecm.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Performs degree centrality on a dataset using a given maskfile -# via the 3dECM command +# via the 3dECM command # -# For complete details, see the `3dECM Documentation. -# `_ +# For complete details, see the `3dECM Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> ecm = afni.ECM() +# >>> ecm.inputs.in_file = 'functional.nii' +# >>> ecm.inputs.mask = 'mask.nii' +# >>> ecm.inputs.sparsity = 0.1 # keep top 0.1% of connections +# >>> ecm.inputs.out_file = 'out.nii' +# >>> ecm.cmdline +# '3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii' +# >>> res = ecm.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> ecm = afni.ECM() -# >>> ecm.inputs.in_file = 'functional.nii' -# >>> ecm.inputs.mask = 'mask.nii' -# >>> ecm.inputs.sparsity = 0.1 # keep top 0.1% of connections -# >>> ecm.inputs.out_file = 'out.nii' -# >>> ecm.cmdline -# '3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii' -# >>> res = ecm.run() # doctest: +SKIP # -# task_name: ECM nipype_name: ECM nipype_module: nipype.interfaces.afni.preprocess @@ -40,11 +40,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input file to 3dECM - mask: medimage/nifti1 + mask: generic/file # type=file|default=: mask file to mask input data - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -61,14 +58,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -115,7 +112,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,15 +131,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dECM - mask: - # type=file|default=: mask file to mask input data sparsity: '0.1 # keep top 0.1% of connections' # 
type=float|default=0.0: only take the top percent of connections - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,7 +149,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dECM -mask mask.nii -prefix out.nii -sparsity 0.100000 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -165,15 +157,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dECM - mask: '"mask.nii"' - # type=file|default=: mask file to mask input data sparsity: '0.1 # keep top 0.1% of connections' # type=float|default=0.0: only take the top percent of connections - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/edge_3.yaml b/example-specs/interface/nipype/afni/edge_3.yaml index 7d52d966..bb4cb634 100644 --- a/example-specs/interface/nipype/afni/edge_3.yaml +++ b/example-specs/interface/nipype/afni/edge_3.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Does 3D Edge detection using the library 3DEdge -# by Gregoire Malandain. +# by Gregoire Malandain. # -# For complete details, see the `3dedge3 Documentation. -# `_ +# For complete details, see the `3dedge3 Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> edge3 = afni.Edge3() +# >>> edge3.inputs.in_file = 'functional.nii' +# >>> edge3.inputs.out_file = 'edges.nii' +# >>> edge3.inputs.datum = 'byte' +# >>> edge3.cmdline +# '3dedge3 -input functional.nii -datum byte -prefix edges.nii' +# >>> res = edge3.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> edge3 = afni.Edge3() -# >>> edge3.inputs.in_file = 'functional.nii' -# >>> edge3.inputs.out_file = 'edges.nii' -# >>> edge3.inputs.datum = 'byte' -# >>> edge3.cmdline -# '3dedge3 -input functional.nii -datum byte -prefix edges.nii' -# >>> res = edge3.run() # doctest: +SKIP # -# task_name: Edge3 nipype_name: Edge3 nipype_module: nipype.interfaces.afni.utils @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dedge3 - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -58,14 +55,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -98,7 +95,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -117,13 +114,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dedge3 - out_file: '"edges.nii"' - # type=file: output file - # type=file|default=: output image file name datum: '"byte"' # type=enum|default='byte'|allowed['byte','float','short']: specify data type for output. Valid types are 'byte', 'short' and 'float'. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +132,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dedge3 -input functional.nii -datum byte -prefix edges.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,13 +140,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dedge3 - out_file: '"edges.nii"' - # type=file: output file - # type=file|default=: output image file name datum: '"byte"' # type=enum|default='byte'|allowed['byte','float','short']: specify data type for output. Valid types are 'byte', 'short' and 'float'. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/eval.yaml b/example-specs/interface/nipype/afni/eval.yaml index acd88eaa..ad97dfcf 100644 --- a/example-specs/interface/nipype/afni/eval.yaml +++ b/example-specs/interface/nipype/afni/eval.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Evaluates an expression that may include columns of data from one or -# more text files. +# more text files. # -# For complete details, see the `1deval Documentation. -# `_ +# For complete details, see the `1deval Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> eval = afni.Eval() +# >>> eval.inputs.in_file_a = 'seed.1D' +# >>> eval.inputs.in_file_b = 'resp.1D' +# >>> eval.inputs.expr = 'a*b' +# >>> eval.inputs.out1D = True +# >>> eval.inputs.out_file = 'data_calc.1D' +# >>> eval.cmdline +# '1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D' +# >>> res = eval.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> eval = afni.Eval() -# >>> eval.inputs.in_file_a = 'seed.1D' -# >>> eval.inputs.in_file_b = 'resp.1D' -# >>> eval.inputs.expr = 'a*b' -# >>> eval.inputs.out1D = True -# >>> eval.inputs.out_file = 'data_calc.1D' -# >>> eval.cmdline -# '1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D' -# >>> res = eval.run() # doctest: +SKIP # -# task_name: Eval nipype_name: Eval nipype_module: nipype.interfaces.afni.utils @@ -39,17 +39,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file_a: medimage-afni/one-d + in_file_a: fileformats.medimage_afni.OneD # type=file|default=: input file to 1deval - in_file_b: medimage-afni/one-d + in_file_b: generic/file # type=file|default=: operand file to 1deval in_file_c: generic/file # type=file|default=: operand file to 1deval other: generic/file # type=file|default=: other options - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,14 +63,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-afni/one-d + out_file: fileformats.medimage_afni.OneD # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -110,7 +107,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,17 +126,13 @@ tests: # (if not 
specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file to 1deval - in_file_b: - # type=file|default=: operand file to 1deval expr: '"a*b"' # type=str|default='': expr - out1D: 'True' - # type=bool|default=False: output in 1D out_file: ' "data_calc.1D"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -154,7 +147,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 1deval -a seed.1D -b resp.1D -expr "a*b" -1D -prefix data_calc.1D +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -162,17 +155,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file_a: '"seed.1D"' # type=file|default=: input file to 1deval - in_file_b: '"resp.1D"' - # type=file|default=: operand file to 1deval expr: '"a*b"' # type=str|default='': expr - out1D: 'True' - # type=bool|default=False: output in 1D out_file: ' "data_calc.1D"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/fim.yaml b/example-specs/interface/nipype/afni/fim.yaml index 076e5cc3..f854d38f 100644 --- a/example-specs/interface/nipype/afni/fim.yaml +++ b/example-specs/interface/nipype/afni/fim.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Program to calculate the cross-correlation of an ideal reference -# waveform with the measured FMRI time series for each voxel. +# waveform with the measured FMRI time series for each voxel. # -# For complete details, see the `3dfim+ Documentation. -# `_ +# For complete details, see the `3dfim+ Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> fim = afni.Fim() +# >>> fim.inputs.in_file = 'functional.nii' +# >>> fim.inputs.ideal_file= 'seed.1D' +# >>> fim.inputs.out_file = 'functional_corr.nii' +# >>> fim.inputs.out = 'Correlation' +# >>> fim.inputs.fim_thr = 0.0009 +# >>> fim.cmdline +# '3dfim+ -input functional.nii -ideal_file seed.1D -fim_thr 0.000900 -out Correlation -bucket functional_corr.nii' +# >>> res = fim.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> fim = afni.Fim() -# >>> fim.inputs.in_file = 'functional.nii' -# >>> fim.inputs.ideal_file= 'seed.1D' -# >>> fim.inputs.out_file = 'functional_corr.nii' -# >>> fim.inputs.out = 'Correlation' -# >>> fim.inputs.fim_thr = 0.0009 -# >>> fim.cmdline -# '3dfim+ -input functional.nii -ideal_file seed.1D -fim_thr 0.000900 -out Correlation -bucket functional_corr.nii' -# >>> res = fim.run() # doctest: +SKIP # -# task_name: Fim nipype_name: Fim nipype_module: nipype.interfaces.afni.preprocess @@ -39,13 +39,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- ideal_file: medimage-afni/one-d + ideal_file: generic/file # type=file|default=: ideal time series file name in_file: medimage/nifti1 # type=file|default=: input file to 3dfim+ - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,17 +112,13 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dfim+ - ideal_file: - # type=file|default=: ideal time series file name out_file: '"functional_corr.nii"' # type=file: output file # type=file|default=: output image file name - out: '"Correlation"' - # type=str|default='': Flag to output the specified parameter fim_thr: '0.0009' # type=float|default=0.0: fim internal mask threshold value imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list 
item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dfim+ -input functional.nii -ideal_file seed.1D -fim_thr 0.000900 -out Correlation -bucket functional_corr.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -148,17 +141,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dfim+ - ideal_file: '"seed.1D"' - # type=file|default=: ideal time series file name out_file: '"functional_corr.nii"' # type=file: output file # type=file|default=: output image file name - out: '"Correlation"' - # type=str|default='': Flag to output the specified parameter fim_thr: '0.0009' # type=float|default=0.0: fim internal mask threshold value imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/fourier.yaml b/example-specs/interface/nipype/afni/fourier.yaml index 029f08a6..3b680707 100644 --- a/example-specs/interface/nipype/afni/fourier.yaml +++ b/example-specs/interface/nipype/afni/fourier.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Program to lowpass and/or highpass each voxel time series in a -# dataset, via the FFT +# dataset, via the FFT # -# For complete details, see the `3dFourier Documentation. -# `_ +# For complete details, see the `3dFourier Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> fourier = afni.Fourier() +# >>> fourier.inputs.in_file = 'functional.nii' +# >>> fourier.inputs.retrend = True +# >>> fourier.inputs.highpass = 0.005 +# >>> fourier.inputs.lowpass = 0.1 +# >>> fourier.cmdline +# '3dFourier -highpass 0.005000 -lowpass 0.100000 -prefix functional_fourier -retrend functional.nii' +# >>> res = fourier.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> fourier = afni.Fourier() -# >>> fourier.inputs.in_file = 'functional.nii' -# >>> fourier.inputs.retrend = True -# >>> fourier.inputs.highpass = 0.005 -# >>> fourier.inputs.lowpass = 0.1 -# >>> fourier.cmdline -# '3dFourier -highpass 0.005000 -lowpass 0.100000 -prefix functional_fourier -retrend functional.nii' -# >>> res = fourier.run() # doctest: +SKIP # -# task_name: Fourier nipype_name: Fourier nipype_module: nipype.interfaces.afni.preprocess @@ -40,9 +40,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input file to 3dFourier - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,14 +109,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dFourier - retrend: 'True' - # type=bool|default=False: Any mean and linear trend are removed before filtering. This will restore the trend after filtering. 
highpass: '0.005' # type=float|default=0.0: highpass - lowpass: '0.1' - # type=float|default=0.0: lowpass imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,7 +127,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dFourier -highpass 0.005000 -lowpass 0.100000 -prefix functional_fourier -retrend functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -142,14 +135,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dFourier - retrend: 'True' - # type=bool|default=False: Any mean and linear trend are removed before filtering. This will restore the trend after filtering. highpass: '0.005' # type=float|default=0.0: highpass - lowpass: '0.1' - # type=float|default=0.0: lowpass imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/fwh_mx.yaml b/example-specs/interface/nipype/afni/fwh_mx.yaml index 34d36603..e964c2dd 100644 --- a/example-specs/interface/nipype/afni/fwh_mx.yaml +++ b/example-specs/interface/nipype/afni/fwh_mx.yaml @@ -6,99 +6,99 @@ # Docs # ---- # -# Unlike the older 3dFWHM, this program computes FWHMs for all sub-bricks -# in the input dataset, each one separately. The output for each one is -# written to the file specified by '-out'. The mean (arithmetic or geometric) -# of all the FWHMs along each axis is written to stdout. (A non-positive -# output value indicates something bad happened; e.g., FWHM in z is meaningless -# for a 2D dataset; the estimation method computed incoherent intermediate results.) -# -# For complete details, see the `3dFWHMx Documentation. -# `_ -# -# (Classic) METHOD: -# -# * Calculate ratio of variance of first differences to data variance. -# * Should be the same as 3dFWHM for a 1-brick dataset. -# (But the output format is simpler to use in a script.) -# -# -# .. note:: IMPORTANT NOTE [AFNI > 16] -# -# A completely new method for estimating and using noise smoothness values is -# now available in 3dFWHMx and 3dClustSim. This method is implemented in the -# '-acf' options to both programs. 'ACF' stands for (spatial) AutoCorrelation -# Function, and it is estimated by calculating moments of differences out to -# a larger radius than before. -# -# Notably, real FMRI data does not actually have a Gaussian-shaped ACF, so the -# estimated ACF is then fit (in 3dFWHMx) to a mixed model (Gaussian plus -# mono-exponential) of the form -# -# .. math:: -# -# ACF(r) = a * exp(-r*r/(2*b*b)) + (1-a)*exp(-r/c) -# -# -# where :math:`r` is the radius, and :math:`a, b, c` are the fitted parameters. -# The apparent FWHM from this model is usually somewhat larger in real data -# than the FWHM estimated from just the nearest-neighbor differences used -# in the 'classic' analysis. 
-# -# The longer tails provided by the mono-exponential are also significant. -# 3dClustSim has also been modified to use the ACF model given above to generate -# noise random fields. -# -# .. note:: TL;DR or summary -# -# The take-awaymessage is that the 'classic' 3dFWHMx and -# 3dClustSim analysis, using a pure Gaussian ACF, is not very correct for -# FMRI data -- I cannot speak for PET or MEG data. -# -# .. warning:: -# -# Do NOT use 3dFWHMx on the statistical results (e.g., '-bucket') from -# 3dDeconvolve or 3dREMLfit!!! The function of 3dFWHMx is to estimate -# the smoothness of the time series NOISE, not of the statistics. This -# proscription is especially true if you plan to use 3dClustSim next!! -# -# .. note:: Recommendations -# -# * For FMRI statistical purposes, you DO NOT want the FWHM to reflect -# the spatial structure of the underlying anatomy. Rather, you want -# the FWHM to reflect the spatial structure of the noise. This means -# that the input dataset should not have anatomical (spatial) structure. -# * One good form of input is the output of '3dDeconvolve -errts', which is -# the dataset of residuals left over after the GLM fitted signal model is -# subtracted out from each voxel's time series. -# * If you don't want to go to that much trouble, use '-detrend' to approximately -# subtract out the anatomical spatial structure, OR use the output of 3dDetrend -# for the same purpose. -# * If you do not use '-detrend', the program attempts to find non-zero spatial -# structure in the input, and will print a warning message if it is detected. -# -# .. note:: Notes on -demend -# -# * I recommend this option, and it is not the default only for historical -# compatibility reasons. It may become the default someday. -# * It is already the default in program 3dBlurToFWHM. This is the same detrending -# as done in 3dDespike; using 2*q+3 basis functions for q > 0. 
-# * If you don't use '-detrend', the program now [Aug 2010] checks if a large number -# of voxels are have significant nonzero means. If so, the program will print a -# warning message suggesting the use of '-detrend', since inherent spatial -# structure in the image will bias the estimation of the FWHM of the image time -# series NOISE (which is usually the point of using 3dFWHMx). -# -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> fwhm = afni.FWHMx() -# >>> fwhm.inputs.in_file = 'functional.nii' -# >>> fwhm.cmdline -# '3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out' -# >>> res = fwhm.run() # doctest: +SKIP -# -# +# Unlike the older 3dFWHM, this program computes FWHMs for all sub-bricks +# in the input dataset, each one separately. The output for each one is +# written to the file specified by '-out'. The mean (arithmetic or geometric) +# of all the FWHMs along each axis is written to stdout. (A non-positive +# output value indicates something bad happened; e.g., FWHM in z is meaningless +# for a 2D dataset; the estimation method computed incoherent intermediate results.) +# +# For complete details, see the `3dFWHMx Documentation. +# `_ +# +# (Classic) METHOD: +# +# * Calculate ratio of variance of first differences to data variance. +# * Should be the same as 3dFWHM for a 1-brick dataset. +# (But the output format is simpler to use in a script.) +# +# +# .. note:: IMPORTANT NOTE [AFNI > 16] +# +# A completely new method for estimating and using noise smoothness values is +# now available in 3dFWHMx and 3dClustSim. This method is implemented in the +# '-acf' options to both programs. 'ACF' stands for (spatial) AutoCorrelation +# Function, and it is estimated by calculating moments of differences out to +# a larger radius than before. 
+# +# Notably, real FMRI data does not actually have a Gaussian-shaped ACF, so the +# estimated ACF is then fit (in 3dFWHMx) to a mixed model (Gaussian plus +# mono-exponential) of the form +# +# .. math:: +# +# ACF(r) = a * exp(-r*r/(2*b*b)) + (1-a)*exp(-r/c) +# +# +# where :math:`r` is the radius, and :math:`a, b, c` are the fitted parameters. +# The apparent FWHM from this model is usually somewhat larger in real data +# than the FWHM estimated from just the nearest-neighbor differences used +# in the 'classic' analysis. +# +# The longer tails provided by the mono-exponential are also significant. +# 3dClustSim has also been modified to use the ACF model given above to generate +# noise random fields. +# +# .. note:: TL;DR or summary +# +# The take-awaymessage is that the 'classic' 3dFWHMx and +# 3dClustSim analysis, using a pure Gaussian ACF, is not very correct for +# FMRI data -- I cannot speak for PET or MEG data. +# +# .. warning:: +# +# Do NOT use 3dFWHMx on the statistical results (e.g., '-bucket') from +# 3dDeconvolve or 3dREMLfit!!! The function of 3dFWHMx is to estimate +# the smoothness of the time series NOISE, not of the statistics. This +# proscription is especially true if you plan to use 3dClustSim next!! +# +# .. note:: Recommendations +# +# * For FMRI statistical purposes, you DO NOT want the FWHM to reflect +# the spatial structure of the underlying anatomy. Rather, you want +# the FWHM to reflect the spatial structure of the noise. This means +# that the input dataset should not have anatomical (spatial) structure. +# * One good form of input is the output of '3dDeconvolve -errts', which is +# the dataset of residuals left over after the GLM fitted signal model is +# subtracted out from each voxel's time series. +# * If you don't want to go to that much trouble, use '-detrend' to approximately +# subtract out the anatomical spatial structure, OR use the output of 3dDetrend +# for the same purpose. 
+# * If you do not use '-detrend', the program attempts to find non-zero spatial +# structure in the input, and will print a warning message if it is detected. +# +# .. note:: Notes on -demend +# +# * I recommend this option, and it is not the default only for historical +# compatibility reasons. It may become the default someday. +# * It is already the default in program 3dBlurToFWHM. This is the same detrending +# as done in 3dDespike; using 2*q+3 basis functions for q > 0. +# * If you don't use '-detrend', the program now [Aug 2010] checks if a large number +# of voxels are have significant nonzero means. If so, the program will print a +# warning message suggesting the use of '-detrend', since inherent spatial +# structure in the image will bias the estimation of the FWHM of the image time +# series NOISE (which is usually the point of using 3dFWHMx). +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> fwhm = afni.FWHMx() +# >>> fwhm.inputs.in_file = 'functional.nii' +# >>> fwhm.cmdline +# '3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out' +# >>> res = fwhm.run() # doctest: +SKIP +# +# task_name: FWHMx nipype_name: FWHMx nipype_module: nipype.interfaces.afni.utils @@ -117,15 +117,6 @@ inputs: # type=file|default=: input dataset mask: generic/file # type=file|default=: use only voxels that are nonzero in mask - out_detrend: Path - # type=file: output file, detrended - # type=file|default=: Save the detrended file into a dataset - out_file: Path - # type=file: output file - # type=file|default=: output file - out_subbricks: Path - # type=file: output file (subbricks) - # type=file|default=: output file listing the subbricks FWHM callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -161,7 +152,7 @@ outputs: fwhm: fwhm_callable # type=traitcompound: FWHM along each axis templates: - # dict[str, 
str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -204,7 +195,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -224,7 +215,7 @@ tests: in_file: # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -239,7 +230,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dFWHMx -input functional.nii -out functional_subbricks.out > functional_fwhmx.out +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -248,7 +239,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/gcor.yaml b/example-specs/interface/nipype/afni/gcor.yaml index 451c2d5e..1e52fd9e 100644 --- a/example-specs/interface/nipype/afni/gcor.yaml +++ b/example-specs/interface/nipype/afni/gcor.yaml @@ -6,24 +6,24 @@ # Docs # ---- # -# Computes the average correlation between every voxel -# and ever other voxel, over any give mask. +# Computes the average correlation between every voxel +# and ever other voxel, over any give mask. # # -# For complete details, see the `@compute_gcor Documentation. -# `_ +# For complete details, see the `@compute_gcor Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> gcor = afni.GCOR() +# >>> gcor.inputs.in_file = 'structural.nii' +# >>> gcor.inputs.nfirst = 4 +# >>> gcor.cmdline +# '@compute_gcor -nfirst 4 -input structural.nii' +# >>> res = gcor.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> gcor = afni.GCOR() -# >>> gcor.inputs.in_file = 'structural.nii' -# >>> gcor.inputs.nfirst = 4 -# >>> gcor.cmdline -# '@compute_gcor -nfirst 4 -input structural.nii' -# >>> res = gcor.run() # doctest: +SKIP # -# task_name: GCOR nipype_name: GCOR nipype_module: nipype.interfaces.afni.utils @@ -64,7 +64,7 @@ outputs: out: out_callable # type=float: global correlation value templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -84,7 +84,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -103,10 +103,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input dataset to compute the GCOR over - nfirst: '4' - # type=int|default=0: specify number of initial TRs to ignore imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, 
with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,7 +119,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: '@compute_gcor -nfirst 4 -input structural.nii' +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -129,10 +127,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input dataset to compute the GCOR over - nfirst: '4' - # type=int|default=0: specify number of initial TRs to ignore imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/hist.yaml b/example-specs/interface/nipype/afni/hist.yaml index 86e03533..c6d0185e 100644 --- a/example-specs/interface/nipype/afni/hist.yaml +++ b/example-specs/interface/nipype/afni/hist.yaml @@ -6,21 +6,21 @@ # Docs # ---- # Computes average of all voxels in the input dataset -# which satisfy the criterion in the options list +# which satisfy the criterion in the options list # -# For complete details, see the `3dHist Documentation. -# `_ +# For complete details, see the `3dHist Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> hist = afni.Hist() +# >>> hist.inputs.in_file = 'functional.nii' +# >>> hist.cmdline +# '3dHist -input functional.nii -prefix functional_hist' +# >>> res = hist.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> hist = afni.Hist() -# >>> hist.inputs.in_file = 'functional.nii' -# >>> hist.cmdline -# '3dHist -input functional.nii -prefix functional_hist' -# >>> res = hist.run() # doctest: +SKIP # -# task_name: Hist nipype_name: Hist nipype_module: nipype.interfaces.afni.preprocess @@ -39,12 +39,6 @@ inputs: # type=file|default=: input file to 3dHist mask: generic/file # type=file|default=: matrix to align input file - out_file: Path - # type=file: output file - # type=file|default=: Write histogram to niml file with this prefix - out_show: Path - # type=file: output visual histogram - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +97,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,7 +117,7 @@ tests: in_file: # type=file|default=: input file to 3dHist imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +132,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dHist -input functional.nii -prefix functional_hist +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -147,7 +141,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input file to 3dHist imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/lfcd.yaml b/example-specs/interface/nipype/afni/lfcd.yaml index 92945608..224a78d7 100644 --- a/example-specs/interface/nipype/afni/lfcd.yaml +++ b/example-specs/interface/nipype/afni/lfcd.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Performs degree centrality on a dataset using a given maskfile -# via the 3dLFCD command +# via the 3dLFCD command # -# For complete details, see the `3dLFCD Documentation. 
-# `_ +# For complete details, see the `3dLFCD Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> lfcd = afni.LFCD() +# >>> lfcd.inputs.in_file = 'functional.nii' +# >>> lfcd.inputs.mask = 'mask.nii' +# >>> lfcd.inputs.thresh = 0.8 # keep all connections with corr >= 0.8 +# >>> lfcd.inputs.out_file = 'out.nii' +# >>> lfcd.cmdline +# '3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii' +# >>> res = lfcd.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> lfcd = afni.LFCD() -# >>> lfcd.inputs.in_file = 'functional.nii' -# >>> lfcd.inputs.mask = 'mask.nii' -# >>> lfcd.inputs.thresh = 0.8 # keep all connections with corr >= 0.8 -# >>> lfcd.inputs.out_file = 'out.nii' -# >>> lfcd.cmdline -# '3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii' -# >>> res = lfcd.run() # doctest: +SKIP -# task_name: LFCD nipype_name: LFCD nipype_module: nipype.interfaces.afni.preprocess @@ -39,11 +39,8 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dLFCD - mask: medimage/nifti1 + mask: generic/file # type=file|default=: mask file to mask input data - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,14 +57,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -98,7 +95,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -117,15 +114,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dLFCD - mask: - # type=file|default=: mask file to mask input data thresh: '0.8 # keep all connections with corr >= 0.8' # type=float|default=0.0: threshold to exclude connections where corr <= thresh - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,7 +132,7 @@ tests: # bool - whether the unittest is expected 
to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dLFCD -mask mask.nii -prefix out.nii -thresh 0.800000 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -148,15 +140,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dLFCD - mask: '"mask.nii"' - # type=file|default=: mask file to mask input data thresh: '0.8 # keep all connections with corr >= 0.8' # type=float|default=0.0: threshold to exclude connections where corr <= thresh - out_file: '"out.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/local_bistat.yaml b/example-specs/interface/nipype/afni/local_bistat.yaml index 97b8f92c..3ffed8aa 100644 --- a/example-specs/interface/nipype/afni/local_bistat.yaml +++ b/example-specs/interface/nipype/afni/local_bistat.yaml @@ -6,25 +6,25 @@ # Docs # ---- # 3dLocalBistat - computes statistics between 2 datasets, at each voxel, -# based on a local neighborhood of that voxel. +# based on a local neighborhood of that voxel. # -# For complete details, see the `3dLocalBistat Documentation. -# `_ +# For complete details, see the `3dLocalBistat Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> bistat = afni.LocalBistat() +# >>> bistat.inputs.in_file1 = 'functional.nii' +# >>> bistat.inputs.in_file2 = 'structural.nii' +# >>> bistat.inputs.neighborhood = ('SPHERE', 1.2) +# >>> bistat.inputs.stat = 'pearson' +# >>> bistat.inputs.outputtype = 'NIFTI' +# >>> bistat.cmdline +# "3dLocalBistat -prefix functional_bistat.nii -nbhd 'SPHERE(1.2)' -stat pearson functional.nii structural.nii" +# >>> res = automask.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> bistat = afni.LocalBistat() -# >>> bistat.inputs.in_file1 = 'functional.nii' -# >>> bistat.inputs.in_file2 = 'structural.nii' -# >>> bistat.inputs.neighborhood = ('SPHERE', 1.2) -# >>> bistat.inputs.stat = 'pearson' -# >>> bistat.inputs.outputtype = 'NIFTI' -# >>> bistat.cmdline -# "3dLocalBistat -prefix functional_bistat.nii -nbhd 'SPHERE(1.2)' -stat pearson functional.nii structural.nii" -# >>> res = automask.run() # doctest: +SKIP # -# task_name: LocalBistat nipype_name: LocalBistat nipype_module: nipype.interfaces.afni.utils @@ -41,13 +41,10 @@ inputs: # passed to the field in the automatically generated unittests. in_file1: medimage/nifti1 # type=file|default=: Filename of the first image - in_file2: medimage/nifti1 + in_file2: generic/file # type=file|default=: Filename of the second image mask_file: generic/file # type=file|default=: mask image file name. Voxels NOT in the mask will not be used in the neighborhood of any voxel. Also, a voxel NOT in the mask will have its statistic(s) computed as zero (0). - out_file: Path - # type=file: output file - # type=file|default=: Output dataset. weight_file: generic/file # type=file|default=: File name of an image to use as a weight. Only applies to 'pearson' statistics. 
callable_defaults: @@ -73,7 +70,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +103,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,16 +122,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file1: # type=file|default=: Filename of the first image - in_file2: - # type=file|default=: Filename of the second image neighborhood: ("SPHERE", 1.2) # type=traitcompound|default=None: The region around each voxel that will be extracted for the statistics calculation. Possible regions are: 'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' (truncated octahedron) with a given radius in mm or 'RECT' (rectangular block) with dimensions to specify in mm. - stat: '"pearson"' - # type=inputmultiobject|default=[]: Statistics to compute. 
Possible names are: * pearson = Pearson correlation coefficient * spearman = Spearman correlation coefficient * quadrant = Quadrant correlation coefficient * mutinfo = Mutual Information * normuti = Normalized Mutual Information * jointent = Joint entropy * hellinger= Hellinger metric * crU = Correlation ratio (Unsymmetric) * crM = Correlation ratio (symmetrized by Multiplication) * crA = Correlation ratio (symmetrized by Addition) * L2slope = slope of least-squares (L2) linear regression of the data from dataset1 vs. the dataset2 (i.e., d2 = a + b*d1 ==> this is 'b') * L1slope = slope of least-absolute-sum (L1) linear regression of the data from dataset1 vs. the dataset2 * num = number of the values in the region: with the use of -mask or -automask, the size of the region around any given voxel will vary; this option lets you map that size. * ALL = all of the above, in that order More than one option can be used. outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -149,7 +142,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dLocalBistat -prefix functional_bistat.nii -nbhd "SPHERE(1.2)" -stat pearson functional.nii structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -157,16 +150,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file1: '"functional.nii"' # type=file|default=: Filename of the first image - in_file2: '"structural.nii"' - # type=file|default=: Filename of the second image neighborhood: ("SPHERE", 1.2) # type=traitcompound|default=None: The region around each voxel that will be extracted for the statistics calculation. Possible regions are: 'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' (truncated octahedron) with a given radius in mm or 'RECT' (rectangular block) with dimensions to specify in mm. - stat: '"pearson"' - # type=inputmultiobject|default=[]: Statistics to compute. Possible names are: * pearson = Pearson correlation coefficient * spearman = Spearman correlation coefficient * quadrant = Quadrant correlation coefficient * mutinfo = Mutual Information * normuti = Normalized Mutual Information * jointent = Joint entropy * hellinger= Hellinger metric * crU = Correlation ratio (Unsymmetric) * crM = Correlation ratio (symmetrized by Multiplication) * crA = Correlation ratio (symmetrized by Addition) * L2slope = slope of least-squares (L2) linear regression of the data from dataset1 vs. the dataset2 (i.e., d2 = a + b*d1 ==> this is 'b') * L1slope = slope of least-absolute-sum (L1) linear regression of the data from dataset1 vs. the dataset2 * num = number of the values in the region: with the use of -mask or -automask, the size of the region around any given voxel will vary; this option lets you map that size. * ALL = all of the above, in that order More than one option can be used. outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/localstat.yaml b/example-specs/interface/nipype/afni/localstat.yaml index dba6496b..5ae3a9f1 100644 --- a/example-specs/interface/nipype/afni/localstat.yaml +++ b/example-specs/interface/nipype/afni/localstat.yaml @@ -6,26 +6,26 @@ # Docs # ---- # 3dLocalstat - computes statistics at each voxel, -# based on a local neighborhood of that voxel. +# based on a local neighborhood of that voxel. # -# For complete details, see the `3dLocalstat Documentation. -# `_ +# For complete details, see the `3dLocalstat Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> localstat = afni.Localstat() +# >>> localstat.inputs.in_file = 'functional.nii' +# >>> localstat.inputs.mask_file = 'skeleton_mask.nii.gz' +# >>> localstat.inputs.neighborhood = ('SPHERE', 45) +# >>> localstat.inputs.stat = 'mean' +# >>> localstat.inputs.nonmask = True +# >>> localstat.inputs.outputtype = 'NIFTI_GZ' +# >>> localstat.cmdline +# "3dLocalstat -prefix functional_localstat.nii -mask skeleton_mask.nii.gz -nbhd 'SPHERE(45.0)' -use_nonmask -stat mean functional.nii" +# >>> res = localstat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> localstat = afni.Localstat() -# >>> localstat.inputs.in_file = 'functional.nii' -# >>> localstat.inputs.mask_file = 'skeleton_mask.nii.gz' -# >>> localstat.inputs.neighborhood = ('SPHERE', 45) -# >>> localstat.inputs.stat = 'mean' -# >>> localstat.inputs.nonmask = True -# >>> localstat.inputs.outputtype = 'NIFTI_GZ' -# >>> localstat.cmdline -# "3dLocalstat -prefix functional_localstat.nii -mask skeleton_mask.nii.gz -nbhd 'SPHERE(45.0)' -use_nonmask -stat mean functional.nii" -# >>> res = localstat.run() # doctest: +SKIP # -# task_name: Localstat nipype_name: Localstat nipype_module: nipype.interfaces.afni.utils @@ -42,11 +42,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input dataset - mask_file: medimage/nifti-gz + mask_file: generic/file # type=file|default=: Mask image file name. Voxels NOT in the mask will not be used in the neighborhood of any voxel. Also, a voxel NOT in the mask will have its statistic(s) computed as zero (0) unless the parameter 'nonmask' is set to true. - out_file: Path - # type=file: output file - # type=file|default=: Output dataset. callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -70,7 +67,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,7 +89,7 @@ tests: reduce_grid: # type=traitcompound|default=None: Compute output on a grid that is reduced by the specified factors. If a single value is passed, output is resampled to the specified isotropic grid. Otherwise, the 3 inputs describe the reduction in the X, Y, and Z directions. This option speeds up computations at the expense of resolution. It should only be used when the nbhd is quite large with respect to the input's resolution, and the resultant stats are expected to be smooth. reduce_restore_grid: - # type=traitcompound|default=None: Like reduce_grid, but also resample output back to inputgrid. + # type=traitcompound|default=None: Like reduce_grid, but also resample output back to input grid. reduce_max_vox: # type=float|default=0.0: Like reduce_restore_grid, but automatically set Rx Ry Rz sothat the computation grid is at a resolution of nbhd/MAX_VOXvoxels. 
grid_rmode: @@ -113,7 +110,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,18 +129,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input dataset - mask_file: - # type=file|default=: Mask image file name. Voxels NOT in the mask will not be used in the neighborhood of any voxel. Also, a voxel NOT in the mask will have its statistic(s) computed as zero (0) unless the parameter 'nonmask' is set to true. neighborhood: ("SPHERE", 45) # type=traitcompound|default=None: The region around each voxel that will be extracted for the statistics calculation. Possible regions are: 'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' (truncated octahedron) with a given radius in mm or 'RECT' (rectangular block) with dimensions to specify in mm. - stat: '"mean"' - # type=inputmultiobject|default=[]: statistics to compute. Possible names are: * mean = average of the values * stdev = standard deviation * var = variance (stdev\*stdev) * cvar = coefficient of variation = stdev/fabs(mean) * median = median of the values * MAD = median absolute deviation * min = minimum * max = maximum * absmax = maximum of the absolute values * num = number of the values in the region: with the use of -mask or -automask, the size of the region around any given voxel will vary; this option lets you map that size. It may be useful if you plan to compute a t-statistic (say) from the mean and stdev outputs. 
* sum = sum of the values in the region * FWHM = compute (like 3dFWHM) image smoothness inside each voxel's neighborhood. Results are in 3 sub-bricks: FWHMx, FHWMy, and FWHMz. Places where an output is -1 are locations where the FWHM value could not be computed (e.g., outside the mask). * FWHMbar= Compute just the average of the 3 FWHM values (normally would NOT do this with FWHM also). * perc:P0:P1:Pstep = Compute percentiles between P0 and P1 with a step of Pstep. Default P1 is equal to P0 and default P2 = 1 * rank = rank of the voxel's intensity * frank = rank / number of voxels in neighborhood * P2skew = Pearson's second skewness coefficient 3 \* (mean - median) / stdev * ALL = all of the above, in that order (except for FWHMbar and perc). * mMP2s = Exactly the same output as: median, MAD, P2skew, but a little faster * mmMP2s = Exactly the same output as: mean, median, MAD, P2skew More than one option can be used. nonmask: 'True' # type=bool|default=False: Voxels not in the mask WILL have their local statistics computed from all voxels in their neighborhood that ARE in the mask. For instance, this option can be used to compute the average local white matter time series, even at non-WM voxels. - outputtype: '"NIFTI_GZ"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,7 +149,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dLocalstat -prefix functional_localstat.nii -mask skeleton_mask.nii.gz -nbhd "SPHERE(45.0)" -use_nonmask -stat mean functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -166,18 +157,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input dataset - mask_file: '"skeleton_mask.nii.gz"' - # type=file|default=: Mask image file name. Voxels NOT in the mask will not be used in the neighborhood of any voxel. Also, a voxel NOT in the mask will have its statistic(s) computed as zero (0) unless the parameter 'nonmask' is set to true. neighborhood: ("SPHERE", 45) # type=traitcompound|default=None: The region around each voxel that will be extracted for the statistics calculation. Possible regions are: 'SPHERE', 'RHDD' (rhombic dodecahedron), 'TOHD' (truncated octahedron) with a given radius in mm or 'RECT' (rectangular block) with dimensions to specify in mm. - stat: '"mean"' - # type=inputmultiobject|default=[]: statistics to compute. Possible names are: * mean = average of the values * stdev = standard deviation * var = variance (stdev\*stdev) * cvar = coefficient of variation = stdev/fabs(mean) * median = median of the values * MAD = median absolute deviation * min = minimum * max = maximum * absmax = maximum of the absolute values * num = number of the values in the region: with the use of -mask or -automask, the size of the region around any given voxel will vary; this option lets you map that size. It may be useful if you plan to compute a t-statistic (say) from the mean and stdev outputs. * sum = sum of the values in the region * FWHM = compute (like 3dFWHM) image smoothness inside each voxel's neighborhood. Results are in 3 sub-bricks: FWHMx, FHWMy, and FWHMz. 
Places where an output is -1 are locations where the FWHM value could not be computed (e.g., outside the mask). * FWHMbar= Compute just the average of the 3 FWHM values (normally would NOT do this with FWHM also). * perc:P0:P1:Pstep = Compute percentiles between P0 and P1 with a step of Pstep. Default P1 is equal to P0 and default P2 = 1 * rank = rank of the voxel's intensity * frank = rank / number of voxels in neighborhood * P2skew = Pearson's second skewness coefficient 3 \* (mean - median) / stdev * ALL = all of the above, in that order (except for FWHMbar and perc). * mMP2s = Exactly the same output as: median, MAD, P2skew, but a little faster * mmMP2s = Exactly the same output as: mean, median, MAD, P2skew More than one option can be used. nonmask: 'True' # type=bool|default=False: Voxels not in the mask WILL have their local statistics computed from all voxels in their neighborhood that ARE in the mask. For instance, this option can be used to compute the average local white matter time series, even at non-WM voxels. - outputtype: '"NIFTI_GZ"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/mask_tool.yaml b/example-specs/interface/nipype/afni/mask_tool.yaml index a61da219..2dbd17dc 100644 --- a/example-specs/interface/nipype/afni/mask_tool.yaml +++ b/example-specs/interface/nipype/afni/mask_tool.yaml @@ -7,20 +7,20 @@ # ---- # 3dmask_tool - for combining/dilating/eroding/filling masks # -# For complete details, see the `3dmask_tool Documentation. 
-# `_ +# For complete details, see the `3dmask_tool Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> masktool = afni.MaskTool() +# >>> masktool.inputs.in_file = 'functional.nii' +# >>> masktool.inputs.outputtype = 'NIFTI' +# >>> masktool.cmdline +# '3dmask_tool -prefix functional_mask.nii -input functional.nii' +# >>> res = automask.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> masktool = afni.MaskTool() -# >>> masktool.inputs.in_file = 'functional.nii' -# >>> masktool.inputs.outputtype = 'NIFTI' -# >>> masktool.cmdline -# '3dmask_tool -prefix functional_mask.nii -input functional.nii' -# >>> res = automask.run() # doctest: +SKIP # -# task_name: MaskTool nipype_name: MaskTool nipype_module: nipype.interfaces.afni.utils @@ -37,9 +37,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1+list-of # type=inputmultiobject|default=[]: input file or files to 3dmask_tool - out_file: Path - # type=file: mask file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -63,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -104,7 +101,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,10 +120,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=inputmultiobject|default=[]: input file or files to 3dmask_tool - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -141,7 +136,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dmask_tool -prefix functional_mask.nii -input functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -149,10 +144,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=inputmultiobject|default=[]: input file or files to 3dmask_tool - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/maskave.yaml b/example-specs/interface/nipype/afni/maskave.yaml index cb035e58..d1e6f6b0 100644 --- a/example-specs/interface/nipype/afni/maskave.yaml +++ b/example-specs/interface/nipype/afni/maskave.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Computes average of all voxels in the input dataset -# which satisfy the criterion in the options list +# which satisfy the criterion in the options list # -# For complete details, see the `3dmaskave Documentation. -# `_ +# For complete details, see the `3dmaskave Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> maskave = afni.Maskave() +# >>> maskave.inputs.in_file = 'functional.nii' +# >>> maskave.inputs.mask= 'seed_mask.nii' +# >>> maskave.inputs.quiet= True +# >>> maskave.cmdline # doctest: +ELLIPSIS +# '3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D' +# >>> res = maskave.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> maskave = afni.Maskave() -# >>> maskave.inputs.in_file = 'functional.nii' -# >>> maskave.inputs.mask= 'seed_mask.nii' -# >>> maskave.inputs.quiet= True -# >>> maskave.cmdline # doctest: +ELLIPSIS -# '3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D' -# >>> res = maskave.run() # doctest: +SKIP # -# task_name: Maskave nipype_name: Maskave nipype_module: nipype.interfaces.afni.preprocess @@ -39,11 +39,8 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dmaskave - mask: medimage/nifti1 + mask: generic/file # type=file|default=: matrix to align input file - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,7 +89,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -111,12 +108,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dmaskave - mask: - # type=file|default=: matrix to align input file quiet: 'True' # type=bool|default=False: matrix to align input file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,7 +126,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dmaskave -mask seed_mask.nii -quiet functional.nii > functional_maskave.1D +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -139,12 +134,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input file to 3dmaskave - mask: '"seed_mask.nii"' - # type=file|default=: matrix to align input file quiet: 'True' # type=bool|default=False: matrix to align input file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/means.yaml b/example-specs/interface/nipype/afni/means.yaml index eb8748eb..da9b26f1 100644 --- a/example-specs/interface/nipype/afni/means.yaml +++ b/example-specs/interface/nipype/afni/means.yaml @@ -7,30 +7,30 @@ # ---- # Takes the voxel-by-voxel mean of all input datasets using 3dMean # -# For complete details, see the `3dMean Documentation. -# `_ +# For complete details, see the `3dMean Documentation. 
+# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> means = afni.Means() -# >>> means.inputs.in_file_a = 'im1.nii' -# >>> means.inputs.in_file_b = 'im2.nii' -# >>> means.inputs.out_file = 'output.nii' -# >>> means.cmdline -# '3dMean -prefix output.nii im1.nii im2.nii' -# >>> res = means.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> means = afni.Means() +# >>> means.inputs.in_file_a = 'im1.nii' +# >>> means.inputs.in_file_b = 'im2.nii' +# >>> means.inputs.out_file = 'output.nii' +# >>> means.cmdline +# '3dMean -prefix output.nii im1.nii im2.nii' +# >>> res = means.run() # doctest: +SKIP +# +# >>> from nipype.interfaces import afni +# >>> means = afni.Means() +# >>> means.inputs.in_file_a = 'im1.nii' +# >>> means.inputs.out_file = 'output.nii' +# >>> means.inputs.datum = 'short' +# >>> means.cmdline +# '3dMean -datum short -prefix output.nii im1.nii' +# >>> res = means.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> means = afni.Means() -# >>> means.inputs.in_file_a = 'im1.nii' -# >>> means.inputs.out_file = 'output.nii' -# >>> means.inputs.datum = 'short' -# >>> means.cmdline -# '3dMean -datum short -prefix output.nii im1.nii' -# >>> res = means.run() # doctest: +SKIP # -# task_name: Means nipype_name: Means nipype_module: nipype.interfaces.afni.preprocess @@ -47,11 +47,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file_a: medimage/nifti1 # type=file|default=: input file to 3dMean - in_file_b: medimage/nifti1 + in_file_b: generic/file # type=file|default=: another input file to 3dMean - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -75,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -116,7 +113,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,13 +132,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file to 3dMean - in_file_b: - # type=file|default=: another input file to 3dMean out_file: ' "output.nii"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and 
optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -160,13 +155,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file_a: # type=file|default=: input file to 3dMean - out_file: ' "output.nii"' - # type=file: output file - # type=file|default=: output image file name datum: '"short"' # type=str|default='': Sets the data type of the output dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -181,7 +173,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dMean -prefix output.nii im1.nii im2.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -189,13 +181,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file_a: '"im1.nii"' # type=file|default=: input file to 3dMean - in_file_b: '"im2.nii"' - # type=file|default=: another input file to 3dMean out_file: ' "output.nii"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -207,13 +197,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file_a: '"im1.nii"' # type=file|default=: input file to 3dMean - out_file: ' "output.nii"' - # type=file: output file - # type=file|default=: output image file name datum: '"short"' # type=str|default='': Sets the data type of the output dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/merge.yaml b/example-specs/interface/nipype/afni/merge.yaml index 807163d2..64899d5b 100644 --- a/example-specs/interface/nipype/afni/merge.yaml +++ b/example-specs/interface/nipype/afni/merge.yaml @@ -7,22 +7,22 @@ # ---- # Merge or edit volumes using AFNI 3dmerge command # -# For complete details, see the `3dmerge Documentation. -# `_ +# For complete details, see the `3dmerge Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> merge = afni.Merge() +# >>> merge.inputs.in_files = ['functional.nii', 'functional2.nii'] +# >>> merge.inputs.blurfwhm = 4 +# >>> merge.inputs.doall = True +# >>> merge.inputs.out_file = 'e7.nii' +# >>> merge.cmdline +# '3dmerge -1blur_fwhm 4 -doall -prefix e7.nii functional.nii functional2.nii' +# >>> res = merge.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> merge = afni.Merge() -# >>> merge.inputs.in_files = ['functional.nii', 'functional2.nii'] -# >>> merge.inputs.blurfwhm = 4 -# >>> merge.inputs.doall = True -# >>> merge.inputs.out_file = 'e7.nii' -# >>> merge.cmdline -# '3dmerge -1blur_fwhm 4 -doall -prefix e7.nii functional.nii functional2.nii' -# >>> res = merge.run() # doctest: +SKIP # -# task_name: Merge nipype_name: Merge nipype_module: nipype.interfaces.afni.utils @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -58,14 +55,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -90,7 +87,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,15 +106,10 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: - blurfwhm: '4' - # type=int|default=0: FWHM blur value (mm) doall: 'True' # type=bool|default=False: apply options to all sub-bricks in dataset - out_file: '"e7.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,7 +124,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dmerge -1blur_fwhm 4 -doall -prefix e7.nii functional.nii functional2.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -140,15 +132,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional.nii", "functional2.nii"]' # type=inputmultiobject|default=[]: - blurfwhm: '4' - # type=int|default=0: FWHM blur value (mm) doall: 'True' # type=bool|default=False: apply options to all sub-bricks in dataset - out_file: '"e7.nii"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/net_corr.yaml b/example-specs/interface/nipype/afni/net_corr.yaml index fa1c7e32..e5d73e43 100644 --- a/example-specs/interface/nipype/afni/net_corr.yaml +++ b/example-specs/interface/nipype/afni/net_corr.yaml @@ -6,27 +6,27 @@ # Docs # ---- # Calculate correlation matrix of a set of ROIs (using mean time series of -# each). Several networks may be analyzed simultaneously, one per brick. +# each). Several networks may be analyzed simultaneously, one per brick. # -# For complete details, see the `3dNetCorr Documentation -# `_. +# For complete details, see the `3dNetCorr Documentation +# `_. 
+# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> ncorr = afni.NetCorr() +# >>> ncorr.inputs.in_file = 'functional.nii' +# >>> ncorr.inputs.mask = 'mask.nii' +# >>> ncorr.inputs.in_rois = 'maps.nii' +# >>> ncorr.inputs.ts_wb_corr = True +# >>> ncorr.inputs.ts_wb_Z = True +# >>> ncorr.inputs.fish_z = True +# >>> ncorr.inputs.out_file = 'sub0.tp1.ncorr' +# >>> ncorr.cmdline +# '3dNetCorr -prefix sub0.tp1.ncorr -fish_z -inset functional.nii -in_rois maps.nii -mask mask.nii -ts_wb_Z -ts_wb_corr' +# >>> res = ncorr.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> ncorr = afni.NetCorr() -# >>> ncorr.inputs.in_file = 'functional.nii' -# >>> ncorr.inputs.mask = 'mask.nii' -# >>> ncorr.inputs.in_rois = 'maps.nii' -# >>> ncorr.inputs.ts_wb_corr = True -# >>> ncorr.inputs.ts_wb_Z = True -# >>> ncorr.inputs.fish_z = True -# >>> ncorr.inputs.out_file = 'sub0.tp1.ncorr' -# >>> ncorr.cmdline -# '3dNetCorr -prefix sub0.tp1.ncorr -fish_z -inset functional.nii -in_rois maps.nii -mask mask.nii -ts_wb_Z -ts_wb_corr' -# >>> res = ncorr.run() # doctest: +SKIP # -# task_name: NetCorr nipype_name: NetCorr nipype_module: nipype.interfaces.afni.preprocess @@ -45,10 +45,8 @@ inputs: # type=file|default=: input time series file (4D data set) in_rois: medimage/nifti1 # type=file|default=: input set of ROIs, each labelled with distinct integers - mask: medimage/nifti1 + mask: generic/file # type=file|default=: can include a whole brain mask within which to calculate correlation. Otherwise, data should be masked already - out_file: Path - # type=file|default=: output file name part weight_ts: generic/file # type=file|default=: input a 1D file WTS of weights that will be applied multiplicatively to each ROI's average time series. WTS can be a column- or row-file of values, but it must have the same length as the input time series volume. 
If the initial average time series was A[n] for n=0,..,(N-1) time points, then applying a set of weights W[n] of the same length from WTS would produce a new time series: B[n] = A[n] * W[n] callable_defaults: @@ -75,7 +73,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -125,7 +123,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,20 +142,14 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input time series file (4D data set) - mask: - # type=file|default=: can include a whole brain mask within which to calculate correlation. Otherwise, data should be masked already in_rois: # type=file|default=: input set of ROIs, each labelled with distinct integers - ts_wb_corr: 'True' - # type=bool|default=False: switch to create a set of whole brain correlation maps. Performs whole brain correlation for each ROI's average time series; this will automatically create a directory for each network that contains the set of whole brain correlation maps (Pearson 'r's). 
The directories are labelled as above for '-ts_indiv' Within each directory, the files are labelled WB_CORR_ROI_001+orig, WB_CORR_ROI_002+orig, etc., with the numbers given by the actual ROI integer labels ts_wb_Z: 'True' # type=bool|default=False: same as above in '-ts_wb_corr', except that the maps have been Fisher transformed to Z-scores the relation: Z=atanh(r). To avoid infinities in the transform, Pearson values are effectively capped at |r| = 0.999329 (where |Z| = 4.0). Files are labelled WB_Z_ROI_001+orig, etc - fish_z: 'True' - # type=bool|default=False: switch to also output a matrix of Fisher Z-transform values for the corr coefs (r): Z = atanh(r) , (with Z=4 being output along matrix diagonals where r=1, as the r-to-Z conversion is ceilinged at Z = atanh(r=0.999329) = 4, which is still *quite* a high Pearson-r value out_file: '"sub0.tp1.ncorr"' # type=file|default=: output file name part imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -172,7 +164,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dNetCorr -prefix sub0.tp1.ncorr -fish_z -inset functional.nii -in_rois maps.nii -mask mask.nii -ts_wb_Z -ts_wb_corr +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -180,20 +172,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input time series file (4D data set) - mask: '"mask.nii"' - # type=file|default=: can include a whole brain mask within which to calculate correlation. Otherwise, data should be masked already in_rois: '"maps.nii"' # type=file|default=: input set of ROIs, each labelled with distinct integers - ts_wb_corr: 'True' - # type=bool|default=False: switch to create a set of whole brain correlation maps. Performs whole brain correlation for each ROI's average time series; this will automatically create a directory for each network that contains the set of whole brain correlation maps (Pearson 'r's). The directories are labelled as above for '-ts_indiv' Within each directory, the files are labelled WB_CORR_ROI_001+orig, WB_CORR_ROI_002+orig, etc., with the numbers given by the actual ROI integer labels ts_wb_Z: 'True' # type=bool|default=False: same as above in '-ts_wb_corr', except that the maps have been Fisher transformed to Z-scores the relation: Z=atanh(r). To avoid infinities in the transform, Pearson values are effectively capped at |r| = 0.999329 (where |Z| = 4.0). Files are labelled WB_Z_ROI_001+orig, etc - fish_z: 'True' - # type=bool|default=False: switch to also output a matrix of Fisher Z-transform values for the corr coefs (r): Z = atanh(r) , (with Z=4 being output along matrix diagonals where r=1, as the r-to-Z conversion is ceilinged at Z = atanh(r=0.999329) = 4, which is still *quite* a high Pearson-r value out_file: '"sub0.tp1.ncorr"' # type=file|default=: output file name part imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/notes.yaml b/example-specs/interface/nipype/afni/notes.yaml index 379c9378..131e0004 100644 --- a/example-specs/interface/nipype/afni/notes.yaml +++ b/example-specs/interface/nipype/afni/notes.yaml @@ -7,21 +7,21 @@ # ---- # A program to add, delete, and show notes for AFNI datasets. # -# For complete details, see the `3dNotes Documentation. -# `_ +# For complete details, see the `3dNotes Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> notes = afni.Notes() +# >>> notes.inputs.in_file = 'functional.HEAD' +# >>> notes.inputs.add = 'This note is added.' +# >>> notes.inputs.add_history = 'This note is added to history.' +# >>> notes.cmdline +# '3dNotes -a "This note is added." -h "This note is added to history." functional.HEAD' +# >>> res = notes.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> notes = afni.Notes() -# >>> notes.inputs.in_file = 'functional.HEAD' -# >>> notes.inputs.add = 'This note is added.' -# >>> notes.inputs.add_history = 'This note is added to history.' -# >>> notes.cmdline -# '3dNotes -a "This note is added." -h "This note is added to history." functional.HEAD' -# >>> res = notes.run() # doctest: +SKIP # -# task_name: Notes nipype_name: Notes nipype_module: nipype.interfaces.afni.utils @@ -36,11 +36,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: medimage-afni/head + in_file: fileformats.medimage_afni.Head # type=file|default=: input file to 3dNotes - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,12 +111,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dNotes - add: '"This note is added."' - # type=str|default='': note to add add_history: '"This note is added to history."' # type=str|default='': note to add to history imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected 
values for selected outputs, noting that tests will typically @@ -134,7 +129,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dNotes -a "This note is added." -h "This note is added to history." functional.HEAD +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -142,12 +137,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.HEAD"' # type=file|default=: input file to 3dNotes - add: '"This note is added."' - # type=str|default='': note to add add_history: '"This note is added to history."' # type=str|default='': note to add to history imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/nwarp_adjust.yaml b/example-specs/interface/nipype/afni/nwarp_adjust.yaml index 94fc924e..8d67671e 100644 --- a/example-specs/interface/nipype/afni/nwarp_adjust.yaml +++ b/example-specs/interface/nipype/afni/nwarp_adjust.yaml @@ -6,25 +6,25 @@ # Docs # ---- # This program takes as input a bunch of 3D warps, averages them, -# and computes the inverse of this average warp. It then composes -# each input warp with this inverse average to 'adjust' the set of -# warps. Optionally, it can also read in a set of 1-brick datasets -# corresponding to the input warps, and warp each of them, and average -# those. +# and computes the inverse of this average warp. 
It then composes +# each input warp with this inverse average to 'adjust' the set of +# warps. Optionally, it can also read in a set of 1-brick datasets +# corresponding to the input warps, and warp each of them, and average +# those. # -# For complete details, see the `3dNwarpAdjust Documentation. -# `_ +# For complete details, see the `3dNwarpAdjust Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> adjust = afni.NwarpAdjust() +# >>> adjust.inputs.warps = ['func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz'] +# >>> adjust.cmdline +# '3dNwarpAdjust -nwarp func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz' +# >>> res = adjust.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> adjust = afni.NwarpAdjust() -# >>> adjust.inputs.warps = ['func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz', 'func2anat_InverseWarp.nii.gz'] -# >>> adjust.cmdline -# '3dNwarpAdjust -nwarp func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz' -# >>> res = adjust.run() # doctest: +SKIP # -# task_name: NwarpAdjust nipype_name: NwarpAdjust nipype_module: nipype.interfaces.afni.utils @@ -41,9 +41,6 @@ inputs: # passed to the field in the automatically generated unittests. in_files: generic/file+list-of # type=inputmultiobject|default=[]: List of input 3D datasets to be warped by the adjusted warp datasets. There must be exactly as many of these datasets as there are input warps. - out_file: Path - # type=file: output file - # type=file|default=: Output mean dataset, only needed if in_files are also given. 
The output dataset will be on the common grid shared by the source datasets. warps: medimage/nifti-gz+list-of # type=inputmultiobject|default=[]: List of input 3D warp datasets callable_defaults: @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,7 +89,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,7 +109,7 @@ tests: warps: # type=inputmultiobject|default=[]: List of input 3D warp datasets imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +124,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dNwarpAdjust -nwarp func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz func2anat_InverseWarp.nii.gz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -136,7 +133,7 @@ doctests: warps: '["func2anat_InverseWarp.nii.gz", "func2anat_InverseWarp.nii.gz", "func2anat_InverseWarp.nii.gz", "func2anat_InverseWarp.nii.gz", "func2anat_InverseWarp.nii.gz"]' # type=inputmultiobject|default=[]: List of input 3D warp datasets imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/nwarp_apply.yaml b/example-specs/interface/nipype/afni/nwarp_apply.yaml index 0207690d..dfeb80d1 100644 --- a/example-specs/interface/nipype/afni/nwarp_apply.yaml +++ b/example-specs/interface/nipype/afni/nwarp_apply.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Program to apply a nonlinear 3D warp saved from 3dQwarp -# (or 3dNwarpCat, etc.) to a 3D dataset, to produce a warped -# version of the source dataset. +# (or 3dNwarpCat, etc.) to a 3D dataset, to produce a warped +# version of the source dataset. # -# For complete details, see the `3dNwarpApply Documentation. -# `_ +# For complete details, see the `3dNwarpApply Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> nwarp = afni.NwarpApply() +# >>> nwarp.inputs.in_file = 'Fred+orig' +# >>> nwarp.inputs.master = 'NWARP' +# >>> nwarp.inputs.warp = "'Fred_WARP+tlrc Fred.Xaff12.1D'" +# >>> nwarp.cmdline +# "3dNwarpApply -source Fred+orig -interp wsinc5 -master NWARP -prefix Fred+orig_Nwarp -nwarp 'Fred_WARP+tlrc Fred.Xaff12.1D'" +# >>> res = nwarp.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> nwarp = afni.NwarpApply() -# >>> nwarp.inputs.in_file = 'Fred+orig' -# >>> nwarp.inputs.master = 'NWARP' -# >>> nwarp.inputs.warp = "'Fred_WARP+tlrc Fred.Xaff12.1D'" -# >>> nwarp.cmdline -# "3dNwarpApply -source Fred+orig -interp wsinc5 -master NWARP -prefix Fred+orig_Nwarp -nwarp 'Fred_WARP+tlrc Fred.Xaff12.1D'" -# >>> res = nwarp.run() # doctest: +SKIP # -# task_name: NwarpApply nipype_name: NwarpApply nipype_module: nipype.interfaces.afni.utils @@ -40,9 +40,6 @@ inputs: # passed to the field in the automatically generated unittests. 
master: generic/file # type=file|default=: the name of the master dataset, which defines the output grid - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +96,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,12 +115,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: '"Fred+orig"' # type=traitcompound|default=None: the name of the dataset to be warped can be multiple datasets - master: - # type=file|default=: the name of the master dataset, which defines the output grid warp: '"''Fred_WARP+tlrc Fred.Xaff12.1D''"' # type=string|default='': the name of the warp dataset. 
multiple warps can be concatenated (make sure they exist) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dNwarpApply -source Fred+orig -interp wsinc5 -master NWARP -prefix Fred+orig_Nwarp -nwarp "Fred_WARP+tlrc Fred.Xaff12.1D" +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,12 +141,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"Fred+orig"' # type=traitcompound|default=None: the name of the dataset to be warped can be multiple datasets - master: '"NWARP"' - # type=file|default=: the name of the master dataset, which defines the output grid warp: '"''Fred_WARP+tlrc Fred.Xaff12.1D''"' # type=string|default='': the name of the warp dataset. multiple warps can be concatenated (make sure they exist) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/nwarp_cat.yaml b/example-specs/interface/nipype/afni/nwarp_cat.yaml index 170a1ed6..8bd88393 100644 --- a/example-specs/interface/nipype/afni/nwarp_cat.yaml +++ b/example-specs/interface/nipype/afni/nwarp_cat.yaml @@ -7,54 +7,54 @@ # ---- # Catenates (composes) 3D warps defined on a grid, OR via a matrix. # -# .. note:: +# .. note:: # -# * All transformations are from DICOM xyz (in mm) to DICOM xyz. +# * All transformations are from DICOM xyz (in mm) to DICOM xyz. # -# * Matrix warps are in files that end in '.1D' or in '.txt'. A matrix -# warp file should have 12 numbers in it, as output (for example), by -# '3dAllineate -1Dmatrix_save'. +# * Matrix warps are in files that end in '.1D' or in '.txt'. A matrix +# warp file should have 12 numbers in it, as output (for example), by +# '3dAllineate -1Dmatrix_save'. # -# * Nonlinear warps are in dataset files (AFNI .HEAD/.BRIK or NIfTI .nii) -# with 3 sub-bricks giving the DICOM order xyz grid displacements in mm. +# * Nonlinear warps are in dataset files (AFNI .HEAD/.BRIK or NIfTI .nii) +# with 3 sub-bricks giving the DICOM order xyz grid displacements in mm. # -# * If all the input warps are matrices, then the output is a matrix -# and will be written to the file 'prefix.aff12.1D'. -# Unless the prefix already contains the string '.1D', in which case -# the filename is just the prefix. +# * If all the input warps are matrices, then the output is a matrix +# and will be written to the file 'prefix.aff12.1D'. +# Unless the prefix already contains the string '.1D', in which case +# the filename is just the prefix. # -# * If 'prefix' is just 'stdout', then the output matrix is written -# to standard output. -# In any of these cases, the output format is 12 numbers in one row. +# * If 'prefix' is just 'stdout', then the output matrix is written +# to standard output. +# In any of these cases, the output format is 12 numbers in one row. 
# -# * If any of the input warps are datasets, they must all be defined on -# the same 3D grid! -# And of course, then the output will be a dataset on the same grid. -# However, you can expand the grid using the '-expad' option. +# * If any of the input warps are datasets, they must all be defined on +# the same 3D grid! +# And of course, then the output will be a dataset on the same grid. +# However, you can expand the grid using the '-expad' option. # -# * The order of operations in the final (output) warp is, for the -# case of 3 input warps: +# * The order of operations in the final (output) warp is, for the +# case of 3 input warps: # -# OUTPUT(x) = warp3( warp2( warp1(x) ) ) +# OUTPUT(x) = warp3( warp2( warp1(x) ) ) # -# That is, warp1 is applied first, then warp2, et cetera. -# The 3D x coordinates are taken from each grid location in the -# first dataset defined on a grid. +# That is, warp1 is applied first, then warp2, et cetera. +# The 3D x coordinates are taken from each grid location in the +# first dataset defined on a grid. # -# For complete details, see the `3dNwarpCat Documentation. -# `_ +# For complete details, see the `3dNwarpCat Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> nwarpcat = afni.NwarpCat() +# >>> nwarpcat.inputs.in_files = ['Q25_warp+tlrc.HEAD', ('IDENT', 'structural.nii')] +# >>> nwarpcat.inputs.out_file = 'Fred_total_WARP' +# >>> nwarpcat.cmdline +# "3dNwarpCat -interp wsinc5 -prefix Fred_total_WARP Q25_warp+tlrc.HEAD 'IDENT(structural.nii)'" +# >>> res = nwarpcat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> nwarpcat = afni.NwarpCat() -# >>> nwarpcat.inputs.in_files = ['Q25_warp+tlrc.HEAD', ('IDENT', 'structural.nii')] -# >>> nwarpcat.inputs.out_file = 'Fred_total_WARP' -# >>> nwarpcat.cmdline -# "3dNwarpCat -interp wsinc5 -prefix Fred_total_WARP Q25_warp+tlrc.HEAD 'IDENT(structural.nii)'" -# >>> res = nwarpcat.run() # doctest: +SKIP # -# task_name: NwarpCat nipype_name: NwarpCat nipype_module: nipype.interfaces.afni.utils @@ -69,9 +69,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -95,7 +92,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -126,7 +123,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,11 +142,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: '["Q25_warp+tlrc.HEAD", ("IDENT", "structural.nii")]' # type=list|default=[]: list of tuples of 3D warps and associated functions - out_file: '"Fred_total_WARP"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -164,7 +158,7 @@ 
tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dNwarpCat -interp wsinc5 -prefix Fred_total_WARP Q25_warp+tlrc.HEAD "IDENT(structural.nii)" +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -172,11 +166,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["Q25_warp+tlrc.HEAD", ("IDENT", "structural.nii")]' # type=list|default=[]: list of tuples of 3D warps and associated functions - out_file: '"Fred_total_WARP"' - # type=file: output file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/one_d_tool_py.yaml b/example-specs/interface/nipype/afni/one_d_tool_py.yaml index da52c7c3..50112bec 100644 --- a/example-specs/interface/nipype/afni/one_d_tool_py.yaml +++ b/example-specs/interface/nipype/afni/one_d_tool_py.yaml @@ -6,17 +6,17 @@ # Docs # ---- # This program is meant to read/manipulate/write/diagnose 1D datasets. -# Input can be specified using AFNI sub-brick[]/time{} selectors. +# Input can be specified using AFNI sub-brick[]/time{} selectors. 
# -# >>> from nipype.interfaces import afni -# >>> odt = afni.OneDToolPy() -# >>> odt.inputs.in_file = 'f1.1D' -# >>> odt.inputs.set_nruns = 3 -# >>> odt.inputs.demean = True -# >>> odt.inputs.out_file = 'motion_dmean.1D' -# >>> odt.cmdline # doctest: +ELLIPSIS -# 'python2 ...1d_tool.py -demean -infile f1.1D -write motion_dmean.1D -set_nruns 3' -# >>> res = odt.run() # doctest: +SKIP +# >>> from nipype.interfaces import afni +# >>> odt = afni.OneDToolPy() +# >>> odt.inputs.in_file = 'f1.1D' +# >>> odt.inputs.set_nruns = 3 +# >>> odt.inputs.demean = True +# >>> odt.inputs.out_file = 'motion_dmean.1D' +# >>> odt.cmdline # doctest: +ELLIPSIS +# 'python2 ...1d_tool.py -demean -infile f1.1D -write motion_dmean.1D -set_nruns 3' +# >>> res = odt.run() # doctest: +SKIP task_name: OneDToolPy nipype_name: OneDToolPy nipype_module: nipype.interfaces.afni.utils @@ -31,11 +31,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage-afni/one-d + in_file: fileformats.medimage_afni.OneD # type=file|default=: input file to OneDTool - out_file: Path - # type=file: output of 1D_tool.py - # type=file|default=: write the current 1D data to FILE show_cormat_warnings: generic/file # type=file|default=: Write cormat warnings to a file callable_defaults: @@ -54,14 +51,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-afni/one-d + out_file: generic/file # type=file: output of 1D_tool.py # type=file|default=: write the current 1D data to FILE callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -102,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,15 +118,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to OneDTool - set_nruns: '3' - # type=int|default=0: treat the input data as if it has nruns demean: 'True' # type=bool|default=False: demean each run (new mean of each run = 0.0) - out_file: '"motion_dmean.1D"' - # type=file: output of 1D_tool.py - # type=file|default=: write the current 1D data to FILE imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -152,15 +144,10 @@ doctests: # 
'.mock()' method of the corresponding class is used instead. in_file: '"f1.1D"' # type=file|default=: input file to OneDTool - set_nruns: '3' - # type=int|default=0: treat the input data as if it has nruns demean: 'True' # type=bool|default=False: demean each run (new mean of each run = 0.0) - out_file: '"motion_dmean.1D"' - # type=file: output of 1D_tool.py - # type=file|default=: write the current 1D data to FILE imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/outlier_count.yaml b/example-specs/interface/nipype/afni/outlier_count.yaml index 36bfb07f..ff037c20 100644 --- a/example-specs/interface/nipype/afni/outlier_count.yaml +++ b/example-specs/interface/nipype/afni/outlier_count.yaml @@ -6,21 +6,21 @@ # Docs # ---- # Calculates number of 'outliers' at each time point of a -# a 3D+time dataset. +# a 3D+time dataset. 
# -# For complete details, see the `3dToutcount Documentation -# `_ +# For complete details, see the `3dToutcount Documentation +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> toutcount = afni.OutlierCount() +# >>> toutcount.inputs.in_file = 'functional.nii' +# >>> toutcount.cmdline # doctest: +ELLIPSIS +# '3dToutcount -qthr 0.00100 functional.nii' +# >>> res = toutcount.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> toutcount = afni.OutlierCount() -# >>> toutcount.inputs.in_file = 'functional.nii' -# >>> toutcount.cmdline # doctest: +ELLIPSIS -# '3dToutcount -qthr 0.00100 functional.nii' -# >>> res = toutcount.run() # doctest: +SKIP # -# task_name: OutlierCount nipype_name: OutlierCount nipype_module: nipype.interfaces.afni.preprocess @@ -39,9 +39,6 @@ inputs: # type=file|default=: input dataset mask: generic/file # type=file|default=: only count voxels within the given mask - out_file: Path - # type=file: capture standard output - # type=file|default=: capture standard output outliers_file: generic/file # type=file|default=: output image file name callable_defaults: @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +103,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 
'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,7 +123,7 @@ tests: in_file: # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -141,7 +138,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dToutcount -qthr 0.00100 functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -150,7 +147,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/quality_index.yaml b/example-specs/interface/nipype/afni/quality_index.yaml index d79863fe..d7817f66 100644 --- a/example-specs/interface/nipype/afni/quality_index.yaml +++ b/example-specs/interface/nipype/afni/quality_index.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Computes a quality index for each sub-brick in a 3D+time dataset. -# The output is a 1D time series with the index for each sub-brick. -# The results are written to stdout. 
+# The output is a 1D time series with the index for each sub-brick. +# The results are written to stdout. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tqual = afni.QualityIndex() -# >>> tqual.inputs.in_file = 'functional.nii' -# >>> tqual.cmdline # doctest: +ELLIPSIS -# '3dTqual functional.nii > functional_tqual' -# >>> res = tqual.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tqual = afni.QualityIndex() +# >>> tqual.inputs.in_file = 'functional.nii' +# >>> tqual.cmdline # doctest: +ELLIPSIS +# '3dTqual functional.nii > functional_tqual' +# >>> res = tqual.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dTqual Documentation +# `_ # -# See Also -# -------- -# For complete details, see the `3dTqual Documentation -# `_ # -# task_name: QualityIndex nipype_name: QualityIndex nipype_module: nipype.interfaces.afni.preprocess @@ -42,9 +42,6 @@ inputs: # type=file|default=: input dataset mask: generic/file # type=file|default=: compute correlation only across masked voxels - out_file: Path - # type=file: file containing the captured standard output - # type=file|default=: capture standard output callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -68,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +96,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - 
list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,7 +116,7 @@ tests: in_file: # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,7 +131,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTqual functional.nii > functional_tqual +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -143,7 +140,7 @@ doctests: in_file: '"functional.nii"' # type=file|default=: input dataset imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/qwarp.yaml b/example-specs/interface/nipype/afni/qwarp.yaml index 11c539b2..288dd9fa 100644 --- a/example-specs/interface/nipype/afni/qwarp.yaml +++ b/example-specs/interface/nipype/afni/qwarp.yaml @@ -6,95 +6,95 @@ # Docs # ---- # -# Allineate your images prior to passing them to this workflow. +# Allineate your images prior to passing them to this workflow. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> qwarp = afni.Qwarp() -# >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' -# >>> qwarp.inputs.nopadWARP = True -# >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' -# >>> qwarp.inputs.plusminus = True -# >>> qwarp.cmdline -# '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix ppp_sub-01_dir-LR_epi -plusminus' -# >>> res = qwarp.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.Qwarp() +# >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' +# >>> qwarp.inputs.nopadWARP = True +# >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' +# >>> qwarp.inputs.plusminus = True +# >>> qwarp.cmdline +# '3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix ppp_sub-01_dir-LR_epi -plusminus' +# >>> res = qwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> qwarp = afni.Qwarp() -# >>> qwarp.inputs.in_file = 'structural.nii' -# >>> qwarp.inputs.base_file = 'mni.nii' -# >>> qwarp.inputs.resample = True -# >>> qwarp.cmdline -# '3dQwarp -base mni.nii -source structural.nii -prefix ppp_structural -resample' -# >>> res = qwarp.run() # doctest: +SKIP +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.Qwarp() +# >>> qwarp.inputs.in_file = 'structural.nii' +# >>> qwarp.inputs.base_file = 'mni.nii' +# >>> qwarp.inputs.resample = True +# >>> qwarp.cmdline +# '3dQwarp -base mni.nii -source structural.nii 
-prefix ppp_structural -resample' +# >>> res = qwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> qwarp = afni.Qwarp() -# >>> qwarp.inputs.in_file = 'structural.nii' -# >>> qwarp.inputs.base_file = 'epi.nii' -# >>> qwarp.inputs.out_file = 'anatSSQ.nii.gz' -# >>> qwarp.inputs.resample = True -# >>> qwarp.inputs.lpc = True -# >>> qwarp.inputs.verb = True -# >>> qwarp.inputs.iwarp = True -# >>> qwarp.inputs.blur = [0,3] -# >>> qwarp.cmdline -# '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz -resample -verb -lpc' +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.Qwarp() +# >>> qwarp.inputs.in_file = 'structural.nii' +# >>> qwarp.inputs.base_file = 'epi.nii' +# >>> qwarp.inputs.out_file = 'anatSSQ.nii.gz' +# >>> qwarp.inputs.resample = True +# >>> qwarp.inputs.lpc = True +# >>> qwarp.inputs.verb = True +# >>> qwarp.inputs.iwarp = True +# >>> qwarp.inputs.blur = [0,3] +# >>> qwarp.cmdline +# '3dQwarp -base epi.nii -blur 0.0 3.0 -source structural.nii -iwarp -prefix anatSSQ.nii.gz -resample -verb -lpc' # -# >>> res = qwarp.run() # doctest: +SKIP +# >>> res = qwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> qwarp = afni.Qwarp() -# >>> qwarp.inputs.in_file = 'structural.nii' -# >>> qwarp.inputs.base_file = 'mni.nii' -# >>> qwarp.inputs.duplo = True -# >>> qwarp.inputs.blur = [0,3] -# >>> qwarp.cmdline -# '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix ppp_structural' +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.Qwarp() +# >>> qwarp.inputs.in_file = 'structural.nii' +# >>> qwarp.inputs.base_file = 'mni.nii' +# >>> qwarp.inputs.duplo = True +# >>> qwarp.inputs.blur = [0,3] +# >>> qwarp.cmdline +# '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -prefix ppp_structural' # -# >>> res = qwarp.run() # doctest: +SKIP +# >>> res = qwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> 
qwarp = afni.Qwarp() -# >>> qwarp.inputs.in_file = 'structural.nii' -# >>> qwarp.inputs.base_file = 'mni.nii' -# >>> qwarp.inputs.duplo = True -# >>> qwarp.inputs.minpatch = 25 -# >>> qwarp.inputs.blur = [0,3] -# >>> qwarp.inputs.out_file = 'Q25' -# >>> qwarp.cmdline -# '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -minpatch 25 -prefix Q25' +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.Qwarp() +# >>> qwarp.inputs.in_file = 'structural.nii' +# >>> qwarp.inputs.base_file = 'mni.nii' +# >>> qwarp.inputs.duplo = True +# >>> qwarp.inputs.minpatch = 25 +# >>> qwarp.inputs.blur = [0,3] +# >>> qwarp.inputs.out_file = 'Q25' +# >>> qwarp.cmdline +# '3dQwarp -base mni.nii -blur 0.0 3.0 -duplo -source structural.nii -minpatch 25 -prefix Q25' # -# >>> res = qwarp.run() # doctest: +SKIP -# >>> qwarp2 = afni.Qwarp() -# >>> qwarp2.inputs.in_file = 'structural.nii' -# >>> qwarp2.inputs.base_file = 'mni.nii' -# >>> qwarp2.inputs.blur = [0,2] -# >>> qwarp2.inputs.out_file = 'Q11' -# >>> qwarp2.inputs.inilev = 7 -# >>> qwarp2.inputs.iniwarp = ['Q25_warp+tlrc.HEAD'] -# >>> qwarp2.cmdline -# '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_warp+tlrc.HEAD -prefix Q11' +# >>> res = qwarp.run() # doctest: +SKIP +# >>> qwarp2 = afni.Qwarp() +# >>> qwarp2.inputs.in_file = 'structural.nii' +# >>> qwarp2.inputs.base_file = 'mni.nii' +# >>> qwarp2.inputs.blur = [0,2] +# >>> qwarp2.inputs.out_file = 'Q11' +# >>> qwarp2.inputs.inilev = 7 +# >>> qwarp2.inputs.iniwarp = ['Q25_warp+tlrc.HEAD'] +# >>> qwarp2.cmdline +# '3dQwarp -base mni.nii -blur 0.0 2.0 -source structural.nii -inilev 7 -iniwarp Q25_warp+tlrc.HEAD -prefix Q11' # -# >>> res2 = qwarp2.run() # doctest: +SKIP -# >>> res2 = qwarp2.run() # doctest: +SKIP -# >>> qwarp3 = afni.Qwarp() -# >>> qwarp3.inputs.in_file = 'structural.nii' -# >>> qwarp3.inputs.base_file = 'mni.nii' -# >>> qwarp3.inputs.allineate = True -# >>> qwarp3.inputs.allineate_opts = '-cose lpa -verb' -# 
>>> qwarp3.cmdline -# "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii -prefix ppp_structural" +# >>> res2 = qwarp2.run() # doctest: +SKIP +# >>> res2 = qwarp2.run() # doctest: +SKIP +# >>> qwarp3 = afni.Qwarp() +# >>> qwarp3.inputs.in_file = 'structural.nii' +# >>> qwarp3.inputs.base_file = 'mni.nii' +# >>> qwarp3.inputs.allineate = True +# >>> qwarp3.inputs.allineate_opts = '-cose lpa -verb' +# >>> qwarp3.cmdline +# "3dQwarp -allineate -allineate_opts '-cose lpa -verb' -base mni.nii -source structural.nii -prefix ppp_structural" # -# >>> res3 = qwarp3.run() # doctest: +SKIP +# >>> res3 = qwarp3.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dQwarp Documentation. +# `__ # -# See Also -# -------- -# For complete details, see the `3dQwarp Documentation. -# `__ # -# task_name: Qwarp nipype_name: Qwarp nipype_module: nipype.interfaces.afni.preprocess @@ -109,7 +109,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - base_file: medimage/nifti1,medimage/nifti-gz + base_file: medimage/nifti-gz # type=file|default=: Base image (opposite phase encoding direction than source image). emask: generic/file # type=file|default=: Here, 'ee' is a dataset to specify a mask of voxelsto EXCLUDE from the analysis -- all voxels in 'ee'that are NONZERO will not be used in the alignment.The base image always automasked -- the emask isextra, to indicate voxels you definitely DON'T wantincluded in the matching process, even if they areinside the brain. @@ -117,12 +117,8 @@ inputs: # type=file|default=: This option provides an alternate way to specify the patch grid sizes used in the warp optimization process. 
'gl' is a 1D file with a list of patches to use -- in most cases, you will want to use it in the following form: ``-gridlist '1D: 0 151 101 75 51'`` * Here, a 0 patch size means the global domain. Patch sizes otherwise should be odd integers >= 5. * If you use the '0' patch size again after the first position, you will actually get an iteration at the size of the default patch level 1, where the patch sizes are 75% of the volume dimension. There is no way to force the program to literally repeat the sui generis step of lev=0. in_file: medimage/nifti1,medimage/nifti-gz # type=file|default=: Source image (opposite phase encoding direction than base image). - iniwarp: medimage-afni/head+list-of + iniwarp: generic/file+list-of # type=list|default=[]: A dataset with an initial nonlinear warp to use. * If this option is not used, the initial warp is the identity. * You can specify a catenation of warps (in quotes) here, as in program 3dNwarpApply. * As a special case, if you just input an affine matrix in a .1D file, that will work also -- it is treated as giving the initial warp via the string "IDENT(base_dataset) matrix_file.aff12.1D". * You CANNOT use this option with -duplo !! * -iniwarp is usually used with -inilev to re-start 3dQwarp from a previous stopping point. - out_file: Path - # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. 
* To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... - out_weight_file: Path - # type=file|default=: Write the weight volume to disk as a dataset weight: generic/file # type=file|default=: Instead of computing the weight from the base dataset,directly input the weight volume from dataset 'www'.Useful if you know what over parts of the base image youwant to emphasize or de-emphasize the matching functional. 
callable_defaults: @@ -155,7 +151,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -192,6 +188,8 @@ tests: # type=file|default=: Instead of computing the weight from the base dataset,directly input the weight volume from dataset 'www'.Useful if you know what over parts of the base image youwant to emphasize or de-emphasize the matching functional. wball: # type=list|default=[]: "``-wball x y z r f`` Enhance automatic weight from '-useweight' by a factor of 1+f\*Gaussian(FWHM=r) centered in the base image at DICOM coordinates (x,y,z) and with radius 'r'. The goal of this option is to try and make the alignment better in a specific part of the brain. Example: -wball 0 14 6 30 40 to emphasize the thalamic area (in MNI/Talairach space). * The 'r' parameter must be positive! * The 'f' parameter must be between 1 and 100 (inclusive). * '-wball' does nothing if you input your own weight with the '-weight' option. * '-wball' does change the binary weight created by the '-noweight' option. * You can only use '-wball' once in a run of 3dQwarp. **The effect of '-wball' is not dramatic.** The example above makes the average brain image across a collection of subjects a little sharper in the thalamic area, which might have some small value. If you care enough about alignment to use '-wball', then you should examine the results from 3dQwarp for each subject, to see if the alignments are good enough for your purposes. 
+ bandpass: + # type=tuple|default=None: wmask: # type=tuple|default=None: Similar to '-wball', but here, you provide a dataset 'ws' that indicates where to increase the weight. * The 'ws' dataset must be on the same 3D grid as the base dataset. * 'ws' is treated as a mask -- it only matters where it is nonzero -- otherwise, the values inside are not used. * After 'ws' comes the factor 'f' by which to increase the automatically computed weight. Where 'ws' is nonzero, the weighting will be multiplied by (1+f). * As with '-wball', the factor 'f' should be between 1 and 100. out_weight_file: @@ -265,7 +263,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -284,14 +282,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - nopadWARP: 'True' - # type=bool|default=False: If for some reason you require the warp volume tomatch the base volume, then use this option to have the outputWARP dataset(s) truncated. base_file: # type=file|default=: Base image (opposite phase encoding direction than source image). - plusminus: 'True' - # type=bool|default=False: Normally, the warp displacements dis(x) are defined to match base(x) to source(x+dis(x)). With this option, the match is between base(x-dis(x)) and source(x+dis(x)) -- the two images 'meet in the middle'. 
* One goal is to mimic the warping done to MRI EPI data by field inhomogeneities, when registering between a 'blip up' and a 'blip down' down volume, which will have opposite distortions. * Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x)) wherever we see x, we have base(x) matches source(Wp(INV(Wm(x)))); that is, the warp V(x) that one would get from the 'usual' way of running 3dQwarp is V(x) = Wp(INV(Wm(x))). * Conversely, we can calculate Wp(x) in terms of V(x) as follows: If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2; then Wp(x) = V(INV(Vh(x))) * With the above formulas, it is possible to compute Wp(x) from V(x) and vice-versa, using program 3dNwarpCalc. The requisite commands are left as an exercise for the aspiring AFNI Jedi Master. * You can use the semi-secret '-pmBASE' option to get the V(x) warp and the source dataset warped to base space, in addition to the Wp(x) '_PLUS' and Wm(x) '_MINUS' warps. * Alas: -plusminus does not work with -duplo or -allineate :-( * However, you can use -iniwarp with -plusminus :-) * The outputs have _PLUS (from the source dataset) and _MINUS (from the base dataset) in their filenames, in addition to the prefix. The -iwarp option, if present, will be ignored. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -310,12 +304,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). 
resample: 'True' # type=bool|default=False: This option simply resamples the source dataset to match the base dataset grid. You can use this if the two datasets overlap well (as seen in the AFNI GUI), but are not on the same 3D grid. * If they don't overlap well, allineate them first * The reampling here is done with the 'wsinc5' method, which has very little blurring artifact. * If the base and source datasets ARE on the same 3D grid, then the -resample option will be ignored. * You CAN use -resample with these 3dQwarp options: -plusminus -inilev -iniwarp -duplo imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -334,22 +326,14 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). out_file: '"anatSSQ.nii.gz"' # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. 
* To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... - resample: 'True' - # type=bool|default=False: This option simply resamples the source dataset to match the base dataset grid. You can use this if the two datasets overlap well (as seen in the AFNI GUI), but are not on the same 3D grid. * If they don't overlap well, allineate them first * The reampling here is done with the 'wsinc5' method, which has very little blurring artifact. * If the base and source datasets ARE on the same 3D grid, then the -resample option will be ignored. 
* You CAN use -resample with these 3dQwarp options: -plusminus -inilev -iniwarp -duplo lpc: 'True' # type=bool|default=False: Local Pearson minimization (i.e., EPI-T1 registration)This option has not be extensively testedIf you use '-lpc', then '-maxlev 0' is automatically set.If you want to go to more refined levels, you can set '-maxlev'This should be set up to have lpc as the second to last argumentand maxlev as the second to last argument, as needed by AFNIUsing maxlev > 1 is not recommended for EPI-T1 alignment. - verb: 'True' - # type=bool|default=False: more detailed description of the process iwarp: 'True' # type=bool|default=False: Do compute and save the _WARPINV file. - blur: '[0,3]' - # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -368,14 +352,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). duplo: 'True' # type=bool|default=False: Start off with 1/2 scale versions of the volumes," for getting a speedy coarse first alignment." * Then scales back up to register the full volumes." The goal is greater speed, and it seems to help this" positively piggish program to be more expeditious." * However, accuracy is somewhat lower with '-duplo'," for reasons that currently elude Zhark; for this reason," the Emperor does not usually use '-duplo'. - blur: '[0,3]' - # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. 
* If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -394,18 +374,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). duplo: 'True' # type=bool|default=False: Start off with 1/2 scale versions of the volumes," for getting a speedy coarse first alignment." * Then scales back up to register the full volumes." The goal is greater speed, and it seems to help this" positively piggish program to be more expeditious." * However, accuracy is somewhat lower with '-duplo'," for reasons that currently elude Zhark; for this reason," the Emperor does not usually use '-duplo'. - minpatch: '25' - # type=int|default=0: The value of mm should be an odd integer. * The default value of mm is 25. * For more accurate results than mm=25, try 19 or 13. * The smallest allowed patch size is 5. * You may want stop at a larger patch size (say 7 or 9) and use the -Qfinal option to run that final level with quintic warps, which might run faster and provide the same degree of warp detail. 
* Trying to make two different brain volumes match in fine detail is usually a waste of time, especially in humans. There is too much variability in anatomy to match gyrus to gyrus accurately. For this reason, the default minimum patch size is 25 voxels. Using a smaller '-minpatch' might try to force the warp to match features that do not match, and the result can be useless image distortions -- another reason to LOOK AT THE RESULTS. blur: '[0,3]' # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. - out_file: '"Q25"' - # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. 
* To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -424,18 +398,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). 
blur: '[0,2]' # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. - out_file: '"Q11"' - # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. * To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. 
* 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... inilev: '7' # type=int|default=0: The initial refinement 'level' at which to start. * Usually used with -iniwarp; CANNOT be used with -duplo. * The combination of -inilev and -iniwarp lets you take the results of a previous 3dQwarp run and refine them further: Note that the source dataset in the second run is the SAME as in the first run. If you don't see why this is necessary, then you probably need to seek help from an AFNI guru. - iniwarp: - # type=list|default=[]: A dataset with an initial nonlinear warp to use. * If this option is not used, the initial warp is the identity. * You can specify a catenation of warps (in quotes) here, as in program 3dNwarpApply. * As a special case, if you just input an affine matrix in a .1D file, that will work also -- it is treated as giving the initial warp via the string "IDENT(base_dataset) matrix_file.aff12.1D". * You CANNOT use this option with -duplo !! * -iniwarp is usually used with -inilev to re-start 3dQwarp from a previous stopping point. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -454,14 +422,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: - # type=file|default=: Base image (opposite phase encoding direction than source image). allineate: 'True' # type=bool|default=False: This option will make 3dQwarp run 3dAllineate first, to align the source dataset to the base with an affine transformation. It will then use that alignment as a starting point for the nonlinear warping. - allineate_opts: '"-cose lpa -verb"' - # type=str|default='': add extra options to the 3dAllineate command to be run by 3dQwarp. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -476,7 +440,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dQwarp -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP -prefix ppp_sub-01_dir-LR_epi -plusminus +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -484,14 +448,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"sub-01_dir-LR_epi.nii.gz"' # type=file|default=: Source image (opposite phase encoding direction than base image). - nopadWARP: 'True' - # type=bool|default=False: If for some reason you require the warp volume tomatch the base volume, then use this option to have the outputWARP dataset(s) truncated. base_file: '"sub-01_dir-RL_epi.nii.gz"' # type=file|default=: Base image (opposite phase encoding direction than source image). - plusminus: 'True' - # type=bool|default=False: Normally, the warp displacements dis(x) are defined to match base(x) to source(x+dis(x)). With this option, the match is between base(x-dis(x)) and source(x+dis(x)) -- the two images 'meet in the middle'. * One goal is to mimic the warping done to MRI EPI data by field inhomogeneities, when registering between a 'blip up' and a 'blip down' down volume, which will have opposite distortions. * Define Wp(x) = x+dis(x) and Wm(x) = x-dis(x). Then since base(Wm(x)) matches source(Wp(x)), by substituting INV(Wm(x)) wherever we see x, we have base(x) matches source(Wp(INV(Wm(x)))); that is, the warp V(x) that one would get from the 'usual' way of running 3dQwarp is V(x) = Wp(INV(Wm(x))). * Conversely, we can calculate Wp(x) in terms of V(x) as follows: If V(x) = x + dv(x), define Vh(x) = x + dv(x)/2; then Wp(x) = V(INV(Vh(x))) * With the above formulas, it is possible to compute Wp(x) from V(x) and vice-versa, using program 3dNwarpCalc. The requisite commands are left as an exercise for the aspiring AFNI Jedi Master. * You can use the semi-secret '-pmBASE' option to get the V(x) warp and the source dataset warped to base space, in addition to the Wp(x) '_PLUS' and Wm(x) '_MINUS' warps. 
* Alas: -plusminus does not work with -duplo or -allineate :-( * However, you can use -iniwarp with -plusminus :-) * The outputs have _PLUS (from the source dataset) and _MINUS (from the base dataset) in their filenames, in addition to the prefix. The -iwarp option, if present, will be ignored. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -503,12 +463,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"mni.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). resample: 'True' # type=bool|default=False: This option simply resamples the source dataset to match the base dataset grid. You can use this if the two datasets overlap well (as seen in the AFNI GUI), but are not on the same 3D grid. * If they don't overlap well, allineate them first * The reampling here is done with the 'wsinc5' method, which has very little blurring artifact. * If the base and source datasets ARE on the same 3D grid, then the -resample option will be ignored. * You CAN use -resample with these 3dQwarp options: -plusminus -inilev -iniwarp -duplo imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -520,22 +478,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"epi.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). out_file: '"anatSSQ.nii.gz"' # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. * To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. 
* You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... - resample: 'True' - # type=bool|default=False: This option simply resamples the source dataset to match the base dataset grid. You can use this if the two datasets overlap well (as seen in the AFNI GUI), but are not on the same 3D grid. * If they don't overlap well, allineate them first * The reampling here is done with the 'wsinc5' method, which has very little blurring artifact. * If the base and source datasets ARE on the same 3D grid, then the -resample option will be ignored. * You CAN use -resample with these 3dQwarp options: -plusminus -inilev -iniwarp -duplo lpc: 'True' # type=bool|default=False: Local Pearson minimization (i.e., EPI-T1 registration)This option has not be extensively testedIf you use '-lpc', then '-maxlev 0' is automatically set.If you want to go to more refined levels, you can set '-maxlev'This should be set up to have lpc as the second to last argumentand maxlev as the second to last argument, as needed by AFNIUsing maxlev > 1 is not recommended for EPI-T1 alignment. - verb: 'True' - # type=bool|default=False: more detailed description of the process iwarp: 'True' # type=bool|default=False: Do compute and save the _WARPINV file. - blur: '[0,3]' - # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. 
This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -547,14 +497,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"mni.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). duplo: 'True' # type=bool|default=False: Start off with 1/2 scale versions of the volumes," for getting a speedy coarse first alignment." * Then scales back up to register the full volumes." The goal is greater speed, and it seems to help this" positively piggish program to be more expeditious." * However, accuracy is somewhat lower with '-duplo'," for reasons that currently elude Zhark; for this reason," the Emperor does not usually use '-duplo'. - blur: '[0,3]' - # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. 
e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -566,18 +512,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"mni.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). duplo: 'True' # type=bool|default=False: Start off with 1/2 scale versions of the volumes," for getting a speedy coarse first alignment." * Then scales back up to register the full volumes." The goal is greater speed, and it seems to help this" positively piggish program to be more expeditious." * However, accuracy is somewhat lower with '-duplo'," for reasons that currently elude Zhark; for this reason," the Emperor does not usually use '-duplo'. - minpatch: '25' - # type=int|default=0: The value of mm should be an odd integer. * The default value of mm is 25. 
* For more accurate results than mm=25, try 19 or 13. * The smallest allowed patch size is 5. * You may want stop at a larger patch size (say 7 or 9) and use the -Qfinal option to run that final level with quintic warps, which might run faster and provide the same degree of warp detail. * Trying to make two different brain volumes match in fine detail is usually a waste of time, especially in humans. There is too much variability in anatomy to match gyrus to gyrus accurately. For this reason, the default minimum patch size is 25 voxels. Using a smaller '-minpatch' might try to force the warp to match features that do not match, and the result can be useless image distortions -- another reason to LOOK AT THE RESULTS. blur: '[0,3]' # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. - out_file: '"Q25"' - # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. 
See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. * To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -589,18 +529,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"mni.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). blur: '[0,2]' # type=list|default=[]: Gaussian blur the input images by 'bb' (FWHM) voxels before doing the alignment (the output dataset will not be blurred). The default is 2.345 (for no good reason). * Optionally, you can provide 2 values for 'bb', and then the first one is applied to the base volume, the second to the source volume. e.g., '-blur 0 3' to skip blurring the base image (if the base is a blurry template, for example). * A negative blur radius means to use 3D median filtering, rather than Gaussian blurring. This type of filtering will better preserve edges, which can be important in alignment. * If the base is a template volume that is already blurry, you probably don't want to blur it again, but blurring the source volume a little is probably a good idea, to help the program avoid trying to match tiny features. * Note that -duplo will blur the volumes some extra amount for the initial small-scale warping, to make that phase of the program converge more rapidly. - out_file: '"Q11"' - # type=file|default=: Sets the prefix/suffix for the output datasets. * The source dataset is warped to match the base and gets prefix 'ppp'. (Except if '-plusminus' is used * The final interpolation to this output dataset is done using the 'wsinc5' method. See the output of 3dAllineate -HELP (in the "Modifying '-final wsinc5'" section) for the lengthy technical details. * The 3D warp used is saved in a dataset with prefix 'ppp_WARP' -- this dataset can be used with 3dNwarpApply and 3dNwarpCat, for example. 
* To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) Another way to think of this warp is that it 'pulls' values back from source space to base space. * 3dNwarpApply would use 'ppp_WARP' to transform datasets aligned with the source dataset to be aligned with the base dataset. **If you do NOT want this warp saved, use the option '-nowarp'**. (However, this warp is usually the most valuable possible output!) * If you want to calculate and save the inverse 3D warp, use the option '-iwarp'. This inverse warp will then be saved in a dataset with prefix 'ppp_WARPINV'. * This inverse warp could be used to transform data from base space to source space, if you need to do such an operation. * You can easily compute the inverse later, say by a command like 3dNwarpCat -prefix Z_WARPINV 'INV(Z_WARP+tlrc)' or the inverse can be computed as needed in 3dNwarpApply, like 3dNwarpApply -nwarp 'INV(Z_WARP+tlrc)' -source Dataset.nii ... inilev: '7' # type=int|default=0: The initial refinement 'level' at which to start. * Usually used with -iniwarp; CANNOT be used with -duplo. * The combination of -inilev and -iniwarp lets you take the results of a previous 3dQwarp run and refine them further: Note that the source dataset in the second run is the SAME as in the first run. If you don't see why this is necessary, then you probably need to seek help from an AFNI guru. - iniwarp: '["Q25_warp+tlrc.HEAD"]' - # type=list|default=[]: A dataset with an initial nonlinear warp to use. * If this option is not used, the initial warp is the identity. * You can specify a catenation of warps (in quotes) here, as in program 3dNwarpApply. 
* As a special case, if you just input an affine matrix in a .1D file, that will work also -- it is treated as giving the initial warp via the string "IDENT(base_dataset) matrix_file.aff12.1D". * You CANNOT use this option with -duplo !! * -iniwarp is usually used with -inilev to re-start 3dQwarp from a previous stopping point. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -612,14 +546,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: Source image (opposite phase encoding direction than base image). - base_file: '"mni.nii"' - # type=file|default=: Base image (opposite phase encoding direction than source image). allineate: 'True' # type=bool|default=False: This option will make 3dQwarp run 3dAllineate first, to align the source dataset to the base with an affine transformation. It will then use that alignment as a starting point for the nonlinear warping. - allineate_opts: '"-cose lpa -verb"' - # type=str|default='': add extra options to the 3dAllineate command to be run by 3dQwarp. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/qwarp_plus_minus.yaml b/example-specs/interface/nipype/afni/qwarp_plus_minus.yaml index 8e5bf757..d05d49c8 100644 --- a/example-specs/interface/nipype/afni/qwarp_plus_minus.yaml +++ b/example-specs/interface/nipype/afni/qwarp_plus_minus.yaml @@ -6,25 +6,25 @@ # Docs # ---- # A version of 3dQwarp for performing field susceptibility correction -# using two images with opposing phase encoding directions. +# using two images with opposing phase encoding directions. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> qwarp = afni.QwarpPlusMinus() -# >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' -# >>> qwarp.inputs.nopadWARP = True -# >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' -# >>> qwarp.cmdline -# '3dQwarp -prefix Qwarp.nii.gz -plusminus -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP' -# >>> res = warp.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> qwarp = afni.QwarpPlusMinus() +# >>> qwarp.inputs.in_file = 'sub-01_dir-LR_epi.nii.gz' +# >>> qwarp.inputs.nopadWARP = True +# >>> qwarp.inputs.base_file = 'sub-01_dir-RL_epi.nii.gz' +# >>> qwarp.cmdline +# '3dQwarp -prefix Qwarp.nii.gz -plusminus -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP' +# >>> res = warp.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dQwarp Documentation. +# `__ # -# See Also -# -------- -# For complete details, see the `3dQwarp Documentation. -# `__ # -# task_name: QwarpPlusMinus nipype_name: QwarpPlusMinus nipype_module: nipype.interfaces.afni.preprocess @@ -49,10 +49,6 @@ inputs: # type=file|default=: Source image (opposite phase encoding direction than base image). iniwarp: generic/file+list-of # type=list|default=[]: A dataset with an initial nonlinear warp to use. * If this option is not used, the initial warp is the identity. 
* You can specify a catenation of warps (in quotes) here, as in program 3dNwarpApply. * As a special case, if you just input an affine matrix in a .1D file, that will work also -- it is treated as giving the initial warp via the string "IDENT(base_dataset) matrix_file.aff12.1D". * You CANNOT use this option with -duplo !! * -iniwarp is usually used with -inilev to re-start 3dQwarp from a previous stopping point. - out_file: Path - # type=file|default='Qwarp.nii.gz': Output file - out_weight_file: Path - # type=file|default=: Write the weight volume to disk as a dataset source_file: generic/file # type=file|default=: Source image (opposite phase encoding direction than base image) weight: generic/file @@ -87,7 +83,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -128,6 +124,8 @@ tests: # type=file|default=: Instead of computing the weight from the base dataset,directly input the weight volume from dataset 'www'.Useful if you know what over parts of the base image youwant to emphasize or de-emphasize the matching functional. wball: # type=list|default=[]: "``-wball x y z r f`` Enhance automatic weight from '-useweight' by a factor of 1+f\*Gaussian(FWHM=r) centered in the base image at DICOM coordinates (x,y,z) and with radius 'r'. The goal of this option is to try and make the alignment better in a specific part of the brain. Example: -wball 0 14 6 30 40 to emphasize the thalamic area (in MNI/Talairach space). * The 'r' parameter must be positive! * The 'f' parameter must be between 1 and 100 (inclusive). 
* '-wball' does nothing if you input your own weight with the '-weight' option. * '-wball' does change the binary weight created by the '-noweight' option. * You can only use '-wball' once in a run of 3dQwarp. **The effect of '-wball' is not dramatic.** The example above makes the average brain image across a collection of subjects a little sharper in the thalamic area, which might have some small value. If you care enough about alignment to use '-wball', then you should examine the results from 3dQwarp for each subject, to see if the alignments are good enough for your purposes. + bandpass: + # type=tuple|default=None: wmask: # type=tuple|default=None: Similar to '-wball', but here, you provide a dataset 'ws' that indicates where to increase the weight. * The 'ws' dataset must be on the same 3D grid as the base dataset. * 'ws' is treated as a mask -- it only matters where it is nonzero -- otherwise, the values inside are not used. * After 'ws' comes the factor 'f' by which to increase the automatically computed weight. Where 'ws' is nonzero, the weighting will be multiplied by (1+f). * As with '-wball', the factor 'f' should be between 1 and 100. out_weight_file: @@ -199,7 +197,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -218,12 +216,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Source image (opposite phase encoding direction than base image). 
- nopadWARP: 'True' - # type=bool|default=False: If for some reason you require the warp volume tomatch the base volume, then use this option to have the outputWARP dataset(s) truncated. base_file: # type=file|default=: Base image (opposite phase encoding direction than source image). imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -238,7 +234,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dQwarp -prefix Qwarp.nii.gz -plusminus -base sub-01_dir-RL_epi.nii.gz -source sub-01_dir-LR_epi.nii.gz -nopadWARP +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -246,12 +242,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"sub-01_dir-LR_epi.nii.gz"' # type=file|default=: Source image (opposite phase encoding direction than base image). - nopadWARP: 'True' - # type=bool|default=False: If for some reason you require the warp volume tomatch the base volume, then use this option to have the outputWARP dataset(s) truncated. base_file: '"sub-01_dir-RL_epi.nii.gz"' # type=file|default=: Base image (opposite phase encoding direction than source image). 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/re_ho.yaml b/example-specs/interface/nipype/afni/re_ho.yaml index ccc8263d..0d40d49a 100644 --- a/example-specs/interface/nipype/afni/re_ho.yaml +++ b/example-specs/interface/nipype/afni/re_ho.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Compute regional homogeneity for a given neighbourhood.l, -# based on a local neighborhood of that voxel. +# based on a local neighborhood of that voxel. # -# For complete details, see the `3dReHo Documentation. -# `_ +# For complete details, see the `3dReHo Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> reho = afni.ReHo() +# >>> reho.inputs.in_file = 'functional.nii' +# >>> reho.inputs.out_file = 'reho.nii.gz' +# >>> reho.inputs.neighborhood = 'vertices' +# >>> reho.cmdline +# '3dReHo -prefix reho.nii.gz -inset functional.nii -nneigh 27' +# >>> res = reho.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> reho = afni.ReHo() -# >>> reho.inputs.in_file = 'functional.nii' -# >>> reho.inputs.out_file = 'reho.nii.gz' -# >>> reho.inputs.neighborhood = 'vertices' -# >>> reho.cmdline -# '3dReHo -prefix reho.nii.gz -inset functional.nii -nneigh 27' -# >>> res = reho.run() # doctest: +SKIP # -# task_name: ReHo nipype_name: ReHo nipype_module: nipype.interfaces.afni.utils @@ -43,9 +43,6 @@ inputs: # type=file|default=: a set of ROIs, each labelled with distinct integers. ReHo will then be calculated per ROI. 
mask_file: generic/file # type=file|default=: Mask within which ReHo should be calculated voxelwise - out_file: Path - # type=file: Voxelwise regional homogeneity map - # type=file|default=: Output dataset. callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -62,7 +59,7 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/nifti-gz + out_file: generic/file # type=file: Voxelwise regional homogeneity map # type=file|default=: Output dataset. out_vals: generic/file @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -102,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,13 +118,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input dataset - out_file: '"reho.nii.gz"' - # type=file: Voxelwise regional homogeneity map - # type=file|default=: Output 
dataset. neighborhood: '"vertices"' # type=enum|default='faces'|allowed['edges','faces','vertices']: voxels in neighborhood. can be: ``faces`` (for voxel and 6 facewise neighbors, only), ``edges`` (for voxel and 18 face- and edge-wise neighbors), ``vertices`` (for voxel and 26 face-, edge-, and node-wise neighbors). imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -142,7 +136,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dReHo -prefix reho.nii.gz -inset functional.nii -nneigh 27 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -150,13 +144,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input dataset - out_file: '"reho.nii.gz"' - # type=file: Voxelwise regional homogeneity map - # type=file|default=: Output dataset. neighborhood: '"vertices"' # type=enum|default='faces'|allowed['edges','faces','vertices']: voxels in neighborhood. can be: ``faces`` (for voxel and 6 facewise neighbors, only), ``edges`` (for voxel and 18 face- and edge-wise neighbors), ``vertices`` (for voxel and 26 face-, edge-, and node-wise neighbors). 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/refit.yaml b/example-specs/interface/nipype/afni/refit.yaml index 7e544735..1d838c28 100644 --- a/example-specs/interface/nipype/afni/refit.yaml +++ b/example-specs/interface/nipype/afni/refit.yaml @@ -7,27 +7,27 @@ # ---- # Changes some of the information inside a 3D dataset's header # -# For complete details, see the `3drefit Documentation. -# `_ +# For complete details, see the `3drefit Documentation. +# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> refit = afni.Refit() -# >>> refit.inputs.in_file = 'structural.nii' -# >>> refit.inputs.deoblique = True -# >>> refit.cmdline -# '3drefit -deoblique structural.nii' -# >>> res = refit.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> refit = afni.Refit() +# >>> refit.inputs.in_file = 'structural.nii' +# >>> refit.inputs.deoblique = True +# >>> refit.cmdline +# '3drefit -deoblique structural.nii' +# >>> res = refit.run() # doctest: +SKIP +# +# >>> refit_2 = afni.Refit() +# >>> refit_2.inputs.in_file = 'structural.nii' +# >>> refit_2.inputs.atrfloat = ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0'") +# >>> refit_2.cmdline +# "3drefit -atrfloat IJK_TO_DICOM_REAL '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' structural.nii" +# >>> res = refit_2.run() # doctest: +SKIP # -# >>> refit_2 = afni.Refit() -# >>> refit_2.inputs.in_file = 'structural.nii' -# >>> refit_2.inputs.atrfloat = ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0'") -# >>> refit_2.cmdline -# "3drefit -atrfloat IJK_TO_DICOM_REAL '1 0.2 0 
0 -0.2 1 0 0 0 0 1 0' structural.nii" -# >>> res = refit_2.run() # doctest: +SKIP # -# task_name: Refit nipype_name: Refit nipype_module: nipype.interfaces.afni.utils @@ -68,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -114,7 +114,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -133,10 +133,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3drefit - deoblique: 'True' - # type=bool|default=False: replace current transformation matrix with cardinal matrix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,10 +153,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3drefit - atrfloat: ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 
0 0 0 1 0'") - # type=tuple|default=('', ''): Create or modify floating point attributes. The input values may be specified as a single string in quotes or as a 1D filename or string, example '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' or flipZ.1D or '1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -173,7 +169,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3drefit -deoblique structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -181,10 +177,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3drefit - deoblique: 'True' - # type=bool|default=False: replace current transformation matrix with cardinal matrix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -196,10 +190,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"structural.nii"' # type=file|default=: input file to 3drefit - atrfloat: ("IJK_TO_DICOM_REAL", "'1 0.2 0 0 -0.2 1 0 0 0 0 1 0'") - # type=tuple|default=('', ''): Create or modify floating point attributes. The input values may be specified as a single string in quotes or as a 1D filename or string, example '1 0.2 0 0 -0.2 1 0 0 0 0 1 0' or flipZ.1D or '1D:1,0.2,2@0,-0.2,1,2@0,2@0,1,0' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/remlfit.yaml b/example-specs/interface/nipype/afni/remlfit.yaml index fc213b35..8132a644 100644 --- a/example-specs/interface/nipype/afni/remlfit.yaml +++ b/example-specs/interface/nipype/afni/remlfit.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Performs Generalized least squares time series fit with Restricted -# Maximum Likelihood (REML) estimation of the temporal auto-correlation -# structure. +# Maximum Likelihood (REML) estimation of the temporal auto-correlation +# structure. # -# For complete details, see the `3dREMLfit Documentation. -# `_ +# For complete details, see the `3dREMLfit Documentation. 
+# `_ # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import afni +# >>> remlfit = afni.Remlfit() +# >>> remlfit.inputs.in_files = ['functional.nii', 'functional2.nii'] +# >>> remlfit.inputs.out_file = 'output.nii' +# >>> remlfit.inputs.matrix = 'output.1D' +# >>> remlfit.inputs.gltsym = [('SYM: +Lab1 -Lab2', 'TestSYM'), ('timeseries.txt', 'TestFile')] +# >>> remlfit.cmdline +# '3dREMLfit -gltsym "SYM: +Lab1 -Lab2" TestSYM -gltsym "timeseries.txt" TestFile -input "functional.nii functional2.nii" -matrix output.1D -Rbuck output.nii' +# >>> res = remlfit.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> remlfit = afni.Remlfit() -# >>> remlfit.inputs.in_files = ['functional.nii', 'functional2.nii'] -# >>> remlfit.inputs.out_file = 'output.nii' -# >>> remlfit.inputs.matrix = 'output.1D' -# >>> remlfit.inputs.gltsym = [('SYM: +Lab1 -Lab2', 'TestSYM'), ('timeseries.txt', 'TestFile')] -# >>> remlfit.cmdline -# '3dREMLfit -gltsym "SYM: +Lab1 -Lab2" TestSYM -gltsym "timeseries.txt" TestFile -input "functional.nii functional2.nii" -matrix output.1D -Rbuck output.nii' -# >>> res = remlfit.run() # doctest: +SKIP -# task_name: Remlfit nipype_name: Remlfit nipype_module: nipype.interfaces.afni.model @@ -45,57 +45,18 @@ inputs: # type=inputmultiobject|default=[]: file(s) to add baseline model columns to the matrix with this option. Each column in the specified file(s) will be appended to the matrix. File(s) must have at least as many rows as the matrix does. 
dsort: generic/file # type=file|default=: 4D dataset to be used as voxelwise baseline regressor - errts_file: Path - # type=file: output dataset for REML residuals = data - fitted model (if generated - # type=file|default=: output dataset for REML residuals = data - fitted model - fitts_file: Path - # type=file: output dataset for REML fitted model (if generated) - # type=file|default=: output dataset for REML fitted model - glt_file: Path - # type=file: output dataset for beta + statistics from the REML estimation, but ONLY for the GLTs added on the REMLfit command line itself via 'gltsym' (if generated) - # type=file|default=: output dataset for beta + statistics from the REML estimation, but ONLY for the GLTs added on the REMLfit command line itself via 'gltsym'; GLTs from Deconvolve's command line will NOT be included. in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: Read time series dataset mask: generic/file # type=file|default=: filename of 3D mask dataset; only data time series from within the mask will be analyzed; results for voxels outside the mask will be set to zero. matim: generic/file # type=file|default=: read a standard file as the matrix. You can use only Col as a name in GLTs with these nonstandard matrix input methods, since the other names come from the 'matrix' file. These mutually exclusive options are ignored if 'matrix' is used. 
- matrix: medimage-afni/one-d + matrix: fileformats.medimage_afni.OneD # type=file|default=: the design matrix file, which should have been output from Deconvolve via the 'x1D' option - obeta: Path - # type=file: dataset for beta weights from the OLSQ estimation (if generated) - # type=file|default=: dataset for beta weights from the OLSQ estimation - obuck: Path - # type=file: dataset for beta + statistics from the OLSQ estimation (if generated) - # type=file|default=: dataset for beta + statistics from the OLSQ estimation - oerrts: Path - # type=file: dataset for OLSQ residuals = data - fitted model (if generated - # type=file|default=: dataset for OLSQ residuals (data - fitted model) - ofitts: Path - # type=file: dataset for OLSQ fitted model (if generated) - # type=file|default=: dataset for OLSQ fitted model - oglt: Path - # type=file: dataset for beta + statistics from 'gltsym' options (if generated - # type=file|default=: dataset for beta + statistics from 'gltsym' options - out_file: Path - # type=file: dataset for beta + statistics from the REML estimation (if generated - # type=file|default=: output dataset for beta + statistics from the REML estimation; also contains the results of any GLT analysis requested in the Deconvolve setup, similar to the 'bucket' output from Deconvolve. This dataset does NOT get the betas (or statistics) of those regressors marked as 'baseline' in the matrix file. - ovar: Path - # type=file: dataset for OLSQ st.dev. parameter (if generated) - # type=file|default=: dataset for OLSQ st.dev. parameter (kind of boring) - rbeta_file: Path - # type=file: output dataset for beta weights from the REML estimation (if generated - # type=file|default=: output dataset for beta weights from the REML estimation, similar to the 'cbucket' output from Deconvolve. 
This dataset will contain all the beta weights, for baseline and stimulus regressors alike, unless the '-nobout' option is given -- in that case, this dataset will only get the betas for the stimulus regressors. slibase: generic/file+list-of # type=inputmultiobject|default=[]: similar to 'addbase' in concept, BUT each specified file must have an integer multiple of the number of slices in the input dataset(s); then, separate regression matrices are generated for each slice, with the first column of the file appended to the matrix for the first slice of the dataset, the second column of the file appended to the matrix for the first slice of the dataset, and so on. Intended to help model physiological noise in FMRI, or other effects you want to regress out that might change significantly in the inter-slice time intervals. This will slow the program down, and make it use a lot more memory (to hold all the matrix stuff). slibase_sm: generic/file+list-of # type=inputmultiobject|default=[]: similar to 'slibase', BUT each file much be in slice major order (i.e. all slice0 columns come first, then all slice1 columns, etc). - var_file: Path - # type=file: dataset for REML variance parameters (if generated) - # type=file|default=: output dataset for REML variance parameters - wherr_file: Path - # type=file: dataset for REML residual, whitened using the estimated ARMA(1,1) correlation matrix of the noise (if generated) - # type=file|default=: dataset for REML residual, whitened using the estimated ARMA(1,1) correlation matrix of the noise callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -113,7 +74,7 @@ outputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
errts_file: generic/file - # type=file: output dataset for REML residuals = data - fitted model (if generated + # type=file: output dataset for REML residuals = data - fitted model (if generated) # type=file|default=: output dataset for REML residuals = data - fitted model fitts_file: generic/file # type=file: output dataset for REML fitted model (if generated) @@ -128,22 +89,22 @@ outputs: # type=file: dataset for beta + statistics from the OLSQ estimation (if generated) # type=file|default=: dataset for beta + statistics from the OLSQ estimation oerrts: generic/file - # type=file: dataset for OLSQ residuals = data - fitted model (if generated + # type=file: dataset for OLSQ residuals = data - fitted model (if generated) # type=file|default=: dataset for OLSQ residuals (data - fitted model) ofitts: generic/file # type=file: dataset for OLSQ fitted model (if generated) # type=file|default=: dataset for OLSQ fitted model oglt: generic/file - # type=file: dataset for beta + statistics from 'gltsym' options (if generated + # type=file: dataset for beta + statistics from 'gltsym' options (if generated) # type=file|default=: dataset for beta + statistics from 'gltsym' options - out_file: medimage/nifti1 - # type=file: dataset for beta + statistics from the REML estimation (if generated + out_file: generic/file + # type=file: dataset for beta + statistics from the REML estimation (if generated) # type=file|default=: output dataset for beta + statistics from the REML estimation; also contains the results of any GLT analysis requested in the Deconvolve setup, similar to the 'bucket' output from Deconvolve. This dataset does NOT get the betas (or statistics) of those regressors marked as 'baseline' in the matrix file. ovar: generic/file # type=file: dataset for OLSQ st.dev. parameter (if generated) # type=file|default=: dataset for OLSQ st.dev. 
parameter (kind of boring) rbeta_file: generic/file - # type=file: output dataset for beta weights from the REML estimation (if generated + # type=file: output dataset for beta weights from the REML estimation (if generated) # type=file|default=: output dataset for beta weights from the REML estimation, similar to the 'cbucket' output from Deconvolve. This dataset will contain all the beta weights, for baseline and stimulus regressors alike, unless the '-nobout' option is given -- in that case, this dataset will only get the betas for the stimulus regressors. var_file: generic/file # type=file: dataset for REML variance parameters (if generated) @@ -155,7 +116,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -203,13 +164,13 @@ tests: gltsym: # type=list|default=[]: read a symbolic GLT from input file and associate it with a label. As in Deconvolve, you can also use the 'SYM:' method to provide the definition of the GLT directly as a string (e.g., with 'SYM: +Label1 -Label2'). Unlike Deconvolve, you MUST specify 'SYM: ' if providing the GLT directly as a string instead of from a file out_file: - # type=file: dataset for beta + statistics from the REML estimation (if generated + # type=file: dataset for beta + statistics from the REML estimation (if generated) # type=file|default=: output dataset for beta + statistics from the REML estimation; also contains the results of any GLT analysis requested in the Deconvolve setup, similar to the 'bucket' output from Deconvolve. 
This dataset does NOT get the betas (or statistics) of those regressors marked as 'baseline' in the matrix file. var_file: # type=file: dataset for REML variance parameters (if generated) # type=file|default=: output dataset for REML variance parameters rbeta_file: - # type=file: output dataset for beta weights from the REML estimation (if generated + # type=file: output dataset for beta weights from the REML estimation (if generated) # type=file|default=: output dataset for beta weights from the REML estimation, similar to the 'cbucket' output from Deconvolve. This dataset will contain all the beta weights, for baseline and stimulus regressors alike, unless the '-nobout' option is given -- in that case, this dataset will only get the betas for the stimulus regressors. glt_file: # type=file: output dataset for beta + statistics from the REML estimation, but ONLY for the GLTs added on the REMLfit command line itself via 'gltsym' (if generated) @@ -218,7 +179,7 @@ tests: # type=file: output dataset for REML fitted model (if generated) # type=file|default=: output dataset for REML fitted model errts_file: - # type=file: output dataset for REML residuals = data - fitted model (if generated + # type=file: output dataset for REML residuals = data - fitted model (if generated) # type=file|default=: output dataset for REML residuals = data - fitted model wherr_file: # type=file: dataset for REML residual, whitened using the estimated ARMA(1,1) correlation matrix of the noise (if generated) @@ -239,13 +200,13 @@ tests: # type=file: dataset for beta + statistics from the OLSQ estimation (if generated) # type=file|default=: dataset for beta + statistics from the OLSQ estimation oglt: - # type=file: dataset for beta + statistics from 'gltsym' options (if generated + # type=file: dataset for beta + statistics from 'gltsym' options (if generated) # type=file|default=: dataset for beta + statistics from 'gltsym' options ofitts: # type=file: dataset for OLSQ fitted model (if 
generated) # type=file|default=: dataset for OLSQ fitted model oerrts: - # type=file: dataset for OLSQ residuals = data - fitted model (if generated + # type=file: dataset for OLSQ residuals = data - fitted model (if generated) # type=file|default=: dataset for OLSQ residuals (data - fitted model) num_threads: # type=int|default=1: set number of threads @@ -256,7 +217,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -275,15 +236,10 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: Read time series dataset - out_file: '"output.nii"' - # type=file: dataset for beta + statistics from the REML estimation (if generated - # type=file|default=: output dataset for beta + statistics from the REML estimation; also contains the results of any GLT analysis requested in the Deconvolve setup, similar to the 'bucket' output from Deconvolve. This dataset does NOT get the betas (or statistics) of those regressors marked as 'baseline' in the matrix file. matrix: # type=file|default=: the design matrix file, which should have been output from Deconvolve via the 'x1D' option - gltsym: '[("SYM: +Lab1 -Lab2", "TestSYM"), ("timeseries.txt", "TestFile")]' - # type=list|default=[]: read a symbolic GLT from input file and associate it with a label. As in Deconvolve, you can also use the 'SYM:' method to provide the definition of the GLT directly as a string (e.g., with 'SYM: +Label1 -Label2'). 
Unlike Deconvolve, you MUST specify 'SYM: ' if providing the GLT directly as a string instead of from a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -306,15 +262,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional.nii", "functional2.nii"]' # type=inputmultiobject|default=[]: Read time series dataset - out_file: '"output.nii"' - # type=file: dataset for beta + statistics from the REML estimation (if generated - # type=file|default=: output dataset for beta + statistics from the REML estimation; also contains the results of any GLT analysis requested in the Deconvolve setup, similar to the 'bucket' output from Deconvolve. This dataset does NOT get the betas (or statistics) of those regressors marked as 'baseline' in the matrix file. matrix: '"output.1D"' # type=file|default=: the design matrix file, which should have been output from Deconvolve via the 'x1D' option - gltsym: '[("SYM: +Lab1 -Lab2", "TestSYM"), ("timeseries.txt", "TestFile")]' - # type=list|default=[]: read a symbolic GLT from input file and associate it with a label. As in Deconvolve, you can also use the 'SYM:' method to provide the definition of the GLT directly as a string (e.g., with 'SYM: +Label1 -Label2'). 
Unlike Deconvolve, you MUST specify 'SYM: ' if providing the GLT directly as a string instead of from a file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/resample.yaml b/example-specs/interface/nipype/afni/resample.yaml index 802b962a..16ff4957 100644 --- a/example-specs/interface/nipype/afni/resample.yaml +++ b/example-specs/interface/nipype/afni/resample.yaml @@ -7,21 +7,21 @@ # ---- # Resample or reorient an image using AFNI 3dresample command # -# For complete details, see the `3dresample Documentation. -# `_ +# For complete details, see the `3dresample Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> resample = afni.Resample() +# >>> resample.inputs.in_file = 'functional.nii' +# >>> resample.inputs.orientation= 'RPI' +# >>> resample.inputs.outputtype = 'NIFTI' +# >>> resample.cmdline +# '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii' +# >>> res = resample.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> resample = afni.Resample() -# >>> resample.inputs.in_file = 'functional.nii' -# >>> resample.inputs.orientation= 'RPI' -# >>> resample.inputs.outputtype = 'NIFTI' -# >>> resample.cmdline -# '3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii' -# >>> res = resample.run() # doctest: +SKIP # -# task_name: Resample nipype_name: Resample nipype_module: nipype.interfaces.afni.utils @@ -40,9 +40,6 @@ inputs: # type=file|default=: input file to 3dresample master: generic/file # type=file|default=: align dataset 
grid to a reference file - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,12 +111,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dresample - orientation: '"RPI"' - # type=str|default='': new orientation code outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,7 
+129,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dresample -orient RPI -prefix functional_resample.nii -inset functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -142,12 +137,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dresample - orientation: '"RPI"' - # type=str|default='': new orientation code outputtype: '"NIFTI"' # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/retroicor.yaml b/example-specs/interface/nipype/afni/retroicor.yaml index 9b2bd338..b34c7f58 100644 --- a/example-specs/interface/nipype/afni/retroicor.yaml +++ b/example-specs/interface/nipype/afni/retroicor.yaml @@ -6,38 +6,38 @@ # Docs # ---- # Performs Retrospective Image Correction for physiological -# motion effects, using a slightly modified version of the -# RETROICOR algorithm +# motion effects, using a slightly modified version of the +# RETROICOR algorithm # -# The durations of the physiological inputs are assumed to equal -# the duration of the dataset. Any constant sampling rate may be -# used, but 40 Hz seems to be acceptable. 
This program's cardiac -# peak detection algorithm is rather simplistic, so you might try -# using the scanner's cardiac gating output (transform it to a -# spike wave if necessary). +# The durations of the physiological inputs are assumed to equal +# the duration of the dataset. Any constant sampling rate may be +# used, but 40 Hz seems to be acceptable. This program's cardiac +# peak detection algorithm is rather simplistic, so you might try +# using the scanner's cardiac gating output (transform it to a +# spike wave if necessary). # -# This program uses slice timing information embedded in the -# dataset to estimate the proper cardiac/respiratory phase for -# each slice. It makes sense to run this program before any -# program that may destroy the slice timings (e.g. 3dvolreg for -# motion correction). +# This program uses slice timing information embedded in the +# dataset to estimate the proper cardiac/respiratory phase for +# each slice. It makes sense to run this program before any +# program that may destroy the slice timings (e.g. 3dvolreg for +# motion correction). # -# For complete details, see the `3dretroicor Documentation. -# `_ +# For complete details, see the `3dretroicor Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> ret = afni.Retroicor() +# >>> ret.inputs.in_file = 'functional.nii' +# >>> ret.inputs.card = 'mask.1D' +# >>> ret.inputs.resp = 'resp.1D' +# >>> ret.inputs.outputtype = 'NIFTI' +# >>> ret.cmdline +# '3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii' +# >>> res = ret.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> ret = afni.Retroicor() -# >>> ret.inputs.in_file = 'functional.nii' -# >>> ret.inputs.card = 'mask.1D' -# >>> ret.inputs.resp = 'resp.1D' -# >>> ret.inputs.outputtype = 'NIFTI' -# >>> ret.cmdline -# '3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii' -# >>> res = ret.run() # doctest: +SKIP # -# task_name: Retroicor nipype_name: Retroicor nipype_module: nipype.interfaces.afni.preprocess @@ -52,16 +52,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- card: medimage-afni/one-d + card: generic/file # type=file|default=: 1D cardiac data file for cardiac correction cardphase: generic/file # type=file|default=: Filename for 1D cardiac phase output in_file: medimage/nifti1 # type=file|default=: input file to 3dretroicor - out_file: Path - # type=file: output file - # type=file|default=: output image file name - resp: medimage-afni/one-d + resp: fileformats.medimage_afni.OneD # type=file|default=: 1D respiratory waveform data for correction respphase: generic/file # type=file|default=: Filename for 1D resp phase output @@ -88,7 +85,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -121,7 +118,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,14 +137,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dretroicor - card: - # type=file|default=: 1D cardiac data file for cardiac correction resp: # type=file|default=: 1D respiratory waveform data for correction - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -162,7 +155,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dretroicor -prefix functional_retroicor.nii -resp resp.1D -card mask.1D functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -170,14 +163,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dretroicor - card: '"mask.1D"' - # type=file|default=: 1D cardiac data file for cardiac correction resp: '"resp.1D"' # type=file|default=: 1D respiratory waveform data for correction - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/roi_stats.yaml b/example-specs/interface/nipype/afni/roi_stats.yaml index 9cc2cfea..523e7691 100644 --- a/example-specs/interface/nipype/afni/roi_stats.yaml +++ b/example-specs/interface/nipype/afni/roi_stats.yaml @@ -7,22 +7,22 @@ # ---- # Display statistics over masked regions # -# For complete details, see the `3dROIstats Documentation -# `_ +# For complete details, see the `3dROIstats Documentation +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> roistats = afni.ROIStats() +# >>> roistats.inputs.in_file = 'functional.nii' +# >>> roistats.inputs.mask_file = 'skeleton_mask.nii.gz' +# >>> roistats.inputs.stat = ['mean', 'median', 'voxels'] +# >>> roistats.inputs.nomeanout = True +# >>> roistats.cmdline +# '3dROIstats -mask skeleton_mask.nii.gz -nomeanout -nzmean -nzmedian -nzvoxels functional.nii > functional_roistat.1D' +# >>> res = roistats.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> roistats = afni.ROIStats() -# >>> roistats.inputs.in_file = 'functional.nii' -# >>> roistats.inputs.mask_file = 'skeleton_mask.nii.gz' -# >>> roistats.inputs.stat = ['mean', 'median', 'voxels'] -# >>> roistats.inputs.nomeanout = True -# >>> roistats.cmdline -# '3dROIstats -mask skeleton_mask.nii.gz -nomeanout -nzmean -nzmedian -nzvoxels functional.nii > functional_roistat.1D' -# >>> res = roistats.run() # doctest: +SKIP # -# task_name: ROIStats nipype_name: ROIStats nipype_module: nipype.interfaces.afni.preprocess @@ -41,11 +41,8 @@ inputs: # type=file|default=: input dataset mask: generic/file # type=file|default=: input mask - mask_file: medimage/nifti-gz + mask_file: generic/file # type=file|default=: input mask - out_file: Path - # type=file: output tab-separated values file - # type=file|default=: output file roisel: generic/file # type=file|default=: Only considers ROIs denoted by values found in the specified file. 
Note that the order of the ROIs as specified in the file is not preserved. So an SEL.1D of '2 8 20' produces the same output as '8 20 2' callable_defaults: @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -114,7 +111,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -133,14 +130,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input dataset - mask_file: - # type=file|default=: input mask stat: '["mean", "median", "voxels"]' # type=inputmultiobject|default=[]: Statistics to compute. Options include: * mean = Compute the mean using only non_zero voxels. Implies the opposite for the mean computed by default. * median = Compute the median of nonzero voxels * mode = Compute the mode of nonzero voxels. (integral valued sets only) * minmax = Compute the min/max of nonzero voxels * sum = Compute the sum using only nonzero voxels. * voxels = Compute the number of nonzero voxels * sigma = Compute the standard deviation of nonzero voxels Statistics that include zero-valued voxels: * zerominmax = Compute the min/max of all voxels. 
* zerosigma = Compute the standard deviation of all voxels. * zeromedian = Compute the median of all voxels. * zeromode = Compute the mode of all voxels. * summary = Only output a summary line with the grand mean across all briks in the input dataset. This option cannot be used with nomeanout. More that one option can be specified. - nomeanout: 'True' - # type=bool|default=False: Do not include the (zero-inclusive) mean among computed stats imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,7 +148,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dROIstats -mask skeleton_mask.nii.gz -nomeanout -nzmean -nzmedian -nzvoxels functional.nii > functional_roistat.1D +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -163,14 +156,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input dataset - mask_file: '"skeleton_mask.nii.gz"' - # type=file|default=: input mask stat: '["mean", "median", "voxels"]' # type=inputmultiobject|default=[]: Statistics to compute. Options include: * mean = Compute the mean using only non_zero voxels. Implies the opposite for the mean computed by default. * median = Compute the median of nonzero voxels * mode = Compute the mode of nonzero voxels. (integral valued sets only) * minmax = Compute the min/max of nonzero voxels * sum = Compute the sum using only nonzero voxels. 
* voxels = Compute the number of nonzero voxels * sigma = Compute the standard deviation of nonzero voxels Statistics that include zero-valued voxels: * zerominmax = Compute the min/max of all voxels. * zerosigma = Compute the standard deviation of all voxels. * zeromedian = Compute the median of all voxels. * zeromode = Compute the mode of all voxels. * summary = Only output a summary line with the grand mean across all briks in the input dataset. This option cannot be used with nomeanout. More that one option can be specified. - nomeanout: 'True' - # type=bool|default=False: Do not include the (zero-inclusive) mean among computed stats imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/seg.yaml b/example-specs/interface/nipype/afni/seg.yaml index 6b900b57..9e103b49 100644 --- a/example-specs/interface/nipype/afni/seg.yaml +++ b/example-specs/interface/nipype/afni/seg.yaml @@ -6,23 +6,23 @@ # Docs # ---- # 3dSeg segments brain volumes into tissue classes. The program allows -# for adding a variety of global and voxelwise priors. However for the -# moment, only mixing fractions and MRF are documented. +# for adding a variety of global and voxelwise priors. However for the +# moment, only mixing fractions and MRF are documented. # -# For complete details, see the `3dSeg Documentation. -# `_ +# For complete details, see the `3dSeg Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces.afni import preprocess +# >>> seg = preprocess.Seg() +# >>> seg.inputs.in_file = 'structural.nii' +# >>> seg.inputs.mask = 'AUTO' +# >>> seg.cmdline +# '3dSeg -mask AUTO -anat structural.nii' +# >>> res = seg.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces.afni import preprocess -# >>> seg = preprocess.Seg() -# >>> seg.inputs.in_file = 'structural.nii' -# >>> seg.inputs.mask = 'AUTO' -# >>> seg.cmdline -# '3dSeg -mask AUTO -anat structural.nii' -# >>> res = seg.run() # doctest: +SKIP # -# task_name: Seg nipype_name: Seg nipype_module: nipype.interfaces.afni.preprocess @@ -61,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +95,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,10 +114,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: ANAT is the volume to segment - mask: '"AUTO"' - # type=traitcompound|default=None: only non-zero voxels in mask are analyzed. mask can either be a dataset or the string "AUTO" which would use AFNI's automask function to create the mask. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,7 +130,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dSeg -mask AUTO -anat structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -140,10 +138,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: ANAT is the volume to segment - mask: '"AUTO"' - # type=traitcompound|default=None: only non-zero voxels in mask are analyzed. mask can either be a dataset or the string "AUTO" which would use AFNI's automask function to create the mask. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/skull_strip.yaml b/example-specs/interface/nipype/afni/skull_strip.yaml index 1da38ac7..7cba8cec 100644 --- a/example-specs/interface/nipype/afni/skull_strip.yaml +++ b/example-specs/interface/nipype/afni/skull_strip.yaml @@ -6,23 +6,23 @@ # Docs # ---- # A program to extract the brain from surrounding tissue from MRI -# T1-weighted images. -# TODO Add optional arguments. 
+# T1-weighted images. +# TODO Add optional arguments. # -# For complete details, see the `3dSkullStrip Documentation. -# `_ +# For complete details, see the `3dSkullStrip Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> skullstrip = afni.SkullStrip() +# >>> skullstrip.inputs.in_file = 'functional.nii' +# >>> skullstrip.inputs.args = '-o_ply' +# >>> skullstrip.cmdline +# '3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip' +# >>> res = skullstrip.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> skullstrip = afni.SkullStrip() -# >>> skullstrip.inputs.in_file = 'functional.nii' -# >>> skullstrip.inputs.args = '-o_ply' -# >>> skullstrip.cmdline -# '3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip' -# >>> res = skullstrip.run() # doctest: +SKIP # -# task_name: SkullStrip nipype_name: SkullStrip nipype_module: nipype.interfaces.afni.preprocess @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input file to 3dSkullStrip - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +83,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -105,10 +102,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dSkullStrip - args: '"-o_ply"' - # type=str|default='': Additional parameters to the command imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,7 +118,7 @@ tests: # bool - whether the unittest 
is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dSkullStrip -input functional.nii -o_ply -prefix functional_skullstrip +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -131,10 +126,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dSkullStrip - args: '"-o_ply"' - # type=str|default='': Additional parameters to the command imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/svm_test.yaml b/example-specs/interface/nipype/afni/svm_test.yaml index e2c7be1a..460c8c2d 100644 --- a/example-specs/interface/nipype/afni/svm_test.yaml +++ b/example-specs/interface/nipype/afni/svm_test.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Temporally predictive modeling with the support vector machine -# SVM Test Only -# For complete details, see the `3dsvm Documentation. -# `_ +# SVM Test Only +# For complete details, see the `3dsvm Documentation. 
+# `_ # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import afni as afni +# >>> svmTest = afni.SVMTest() +# >>> svmTest.inputs.in_file= 'run2+orig' +# >>> svmTest.inputs.model= 'run1+orig_model' +# >>> svmTest.inputs.testlabels= 'run2_categories.1D' +# >>> svmTest.inputs.out_file= 'pred2_model1' +# >>> res = svmTest.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni as afni -# >>> svmTest = afni.SVMTest() -# >>> svmTest.inputs.in_file= 'run2+orig' -# >>> svmTest.inputs.model= 'run1+orig_model' -# >>> svmTest.inputs.testlabels= 'run2_categories.1D' -# >>> svmTest.inputs.out_file= 'pred2_model1' -# >>> res = svmTest.run() # doctest: +SKIP # -# task_name: SVMTest nipype_name: SVMTest nipype_module: nipype.interfaces.afni.svm @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: A 3D or 3D+t AFNI brik dataset to be used for testing. - out_file: Path - # type=file: output file - # type=file|default=: filename for .1D prediction file(s). testlabels: generic/file # type=file|default=: *true* class category .1D labels for the test dataset. 
It is used to calculate the prediction accuracy performance callable_defaults: @@ -66,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,7 +98,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/afni/svm_train.yaml b/example-specs/interface/nipype/afni/svm_train.yaml index e33492e0..c2d0d1da 100644 --- a/example-specs/interface/nipype/afni/svm_train.yaml +++ b/example-specs/interface/nipype/afni/svm_train.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Temporally predictive modeling with the support vector machine -# SVM Train Only -# For complete details, see the `3dsvm Documentation. -# `_ +# SVM Train Only +# For complete details, see the `3dsvm Documentation. 
+# `_ # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import afni as afni +# >>> svmTrain = afni.SVMTrain() +# >>> svmTrain.inputs.in_file = 'run1+orig' +# >>> svmTrain.inputs.trainlabels = 'run1_categories.1D' +# >>> svmTrain.inputs.ttype = 'regression' +# >>> svmTrain.inputs.mask = 'mask.nii' +# >>> svmTrain.inputs.model = 'model_run1' +# >>> svmTrain.inputs.alphas = 'alphas_run1' +# >>> res = svmTrain.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni as afni -# >>> svmTrain = afni.SVMTrain() -# >>> svmTrain.inputs.in_file = 'run1+orig' -# >>> svmTrain.inputs.trainlabels = 'run1_categories.1D' -# >>> svmTrain.inputs.ttype = 'regression' -# >>> svmTrain.inputs.mask = 'mask.nii' -# >>> svmTrain.inputs.model = 'model_run1' -# >>> svmTrain.inputs.alphas = 'alphas_run1' -# >>> res = svmTrain.run() # doctest: +SKIP # -# task_name: SVMTrain nipype_name: SVMTrain nipype_module: nipype.interfaces.afni.svm @@ -38,21 +38,12 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - alphas: Path - # type=file: output alphas file name - # type=file|default=: output alphas file name censor: generic/file # type=file|default=: .1D censor file that allows the user to ignore certain samples in the training data. in_file: generic/file # type=file|default=: A 3D+t AFNI brik dataset to be used for training. 
mask: generic/file # type=file|default=: byte-format brik file used to mask voxels in the analysis - model: Path - # type=file: brik containing the SVM model file name - # type=file|default=: basename for the brik containing the SVM model - out_file: Path - # type=file: sum of weighted linear support vectors file name - # type=file|default=: output sum of weighted linear support vectors file name trainlabels: generic/file # type=file|default=: .1D labels corresponding to the stimulus paradigm for the training data. callable_defaults: @@ -84,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -129,7 +120,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/afni/synthesize.yaml b/example-specs/interface/nipype/afni/synthesize.yaml index fea928ec..5fdbd357 100644 --- a/example-specs/interface/nipype/afni/synthesize.yaml +++ b/example-specs/interface/nipype/afni/synthesize.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Reads a '-cbucket' dataset and a '.xmat.1D' matrix from 3dDeconvolve, -# and synthesizes a fit dataset using user-selected sub-bricks and -# matrix columns. 
+# and synthesizes a fit dataset using user-selected sub-bricks and +# matrix columns. # -# For complete details, see the `3dSynthesize Documentation. -# `_ +# For complete details, see the `3dSynthesize Documentation. +# `_ # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import afni +# >>> synthesize = afni.Synthesize() +# >>> synthesize.inputs.cbucket = 'functional.nii' +# >>> synthesize.inputs.matrix = 'output.1D' +# >>> synthesize.inputs.select = ['baseline'] +# >>> synthesize.cmdline +# '3dSynthesize -cbucket functional.nii -matrix output.1D -select baseline' +# >>> syn = synthesize.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> synthesize = afni.Synthesize() -# >>> synthesize.inputs.cbucket = 'functional.nii' -# >>> synthesize.inputs.matrix = 'output.1D' -# >>> synthesize.inputs.select = ['baseline'] -# >>> synthesize.cmdline -# '3dSynthesize -cbucket functional.nii -matrix output.1D -select baseline' -# >>> syn = synthesize.run() # doctest: +SKIP -# task_name: Synthesize nipype_name: Synthesize nipype_module: nipype.interfaces.afni.model @@ -40,11 +40,8 @@ inputs: # passed to the field in the automatically generated unittests. cbucket: medimage/nifti1 # type=file|default=: Read the dataset output from 3dDeconvolve via the '-cbucket' option. - matrix: medimage-afni/one-d + matrix: generic/file # type=file|default=: Read the matrix output from 3dDeconvolve via the '-x1D' option. 
- out_file: Path - # type=file: output file - # type=file|default=: output dataset prefix name (default 'syn') callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -68,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +96,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,12 +115,10 @@ tests: # (if not specified, will try to choose a sensible value) cbucket: # type=file|default=: Read the dataset output from 3dDeconvolve via the '-cbucket' option. - matrix: - # type=file|default=: Read the matrix output from 3dDeconvolve via the '-x1D' option. select: '["baseline"]' # type=list|default=[]: A list of selected columns from the matrix (and the corresponding coefficient sub-bricks from the cbucket). Valid types include 'baseline', 'polort', 'allfunc', 'allstim', 'all', Can also provide 'something' where something matches a stim_label from 3dDeconvolve, and 'digits' where digits are the numbers of the select matrix columns by numbers (starting at 0), or number ranges of the form '3..7' and '3-7'. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -146,12 +141,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. cbucket: '"functional.nii"' # type=file|default=: Read the dataset output from 3dDeconvolve via the '-cbucket' option. - matrix: '"output.1D"' - # type=file|default=: Read the matrix output from 3dDeconvolve via the '-x1D' option. select: '["baseline"]' # type=list|default=[]: A list of selected columns from the matrix (and the corresponding coefficient sub-bricks from the cbucket). Valid types include 'baseline', 'polort', 'allfunc', 'allstim', 'all', Can also provide 'something' where something matches a stim_label from 3dDeconvolve, and 'digits' where digits are the numbers of the select matrix columns by numbers (starting at 0), or number ranges of the form '3..7' and '3-7'. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_cat.yaml b/example-specs/interface/nipype/afni/t_cat.yaml index f281668f..5abb7443 100644 --- a/example-specs/interface/nipype/afni/t_cat.yaml +++ b/example-specs/interface/nipype/afni/t_cat.yaml @@ -7,24 +7,24 @@ # ---- # Concatenate sub-bricks from input datasets into one big 3D+time dataset. 
# -# TODO Replace InputMultiPath in_files with Traits.List, if possible. Current -# version adds extra whitespace. +# TODO Replace InputMultiPath in_files with Traits.List, if possible. Current +# version adds extra whitespace. # -# For complete details, see the `3dTcat Documentation. -# `_ +# For complete details, see the `3dTcat Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tcat = afni.TCat() +# >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii'] +# >>> tcat.inputs.out_file= 'functional_tcat.nii' +# >>> tcat.inputs.rlt = '+' +# >>> tcat.cmdline +# '3dTcat -rlt+ -prefix functional_tcat.nii functional.nii functional2.nii' +# >>> res = tcat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tcat = afni.TCat() -# >>> tcat.inputs.in_files = ['functional.nii', 'functional2.nii'] -# >>> tcat.inputs.out_file= 'functional_tcat.nii' -# >>> tcat.inputs.rlt = '+' -# >>> tcat.cmdline -# '3dTcat -rlt+ -prefix functional_tcat.nii functional.nii functional2.nii' -# >>> res = tcat.run() # doctest: +SKIP # -# task_name: TCat nipype_name: TCat nipype_module: nipype.interfaces.afni.utils @@ -41,9 +41,6 @@ inputs: # passed to the field in the automatically generated unittests. in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: input file to 3dTcat - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,14 +57,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,7 +89,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -111,13 +108,10 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: input file to 3dTcat - out_file: '"functional_tcat.nii"' - # type=file: output file - # type=file|default=: output image file name rlt: '"+"' # type=enum|default=''|allowed['','+','++']: Remove linear trends in each voxel time series loaded from each input dataset, SEPARATELY. Option -rlt removes the least squares fit of 'a+b*t' to each voxel time series. Option -rlt+ adds dataset mean back in. Option -rlt++ adds overall mean of all dataset timeseries back in. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,7 +126,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTcat -rlt+ -prefix functional_tcat.nii functional.nii functional2.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -140,13 +134,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional.nii", "functional2.nii"]' # type=inputmultiobject|default=[]: input file to 3dTcat - out_file: '"functional_tcat.nii"' - # type=file: output file - # type=file|default=: output image file name rlt: '"+"' # type=enum|default=''|allowed['','+','++']: Remove linear trends in each voxel time series loaded from each input dataset, SEPARATELY. Option -rlt removes the least squares fit of 'a+b*t' to each voxel time series. Option -rlt+ adds dataset mean back in. Option -rlt++ adds overall mean of all dataset timeseries back in. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_cat_sub_brick.yaml b/example-specs/interface/nipype/afni/t_cat_sub_brick.yaml index 68739d1b..b76c77d7 100644 --- a/example-specs/interface/nipype/afni/t_cat_sub_brick.yaml +++ b/example-specs/interface/nipype/afni/t_cat_sub_brick.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Hopefully a temporary function to allow sub-brick selection until -# afni file management is improved. +# afni file management is improved. # -# For complete details, see the `3dTcat Documentation. -# `_ +# For complete details, see the `3dTcat Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tcsb = afni.TCatSubBrick() +# >>> tcsb.inputs.in_files = [('functional.nii', "'{2..$}'"), ('functional2.nii', "'{2..$}'")] +# >>> tcsb.inputs.out_file= 'functional_tcat.nii' +# >>> tcsb.inputs.rlt = '+' +# >>> tcsb.cmdline +# "3dTcat -rlt+ -prefix functional_tcat.nii functional.nii'{2..$}' functional2.nii'{2..$}' " +# >>> res = tcsb.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tcsb = afni.TCatSubBrick() -# >>> tcsb.inputs.in_files = [('functional.nii', "'{2..$}'"), ('functional2.nii', "'{2..$}'")] -# >>> tcsb.inputs.out_file= 'functional_tcat.nii' -# >>> tcsb.inputs.rlt = '+' -# >>> tcsb.cmdline -# "3dTcat -rlt+ -prefix functional_tcat.nii functional.nii'{2..$}' functional2.nii'{2..$}' " -# >>> res = tcsb.run() # doctest: +SKIP # -# task_name: TCatSubBrick nipype_name: TCatSubBrick nipype_module: nipype.interfaces.afni.utils @@ -37,9 +37,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -56,15 +53,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"functional_tcat.nii"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: output file # type=file|default=: output image file name requirements: @@ -89,7 +86,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -108,13 +105,10 @@ tests: # (if not specified, will try to choose a sensible value) in_files: '[(''functional.nii'', "''{2..$}''"), (''functional2.nii'', "''{2..$}''")]' # type=list|default=[]: List of tuples of file names and subbrick selectors as strings.Don't forget to protect the single quotes in the subbrick selectorso the contents are protected from the command line interpreter. 
- out_file: '"functional_tcat.nii"' - # type=file: output file - # type=file|default=: output image file name rlt: '"+"' # type=enum|default=''|allowed['','+','++']: Remove linear trends in each voxel time series loaded from each input dataset, SEPARATELY. Option -rlt removes the least squares fit of 'a+b*t' to each voxel time series. Option -rlt+ adds dataset mean back in. Option -rlt++ adds overall mean of all dataset timeseries back in. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,7 +123,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: '3dTcat -rlt+ -prefix functional_tcat.nii functional.nii"{2..$}" functional2.nii"{2..$}" ' +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -137,13 +131,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '[(''functional.nii'', "''{2..$}''"), (''functional2.nii'', "''{2..$}''")]' # type=list|default=[]: List of tuples of file names and subbrick selectors as strings.Don't forget to protect the single quotes in the subbrick selectorso the contents are protected from the command line interpreter. - out_file: '"functional_tcat.nii"' - # type=file: output file - # type=file|default=: output image file name rlt: '"+"' # type=enum|default=''|allowed['','+','++']: Remove linear trends in each voxel time series loaded from each input dataset, SEPARATELY. 
Option -rlt removes the least squares fit of 'a+b*t' to each voxel time series. Option -rlt+ adds dataset mean back in. Option -rlt++ adds overall mean of all dataset timeseries back in. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_corr_1d.yaml b/example-specs/interface/nipype/afni/t_corr_1d.yaml index 20713833..84bb26fd 100644 --- a/example-specs/interface/nipype/afni/t_corr_1d.yaml +++ b/example-specs/interface/nipype/afni/t_corr_1d.yaml @@ -6,20 +6,20 @@ # Docs # ---- # Computes the correlation coefficient between each voxel time series -# in the input 3D+time dataset. +# in the input 3D+time dataset. # -# For complete details, see the `3dTcorr1D Documentation. -# `_ +# For complete details, see the `3dTcorr1D Documentation. 
+# `_ +# +# >>> from nipype.interfaces import afni +# >>> tcorr1D = afni.TCorr1D() +# >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii' +# >>> tcorr1D.inputs.y_1d = 'seed.1D' +# >>> tcorr1D.cmdline +# '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D' +# >>> res = tcorr1D.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> tcorr1D = afni.TCorr1D() -# >>> tcorr1D.inputs.xset= 'u_rc1s1_Template.nii' -# >>> tcorr1D.inputs.y_1d = 'seed.1D' -# >>> tcorr1D.cmdline -# '3dTcorr1D -prefix u_rc1s1_Template_correlation.nii.gz u_rc1s1_Template.nii seed.1D' -# >>> res = tcorr1D.run() # doctest: +SKIP # -# task_name: TCorr1D nipype_name: TCorr1D nipype_module: nipype.interfaces.afni.preprocess @@ -34,12 +34,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: Path - # type=file: output file containing correlations - # type=file|default=: output filename prefix xset: medimage/nifti1 # type=file|default=: 3d+time dataset input - y_1d: medimage-afni/one-d + y_1d: generic/file # type=file|default=: 1D time series file input callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - 
list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,10 +111,8 @@ tests: # (if not specified, will try to choose a sensible value) xset: # type=file|default=: 3d+time dataset input - y_1d: - # type=file|default=: 1D time series file input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,10 +135,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. xset: '"u_rc1s1_Template.nii"' # type=file|default=: 3d+time dataset input - y_1d: '"seed.1D"' - # type=file|default=: 1D time series file input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_corr_map.yaml b/example-specs/interface/nipype/afni/t_corr_map.yaml index 8fb389b8..bbebba67 100644 --- a/example-specs/interface/nipype/afni/t_corr_map.yaml +++ b/example-specs/interface/nipype/afni/t_corr_map.yaml @@ -6,24 +6,24 @@ # Docs # ---- # For each voxel time series, computes the correlation between it -# and all other voxels, and combines this set of values into the -# output dataset(s) in some way. +# and all other voxels, and combines this set of values into the +# output dataset(s) in some way. # -# For complete details, see the `3dTcorrMap Documentation. -# `_ +# For complete details, see the `3dTcorrMap Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tcm = afni.TCorrMap() +# >>> tcm.inputs.in_file = 'functional.nii' +# >>> tcm.inputs.mask = 'mask.nii' +# >>> tcm.mean_file = 'functional_meancorr.nii' +# >>> tcm.cmdline # doctest: +SKIP +# '3dTcorrMap -input functional.nii -mask mask.nii -Mean functional_meancorr.nii' +# >>> res = tcm.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tcm = afni.TCorrMap() -# >>> tcm.inputs.in_file = 'functional.nii' -# >>> tcm.inputs.mask = 'mask.nii' -# >>> tcm.mean_file = 'functional_meancorr.nii' -# >>> tcm.cmdline # doctest: +SKIP -# '3dTcorrMap -input functional.nii -mask mask.nii -Mean functional_meancorr.nii' -# >>> res = tcm.run() # doctest: +SKIP # -# task_name: TCorrMap nipype_name: TCorrMap nipype_module: nipype.interfaces.afni.preprocess @@ -38,55 +38,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- absolute_threshold: Path - # type=file: - # type=file|default=: - average_expr: Path - # type=file: - # type=file|default=: - average_expr_nonzero: Path - # type=file: - # type=file|default=: - correlation_maps: Path - # type=file: - # type=file|default=: - correlation_maps_masked: Path - # type=file: - # type=file|default=: - histogram: Path - # type=file: - # type=file|default=: in_file: medimage/nifti1 # type=file|default=: - mask: medimage/nifti1 - # type=file|default=: - mean_file: Path - # type=file: - # type=file|default=: - out_file: Path - # type=file|default=: output image file name - pmean: Path - # type=file: - # type=file|default=: - qmean: Path - # type=file: + mask: generic/file # type=file|default=: regress_out_timeseries: generic/file # type=file|default=: seeds: generic/file # type=file|default=: - sum_expr: Path - # type=file: - # type=file|default=: - var_absolute_threshold: Path - # type=file: - # type=file|default=: - var_absolute_threshold_normalize: Path - # type=file: - # type=file|default=: - zmean: Path - # type=file: - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -146,7 +105,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -227,7 +186,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list 
import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -246,10 +205,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: - mask: - # type=file|default=: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -264,7 +221,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTcorrMap -input functional.nii -mask mask.nii -Mean functional_meancorr.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -272,10 +229,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: - mask: '"mask.nii"' - # type=file|default=: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_correlate.yaml b/example-specs/interface/nipype/afni/t_correlate.yaml index 7067997b..7b64b262 100644 --- a/example-specs/interface/nipype/afni/t_correlate.yaml +++ b/example-specs/interface/nipype/afni/t_correlate.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Computes the correlation coefficient between corresponding voxel -# time series in two input 3D+time datasets 'xset' and 'yset' +# time series in two input 3D+time datasets 'xset' and 'yset' # -# For complete details, see the `3dTcorrelate Documentation. -# `_ +# For complete details, see the `3dTcorrelate Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tcorrelate = afni.TCorrelate() +# >>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii' +# >>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii' +# >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz' +# >>> tcorrelate.inputs.polort = -1 +# >>> tcorrelate.inputs.pearson = True +# >>> tcorrelate.cmdline +# '3dTcorrelate -prefix functional_tcorrelate.nii.gz -pearson -polort -1 u_rc1s1_Template.nii u_rc1s2_Template.nii' +# >>> res = tcarrelate.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tcorrelate = afni.TCorrelate() -# >>> tcorrelate.inputs.xset= 'u_rc1s1_Template.nii' -# >>> tcorrelate.inputs.yset = 'u_rc1s2_Template.nii' -# >>> tcorrelate.inputs.out_file = 'functional_tcorrelate.nii.gz' -# >>> tcorrelate.inputs.polort = -1 -# >>> tcorrelate.inputs.pearson = True -# >>> tcorrelate.cmdline -# '3dTcorrelate -prefix functional_tcorrelate.nii.gz -pearson -polort -1 u_rc1s1_Template.nii u_rc1s2_Template.nii' -# >>> res = tcarrelate.run() # doctest: +SKIP # -# task_name: TCorrelate nipype_name: TCorrelate nipype_module: nipype.interfaces.afni.preprocess @@ -39,12 +39,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the 
format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: Path - # type=file: output file - # type=file|default=: output image file name xset: medimage/nifti1 # type=file|default=: input xset - yset: medimage/nifti1 + yset: generic/file # type=file|default=: input yset callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +83,7 @@ tests: pearson: # type=bool|default=False: Correlation is the normal Pearson correlation coefficient polort: - # type=int|default=0: Remove polynomical trend of order m + # type=int|default=0: Remove polynomial trend of order m num_threads: # type=int|default=1: set number of threads outputtype: @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,17 +112,13 @@ tests: # (if not specified, will try to choose a sensible value) xset: # type=file|default=: input xset - yset: - # type=file|default=: input yset out_file: '"functional_tcorrelate.nii.gz"' # type=file: output file # type=file|default=: output 
image file name - polort: '-1' - # type=int|default=0: Remove polynomical trend of order m pearson: 'True' # type=bool|default=False: Correlation is the normal Pearson correlation coefficient imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTcorrelate -prefix functional_tcorrelate.nii.gz -pearson -polort -1 u_rc1s1_Template.nii u_rc1s2_Template.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -148,17 +141,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. xset: '"u_rc1s1_Template.nii"' # type=file|default=: input xset - yset: '"u_rc1s2_Template.nii"' - # type=file|default=: input yset out_file: '"functional_tcorrelate.nii.gz"' # type=file: output file # type=file|default=: output image file name - polort: '-1' - # type=int|default=0: Remove polynomical trend of order m pearson: 'True' # type=bool|default=False: Correlation is the normal Pearson correlation coefficient imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_norm.yaml b/example-specs/interface/nipype/afni/t_norm.yaml index f78630fc..252f9414 100644 --- a/example-specs/interface/nipype/afni/t_norm.yaml +++ b/example-specs/interface/nipype/afni/t_norm.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Shifts voxel time series from input so that separate slices are aligned -# to the same temporal origin. +# to the same temporal origin. # -# For complete details, see the `3dTnorm Documentation. -# `_ +# For complete details, see the `3dTnorm Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tnorm = afni.TNorm() +# >>> tnorm.inputs.in_file = 'functional.nii' +# >>> tnorm.inputs.norm2 = True +# >>> tnorm.inputs.out_file = 'rm.errts.unit errts+tlrc' +# >>> tnorm.cmdline +# '3dTnorm -norm2 -prefix rm.errts.unit errts+tlrc functional.nii' +# >>> res = tshift.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tnorm = afni.TNorm() -# >>> tnorm.inputs.in_file = 'functional.nii' -# >>> tnorm.inputs.norm2 = True -# >>> tnorm.inputs.out_file = 'rm.errts.unit errts+tlrc' -# >>> tnorm.cmdline -# '3dTnorm -norm2 -prefix rm.errts.unit errts+tlrc functional.nii' -# >>> res = tshift.run() # doctest: +SKIP # -# task_name: TNorm nipype_name: TNorm nipype_module: nipype.interfaces.afni.preprocess @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: input file to 3dTNorm - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -98,7 +95,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -117,13 +114,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTNorm - norm2: 'True' - # type=bool|default=False: L2 normalize (sum of squares = 1) [DEFAULT] out_file: '"rm.errts.unit errts+tlrc"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for 
selected outputs, noting that tests will typically @@ -138,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTnorm -norm2 -prefix rm.errts.unit errts+tlrc functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,13 +141,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTNorm - norm2: 'True' - # type=bool|default=False: L2 normalize (sum of squares = 1) [DEFAULT] out_file: '"rm.errts.unit errts+tlrc"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_project.yaml b/example-specs/interface/nipype/afni/t_project.yaml index 50c16683..de72574d 100644 --- a/example-specs/interface/nipype/afni/t_project.yaml +++ b/example-specs/interface/nipype/afni/t_project.yaml @@ -6,34 +6,34 @@ # Docs # ---- # -# This program projects (detrends) out various 'nuisance' time series from -# each voxel in the input dataset. Note that all the projections are done -# via linear regression, including the frequency-based options such -# as ``-passband``. In this way, you can bandpass time-censored data, and at -# the same time, remove other time series of no interest -# (e.g., physiological estimates, motion parameters). 
-# Shifts voxel time series from input so that separate slices are aligned to -# the same temporal origin. +# This program projects (detrends) out various 'nuisance' time series from +# each voxel in the input dataset. Note that all the projections are done +# via linear regression, including the frequency-based options such +# as ``-passband``. In this way, you can bandpass time-censored data, and at +# the same time, remove other time series of no interest +# (e.g., physiological estimates, motion parameters). +# Shifts voxel time series from input so that separate slices are aligned to +# the same temporal origin. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tproject = afni.TProject() -# >>> tproject.inputs.in_file = 'functional.nii' -# >>> tproject.inputs.bandpass = (0.00667, 99999) -# >>> tproject.inputs.polort = 3 -# >>> tproject.inputs.automask = True -# >>> tproject.inputs.out_file = 'projected.nii.gz' -# >>> tproject.cmdline -# '3dTproject -input functional.nii -automask -bandpass 0.00667 99999 -polort 3 -prefix projected.nii.gz' -# >>> res = tproject.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tproject = afni.TProject() +# >>> tproject.inputs.in_file = 'functional.nii' +# >>> tproject.inputs.bandpass = (0.00667, 99999) +# >>> tproject.inputs.polort = 3 +# >>> tproject.inputs.automask = True +# >>> tproject.inputs.out_file = 'projected.nii.gz' +# >>> tproject.cmdline +# '3dTproject -input functional.nii -automask -bandpass 0.00667 99999 -polort 3 -prefix projected.nii.gz' +# >>> res = tproject.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dTproject Documentation. +# `__ # -# See Also -# -------- -# For complete details, see the `3dTproject Documentation. 
-# `__ # -# task_name: TProject nipype_name: TProject nipype_module: nipype.interfaces.afni.preprocess @@ -60,9 +60,6 @@ inputs: # type=file|default=: Only operate on voxels nonzero in the mset dataset. * Voxels outside the mask will be filled with zeros. * If no masking option is given, then all voxels will be processed. ort: generic/file # type=file|default=: Remove each column in file. Each column will have its mean removed. - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -86,7 +83,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -137,7 +134,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -156,17 +153,13 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTproject - bandpass: (0.00667, 99999) - # type=tuple|default=(0.0, 0.0): Remove all frequencies EXCEPT those in the range polort: '3' # type=int|default=0: Remove polynomials up to and including degree pp. 
* Default value is 2. * It makes no sense to use a value of pp greater than 2, if you are bandpassing out the lower frequencies! * For catenated datasets, each run gets a separate set set of pp+1 Legendre polynomial regressors. * Use of -polort -1 is not advised (if data mean != 0), even if -ort contains constant terms, as all means are removed. - automask: 'True' - # type=bool|default=False: Generate a mask automatically out_file: '"projected.nii.gz"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -181,7 +174,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTproject -input functional.nii -automask -bandpass 0.00667 99999 -polort 3 -prefix projected.nii.gz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -189,17 +182,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTproject - bandpass: (0.00667, 99999) - # type=tuple|default=(0.0, 0.0): Remove all frequencies EXCEPT those in the range polort: '3' # type=int|default=0: Remove polynomials up to and including degree pp. * Default value is 2. * It makes no sense to use a value of pp greater than 2, if you are bandpassing out the lower frequencies! * For catenated datasets, each run gets a separate set set of pp+1 Legendre polynomial regressors. 
* Use of -polort -1 is not advised (if data mean != 0), even if -ort contains constant terms, as all means are removed. - automask: 'True' - # type=bool|default=False: Generate a mask automatically out_file: '"projected.nii.gz"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_shift.yaml b/example-specs/interface/nipype/afni/t_shift.yaml index eeb1f7d1..889ecff0 100644 --- a/example-specs/interface/nipype/afni/t_shift.yaml +++ b/example-specs/interface/nipype/afni/t_shift.yaml @@ -6,93 +6,93 @@ # Docs # ---- # Shifts voxel time series from input so that separate slices are aligned -# to the same temporal origin. +# to the same temporal origin. # -# For complete details, see the `3dTshift Documentation. -# `_ +# For complete details, see the `3dTshift Documentation. 
+# `_ # -# Examples -# -------- -# Slice timing details may be specified explicitly via the ``slice_timing`` -# input: +# Examples +# -------- +# Slice timing details may be specified explicitly via the ``slice_timing`` +# input: # -# >>> from nipype.interfaces import afni -# >>> TR = 2.5 -# >>> tshift = afni.TShift() -# >>> tshift.inputs.in_file = 'functional.nii' -# >>> tshift.inputs.tzero = 0.0 -# >>> tshift.inputs.tr = '%.1fs' % TR -# >>> tshift.inputs.slice_timing = list(np.arange(40) / TR) -# >>> tshift.cmdline -# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' +# >>> from nipype.interfaces import afni +# >>> TR = 2.5 +# >>> tshift = afni.TShift() +# >>> tshift.inputs.in_file = 'functional.nii' +# >>> tshift.inputs.tzero = 0.0 +# >>> tshift.inputs.tr = '%.1fs' % TR +# >>> tshift.inputs.slice_timing = list(np.arange(40) / TR) +# >>> tshift.cmdline +# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' # -# When the ``slice_timing`` input is used, the ``timing_file`` output is populated, -# in this case with the generated file. +# When the ``slice_timing`` input is used, the ``timing_file`` output is populated, +# in this case with the generated file. 
# -# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS -# '.../slice_timing.1D' +# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS +# '.../slice_timing.1D' # -# >>> np.loadtxt(tshift._list_outputs()['timing_file']).tolist()[:5] -# [0.0, 0.4, 0.8, 1.2, 1.6] +# >>> np.loadtxt(tshift._list_outputs()['timing_file']).tolist()[:5] +# [0.0, 0.4, 0.8, 1.2, 1.6] # -# If ``slice_encoding_direction`` is set to ``'k-'``, the slice timing is reversed: +# If ``slice_encoding_direction`` is set to ``'k-'``, the slice timing is reversed: # -# >>> tshift.inputs.slice_encoding_direction = 'k-' -# >>> tshift.cmdline -# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' -# >>> np.loadtxt(tshift._list_outputs()['timing_file']).tolist()[:5] -# [15.6, 15.2, 14.8, 14.4, 14.0] +# >>> tshift.inputs.slice_encoding_direction = 'k-' +# >>> tshift.cmdline +# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' +# >>> np.loadtxt(tshift._list_outputs()['timing_file']).tolist()[:5] +# [15.6, 15.2, 14.8, 14.4, 14.0] # -# This method creates a ``slice_timing.1D`` file to be passed to ``3dTshift``. -# A pre-existing slice-timing file may be used in the same way: +# This method creates a ``slice_timing.1D`` file to be passed to ``3dTshift``. 
+# A pre-existing slice-timing file may be used in the same way: # -# >>> tshift = afni.TShift() -# >>> tshift.inputs.in_file = 'functional.nii' -# >>> tshift.inputs.tzero = 0.0 -# >>> tshift.inputs.tr = '%.1fs' % TR -# >>> tshift.inputs.slice_timing = 'slice_timing.1D' -# >>> tshift.cmdline -# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' +# >>> tshift = afni.TShift() +# >>> tshift.inputs.in_file = 'functional.nii' +# >>> tshift.inputs.tzero = 0.0 +# >>> tshift.inputs.tr = '%.1fs' % TR +# >>> tshift.inputs.slice_timing = 'slice_timing.1D' +# >>> tshift.cmdline +# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' # -# When a pre-existing file is provided, ``timing_file`` is simply passed through. +# When a pre-existing file is provided, ``timing_file`` is simply passed through. # -# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS -# '.../slice_timing.1D' +# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS +# '.../slice_timing.1D' # -# Alternatively, pre-specified slice timing patterns may be specified with the -# ``tpattern`` input. -# For example, to specify an alternating, ascending slice timing pattern: +# Alternatively, pre-specified slice timing patterns may be specified with the +# ``tpattern`` input. 
+# For example, to specify an alternating, ascending slice timing pattern: # -# >>> tshift = afni.TShift() -# >>> tshift.inputs.in_file = 'functional.nii' -# >>> tshift.inputs.tzero = 0.0 -# >>> tshift.inputs.tr = '%.1fs' % TR -# >>> tshift.inputs.tpattern = 'alt+z' -# >>> tshift.cmdline -# '3dTshift -prefix functional_tshift -tpattern alt+z -TR 2.5s -tzero 0.0 functional.nii' +# >>> tshift = afni.TShift() +# >>> tshift.inputs.in_file = 'functional.nii' +# >>> tshift.inputs.tzero = 0.0 +# >>> tshift.inputs.tr = '%.1fs' % TR +# >>> tshift.inputs.tpattern = 'alt+z' +# >>> tshift.cmdline +# '3dTshift -prefix functional_tshift -tpattern alt+z -TR 2.5s -tzero 0.0 functional.nii' # -# For backwards compatibility, ``tpattern`` may also take filenames prefixed -# with ``@``. -# However, in this case, filenames are not validated, so this usage will be -# deprecated in future versions of Nipype. +# For backwards compatibility, ``tpattern`` may also take filenames prefixed +# with ``@``. +# However, in this case, filenames are not validated, so this usage will be +# deprecated in future versions of Nipype. # -# >>> tshift = afni.TShift() -# >>> tshift.inputs.in_file = 'functional.nii' -# >>> tshift.inputs.tzero = 0.0 -# >>> tshift.inputs.tr = '%.1fs' % TR -# >>> tshift.inputs.tpattern = '@slice_timing.1D' -# >>> tshift.cmdline -# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' +# >>> tshift = afni.TShift() +# >>> tshift.inputs.in_file = 'functional.nii' +# >>> tshift.inputs.tzero = 0.0 +# >>> tshift.inputs.tr = '%.1fs' % TR +# >>> tshift.inputs.tpattern = '@slice_timing.1D' +# >>> tshift.cmdline +# '3dTshift -prefix functional_tshift -tpattern @slice_timing.1D -TR 2.5s -tzero 0.0 functional.nii' # -# In these cases, ``timing_file`` is undefined. +# In these cases, ``timing_file`` is undefined. 
# -# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS -# +# >>> tshift._list_outputs()['timing_file'] # doctest: +ELLIPSIS +# # -# In any configuration, the interface may be run as usual: +# In any configuration, the interface may be run as usual: +# +# >>> res = tshift.run() # doctest: +SKIP # -# >>> res = tshift.run() # doctest: +SKIP -# task_name: TShift nipype_name: TShift nipype_module: nipype.interfaces.afni.preprocess @@ -109,9 +109,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dTshift - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -137,7 +134,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -178,7 +175,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -197,14 +194,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTshift - tzero: '0.0' - # 
type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. - slice_timing: list(np.arange(40) / TR) - # type=traitcompound|default=None: time offsets from the volume acquisition onset for each slice imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -224,7 +217,7 @@ tests: slice_encoding_direction: '"k-"' # type=enum|default='k'|allowed['k','k-']: Direction in which slice_timing is specified (default: k). If negative,slice_timing is defined in reverse order, that is, the first entry corresponds to the slice with the largest index, and the final entry corresponds to slice index zero. Only in effect when slice_timing is passed as list, not when it is passed as file. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -243,14 +236,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. 
- slice_timing: '"slice_timing.1D"' - # type=traitcompound|default=None: time offsets from the volume acquisition onset for each slice imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -269,14 +258,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. - tpattern: '"alt+z"' - # type=traitcompound|default=None: use specified slice time pattern rather than one in header imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -295,14 +280,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. 
- tpattern: '"@slice_timing.1D"' - # type=traitcompound|default=None: use specified slice time pattern rather than one in header imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -325,14 +306,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. - slice_timing: list(np.arange(40) / TR) - # type=traitcompound|default=None: time offsets from the volume acquisition onset for each slice imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -345,7 +322,7 @@ doctests: slice_encoding_direction: '"k-"' # type=enum|default='k'|allowed['k','k-']: Direction in which slice_timing is specified (default: k). If negative,slice_timing is defined in reverse order, that is, the first entry corresponds to the slice with the largest index, and the final entry corresponds to slice index zero. Only in effect when slice_timing is passed as list, not when it is passed as file. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -357,14 +334,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. - slice_timing: '"slice_timing.1D"' - # type=traitcompound|default=None: time offsets from the volume acquisition onset for each slice imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -376,14 +349,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. 
- tpattern: '"alt+z"' - # type=traitcompound|default=None: use specified slice time pattern rather than one in header imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -395,14 +364,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTshift - tzero: '0.0' - # type=float|default=0.0: align each slice to given time offset tr: '"%.1fs" % TR' # type=str|default='': manually set the TR. You can attach suffix "s" for seconds or "ms" for milliseconds. - tpattern: '"@slice_timing.1D"' - # type=traitcompound|default=None: use specified slice time pattern rather than one in header imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_smooth.yaml b/example-specs/interface/nipype/afni/t_smooth.yaml index 2f1b8822..8d2f489b 100644 --- a/example-specs/interface/nipype/afni/t_smooth.yaml +++ b/example-specs/interface/nipype/afni/t_smooth.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Smooths each voxel time series in a 3D+time dataset and produces -# as output a new 3D+time dataset (e.g., lowpass filter in time). +# as output a new 3D+time dataset (e.g., lowpass filter in time). # -# For complete details, see the `3dTsmooth Documentation. 
-# `_ +# For complete details, see the `3dTsmooth Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> from nipype.testing import example_data +# >>> smooth = afni.TSmooth() +# >>> smooth.inputs.in_file = 'functional.nii' +# >>> smooth.inputs.adaptive = 5 +# >>> smooth.cmdline +# '3dTsmooth -adaptive 5 -prefix functional_smooth functional.nii' +# >>> res = smooth.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> from nipype.testing import example_data -# >>> smooth = afni.TSmooth() -# >>> smooth.inputs.in_file = 'functional.nii' -# >>> smooth.inputs.adaptive = 5 -# >>> smooth.cmdline -# '3dTsmooth -adaptive 5 -prefix functional_smooth functional.nii' -# >>> res = smooth.run() # doctest: +SKIP # -# task_name: TSmooth nipype_name: TSmooth nipype_module: nipype.interfaces.afni.preprocess @@ -41,9 +41,6 @@ inputs: # type=file|default=: odd # of coefficients must be in a single column in ASCII file in_file: medimage/nifti1 # type=file|default=: input file to 3dTSmooth - out_file: Path - # type=file: output file - # type=file|default=: output file from 3dTSmooth callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +103,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each 
list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,10 +122,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTSmooth - adaptive: '5' - # type=int|default=0: use adaptive mean filtering of width N (where N must be odd and bigger than 3). imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: ' example_data' @@ -146,7 +141,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTsmooth -adaptive 5 -prefix functional_smooth functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -154,10 +149,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dTSmooth - adaptive: '5' - # type=int|default=0: use adaptive mean filtering of width N (where N must be odd and bigger than 3). imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/t_stat.yaml b/example-specs/interface/nipype/afni/t_stat.yaml index f00dfece..fd9bcc8c 100644 --- a/example-specs/interface/nipype/afni/t_stat.yaml +++ b/example-specs/interface/nipype/afni/t_stat.yaml @@ -7,21 +7,21 @@ # ---- # Compute voxel-wise statistics using AFNI 3dTstat command # -# For complete details, see the `3dTstat Documentation. -# `_ +# For complete details, see the `3dTstat Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> tstat = afni.TStat() +# >>> tstat.inputs.in_file = 'functional.nii' +# >>> tstat.inputs.args = '-mean' +# >>> tstat.inputs.out_file = 'stats' +# >>> tstat.cmdline +# '3dTstat -mean -prefix stats functional.nii' +# >>> res = tstat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> tstat = afni.TStat() -# >>> tstat.inputs.in_file = 'functional.nii' -# >>> tstat.inputs.args = '-mean' -# >>> tstat.inputs.out_file = 'stats' -# >>> tstat.cmdline -# '3dTstat -mean -prefix stats functional.nii' -# >>> res = tstat.run() # doctest: +SKIP # -# task_name: TStat nipype_name: TStat nipype_module: nipype.interfaces.afni.utils @@ -40,9 +40,6 @@ inputs: # type=file|default=: input file to 3dTstat mask: generic/file # type=file|default=: mask file - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields 
that are required to be provided for the output field to be present tests: @@ -91,7 +88,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -110,13 +107,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dTstat - args: '"-mean"' - # type=str|default='': Additional parameters to the command out_file: '"stats"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,7 +126,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dTstat -mean -prefix stats functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -139,13 +134,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input file to 3dTstat - args: '"-mean"' - # type=str|default='': Additional parameters to the command out_file: '"stats"' # type=file: output file # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/to_3d.yaml b/example-specs/interface/nipype/afni/to_3d.yaml index 2676d1dc..285e37d5 100644 --- a/example-specs/interface/nipype/afni/to_3d.yaml +++ b/example-specs/interface/nipype/afni/to_3d.yaml @@ -7,22 +7,22 @@ # ---- # Create a 3D dataset from 2D image files using AFNI to3d command # -# For complete details, see the `to3d Documentation -# `_ +# For complete details, see the `to3d Documentation +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> to3d = afni.To3D() +# >>> to3d.inputs.datatype = 'float' +# >>> to3d.inputs.in_folder = '.' +# >>> to3d.inputs.out_file = 'dicomdir.nii' +# >>> to3d.inputs.filetype = 'anat' +# >>> to3d.cmdline # doctest: +ELLIPSIS +# 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' +# >>> res = to3d.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> to3d = afni.To3D() -# >>> to3d.inputs.datatype = 'float' -# >>> to3d.inputs.in_folder = '.' 
-# >>> to3d.inputs.out_file = 'dicomdir.nii' -# >>> to3d.inputs.filetype = 'anat' -# >>> to3d.cmdline # doctest: +ELLIPSIS -# 'to3d -datum float -anat -prefix dicomdir.nii ./*.dcm' -# >>> res = to3d.run() # doctest: +SKIP # -# task_name: To3D nipype_name: To3D nipype_module: nipype.interfaces.afni.utils @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. in_folder: generic/directory # type=directory|default=: folder with DICOM images to convert - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,15 +112,11 @@ tests: # (if not specified, will try to choose a sensible value) datatype: '"float"' # type=enum|default='short'|allowed['byte','complex','float','short']: set output file datatype - in_folder: '"."' - # type=directory|default=: folder with DICOM images to convert 
out_file: '"dicomdir.nii"' # type=file: output file # type=file|default=: output image file name - filetype: '"anat"' - # type=enum|default='spgr'|allowed['abuc','anat','bmap','ct','diff','epan','fbuc','fibn','fibt','fico','fict','fift','figt','fim','fipt','fith','fitt','fizt','fse','mra','omri','pet','spct','spgr']: type of datafile being converted imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +131,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: to3d -datum float -anat -prefix dicomdir.nii ./*.dcm +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,15 +139,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
datatype: '"float"' # type=enum|default='short'|allowed['byte','complex','float','short']: set output file datatype - in_folder: '"."' - # type=directory|default=: folder with DICOM images to convert out_file: '"dicomdir.nii"' # type=file: output file # type=file|default=: output image file name - filetype: '"anat"' - # type=enum|default='spgr'|allowed['abuc','anat','bmap','ct','diff','epan','fbuc','fibn','fibt','fico','fict','fift','figt','fim','fipt','fith','fitt','fizt','fse','mra','omri','pet','spct','spgr']: type of datafile being converted imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/undump.yaml b/example-specs/interface/nipype/afni/undump.yaml index 0be1de71..4146ffe1 100644 --- a/example-specs/interface/nipype/afni/undump.yaml +++ b/example-specs/interface/nipype/afni/undump.yaml @@ -6,39 +6,39 @@ # Docs # ---- # 3dUndump - Assembles a 3D dataset from an ASCII list of coordinates and -# (optionally) values. +# (optionally) values. # -# The input file(s) are ASCII files, with one voxel specification per -# line. A voxel specification is 3 numbers (-ijk or -xyz coordinates), -# with an optional 4th number giving the voxel value. For example: +# The input file(s) are ASCII files, with one voxel specification per +# line. A voxel specification is 3 numbers (-ijk or -xyz coordinates), +# with an optional 4th number giving the voxel value. 
For example: # -# 1 2 3 -# 3 2 1 5 -# 5.3 6.2 3.7 -# // this line illustrates a comment +# 1 2 3 +# 3 2 1 5 +# 5.3 6.2 3.7 +# // this line illustrates a comment # -# The first line puts a voxel (with value given by '-dval') at point -# (1,2,3). The second line puts a voxel (with value 5) at point (3,2,1). -# The third line puts a voxel (with value given by '-dval') at point -# (5.3,6.2,3.7). If -ijk is in effect, and fractional coordinates -# are given, they will be rounded to the nearest integers; for example, -# the third line would be equivalent to (i,j,k) = (5,6,4). +# The first line puts a voxel (with value given by '-dval') at point +# (1,2,3). The second line puts a voxel (with value 5) at point (3,2,1). +# The third line puts a voxel (with value given by '-dval') at point +# (5.3,6.2,3.7). If -ijk is in effect, and fractional coordinates +# are given, they will be rounded to the nearest integers; for example, +# the third line would be equivalent to (i,j,k) = (5,6,4). # # -# For complete details, see the `3dUndump Documentation. -# `_ +# For complete details, see the `3dUndump Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> unndump = afni.Undump() +# >>> unndump.inputs.in_file = 'structural.nii' +# >>> unndump.inputs.out_file = 'structural_undumped.nii' +# >>> unndump.cmdline +# '3dUndump -prefix structural_undumped.nii -master structural.nii' +# >>> res = unndump.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> unndump = afni.Undump() -# >>> unndump.inputs.in_file = 'structural.nii' -# >>> unndump.inputs.out_file = 'structural_undumped.nii' -# >>> unndump.cmdline -# '3dUndump -prefix structural_undumped.nii -master structural.nii' -# >>> res = unndump.run() # doctest: +SKIP # -# task_name: Undump nipype_name: Undump nipype_module: nipype.interfaces.afni.utils @@ -57,9 +57,6 @@ inputs: # type=file|default=: input file to 3dUndump, whose geometry will determinethe geometry of the output mask_file: generic/file # type=file|default=: mask image file name. Only voxels that are nonzero in the mask can be set. - out_file: Path - # type=file: assembled file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -76,14 +73,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: assembled file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -120,7 +117,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,11 +136,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dUndump, whose geometry will determinethe geometry of the output - out_file: '"structural_undumped.nii"' - # type=file: assembled file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,7 +152,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dUndump -prefix structural_undumped.nii -master structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -166,11 +160,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3dUndump, whose geometry will determinethe geometry of the output - out_file: '"structural_undumped.nii"' - # type=file: assembled file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/unifize.yaml b/example-specs/interface/nipype/afni/unifize.yaml index 092b0876..8cc7e22a 100644 --- a/example-specs/interface/nipype/afni/unifize.yaml +++ b/example-specs/interface/nipype/afni/unifize.yaml @@ -7,45 +7,45 @@ # ---- # 3dUnifize - for uniformizing image intensity # -# * The input dataset is supposed to be a T1-weighted volume, -# possibly already skull-stripped (e.g., via 3dSkullStrip). -# However, this program can be a useful step to take BEFORE -# 3dSkullStrip, since the latter program can fail if the input -# volume is strongly shaded -- 3dUnifize will (mostly) remove -# such shading artifacts. +# * The input dataset is supposed to be a T1-weighted volume, +# possibly already skull-stripped (e.g., via 3dSkullStrip). 
+# However, this program can be a useful step to take BEFORE +# 3dSkullStrip, since the latter program can fail if the input +# volume is strongly shaded -- 3dUnifize will (mostly) remove +# such shading artifacts. # -# * The output dataset has the white matter (WM) intensity approximately -# uniformized across space, and scaled to peak at about 1000. +# * The output dataset has the white matter (WM) intensity approximately +# uniformized across space, and scaled to peak at about 1000. # -# * The output dataset is always stored in float format! +# * The output dataset is always stored in float format! # -# * If the input dataset has more than 1 sub-brick, only sub-brick -# #0 will be processed! +# * If the input dataset has more than 1 sub-brick, only sub-brick +# #0 will be processed! # -# * Want to correct EPI datasets for nonuniformity? -# You can try the new and experimental [Mar 2017] '-EPI' option. +# * Want to correct EPI datasets for nonuniformity? +# You can try the new and experimental [Mar 2017] '-EPI' option. # -# * The principal motive for this program is for use in an image -# registration script, and it may or may not be useful otherwise. +# * The principal motive for this program is for use in an image +# registration script, and it may or may not be useful otherwise. # -# * This program replaces the older (and very different) 3dUniformize, -# which is no longer maintained and may sublimate at any moment. -# (In other words, we do not recommend the use of 3dUniformize.) +# * This program replaces the older (and very different) 3dUniformize, +# which is no longer maintained and may sublimate at any moment. +# (In other words, we do not recommend the use of 3dUniformize.) # -# For complete details, see the `3dUnifize Documentation. -# `_ +# For complete details, see the `3dUnifize Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> unifize = afni.Unifize() +# >>> unifize.inputs.in_file = 'structural.nii' +# >>> unifize.inputs.out_file = 'structural_unifized.nii' +# >>> unifize.cmdline +# '3dUnifize -prefix structural_unifized.nii -input structural.nii' +# >>> res = unifize.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> unifize = afni.Unifize() -# >>> unifize.inputs.in_file = 'structural.nii' -# >>> unifize.inputs.out_file = 'structural_unifized.nii' -# >>> unifize.cmdline -# '3dUnifize -prefix structural_unifized.nii -input structural.nii' -# >>> res = unifize.run() # doctest: +SKIP # -# task_name: Unifize nipype_name: Unifize nipype_module: nipype.interfaces.afni.utils @@ -62,12 +62,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dUnifize - out_file: Path - # type=file: unifized file - # type=file|default=: output image file name - scale_file: Path - # type=file: scale factor file - # type=file|default=: output file name to save the scale factor used at each voxel callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -84,7 +78,7 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: unifized file # type=file|default=: output image file name scale_file: generic/file @@ -94,7 +88,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -136,7 +130,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,11 +149,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dUnifize - out_file: '"structural_unifized.nii"' - # type=file: unifized file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -174,7 +165,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dUnifize -prefix structural_unifized.nii -input structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -182,11 +173,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3dUnifize - out_file: '"structural_unifized.nii"' - # type=file: unifized file - # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/volreg.yaml b/example-specs/interface/nipype/afni/volreg.yaml index b7ff805d..ed104149 100644 --- a/example-specs/interface/nipype/afni/volreg.yaml +++ b/example-specs/interface/nipype/afni/volreg.yaml @@ -7,36 +7,36 @@ # ---- # Register input volumes to a base volume using AFNI 3dvolreg command # -# For complete details, see the `3dvolreg Documentation. -# `_ +# For complete details, see the `3dvolreg Documentation. 
+# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> volreg = afni.Volreg() -# >>> volreg.inputs.in_file = 'functional.nii' -# >>> volreg.inputs.args = '-Fourier -twopass' -# >>> volreg.inputs.zpad = 4 -# >>> volreg.inputs.outputtype = 'NIFTI' -# >>> volreg.cmdline # doctest: +ELLIPSIS -# '3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii' -# >>> res = volreg.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> volreg = afni.Volreg() +# >>> volreg.inputs.in_file = 'functional.nii' +# >>> volreg.inputs.args = '-Fourier -twopass' +# >>> volreg.inputs.zpad = 4 +# >>> volreg.inputs.outputtype = 'NIFTI' +# >>> volreg.cmdline # doctest: +ELLIPSIS +# '3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii' +# >>> res = volreg.run() # doctest: +SKIP +# +# >>> from nipype.interfaces import afni +# >>> volreg = afni.Volreg() +# >>> volreg.inputs.in_file = 'functional.nii' +# >>> volreg.inputs.interp = 'cubic' +# >>> volreg.inputs.verbose = True +# >>> volreg.inputs.zpad = 1 +# >>> volreg.inputs.basefile = 'functional.nii' +# >>> volreg.inputs.out_file = 'rm.epi.volreg.r1' +# >>> volreg.inputs.oned_file = 'dfile.r1.1D' +# >>> volreg.inputs.oned_matrix_save = 'mat.r1.tshift+orig.1D' +# >>> volreg.cmdline +# '3dvolreg -cubic -1Dfile dfile.r1.1D -1Dmatrix_save mat.r1.tshift+orig.1D -prefix rm.epi.volreg.r1 -verbose -base functional.nii -zpad 1 -maxdisp1D functional_md.1D functional.nii' +# >>> res = volreg.run() # doctest: +SKIP # -# >>> from nipype.interfaces import afni -# >>> volreg = afni.Volreg() -# >>> volreg.inputs.in_file = 'functional.nii' -# >>> volreg.inputs.interp = 'cubic' -# >>> volreg.inputs.verbose = True -# >>> volreg.inputs.zpad = 1 -# >>> volreg.inputs.basefile = 
'functional.nii' -# >>> volreg.inputs.out_file = 'rm.epi.volreg.r1' -# >>> volreg.inputs.oned_file = 'dfile.r1.1D' -# >>> volreg.inputs.oned_matrix_save = 'mat.r1.tshift+orig.1D' -# >>> volreg.cmdline -# '3dvolreg -cubic -1Dfile dfile.r1.1D -1Dmatrix_save mat.r1.tshift+orig.1D -prefix rm.epi.volreg.r1 -verbose -base functional.nii -zpad 1 -maxdisp1D functional_md.1D functional.nii' -# >>> res = volreg.run() # doctest: +SKIP # -# task_name: Volreg nipype_name: Volreg nipype_module: nipype.interfaces.afni.preprocess @@ -55,18 +55,6 @@ inputs: # type=file|default=: base file for registration in_file: medimage/nifti1 # type=file|default=: input file to 3dvolreg - md1d_file: Path - # type=file: max displacement info file - # type=file|default=: max displacement output file - oned_file: Path - # type=file: movement parameters info file - # type=file|default=: 1D movement parameters output file - oned_matrix_save: Path - # type=file: matrix transformation from base to input - # type=file|default=: Save the matrix transformation - out_file: Path - # type=file: registered file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -86,20 +74,20 @@ outputs: md1d_file: generic/file # type=file: max displacement info file # type=file|default=: max displacement output file - oned_file: medimage-afni/one-d + oned_file: fileformats.medimage_afni.OneD # type=file: movement parameters info file # type=file|default=: 1D movement parameters output file - oned_matrix_save: medimage-afni/one-d + oned_matrix_save: generic/file # type=file: matrix transformation from base to input # type=file|default=: Save the matrix transformation - out_file: medimage-afni/r1 + out_file: generic/file # type=file: registered file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in 
the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -143,7 +131,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -162,14 +150,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dvolreg - args: '"-Fourier -twopass"' - # type=str|default='': Additional parameters to the command zpad: '4' # type=int|default=0: Zeropad around the edges by 'n' voxels during rotations - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -188,25 +172,15 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dvolreg - interp: '"cubic"' - # type=enum|default='Fourier'|allowed['Fourier','cubic','heptic','linear','quintic']: spatial 
interpolation methods [default = heptic] verbose: 'True' # type=bool|default=False: more detailed description of the process - zpad: '1' - # type=int|default=0: Zeropad around the edges by 'n' voxels during rotations basefile: # type=file|default=: base file for registration - out_file: '"rm.epi.volreg.r1"' - # type=file: registered file - # type=file|default=: output image file name oned_file: '"dfile.r1.1D"' # type=file: movement parameters info file # type=file|default=: 1D movement parameters output file - oned_matrix_save: '"mat.r1.tshift+orig.1D"' - # type=file: matrix transformation from base to input - # type=file|default=: Save the matrix transformation imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -221,7 +195,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dvolreg -Fourier -twopass -1Dfile functional.1D -1Dmatrix_save functional.aff12.1D -prefix functional_volreg.nii -zpad 4 -maxdisp1D functional_md.1D functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -229,14 +203,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input file to 3dvolreg - args: '"-Fourier -twopass"' - # type=str|default='': Additional parameters to the command zpad: '4' # type=int|default=0: Zeropad around the edges by 'n' voxels during rotations - outputtype: '"NIFTI"' - # type=enum|default='AFNI'|allowed['AFNI','NIFTI','NIFTI_GZ']: AFNI output filetype imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -248,25 +218,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dvolreg - interp: '"cubic"' - # type=enum|default='Fourier'|allowed['Fourier','cubic','heptic','linear','quintic']: spatial interpolation methods [default = heptic] verbose: 'True' # type=bool|default=False: more detailed description of the process - zpad: '1' - # type=int|default=0: Zeropad around the edges by 'n' voxels during rotations basefile: '"functional.nii"' # type=file|default=: base file for registration - out_file: '"rm.epi.volreg.r1"' - # type=file: registered file - # type=file|default=: output image file name oned_file: '"dfile.r1.1D"' # type=file: movement parameters info file # type=file|default=: 1D movement parameters output file - oned_matrix_save: '"mat.r1.tshift+orig.1D"' - # type=file: matrix transformation from base to input - # type=file|default=: Save the matrix transformation imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # 
consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/warp.yaml b/example-specs/interface/nipype/afni/warp.yaml index 70332d87..aa0e7a46 100644 --- a/example-specs/interface/nipype/afni/warp.yaml +++ b/example-specs/interface/nipype/afni/warp.yaml @@ -7,31 +7,31 @@ # ---- # Use 3dWarp for spatially transforming a dataset. # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> warp = afni.Warp() -# >>> warp.inputs.in_file = 'structural.nii' -# >>> warp.inputs.deoblique = True -# >>> warp.inputs.out_file = 'trans.nii.gz' -# >>> warp.cmdline -# '3dWarp -deoblique -prefix trans.nii.gz structural.nii' -# >>> res = warp.run() # doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> warp = afni.Warp() +# >>> warp.inputs.in_file = 'structural.nii' +# >>> warp.inputs.deoblique = True +# >>> warp.inputs.out_file = 'trans.nii.gz' +# >>> warp.cmdline +# '3dWarp -deoblique -prefix trans.nii.gz structural.nii' +# >>> res = warp.run() # doctest: +SKIP # -# >>> warp_2 = afni.Warp() -# >>> warp_2.inputs.in_file = 'structural.nii' -# >>> warp_2.inputs.newgrid = 1.0 -# >>> warp_2.inputs.out_file = 'trans.nii.gz' -# >>> warp_2.cmdline -# '3dWarp -newgrid 1.000000 -prefix trans.nii.gz structural.nii' -# >>> res = warp_2.run() # doctest: +SKIP +# >>> warp_2 = afni.Warp() +# >>> warp_2.inputs.in_file = 'structural.nii' +# >>> warp_2.inputs.newgrid = 1.0 +# >>> warp_2.inputs.out_file = 'trans.nii.gz' +# >>> warp_2.cmdline +# '3dWarp -newgrid 1.000000 -prefix trans.nii.gz structural.nii' +# >>> res = warp_2.run() # doctest: +SKIP +# +# See Also +# -------- +# For complete details, see the `3dWarp Documentation. +# `__. # -# See Also -# -------- -# For complete details, see the `3dWarp Documentation. -# `__. 
# -# task_name: Warp nipype_name: Warp nipype_module: nipype.interfaces.afni.preprocess @@ -54,9 +54,6 @@ inputs: # type=file|default=: apply transformation from 3dWarpDrive oblique_parent: generic/file # type=file|default=: Read in the oblique transformation matrix from an oblique dataset and make cardinal dataset oblique to match - out_file: Path - # type=file: Warped file. - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -82,7 +79,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -125,7 +122,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,13 +141,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dWarp - deoblique: 'True' - # type=bool|default=False: transform dataset from oblique to cardinal out_file: '"trans.nii.gz"' # type=file: Warped file. 
# type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -169,13 +164,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dWarp - newgrid: '1.0' - # type=float|default=0.0: specify grid of this size (mm) out_file: '"trans.nii.gz"' # type=file: Warped file. # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -190,7 +183,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dWarp -deoblique -prefix trans.nii.gz structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -198,13 +191,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3dWarp - deoblique: 'True' - # type=bool|default=False: transform dataset from oblique to cardinal out_file: '"trans.nii.gz"' # type=file: Warped file. 
# type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -216,13 +207,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: input file to 3dWarp - newgrid: '1.0' - # type=float|default=0.0: specify grid of this size (mm) out_file: '"trans.nii.gz"' # type=file: Warped file. # type=file|default=: output image file name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/z_cut_up.yaml b/example-specs/interface/nipype/afni/z_cut_up.yaml index 402baca7..b924b5fc 100644 --- a/example-specs/interface/nipype/afni/z_cut_up.yaml +++ b/example-specs/interface/nipype/afni/z_cut_up.yaml @@ -7,21 +7,21 @@ # ---- # Cut z-slices from a volume using AFNI 3dZcutup command # -# For complete details, see the `3dZcutup Documentation. -# `_ +# For complete details, see the `3dZcutup Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> zcutup = afni.ZCutUp() +# >>> zcutup.inputs.in_file = 'functional.nii' +# >>> zcutup.inputs.out_file = 'functional_zcutup.nii' +# >>> zcutup.inputs.keep= '0 10' +# >>> zcutup.cmdline +# '3dZcutup -keep 0 10 -prefix functional_zcutup.nii functional.nii' +# >>> res = zcutup.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> zcutup = afni.ZCutUp() -# >>> zcutup.inputs.in_file = 'functional.nii' -# >>> zcutup.inputs.out_file = 'functional_zcutup.nii' -# >>> zcutup.inputs.keep= '0 10' -# >>> zcutup.cmdline -# '3dZcutup -keep 0 10 -prefix functional_zcutup.nii functional.nii' -# >>> res = zcutup.run() # doctest: +SKIP # -# task_name: ZCutUp nipype_name: ZCutUp nipype_module: nipype.interfaces.afni.utils @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to 3dZcutup - out_file: Path - # type=file: output file - # type=file|default=: output image file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output image file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -87,7 +84,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -106,13 +103,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to 3dZcutup - out_file: '"functional_zcutup.nii"' - # type=file: output file - # type=file|default=: output image file name keep: '"0 10"' # type=str|default='': slice range to keep in output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +121,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dZcutup -keep 0 10 -prefix functional_zcutup.nii functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -135,13 +129,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input file to 3dZcutup - out_file: '"functional_zcutup.nii"' - # type=file: output file - # type=file|default=: output image file name keep: '"0 10"' # type=str|default='': slice range to keep in output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/zcat.yaml b/example-specs/interface/nipype/afni/zcat.yaml index feabf411..d978ed32 100644 --- a/example-specs/interface/nipype/afni/zcat.yaml +++ b/example-specs/interface/nipype/afni/zcat.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Copies an image of one type to an image of the same -# or different type using 3dZcat command +# or different type using 3dZcat command # -# For complete details, see the `3dZcat Documentation. -# `_ +# For complete details, see the `3dZcat Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> zcat = afni.Zcat() +# >>> zcat.inputs.in_files = ['functional2.nii', 'functional3.nii'] +# >>> zcat.inputs.out_file = 'cat_functional.nii' +# >>> zcat.cmdline +# '3dZcat -prefix cat_functional.nii functional2.nii functional3.nii' +# >>> res = zcat.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> zcat = afni.Zcat() -# >>> zcat.inputs.in_files = ['functional2.nii', 'functional3.nii'] -# >>> zcat.inputs.out_file = 'cat_functional.nii' -# >>> zcat.cmdline -# '3dZcat -prefix cat_functional.nii functional2.nii functional3.nii' -# >>> res = zcat.run() # doctest: +SKIP # -# task_name: Zcat nipype_name: Zcat nipype_module: nipype.interfaces.afni.utils @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: - out_file: Path - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zcat') callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output dataset prefix name (default 'zcat') callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,11 +109,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: - out_file: '"cat_functional.nii"' - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zcat') imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,7 +125,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dZcat -prefix cat_functional.nii functional2.nii functional3.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -139,11 +133,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional2.nii", "functional3.nii"]' # type=inputmultiobject|default=[]: - out_file: '"cat_functional.nii"' - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zcat') imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/afni/zeropad.yaml b/example-specs/interface/nipype/afni/zeropad.yaml index e8743b3f..97ff870f 100644 --- a/example-specs/interface/nipype/afni/zeropad.yaml +++ b/example-specs/interface/nipype/afni/zeropad.yaml @@ -7,26 +7,26 @@ # ---- # Adds planes of zeros to a dataset (i.e., pads it out). # -# For complete details, see the `3dZeropad Documentation. -# `__ +# For complete details, see the `3dZeropad Documentation. 
+# `__ +# +# Examples +# -------- +# >>> from nipype.interfaces import afni +# >>> zeropad = afni.Zeropad() +# >>> zeropad.inputs.in_files = 'functional.nii' +# >>> zeropad.inputs.out_file = 'pad_functional.nii' +# >>> zeropad.inputs.I = 10 +# >>> zeropad.inputs.S = 10 +# >>> zeropad.inputs.A = 10 +# >>> zeropad.inputs.P = 10 +# >>> zeropad.inputs.R = 10 +# >>> zeropad.inputs.L = 10 +# >>> zeropad.cmdline +# '3dZeropad -A 10 -I 10 -L 10 -P 10 -R 10 -S 10 -prefix pad_functional.nii functional.nii' +# >>> res = zeropad.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import afni -# >>> zeropad = afni.Zeropad() -# >>> zeropad.inputs.in_files = 'functional.nii' -# >>> zeropad.inputs.out_file = 'pad_functional.nii' -# >>> zeropad.inputs.I = 10 -# >>> zeropad.inputs.S = 10 -# >>> zeropad.inputs.A = 10 -# >>> zeropad.inputs.P = 10 -# >>> zeropad.inputs.R = 10 -# >>> zeropad.inputs.L = 10 -# >>> zeropad.cmdline -# '3dZeropad -A 10 -I 10 -L 10 -P 10 -R 10 -S 10 -prefix pad_functional.nii functional.nii' -# >>> res = zeropad.run() # doctest: +SKIP # -# task_name: Zeropad nipype_name: Zeropad nipype_module: nipype.interfaces.afni.utils @@ -45,9 +45,6 @@ inputs: # type=file|default=: input dataset master: generic/file # type=file|default=: match the volume described in dataset 'mset', where mset must have the same orientation and grid spacing as dataset to be padded. the goal of -master is to make the output dataset from 3dZeropad match the spatial 'extents' of mset by adding or subtracting slices as needed. 
You can't use -I,-S,..., or -mm with -master - out_file: Path - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zeropad') callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -64,14 +61,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/nifti1 + out_file: generic/file # type=file: output file # type=file|default=: output dataset prefix name (default 'zeropad') callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -116,7 +113,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,23 +132,14 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=file|default=: input dataset - out_file: '"pad_functional.nii"' - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zeropad') I: '10' # type=int|default=0: adds 'n' planes of 
zero at the Inferior edge - S: '10' - # type=int|default=0: adds 'n' planes of zero at the Superior edge A: '10' # type=int|default=0: adds 'n' planes of zero at the Anterior edge - P: '10' - # type=int|default=0: adds 'n' planes of zero at the Posterior edge R: '10' # type=int|default=0: adds 'n' planes of zero at the Right edge - L: '10' - # type=int|default=0: adds 'n' planes of zero at the Left edge imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -166,7 +154,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: 3dZeropad -A 10 -I 10 -L 10 -P 10 -R 10 -S 10 -prefix pad_functional.nii functional.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -174,23 +162,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_files: '"functional.nii"' # type=file|default=: input dataset - out_file: '"pad_functional.nii"' - # type=file: output file - # type=file|default=: output dataset prefix name (default 'zeropad') I: '10' # type=int|default=0: adds 'n' planes of zero at the Inferior edge - S: '10' - # type=int|default=0: adds 'n' planes of zero at the Superior edge A: '10' # type=int|default=0: adds 'n' planes of zero at the Anterior edge - P: '10' - # type=int|default=0: adds 'n' planes of zero at the Posterior edge R: '10' # type=int|default=0: adds 'n' planes of zero at the Right edge - L: '10' - # type=int|default=0: adds 'n' planes of zero at the Left edge imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/affine_initializer.yaml b/example-specs/interface/nipype/ants/affine_initializer.yaml index 1d7dc03e..61dd6685 100644 --- a/example-specs/interface/nipype/ants/affine_initializer.yaml +++ b/example-specs/interface/nipype/ants/affine_initializer.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# Initialize an affine transform (as in antsBrainExtraction.sh) +# Initialize an affine transform (as in antsBrainExtraction.sh) +# +# >>> from nipype.interfaces.ants import AffineInitializer +# >>> init = AffineInitializer() +# >>> init.inputs.fixed_image = 'fixed1.nii' +# >>> init.inputs.moving_image = 'moving1.nii' +# >>> init.cmdline +# 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' # -# >>> from nipype.interfaces.ants import AffineInitializer -# >>> init = AffineInitializer() -# >>> init.inputs.fixed_image = 'fixed1.nii' -# >>> init.inputs.moving_image = 'moving1.nii' -# >>> init.cmdline -# 'antsAffineInitializer 3 fixed1.nii moving1.nii transform.mat 15.000000 0.100000 0 10' # -# task_name: AffineInitializer nipype_name: AffineInitializer nipype_module: nipype.interfaces.ants.utils @@ -32,11 +32,8 @@ inputs: # passed to the field in the automatically generated unittests. 
fixed_image: medimage/nifti1 # type=file|default=: reference image - moving_image: medimage/nifti1 + moving_image: generic/file # type=file|default=: moving image - out_file: Path - # type=file: output transform file - # type=file|default='transform.mat': output transform file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -91,7 +88,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -110,10 +107,8 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=file|default=: reference image - moving_image: - # type=file|default=: moving image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that 
tests will typically @@ -136,10 +131,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. fixed_image: '"fixed1.nii"' # type=file|default=: reference image - moving_image: '"moving1.nii"' - # type=file|default=: moving image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/ai.yaml b/example-specs/interface/nipype/ants/ai.yaml index 840ce367..9fae0229 100644 --- a/example-specs/interface/nipype/ants/ai.yaml +++ b/example-specs/interface/nipype/ants/ai.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# Calculate the optimal linear transform parameters for aligning two images. +# Calculate the optimal linear transform parameters for aligning two images. # -# Examples -# -------- -# >>> AI( -# ... fixed_image='structural.nii', -# ... moving_image='epi.nii', -# ... metric=('Mattes', 32, 'Regular', 1), -# ... ).cmdline -# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] -# -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0' +# Examples +# -------- +# >>> AI( +# ... fixed_image='structural.nii', +# ... moving_image='epi.nii', +# ... metric=('Mattes', 32, 'Regular', 1), +# ... ).cmdline +# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] +# -o initialization.mat -p 0 -s [20,0.12] -t Affine[0.1] -v 0' +# +# >>> AI(fixed_image='structural.nii', +# ... moving_image='epi.nii', +# ... metric=('Mattes', 32, 'Regular', 1), +# ... search_grid=(12, (1, 1, 1)), +# ... 
).cmdline +# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] +# -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0' # -# >>> AI(fixed_image='structural.nii', -# ... moving_image='epi.nii', -# ... metric=('Mattes', 32, 'Regular', 1), -# ... search_grid=(12, (1, 1, 1)), -# ... ).cmdline -# 'antsAI -c [10,1e-06,10] -d 3 -m Mattes[structural.nii,epi.nii,32,Regular,1] -# -o initialization.mat -p 0 -s [20,0.12] -g [12.0,1x1x1] -t Affine[0.1] -v 0' # -# task_name: AI nipype_name: AI nipype_module: nipype.interfaces.ants.utils @@ -49,9 +49,6 @@ inputs: # type=file|default=: Image that will be transformed to fixed_image moving_image_mask: generic/file # type=file|default=: moving mage mask - output_transform: Path - # type=file: output file name - # type=file|default='initialization.mat': output file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -75,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -116,7 +113,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically 
diff --git a/example-specs/interface/nipype/ants/ants.yaml b/example-specs/interface/nipype/ants/ants.yaml index 94f2ed69..46763368 100644 --- a/example-specs/interface/nipype/ants/ants.yaml +++ b/example-specs/interface/nipype/ants/ants.yaml @@ -6,32 +6,32 @@ # Docs # ---- # ANTS wrapper for registration of images -# (old, use Registration instead) +# (old, use Registration instead) # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants import ANTS +# >>> ants = ANTS() +# >>> ants.inputs.dimension = 3 +# >>> ants.inputs.output_transform_prefix = 'MY' +# >>> ants.inputs.metric = ['CC'] +# >>> ants.inputs.fixed_image = ['T1.nii'] +# >>> ants.inputs.moving_image = ['resting.nii'] +# >>> ants.inputs.metric_weight = [1.0] +# >>> ants.inputs.radius = [5] +# >>> ants.inputs.transformation_model = 'SyN' +# >>> ants.inputs.gradient_step_length = 0.25 +# >>> ants.inputs.number_of_iterations = [50, 35, 15] +# >>> ants.inputs.use_histogram_matching = True +# >>> ants.inputs.mi_option = [32, 16000] +# >>> ants.inputs.regularization = 'Gauss' +# >>> ants.inputs.regularization_gradient_field_sigma = 3 +# >>> ants.inputs.regularization_deformation_field_sigma = 0 +# >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] +# >>> ants.cmdline +# 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] --transformation-model SyN[0.25] --use-Histogram-Matching 1' # -# >>> from nipype.interfaces.ants import ANTS -# >>> ants = ANTS() -# >>> ants.inputs.dimension = 3 -# >>> ants.inputs.output_transform_prefix = 'MY' -# >>> ants.inputs.metric = ['CC'] -# >>> ants.inputs.fixed_image = ['T1.nii'] -# >>> ants.inputs.moving_image = ['resting.nii'] -# >>> ants.inputs.metric_weight = [1.0] -# >>> ants.inputs.radius = [5] -# >>> ants.inputs.transformation_model = 'SyN' -# >>> 
ants.inputs.gradient_step_length = 0.25 -# >>> ants.inputs.number_of_iterations = [50, 35, 15] -# >>> ants.inputs.use_histogram_matching = True -# >>> ants.inputs.mi_option = [32, 16000] -# >>> ants.inputs.regularization = 'Gauss' -# >>> ants.inputs.regularization_gradient_field_sigma = 3 -# >>> ants.inputs.regularization_deformation_field_sigma = 0 -# >>> ants.inputs.number_of_affine_iterations = [10000,10000,10000,10000,10000] -# >>> ants.cmdline -# 'ANTS 3 --MI-option 32x16000 --image-metric CC[ T1.nii, resting.nii, 1, 5 ] --number-of-affine-iterations 10000x10000x10000x10000x10000 --number-of-iterations 50x35x15 --output-naming MY --regularization Gauss[3.0,0.0] --transformation-model SyN[0.25] --use-Histogram-Matching 1' -# task_name: ANTS nipype_name: ANTS nipype_module: nipype.interfaces.ants.registration @@ -46,7 +46,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- fixed_image: medimage/nifti1+list-of + fixed_image: generic/file+list-of # type=inputmultiobject|default=[]: image to which the moving image is warped moving_image: medimage/nifti1+list-of # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) @@ -80,7 +80,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -138,7 +138,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,38 +157,22 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_transform_prefix: '"MY"' - # type=str|default='out': metric: '["CC"]' # type=list|default=[]: - fixed_image: - # type=inputmultiobject|default=[]: image to which the moving image is warped moving_image: # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) - metric_weight: '[1.0]' - # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius: '[5]' # type=list|default=[]: radius of the region (i.e. 
number of layers around a voxel/pixel) that is used for computing cross correlation - transformation_model: '"SyN"' - # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: gradient_step_length: '0.25' # type=float|default=0.0: - number_of_iterations: '[50, 35, 15]' - # type=list|default=[]: use_histogram_matching: 'True' # type=bool|default=True: - mi_option: '[32, 16000]' - # type=list|default=[]: regularization: '"Gauss"' # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: - regularization_gradient_field_sigma: '3' - # type=float|default=0.0: regularization_deformation_field_sigma: '0' # type=float|default=0.0: - number_of_affine_iterations: '[10000,10000,10000,10000,10000]' - # type=list|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -211,38 +195,22 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_transform_prefix: '"MY"' - # type=str|default='out': metric: '["CC"]' # type=list|default=[]: - fixed_image: '["T1.nii"]' - # type=inputmultiobject|default=[]: image to which the moving image is warped moving_image: '["resting.nii"]' # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregisteredfunctional) - metric_weight: '[1.0]' - # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius: '[5]' # type=list|default=[]: radius of the region (i.e. 
number of layers around a voxel/pixel) that is used for computing cross correlation - transformation_model: '"SyN"' - # type=enum|default='Diff'|allowed['Diff','Elast','Exp','Greedy Exp','SyN']: gradient_step_length: '0.25' # type=float|default=0.0: - number_of_iterations: '[50, 35, 15]' - # type=list|default=[]: use_histogram_matching: 'True' # type=bool|default=True: - mi_option: '[32, 16000]' - # type=list|default=[]: regularization: '"Gauss"' # type=enum|default='Gauss'|allowed['DMFFD','Gauss']: - regularization_gradient_field_sigma: '3' - # type=float|default=0.0: regularization_deformation_field_sigma: '0' # type=float|default=0.0: - number_of_affine_iterations: '[10000,10000,10000,10000,10000]' - # type=list|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/ants_introduction.yaml b/example-specs/interface/nipype/ants/ants_introduction.yaml index 04e3516d..8ee02305 100644 --- a/example-specs/interface/nipype/ants/ants_introduction.yaml +++ b/example-specs/interface/nipype/ants/ants_introduction.yaml @@ -7,18 +7,18 @@ # ---- # Uses ANTS to generate matrices to warp data from one space to another. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants.legacy import antsIntroduction +# >>> warp = antsIntroduction() +# >>> warp.inputs.reference_image = 'Template_6.nii' +# >>> warp.inputs.input_image = 'structural.nii' +# >>> warp.inputs.max_iterations = [30,90,20] +# >>> warp.cmdline +# 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' # -# >>> from nipype.interfaces.ants.legacy import antsIntroduction -# >>> warp = antsIntroduction() -# >>> warp.inputs.reference_image = 'Template_6.nii' -# >>> warp.inputs.input_image = 'structural.nii' -# >>> warp.inputs.max_iterations = [30,90,20] -# >>> warp.cmdline -# 'antsIntroduction.sh -d 3 -i structural.nii -m 30x90x20 -o ants_ -r Template_6.nii -t GR' # -# task_name: antsIntroduction nipype_name: antsIntroduction nipype_module: nipype.interfaces.ants.legacy @@ -33,7 +33,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- input_image: medimage/nifti1 + input_image: generic/file # type=file|default=: input image to warp to template reference_image: medimage/nifti1 # type=file|default=: template file to warp to @@ -67,7 +67,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +103,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,12 +122,10 @@ tests: # (if not specified, will try to choose a sensible value) reference_image: # type=file|default=: template file to warp to - input_image: - # type=file|default=: input image to warp to template max_iterations: '[30,90,20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected 
outputs, noting that tests will typically @@ -150,12 +148,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. reference_image: '"Template_6.nii"' # type=file|default=: template file to warp to - input_image: '"structural.nii"' - # type=file|default=: input image to warp to template max_iterations: '[30,90,20]' # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/apply_transforms.yaml b/example-specs/interface/nipype/ants/apply_transforms.yaml index 58e0dba8..d3a574b8 100644 --- a/example-specs/interface/nipype/ants/apply_transforms.yaml +++ b/example-specs/interface/nipype/ants/apply_transforms.yaml @@ -6,58 +6,58 @@ # Docs # ---- # ApplyTransforms, applied to an input image, transforms it according to a -# reference image and a transform (or a set of transforms). +# reference image and a transform (or a set of transforms). 
# -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import ApplyTransforms -# >>> at = ApplyTransforms() -# >>> at.inputs.input_image = 'moving1.nii' -# >>> at.inputs.reference_image = 'fixed1.nii' -# >>> at.inputs.transforms = 'identity' -# >>> at.cmdline -# 'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii --interpolation Linear --output moving1_trans.nii --reference-image fixed1.nii --transform identity' +# >>> from nipype.interfaces.ants import ApplyTransforms +# >>> at = ApplyTransforms() +# >>> at.inputs.input_image = 'moving1.nii' +# >>> at.inputs.reference_image = 'fixed1.nii' +# >>> at.inputs.transforms = 'identity' +# >>> at.cmdline +# 'antsApplyTransforms --default-value 0 --float 0 --input moving1.nii --interpolation Linear --output moving1_trans.nii --reference-image fixed1.nii --transform identity' # -# >>> at = ApplyTransforms() -# >>> at.inputs.dimension = 3 -# >>> at.inputs.input_image = 'moving1.nii' -# >>> at.inputs.reference_image = 'fixed1.nii' -# >>> at.inputs.output_image = 'deformed_moving1.nii' -# >>> at.inputs.interpolation = 'Linear' -# >>> at.inputs.default_value = 0 -# >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] -# >>> at.inputs.invert_transform_flags = [False, True] -# >>> at.cmdline -# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform [ trans.mat, 1 ]' +# >>> at = ApplyTransforms() +# >>> at.inputs.dimension = 3 +# >>> at.inputs.input_image = 'moving1.nii' +# >>> at.inputs.reference_image = 'fixed1.nii' +# >>> at.inputs.output_image = 'deformed_moving1.nii' +# >>> at.inputs.interpolation = 'Linear' +# >>> at.inputs.default_value = 0 +# >>> at.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] +# >>> at.inputs.invert_transform_flags = [False, True] +# >>> at.cmdline +# 'antsApplyTransforms --default-value 
0 --dimensionality 3 --float 0 --input moving1.nii --interpolation Linear --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform [ trans.mat, 1 ]' # -# >>> at1 = ApplyTransforms() -# >>> at1.inputs.dimension = 3 -# >>> at1.inputs.input_image = 'moving1.nii' -# >>> at1.inputs.reference_image = 'fixed1.nii' -# >>> at1.inputs.output_image = 'deformed_moving1.nii' -# >>> at1.inputs.interpolation = 'BSpline' -# >>> at1.inputs.interpolation_parameters = (5,) -# >>> at1.inputs.default_value = 0 -# >>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] -# >>> at1.inputs.invert_transform_flags = [False, False] -# >>> at1.cmdline -# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform trans.mat' +# >>> at1 = ApplyTransforms() +# >>> at1.inputs.dimension = 3 +# >>> at1.inputs.input_image = 'moving1.nii' +# >>> at1.inputs.reference_image = 'fixed1.nii' +# >>> at1.inputs.output_image = 'deformed_moving1.nii' +# >>> at1.inputs.interpolation = 'BSpline' +# >>> at1.inputs.interpolation_parameters = (5,) +# >>> at1.inputs.default_value = 0 +# >>> at1.inputs.transforms = ['ants_Warp.nii.gz', 'trans.mat'] +# >>> at1.inputs.invert_transform_flags = [False, False] +# >>> at1.cmdline +# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform ants_Warp.nii.gz --transform trans.mat' # -# Identity transforms may be used as part of a chain: +# Identity transforms may be used as part of a chain: +# +# >>> at2 = ApplyTransforms() +# >>> at2.inputs.dimension = 3 +# >>> at2.inputs.input_image = 'moving1.nii' +# >>> at2.inputs.reference_image = 'fixed1.nii' +# >>> at2.inputs.output_image = 'deformed_moving1.nii' +# >>> at2.inputs.interpolation = 
'BSpline' +# >>> at2.inputs.interpolation_parameters = (5,) +# >>> at2.inputs.default_value = 0 +# >>> at2.inputs.transforms = ['identity', 'ants_Warp.nii.gz', 'trans.mat'] +# >>> at2.cmdline +# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform identity --transform ants_Warp.nii.gz --transform trans.mat' # -# >>> at2 = ApplyTransforms() -# >>> at2.inputs.dimension = 3 -# >>> at2.inputs.input_image = 'moving1.nii' -# >>> at2.inputs.reference_image = 'fixed1.nii' -# >>> at2.inputs.output_image = 'deformed_moving1.nii' -# >>> at2.inputs.interpolation = 'BSpline' -# >>> at2.inputs.interpolation_parameters = (5,) -# >>> at2.inputs.default_value = 0 -# >>> at2.inputs.transforms = ['identity', 'ants_Warp.nii.gz', 'trans.mat'] -# >>> at2.cmdline -# 'antsApplyTransforms --default-value 0 --dimensionality 3 --float 0 --input moving1.nii --interpolation BSpline[ 5 ] --output deformed_moving1.nii --reference-image fixed1.nii --transform identity --transform ants_Warp.nii.gz --transform trans.mat' -# task_name: ApplyTransforms nipype_name: ApplyTransforms nipype_module: nipype.interfaces.ants.resampling @@ -92,15 +92,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- output_image: medimage/nifti1 + output_image: generic/file # type=file: Warped image # type=str|default='': output file name callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - output_image: '"deformed_moving1.nii"' + # dict[str, str] - `path_template` values to be provided to output fields + output_image: output_image # type=file: Warped image # type=str|default='': output file name requirements: @@ -123,7 +123,7 @@ tests: reference_image: # type=file|default=: reference image space that you wish to warp INTO interpolation: - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: interpolation_parameters: # type=traitcompound|default=None: transforms: @@ -143,7 +143,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -162,12 +162,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: - # type=file|default=: reference image space that you wish to warp INTO transforms: 
'"identity"' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -186,23 +184,14 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_image: - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"Linear"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - default_value: '0' - # type=float|default=0.0: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
- invert_transform_flags: '[False, True]' - # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -221,25 +210,16 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_image: - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' # type=float|default=0.0: - transforms: '["ants_Warp.nii.gz", "trans.mat"]' - # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
invert_transform_flags: '[False, False]' # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -258,23 +238,14 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_image: - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' # type=float|default=0.0: - transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]' - # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -297,12 +268,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"moving1.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: '"fixed1.nii"' - # type=file|default=: reference image space that you wish to warp INTO transforms: '"identity"' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -314,23 +283,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. 
- input_image: '"moving1.nii"' - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"Linear"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - default_value: '0' - # type=float|default=0.0: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: transforms: '["ants_Warp.nii.gz", "trans.mat"]' # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. - invert_transform_flags: '[False, True]' - # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -342,25 +302,16 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. 
- input_image: '"moving1.nii"' - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' # type=float|default=0.0: - transforms: '["ants_Warp.nii.gz", "trans.mat"]' - # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. invert_transform_flags: '[False, False]' # type=inputmultiobject|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -372,23 +323,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. 
- input_image: '"moving1.nii"' - # type=file|default=: image to apply transformation to (generally a coregistered functional) reference_image: '"fixed1.nii"' # type=file|default=: reference image space that you wish to warp INTO - output_image: '"deformed_moving1.nii"' - # type=file: Warped image - # type=str|default='': output file name interpolation: '"BSpline"' - # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (5,) - # type=traitcompound|default=None: + # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: default_value: '0' # type=float|default=0.0: - transforms: '["identity", "ants_Warp.nii.gz", "trans.mat"]' - # type=inputmultiobject|default=[]: transform files: will be applied in reverse order. For example, the last specified transform will be applied first. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/apply_transforms_to_points.yaml b/example-specs/interface/nipype/ants/apply_transforms_to_points.yaml index 1c2fc6a7..c03d2074 100644 --- a/example-specs/interface/nipype/ants/apply_transforms_to_points.yaml +++ b/example-specs/interface/nipype/ants/apply_transforms_to_points.yaml @@ -6,22 +6,22 @@ # Docs # ---- # ApplyTransformsToPoints, applied to an CSV file, transforms coordinates -# using provided transform (or a set of transforms). 
+# using provided transform (or a set of transforms). # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants import ApplyTransforms +# >>> at = ApplyTransformsToPoints() +# >>> at.inputs.dimension = 3 +# >>> at.inputs.input_file = 'moving.csv' +# >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz'] +# >>> at.inputs.invert_transform_flags = [False, False] +# >>> at.cmdline +# 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]' # -# >>> from nipype.interfaces.ants import ApplyTransforms -# >>> at = ApplyTransformsToPoints() -# >>> at.inputs.dimension = 3 -# >>> at.inputs.input_file = 'moving.csv' -# >>> at.inputs.transforms = ['trans.mat', 'ants_Warp.nii.gz'] -# >>> at.inputs.invert_transform_flags = [False, False] -# >>> at.cmdline -# 'antsApplyTransformsToPoints --dimensionality 3 --input moving.csv --output moving_transformed.csv --transform [ trans.mat, 0 ] --transform [ ants_Warp.nii.gz, 0 ]' # # -# task_name: ApplyTransformsToPoints nipype_name: ApplyTransformsToPoints nipype_module: nipype.interfaces.ants.resampling @@ -36,7 +36,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - input_file: text/csv + input_file: generic/file # type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. 
Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting. transforms: datascience/text-matrix+list-of # type=list|default=[]: transforms that will be applied to the points @@ -63,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -88,7 +88,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -107,14 +107,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_file: - # type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. 
Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting. transforms: # type=list|default=[]: transforms that will be applied to the points - invert_transform_flags: '[False, False]' - # type=list|default=[]: list indicating if a transform should be reversed imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -137,14 +133,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, antsWarp tries to infer the dimensionality from the input image. - input_file: '"moving.csv"' - # type=file|default=: Currently, the only input supported is a csv file with columns including x,y (2D), x,y,z (3D) or x,y,z,t,label (4D) column headers. The points should be defined in physical space. If in doubt how to convert coordinates from your files to the space required by antsApplyTransformsToPoints try creating/drawing a simple label volume with only one voxel set to 1 and all others set to 0. Write down the voxel coordinates. Then use ImageMaths LabelStats to find out what coordinates for this voxel antsApplyTransformsToPoints is expecting. 
transforms: '["trans.mat", "ants_Warp.nii.gz"]' # type=list|default=[]: transforms that will be applied to the points - invert_transform_flags: '[False, False]' - # type=list|default=[]: list indicating if a transform should be reversed imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/atropos.yaml b/example-specs/interface/nipype/ants/atropos.yaml index 386895a8..04e49fdb 100644 --- a/example-specs/interface/nipype/ants/atropos.yaml +++ b/example-specs/interface/nipype/ants/atropos.yaml @@ -6,82 +6,82 @@ # Docs # ---- # -# A multivariate n-class segmentation algorithm. +# A multivariate n-class segmentation algorithm. # -# A finite mixture modeling (FMM) segmentation approach with possibilities for -# specifying prior constraints. These prior constraints include the specification -# of a prior label image, prior probability images (one for each class), and/or an -# MRF prior to enforce spatial smoothing of the labels. Similar algorithms include -# FAST and SPM. +# A finite mixture modeling (FMM) segmentation approach with possibilities for +# specifying prior constraints. These prior constraints include the specification +# of a prior label image, prior probability images (one for each class), and/or an +# MRF prior to enforce spatial smoothing of the labels. Similar algorithms include +# FAST and SPM. # -# Examples -# -------- -# >>> from nipype.interfaces.ants import Atropos -# >>> at = Atropos( -# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', -# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, -# ... 
mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, -# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, -# ... posterior_formulation='Socrates', use_mixture_model_proportions=True) -# >>> at.inputs.initialization = 'Random' -# >>> at.cmdline -# 'Atropos --image-dimensionality 3 --icm [1,1] -# --initialization Random[2] --intensity-image structural.nii -# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] -# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] -# --use-random-seed 1' +# Examples +# -------- +# >>> from nipype.interfaces.ants import Atropos +# >>> at = Atropos( +# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', +# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, +# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, +# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, +# ... posterior_formulation='Socrates', use_mixture_model_proportions=True) +# >>> at.inputs.initialization = 'Random' +# >>> at.cmdline +# 'Atropos --image-dimensionality 3 --icm [1,1] +# --initialization Random[2] --intensity-image structural.nii +# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] +# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] +# --use-random-seed 1' # -# >>> at = Atropos( -# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', -# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, -# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, -# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, -# ... 
posterior_formulation='Socrates', use_mixture_model_proportions=True) -# >>> at.inputs.initialization = 'KMeans' -# >>> at.inputs.kmeans_init_centers = [100, 200] -# >>> at.cmdline -# 'Atropos --image-dimensionality 3 --icm [1,1] -# --initialization KMeans[2,100,200] --intensity-image structural.nii -# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] -# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] -# --use-random-seed 1' +# >>> at = Atropos( +# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', +# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, +# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, +# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, +# ... posterior_formulation='Socrates', use_mixture_model_proportions=True) +# >>> at.inputs.initialization = 'KMeans' +# >>> at.inputs.kmeans_init_centers = [100, 200] +# >>> at.cmdline +# 'Atropos --image-dimensionality 3 --icm [1,1] +# --initialization KMeans[2,100,200] --intensity-image structural.nii +# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] +# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] +# --use-random-seed 1' # -# >>> at = Atropos( -# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', -# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, -# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, -# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, -# ... 
posterior_formulation='Socrates', use_mixture_model_proportions=True) -# >>> at.inputs.initialization = 'PriorProbabilityImages' -# >>> at.inputs.prior_image = 'BrainSegmentationPrior%02d.nii.gz' -# >>> at.inputs.prior_weighting = 0.8 -# >>> at.inputs.prior_probability_threshold = 0.0000001 -# >>> at.cmdline -# 'Atropos --image-dimensionality 3 --icm [1,1] -# --initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07] -# --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii -# --mrf [0.2,1x1x1] --convergence [5,1e-06] -# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] -# --posterior-formulation Socrates[1] --use-random-seed 1' +# >>> at = Atropos( +# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', +# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, +# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, +# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, +# ... posterior_formulation='Socrates', use_mixture_model_proportions=True) +# >>> at.inputs.initialization = 'PriorProbabilityImages' +# >>> at.inputs.prior_image = 'BrainSegmentationPrior%02d.nii.gz' +# >>> at.inputs.prior_weighting = 0.8 +# >>> at.inputs.prior_probability_threshold = 0.0000001 +# >>> at.cmdline +# 'Atropos --image-dimensionality 3 --icm [1,1] +# --initialization PriorProbabilityImages[2,BrainSegmentationPrior%02d.nii.gz,0.8,1e-07] +# --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii +# --mrf [0.2,1x1x1] --convergence [5,1e-06] +# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] +# --posterior-formulation Socrates[1] --use-random-seed 1' +# +# >>> at = Atropos( +# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', +# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, +# ... 
mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, +# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, +# ... posterior_formulation='Socrates', use_mixture_model_proportions=True) +# >>> at.inputs.initialization = 'PriorLabelImage' +# >>> at.inputs.prior_image = 'segmentation0.nii.gz' +# >>> at.inputs.number_of_tissue_classes = 2 +# >>> at.inputs.prior_weighting = 0.8 +# >>> at.cmdline +# 'Atropos --image-dimensionality 3 --icm [1,1] +# --initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii +# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] +# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] +# --use-random-seed 1' # -# >>> at = Atropos( -# ... dimension=3, intensity_images='structural.nii', mask_image='mask.nii', -# ... number_of_tissue_classes=2, likelihood_model='Gaussian', save_posteriors=True, -# ... mrf_smoothing_factor=0.2, mrf_radius=[1, 1, 1], icm_use_synchronous_update=True, -# ... maximum_number_of_icm_terations=1, n_iterations=5, convergence_threshold=0.000001, -# ... 
posterior_formulation='Socrates', use_mixture_model_proportions=True) -# >>> at.inputs.initialization = 'PriorLabelImage' -# >>> at.inputs.prior_image = 'segmentation0.nii.gz' -# >>> at.inputs.number_of_tissue_classes = 2 -# >>> at.inputs.prior_weighting = 0.8 -# >>> at.cmdline -# 'Atropos --image-dimensionality 3 --icm [1,1] -# --initialization PriorLabelImage[2,segmentation0.nii.gz,0.8] --intensity-image structural.nii -# --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] -# --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] -# --use-random-seed 1' # -# task_name: Atropos nipype_name: Atropos nipype_module: nipype.interfaces.ants.segmentation @@ -100,8 +100,6 @@ inputs: # type=inputmultiobject|default=[]: mask_image: medimage/nifti1 # type=file|default=: - out_classified_image_name: Path - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -128,7 +126,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -186,7 +184,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for 
selected outputs, noting that tests will typically @@ -234,7 +232,7 @@ tests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -253,8 +251,6 @@ tests: # (if not specified, will try to choose a sensible value) initialization: '"KMeans"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - kmeans_init_centers: '[100, 200]' - # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: @@ -284,7 +280,7 @@ tests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -303,12 +299,8 @@ tests: # (if not specified, will try to choose a sensible value) initialization: '"PriorProbabilityImages"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - prior_image: '"BrainSegmentationPrior%02d.nii.gz"' - # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
prior_weighting: '0.8' # type=float|default=0.0: - prior_probability_threshold: '0.0000001' - # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: @@ -338,7 +330,7 @@ tests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -357,12 +349,8 @@ tests: # (if not specified, will try to choose a sensible value) initialization: '"PriorLabelImage"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - prior_image: '"segmentation0.nii.gz"' - # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. number_of_tissue_classes: '2' # type=int|default=0: - prior_weighting: '0.8' - # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: @@ -390,7 +378,7 @@ tests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -405,7 +393,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: Atropos --image-dimensionality 3 --icm [1,1] --initialization Random[2] --intensity-image structural.nii --likelihood-model Gaussian --mask-image mask.nii --mrf [0.2,1x1x1] --convergence [5,1e-06] --output [structural_labeled.nii,POSTERIOR_%02d.nii.gz] --posterior-formulation Socrates[1] --use-random-seed 1 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -442,7 +430,7 @@ doctests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -454,8 +442,6 @@ doctests: # '.mock()' method of the corresponding class is used instead. initialization: '"KMeans"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - kmeans_init_centers: '[100, 200]' - # type=list|default=[]: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: '"structural.nii"' @@ -485,7 +471,7 @@ doctests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -497,12 +483,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. initialization: '"PriorProbabilityImages"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - prior_image: '"BrainSegmentationPrior%02d.nii.gz"' - # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. prior_weighting: '0.8' # type=float|default=0.0: - prior_probability_threshold: '0.0000001' - # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: '"structural.nii"' @@ -532,7 +514,7 @@ doctests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -544,12 +526,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. initialization: '"PriorLabelImage"' # type=enum|default='Random'|allowed['KMeans','Otsu','PriorLabelImage','PriorProbabilityImages','Random']: - prior_image: '"segmentation0.nii.gz"' - # type=traitcompound|default=None: either a string pattern (e.g., 'prior%02d.nii') or an existing vector-image file. 
number_of_tissue_classes: '2' # type=int|default=0: - prior_weighting: '0.8' - # type=float|default=0.0: dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3, or 4) intensity_images: '"structural.nii"' @@ -577,7 +555,7 @@ doctests: use_mixture_model_proportions: 'True' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/average_affine_transform.yaml b/example-specs/interface/nipype/ants/average_affine_transform.yaml index e4ea3876..53a3e18d 100644 --- a/example-specs/interface/nipype/ants/average_affine_transform.yaml +++ b/example-specs/interface/nipype/ants/average_affine_transform.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# Examples -# -------- -# >>> from nipype.interfaces.ants import AverageAffineTransform -# >>> avg = AverageAffineTransform() -# >>> avg.inputs.dimension = 3 -# >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat'] -# >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' -# >>> avg.cmdline -# 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' +# Examples +# -------- +# >>> from nipype.interfaces.ants import AverageAffineTransform +# >>> avg = AverageAffineTransform() +# >>> avg.inputs.dimension = 3 +# >>> avg.inputs.transforms = ['trans.mat', 'func_to_struct.mat'] +# >>> avg.inputs.output_affine_transform = 'MYtemplatewarp.mat' +# >>> avg.cmdline +# 'AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat' +# # -# task_name: AverageAffineTransform nipype_name: AverageAffineTransform nipype_module: nipype.interfaces.ants.utils @@ -31,9 +31,7 @@ 
inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_affine_transform: Path - # type=file|default=: Outputfname.txt: the name of the resulting transform. - transforms: datascience/text-matrix+list-of + transforms: generic/file+list-of # type=inputmultiobject|default=[]: transforms to average callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -57,7 +55,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -77,7 +75,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -96,12 +94,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: - # type=inputmultiobject|default=[]: transforms to average output_affine_transform: '"MYtemplatewarp.mat"' # type=file|default=: Outputfname.txt: the name of the resulting transform. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -116,7 +112,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: AverageAffineTransform 3 MYtemplatewarp.mat trans.mat func_to_struct.mat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -124,12 +120,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: '["trans.mat", "func_to_struct.mat"]' - # type=inputmultiobject|default=[]: transforms to average output_affine_transform: '"MYtemplatewarp.mat"' # type=file|default=: Outputfname.txt: the name of the resulting transform. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/average_images.yaml b/example-specs/interface/nipype/ants/average_images.yaml index 10296843..37de2d8b 100644 --- a/example-specs/interface/nipype/ants/average_images.yaml +++ b/example-specs/interface/nipype/ants/average_images.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# Examples -# -------- -# >>> from nipype.interfaces.ants import AverageImages -# >>> avg = AverageImages() -# >>> avg.inputs.dimension = 3 -# >>> avg.inputs.output_average_image = "average.nii.gz" -# >>> avg.inputs.normalize = True -# >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] -# >>> avg.cmdline -# 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' -# +# Examples +# -------- +# >>> from nipype.interfaces.ants import AverageImages +# >>> avg = AverageImages() +# >>> avg.inputs.dimension = 3 +# >>> avg.inputs.output_average_image = "average.nii.gz" +# >>> avg.inputs.normalize = True +# >>> avg.inputs.images = ['rc1s1.nii', 'rc1s1.nii'] +# >>> avg.cmdline +# 'AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii' +# task_name: AverageImages nipype_name: AverageImages nipype_module: nipype.interfaces.ants.utils @@ -31,11 +31,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - images: medimage/nifti1+list-of + images: generic/file+list-of # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) - output_average_image: Path - # type=file: average image file - # type=file|default='average.nii': the name of the resulting image. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -52,14 +49,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_average_image: medimage/nifti-gz + output_average_image: generic/file # type=file: average image file # type=file|default='average.nii': the name of the resulting image. callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -82,7 +79,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -101,15 +98,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_average_image: '"average.nii.gz"' - # type=file: average image file - # type=file|default='average.nii': the name of the resulting image. normalize: 'True' # type=bool|default=False: Normalize: if true, the 2nd image is divided by its mean. 
This will select the largest image to average into. - images: - # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +116,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: AverageImages 3 average.nii.gz 1 rc1s1.nii rc1s1.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -132,15 +124,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - output_average_image: '"average.nii.gz"' - # type=file: average image file - # type=file|default='average.nii': the name of the resulting image. normalize: 'True' # type=bool|default=False: Normalize: if true, the 2nd image is divided by its mean. This will select the largest image to average into. - images: '["rc1s1.nii", "rc1s1.nii"]' - # type=inputmultiobject|default=[]: image to apply transformation to (generally a coregistered functional) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/brain_extraction.yaml b/example-specs/interface/nipype/ants/brain_extraction.yaml index 7edbfaa6..da16d776 100644 --- a/example-specs/interface/nipype/ants/brain_extraction.yaml +++ b/example-specs/interface/nipype/ants/brain_extraction.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# Atlas-based brain extraction. +# Atlas-based brain extraction. +# +# Examples +# -------- +# >>> from nipype.interfaces.ants.segmentation import BrainExtraction +# >>> brainextraction = BrainExtraction() +# >>> brainextraction.inputs.dimension = 3 +# >>> brainextraction.inputs.anatomical_image ='T1.nii.gz' +# >>> brainextraction.inputs.brain_template = 'study_template.nii.gz' +# >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' +# >>> brainextraction.cmdline +# 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz +# -e study_template.nii.gz -d 3 -s nii.gz -o highres001_' # -# Examples -# -------- -# >>> from nipype.interfaces.ants.segmentation import BrainExtraction -# >>> brainextraction = BrainExtraction() -# >>> brainextraction.inputs.dimension = 3 -# >>> brainextraction.inputs.anatomical_image ='T1.nii.gz' -# >>> brainextraction.inputs.brain_template = 'study_template.nii.gz' -# >>> brainextraction.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' -# >>> brainextraction.cmdline -# 'antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -# -e study_template.nii.gz -d 3 -s nii.gz -o highres001_' # -# task_name: BrainExtraction nipype_name: BrainExtraction nipype_module: nipype.interfaces.ants.segmentation @@ -35,9 +35,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- anatomical_image: medimage/nifti-gz + anatomical_image: generic/file # type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_probability_mask: medimage/nifti-gz + brain_probability_mask: generic/file # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image. brain_template: medimage/nifti-gz # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. @@ -99,7 +99,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -135,7 +135,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -154,14 +154,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # 
type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: - # type=file|default=: Structural image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. brain_template: # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_probability_mask: - # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -176,7 +172,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: antsBrainExtraction.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 -s nii.gz -o highres001_ +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -184,14 +180,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: '"T1.nii.gz"' - # type=file|default=: Structural image, typically T1. 
If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. brain_template: '"study_template.nii.gz"' # type=file|default=: Anatomical template created using e.g. LPBA40 data set with buildtemplateparallel.sh in ANTs. - brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"' - # type=file|default=: Brain probability mask created using e.g. LPBA40 data set which have brain masks defined, and warped to anatomical template and averaged resulting in a probability image. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/buildtemplateparallel.yaml b/example-specs/interface/nipype/ants/buildtemplateparallel.yaml index 95b1749b..5322e10a 100644 --- a/example-specs/interface/nipype/ants/buildtemplateparallel.yaml +++ b/example-specs/interface/nipype/ants/buildtemplateparallel.yaml @@ -7,21 +7,21 @@ # ---- # Generate a optimal average template # -# .. warning:: +# .. 
warning:: # -# This can take a VERY long time to complete +# This can take a VERY long time to complete # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants.legacy import buildtemplateparallel +# >>> tmpl = buildtemplateparallel() +# >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] +# >>> tmpl.inputs.max_iterations = [30, 90, 20] +# >>> tmpl.cmdline +# 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' # -# >>> from nipype.interfaces.ants.legacy import buildtemplateparallel -# >>> tmpl = buildtemplateparallel() -# >>> tmpl.inputs.in_files = ['T1.nii', 'structural.nii'] -# >>> tmpl.inputs.max_iterations = [30, 90, 20] -# >>> tmpl.cmdline -# 'buildtemplateparallel.sh -d 3 -i 4 -m 30x90x20 -o antsTMPL_ -c 0 -t GR T1.nii structural.nii' # -# task_name: buildtemplateparallel nipype_name: buildtemplateparallel nipype_module: nipype.interfaces.ants.legacy @@ -64,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -104,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,10 +123,8 @@ tests: # (if not specified, will try to choose a sensible 
value) in_files: # type=list|default=[]: list of images to generate template from - max_iterations: '[30, 90, 20]' - # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -149,10 +147,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["T1.nii", "structural.nii"]' # type=list|default=[]: list of images to generate template from - max_iterations: '[30, 90, 20]' - # type=list|default=[]: maximum number of iterations (must be list of integers in the form [J,K,L...]: J = coarsest resolution iterations, K = middle resolution iterations, L = fine resolution iterations imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/compose_multi_transform.yaml b/example-specs/interface/nipype/ants/compose_multi_transform.yaml index eedf479e..130d88b9 100644 --- a/example-specs/interface/nipype/ants/compose_multi_transform.yaml +++ b/example-specs/interface/nipype/ants/compose_multi_transform.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# Take a set of transformations and convert them to a single transformation matrix/warpfield. +# Take a set of transformations and convert them to a single transformation matrix/warpfield. +# +# Examples +# -------- +# >>> from nipype.interfaces.ants import ComposeMultiTransform +# >>> compose_transform = ComposeMultiTransform() +# >>> compose_transform.inputs.dimension = 3 +# >>> compose_transform.inputs.transforms = ['struct_to_template.mat', 'func_to_struct.mat'] +# >>> compose_transform.cmdline +# 'ComposeMultiTransform 3 struct_to_template_composed.mat +# struct_to_template.mat func_to_struct.mat' # -# Examples -# -------- -# >>> from nipype.interfaces.ants import ComposeMultiTransform -# >>> compose_transform = ComposeMultiTransform() -# >>> compose_transform.inputs.dimension = 3 -# >>> compose_transform.inputs.transforms = ['struct_to_template.mat', 'func_to_struct.mat'] -# >>> compose_transform.cmdline -# 'ComposeMultiTransform 3 struct_to_template_composed.mat -# struct_to_template.mat func_to_struct.mat' # -# task_name: ComposeMultiTransform nipype_name: ComposeMultiTransform nipype_module: nipype.interfaces.ants.utils @@ -33,12 +33,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_transform: Path - # type=file: Composed transform file - # type=file|default=: the name of the resulting transform. 
reference_image: generic/file # type=file|default=: Reference image (only necessary when output is warpfield) - transforms: datascience/text-matrix+list-of + transforms: generic/file+list-of # type=inputmultiobject|default=[]: transforms to average callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -63,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +83,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -105,10 +102,8 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: - # type=inputmultiobject|default=[]: transforms to average imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,7 
+118,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: ComposeMultiTransform 3 struct_to_template_composed.mat struct_to_template.mat func_to_struct.mat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -131,10 +126,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - transforms: '["struct_to_template.mat", "func_to_struct.mat"]' - # type=inputmultiobject|default=[]: transforms to average imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/composite_transform_util.yaml b/example-specs/interface/nipype/ants/composite_transform_util.yaml index 9a108932..9d41f8ef 100644 --- a/example-specs/interface/nipype/ants/composite_transform_util.yaml +++ b/example-specs/interface/nipype/ants/composite_transform_util.yaml @@ -6,31 +6,31 @@ # Docs # ---- # -# ANTs utility which can combine or break apart transform files into their individual -# constituent components. +# ANTs utility which can combine or break apart transform files into their individual +# constituent components. 
# -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import CompositeTransformUtil -# >>> tran = CompositeTransformUtil() -# >>> tran.inputs.process = 'disassemble' -# >>> tran.inputs.in_file = 'output_Composite.h5' -# >>> tran.cmdline -# 'CompositeTransformUtil --disassemble output_Composite.h5 transform' -# >>> tran.run() # doctest: +SKIP +# >>> from nipype.interfaces.ants import CompositeTransformUtil +# >>> tran = CompositeTransformUtil() +# >>> tran.inputs.process = 'disassemble' +# >>> tran.inputs.in_file = 'output_Composite.h5' +# >>> tran.cmdline +# 'CompositeTransformUtil --disassemble output_Composite.h5 transform' +# >>> tran.run() # doctest: +SKIP # -# example for assembling transformation files +# example for assembling transformation files +# +# >>> from nipype.interfaces.ants import CompositeTransformUtil +# >>> tran = CompositeTransformUtil() +# >>> tran.inputs.process = 'assemble' +# >>> tran.inputs.out_file = 'my.h5' +# >>> tran.inputs.in_file = ['AffineTransform.mat', 'DisplacementFieldTransform.nii.gz'] +# >>> tran.cmdline +# 'CompositeTransformUtil --assemble my.h5 AffineTransform.mat DisplacementFieldTransform.nii.gz ' +# >>> tran.run() # doctest: +SKIP # -# >>> from nipype.interfaces.ants import CompositeTransformUtil -# >>> tran = CompositeTransformUtil() -# >>> tran.inputs.process = 'assemble' -# >>> tran.inputs.out_file = 'my.h5' -# >>> tran.inputs.in_file = ['AffineTransform.mat', 'DisplacementFieldTransform.nii.gz'] -# >>> tran.cmdline -# 'CompositeTransformUtil --assemble my.h5 AffineTransform.mat DisplacementFieldTransform.nii.gz ' -# >>> tran.run() # doctest: +SKIP -# task_name: CompositeTransformUtil nipype_name: CompositeTransformUtil nipype_module: nipype.interfaces.ants.registration @@ -45,11 +45,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the 
automatically generated unittests. - in_file: '[datascience/text-matrix,datascience/hdf5]+list-of' + in_file: datascience/text-matrix+list-of # type=inputmultiobject|default=[]: Input transform file(s) - out_file: Path - # type=file: Compound transformation file - # type=file|default=: Output file path (only used for disassembly). callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -70,14 +67,14 @@ outputs: # type=file: Affine transform component displacement_field: generic/file # type=file: Displacement field component - out_file: datascience/hdf5 + out_file: generic/file # type=file: Compound transformation file # type=file|default=: Output file path (only used for disassembly). callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -100,7 +97,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,10 +116,8 @@ tests: # (if not specified, will try to choose a sensible value) process: '"disassemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) 
- in_file: - # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -141,13 +136,10 @@ tests: # (if not specified, will try to choose a sensible value) process: '"assemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - out_file: '"my.h5"' - # type=file: Compound transformation file - # type=file|default=: Output file path (only used for disassembly). in_file: # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -170,10 +162,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
process: '"disassemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - in_file: '"output_Composite.h5"' - # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -185,13 +175,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. process: '"assemble"' # type=enum|default='assemble'|allowed['assemble','disassemble']: What to do with the transform inputs (assemble or disassemble) - out_file: '"my.h5"' - # type=file: Compound transformation file - # type=file|default=: Output file path (only used for disassembly). in_file: '["AffineTransform.mat", "DisplacementFieldTransform.nii.gz"]' # type=inputmultiobject|default=[]: Input transform file(s) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/convert_scalar_image_to_rgb.yaml b/example-specs/interface/nipype/ants/convert_scalar_image_to_rgb.yaml index 8bfd51d0..26ff4d1a 100644 --- a/example-specs/interface/nipype/ants/convert_scalar_image_to_rgb.yaml +++ b/example-specs/interface/nipype/ants/convert_scalar_image_to_rgb.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# Convert scalar images to RGB. 
+# Convert scalar images to RGB. +# +# Examples +# -------- +# >>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB +# >>> converter = ConvertScalarImageToRGB() +# >>> converter.inputs.dimension = 3 +# >>> converter.inputs.input_image = 'T1.nii.gz' +# >>> converter.inputs.colormap = 'jet' +# >>> converter.inputs.minimum_input = 0 +# >>> converter.inputs.maximum_input = 6 +# >>> converter.cmdline +# 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' # -# Examples -# -------- -# >>> from nipype.interfaces.ants.visualization import ConvertScalarImageToRGB -# >>> converter = ConvertScalarImageToRGB() -# >>> converter.inputs.dimension = 3 -# >>> converter.inputs.input_image = 'T1.nii.gz' -# >>> converter.inputs.colormap = 'jet' -# >>> converter.inputs.minimum_input = 0 -# >>> converter.inputs.maximum_input = 6 -# >>> converter.cmdline -# 'ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255' # -# task_name: ConvertScalarImageToRGB nipype_name: ConvertScalarImageToRGB nipype_module: nipype.interfaces.ants.visualization @@ -35,7 +35,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - input_image: medimage/nifti-gz + input_image: generic/file # type=file|default=: Main input is a 3-D grayscale image. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -60,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +95,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,16 +114,12 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - input_image: - # type=file|default=: Main input is a 3-D grayscale image. 
colormap: '"jet"' # type=enum|default='grey'|allowed['autumn','blue','cool','copper','custom','green','grey','hot','hsv','jet','overunder','red','spring','summer','winter']: Select a colormap - minimum_input: '0' - # type=int|default=0: minimum input maximum_input: '6' # type=int|default=0: maximum input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +134,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: ConvertScalarImageToRGB 3 T1.nii.gz rgb.nii.gz none jet none 0 6 0 255 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -146,16 +142,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - input_image: '"T1.nii.gz"' - # type=file|default=: Main input is a 3-D grayscale image. 
colormap: '"jet"' # type=enum|default='grey'|allowed['autumn','blue','cool','copper','custom','green','grey','hot','hsv','jet','overunder','red','spring','summer','winter']: Select a colormap - minimum_input: '0' - # type=int|default=0: minimum input maximum_input: '6' # type=int|default=0: maximum input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/cortical_thickness.yaml b/example-specs/interface/nipype/ants/cortical_thickness.yaml index 498b7dea..9d90e674 100644 --- a/example-specs/interface/nipype/ants/cortical_thickness.yaml +++ b/example-specs/interface/nipype/ants/cortical_thickness.yaml @@ -6,25 +6,25 @@ # Docs # ---- # -# Examples -# -------- -# >>> from nipype.interfaces.ants.segmentation import CorticalThickness -# >>> corticalthickness = CorticalThickness() -# >>> corticalthickness.inputs.dimension = 3 -# >>> corticalthickness.inputs.anatomical_image ='T1.nii.gz' -# >>> corticalthickness.inputs.brain_template = 'study_template.nii.gz' -# >>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' -# >>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz', -# ... 'BrainSegmentationPrior02.nii.gz', -# ... 'BrainSegmentationPrior03.nii.gz', -# ... 
'BrainSegmentationPrior04.nii.gz'] -# >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz' -# >>> corticalthickness.cmdline -# 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -# -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_ -# -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' +# Examples +# -------- +# >>> from nipype.interfaces.ants.segmentation import CorticalThickness +# >>> corticalthickness = CorticalThickness() +# >>> corticalthickness.inputs.dimension = 3 +# >>> corticalthickness.inputs.anatomical_image ='T1.nii.gz' +# >>> corticalthickness.inputs.brain_template = 'study_template.nii.gz' +# >>> corticalthickness.inputs.brain_probability_mask ='ProbabilityMaskOfStudyTemplate.nii.gz' +# >>> corticalthickness.inputs.segmentation_priors = ['BrainSegmentationPrior01.nii.gz', +# ... 'BrainSegmentationPrior02.nii.gz', +# ... 'BrainSegmentationPrior03.nii.gz', +# ... 'BrainSegmentationPrior04.nii.gz'] +# >>> corticalthickness.inputs.t1_registration_template = 'brain_study_template.nii.gz' +# >>> corticalthickness.cmdline +# 'antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz +# -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_ +# -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz' +# # -# task_name: CorticalThickness nipype_name: CorticalThickness nipype_module: nipype.interfaces.ants.segmentation @@ -39,9 +39,9 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - anatomical_image: medimage/nifti-gz + anatomical_image: generic/file # type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. 
However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. - brain_probability_mask: medimage/nifti-gz + brain_probability_mask: generic/file # type=file|default=: brain probability mask in template space brain_template: medimage/nifti-gz # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. @@ -99,7 +99,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -155,7 +155,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -174,18 +174,14 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: - # type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. Our suggestion would be to specify the T1 as the first image. 
brain_template: # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. - brain_probability_mask: - # type=file|default=: brain probability mask in template space segmentation_priors: # type=inputmultiobject|default=[]: t1_registration_template: # type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -200,7 +196,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: antsCorticalThickness.sh -a T1.nii.gz -m ProbabilityMaskOfStudyTemplate.nii.gz -e study_template.nii.gz -d 3 -s nii.gz -o antsCT_ -p nipype_priors/BrainSegmentationPrior%02d.nii.gz -t brain_study_template.nii.gz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -208,18 +204,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - anatomical_image: '"T1.nii.gz"' - # type=file|default=: Structural *intensity* image, typically T1. If more than one anatomical image is specified, subsequently specified images are used during the segmentation process. However, only the first image is used in the registration of priors. 
Our suggestion would be to specify the T1 as the first image. brain_template: '"study_template.nii.gz"' # type=file|default=: Anatomical *intensity* template (possibly created using a population data set with buildtemplateparallel.sh in ANTs). This template is *not* skull-stripped. - brain_probability_mask: '"ProbabilityMaskOfStudyTemplate.nii.gz"' - # type=file|default=: brain probability mask in template space - segmentation_priors: '["BrainSegmentationPrior01.nii.gz","BrainSegmentationPrior02.nii.gz","BrainSegmentationPrior03.nii.gz","BrainSegmentationPrior04.nii.gz"]' + segmentation_priors: '["BrainSegmentationPrior01.nii.gz","BrainSegmentationPrior02.nii.gz","BrainSegmentationPrior03.nii.gz","BrainSegmentationPrior04.nii.gz"]' # type=inputmultiobject|default=[]: t1_registration_template: '"brain_study_template.nii.gz"' # type=file|default=: Anatomical *intensity* template (assumed to be skull-stripped). A common case would be where this would be the same template as specified in the -e option which is not skull stripped. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g.
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/create_jacobian_determinant_image.yaml b/example-specs/interface/nipype/ants/create_jacobian_determinant_image.yaml index 9376585c..3ee96af3 100644 --- a/example-specs/interface/nipype/ants/create_jacobian_determinant_image.yaml +++ b/example-specs/interface/nipype/ants/create_jacobian_determinant_image.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# Examples -# -------- -# >>> from nipype.interfaces.ants import CreateJacobianDeterminantImage -# >>> jacobian = CreateJacobianDeterminantImage() -# >>> jacobian.inputs.imageDimension = 3 -# >>> jacobian.inputs.deformationField = 'ants_Warp.nii.gz' -# >>> jacobian.inputs.outputImage = 'out_name.nii.gz' -# >>> jacobian.cmdline -# 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' -# +# Examples +# -------- +# >>> from nipype.interfaces.ants import CreateJacobianDeterminantImage +# >>> jacobian = CreateJacobianDeterminantImage() +# >>> jacobian.inputs.imageDimension = 3 +# >>> jacobian.inputs.deformationField = 'ants_Warp.nii.gz' +# >>> jacobian.inputs.outputImage = 'out_name.nii.gz' +# >>> jacobian.cmdline +# 'CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz' +# task_name: CreateJacobianDeterminantImage nipype_name: CreateJacobianDeterminantImage nipype_module: nipype.interfaces.ants.utils @@ -30,7 +30,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- deformationField: medimage/nifti-gz + deformationField: generic/file # type=file|default=: deformation transformation file outputImage: medimage/nifti-gz # type=file|default=: output filename @@ -56,7 +56,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -80,7 +80,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,12 +99,10 @@ tests: # (if not specified, will try to choose a sensible value) imageDimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - deformationField: - # type=file|default=: deformation transformation file outputImage: # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,7 +117,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: CreateJacobianDeterminantImage 3 ants_Warp.nii.gz out_name.nii.gz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -127,12 +125,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. imageDimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - deformationField: '"ants_Warp.nii.gz"' - # type=file|default=: deformation transformation file outputImage: '"out_name.nii.gz"' # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/create_tiled_mosaic.yaml b/example-specs/interface/nipype/ants/create_tiled_mosaic.yaml index bd77eb72..aa95cb28 100644 --- a/example-specs/interface/nipype/ants/create_tiled_mosaic.yaml +++ b/example-specs/interface/nipype/ants/create_tiled_mosaic.yaml @@ -6,26 +6,26 @@ # Docs # ---- # The program CreateTiledMosaic in conjunction with ConvertScalarImageToRGB -# provides useful functionality for common image analysis tasks. The basic -# usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into -# a 2-D image. +# provides useful functionality for common image analysis tasks. The basic +# usage of CreateTiledMosaic is to tile a 3-D image volume slice-wise into +# a 2-D image. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants.visualization import CreateTiledMosaic +# >>> mosaic_slicer = CreateTiledMosaic() +# >>> mosaic_slicer.inputs.input_image = 'T1.nii.gz' +# >>> mosaic_slicer.inputs.rgb_image = 'rgb.nii.gz' +# >>> mosaic_slicer.inputs.mask_image = 'mask.nii.gz' +# >>> mosaic_slicer.inputs.output_image = 'output.png' +# >>> mosaic_slicer.inputs.alpha_value = 0.5 +# >>> mosaic_slicer.inputs.direction = 2 +# >>> mosaic_slicer.inputs.pad_or_crop = '[ -15x -50 , -15x -30 ,0]' +# >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]' +# >>> mosaic_slicer.cmdline +# 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] -r rgb.nii.gz -s [2 ,100 ,160]' # -# >>> from nipype.interfaces.ants.visualization import CreateTiledMosaic -# >>> mosaic_slicer = CreateTiledMosaic() -# >>> mosaic_slicer.inputs.input_image = 'T1.nii.gz' -# >>> mosaic_slicer.inputs.rgb_image = 'rgb.nii.gz' -# >>> mosaic_slicer.inputs.mask_image = 'mask.nii.gz' -# >>> mosaic_slicer.inputs.output_image = 'output.png' -# >>> mosaic_slicer.inputs.alpha_value = 0.5 -# >>> mosaic_slicer.inputs.direction = 2 -# >>> mosaic_slicer.inputs.pad_or_crop = '[ -15x -50 , -15x -30 ,0]' -# >>> mosaic_slicer.inputs.slices = '[2 ,100 ,160]' -# >>> mosaic_slicer.cmdline -# 'CreateTiledMosaic -a 0.50 -d 2 -i T1.nii.gz -x mask.nii.gz -o output.png -p [ -15x -50 , -15x -30 ,0] -r rgb.nii.gz -s [2 ,100 ,160]' -# task_name: CreateTiledMosaic nipype_name: CreateTiledMosaic nipype_module: nipype.interfaces.ants.visualization @@ -44,7 +44,7 @@ inputs: # type=file|default=: Main input is a 3-D grayscale image. mask_image: medimage/nifti-gz # type=file|default=: Specifies the ROI of the RGB voxels used. - rgb_image: medimage/nifti-gz + rgb_image: generic/file # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -62,14 +62,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_image: image/png + output_image: generic/file # type=file: image file # type=str|default='output.png': The output consists of the tiled mosaic image. callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +106,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,23 +125,14 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: Main input is a 3-D grayscale image. - rgb_image: - # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. mask_image: # type=file|default=: Specifies the ROI of the RGB voxels used. 
- output_image: '"output.png"' - # type=file: image file - # type=str|default='output.png': The output consists of the tiled mosaic image. alpha_value: '0.5' # type=float|default=0.0: If an Rgb image is provided, render the overlay using the specified alpha parameter. - direction: '2' - # type=int|default=0: Specifies the direction of the slices. If no direction is specified, the direction with the coarsest spacing is chosen. pad_or_crop: '"[ -15x -50 , -15x -30 ,0]"' # type=str|default='': argument passed to -p flag:[padVoxelWidth,][lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1],constantValue]The user can specify whether to pad or crop a specified voxel-width boundary of each individual slice. For this program, cropping is simply padding with negative voxel-widths.If one pads (+), the user can also specify a constant pad value (default = 0). If a mask is specified, the user can use the mask to define the region, by using the keyword "mask" plus an offset, e.g. "-p mask+3". - slices: '"[2 ,100 ,160]"' - # type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,] imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -164,23 +155,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"T1.nii.gz"' # type=file|default=: Main input is a 3-D grayscale image. - rgb_image: '"rgb.nii.gz"' - # type=file|default=: An optional Rgb image can be added as an overlay.It must have the same imagegeometry as the input grayscale image. mask_image: '"mask.nii.gz"' # type=file|default=: Specifies the ROI of the RGB voxels used. 
- output_image: '"output.png"' - # type=file: image file - # type=str|default='output.png': The output consists of the tiled mosaic image. alpha_value: '0.5' # type=float|default=0.0: If an Rgb image is provided, render the overlay using the specified alpha parameter. - direction: '2' - # type=int|default=0: Specifies the direction of the slices. If no direction is specified, the direction with the coarsest spacing is chosen. pad_or_crop: '"[ -15x -50 , -15x -30 ,0]"' # type=str|default='': argument passed to -p flag:[padVoxelWidth,][lowerPadding[0]xlowerPadding[1],upperPadding[0]xupperPadding[1],constantValue]The user can specify whether to pad or crop a specified voxel-width boundary of each individual slice. For this program, cropping is simply padding with negative voxel-widths.If one pads (+), the user can also specify a constant pad value (default = 0). If a mask is specified, the user can use the mask to define the region, by using the keyword "mask" plus an offset, e.g. "-p mask+3". - slices: '"[2 ,100 ,160]"' - # type=str|default='': Number of slices to increment Slice1xSlice2xSlice3[numberOfSlicesToIncrement,,] imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/denoise_image.yaml b/example-specs/interface/nipype/ants/denoise_image.yaml index b2e8b304..fcc06788 100644 --- a/example-specs/interface/nipype/ants/denoise_image.yaml +++ b/example-specs/interface/nipype/ants/denoise_image.yaml @@ -6,30 +6,30 @@ # Docs # ---- # -# Examples -# -------- -# >>> import copy -# >>> from nipype.interfaces.ants import DenoiseImage -# >>> denoise = DenoiseImage() -# >>> denoise.inputs.dimension = 3 -# >>> denoise.inputs.input_image = 'im1.nii' -# >>> denoise.cmdline -# 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1' +# Examples +# -------- +# >>> import copy +# >>> from nipype.interfaces.ants import DenoiseImage +# >>> denoise = DenoiseImage() +# >>> denoise.inputs.dimension = 3 +# >>> denoise.inputs.input_image = 'im1.nii' +# >>> denoise.cmdline +# 'DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1' # -# >>> denoise_2 = copy.deepcopy(denoise) -# >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz' -# >>> denoise_2.inputs.noise_model = 'Rician' -# >>> denoise_2.inputs.shrink_factor = 2 -# >>> denoise_2.cmdline -# 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2' +# >>> denoise_2 = copy.deepcopy(denoise) +# >>> denoise_2.inputs.output_image = 'output_corrected_image.nii.gz' +# >>> denoise_2.inputs.noise_model = 'Rician' +# >>> denoise_2.inputs.shrink_factor = 2 +# >>> denoise_2.cmdline +# 'DenoiseImage -d 3 -i im1.nii -n Rician -o output_corrected_image.nii.gz -s 2' +# +# >>> denoise_3 = DenoiseImage() +# >>> denoise_3.inputs.input_image = 'im1.nii' +# >>> denoise_3.inputs.save_noise = True +# >>> denoise_3.cmdline +# 'DenoiseImage -i im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' # -# >>> denoise_3 = DenoiseImage() -# >>> denoise_3.inputs.input_image = 'im1.nii' -# >>> denoise_3.inputs.save_noise = True -# >>> denoise_3.cmdline -# 'DenoiseImage -i 
im1.nii -n Gaussian -o [ im1_noise_corrected.nii, im1_noise.nii ] -s 1' # -# task_name: DenoiseImage nipype_name: DenoiseImage nipype_module: nipype.interfaces.ants.segmentation @@ -46,12 +46,6 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: A scalar image is expected as input for noise correction. - noise_image: Path - # type=file: - # type=file|default=: Filename for the estimated noise. - output_image: Path - # type=file: - # type=file|default=: The output consists of the noise corrected version of the input image. callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -78,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -110,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,10 +123,8 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. 
If not specified, the program tries to infer the dimensionality from the input image. - input_image: - # type=file|default=: A scalar image is expected as input for noise correction. imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy expected_outputs: @@ -153,12 +145,10 @@ tests: output_image: '"output_corrected_image.nii.gz"' # type=file: # type=file|default=: The output consists of the noise corrected version of the input image. - noise_model: '"Rician"' - # type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model. shrink_factor: '2' # type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -177,10 +167,8 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: A scalar image is expected as input for noise correction. - save_noise: 'True' - # type=bool|default=False: True if the estimated noise should be saved to file. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -195,7 +183,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: DenoiseImage -d 3 -i im1.nii -n Gaussian -o im1_noise_corrected.nii -s 1 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -203,10 +191,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image. - input_image: '"im1.nii"' - # type=file|default=: A scalar image is expected as input for noise correction. imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -219,12 +205,10 @@ doctests: output_image: '"output_corrected_image.nii.gz"' # type=file: # type=file|default=: The output consists of the noise corrected version of the input image. - noise_model: '"Rician"' - # type=enum|default='Gaussian'|allowed['Gaussian','Rician']: Employ a Rician or Gaussian noise model. 
shrink_factor: '2' # type=int|default=1: Running noise correction on large images can be time consuming. To lessen computation time, the input image can be resampled. The shrink factor, specified as a single integer, describes this resampling. Shrink factor = 1 is the default. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -236,10 +220,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"im1.nii"' # type=file|default=: A scalar image is expected as input for noise correction. - save_noise: 'True' - # type=bool|default=False: True if the estimated noise should be saved to file. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/gen_warp_fields.yaml b/example-specs/interface/nipype/ants/gen_warp_fields.yaml index 2050e919..06fb4f1b 100644 --- a/example-specs/interface/nipype/ants/gen_warp_fields.yaml +++ b/example-specs/interface/nipype/ants/gen_warp_fields.yaml @@ -54,7 +54,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -90,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/ants/image_math.yaml b/example-specs/interface/nipype/ants/image_math.yaml index 32a6b6b3..ed26e677 100644 --- a/example-specs/interface/nipype/ants/image_math.yaml +++ b/example-specs/interface/nipype/ants/image_math.yaml @@ -6,60 +6,60 @@ # Docs # ---- # -# Operations over images. +# Operations over images. # -# Examples -# -------- -# >>> ImageMath( -# ... op1='structural.nii', -# ... operation='+', -# ... op2='2').cmdline -# 'ImageMath 3 structural_maths.nii + structural.nii 2' +# Examples +# -------- +# >>> ImageMath( +# ... op1='structural.nii', +# ... operation='+', +# ... 
op2='2').cmdline +# 'ImageMath 3 structural_maths.nii + structural.nii 2' # -# >>> ImageMath( -# ... op1='structural.nii', -# ... operation='Project', -# ... op2='1 2').cmdline -# 'ImageMath 3 structural_maths.nii Project structural.nii 1 2' +# >>> ImageMath( +# ... op1='structural.nii', +# ... operation='Project', +# ... op2='1 2').cmdline +# 'ImageMath 3 structural_maths.nii Project structural.nii 1 2' # -# >>> ImageMath( -# ... op1='structural.nii', -# ... operation='G', -# ... op2='4').cmdline -# 'ImageMath 3 structural_maths.nii G structural.nii 4' +# >>> ImageMath( +# ... op1='structural.nii', +# ... operation='G', +# ... op2='4').cmdline +# 'ImageMath 3 structural_maths.nii G structural.nii 4' # -# >>> ImageMath( -# ... op1='structural.nii', -# ... operation='TruncateImageIntensity', -# ... op2='0.005 0.999 256').cmdline -# 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' +# >>> ImageMath( +# ... op1='structural.nii', +# ... operation='TruncateImageIntensity', +# ... op2='0.005 0.999 256').cmdline +# 'ImageMath 3 structural_maths.nii TruncateImageIntensity structural.nii 0.005 0.999 256' # -# By default, Nipype copies headers from the first input image (``op1``) -# to the output image. -# For some operations, as the ``PadImage`` operation, the header cannot be copied from inputs to -# outputs, and so ``copy_header`` option is automatically set to ``False``. +# By default, Nipype copies headers from the first input image (``op1``) +# to the output image. +# For some operations, as the ``PadImage`` operation, the header cannot be copied from inputs to +# outputs, and so ``copy_header`` option is automatically set to ``False``. # -# >>> pad = ImageMath( -# ... op1='structural.nii', -# ... operation='PadImage') -# >>> pad.inputs.copy_header -# False +# >>> pad = ImageMath( +# ... op1='structural.nii', +# ... 
operation='PadImage') +# >>> pad.inputs.copy_header +# False # -# While the operation is set to ``PadImage``, -# setting ``copy_header = True`` will have no effect. +# While the operation is set to ``PadImage``, +# setting ``copy_header = True`` will have no effect. # -# >>> pad.inputs.copy_header = True -# >>> pad.inputs.copy_header -# False +# >>> pad.inputs.copy_header = True +# >>> pad.inputs.copy_header +# False # -# For any other operation, ``copy_header`` can be enabled/disabled normally: +# For any other operation, ``copy_header`` can be enabled/disabled normally: +# +# >>> pad.inputs.operation = "ME" +# >>> pad.inputs.copy_header = True +# >>> pad.inputs.copy_header +# True # -# >>> pad.inputs.operation = "ME" -# >>> pad.inputs.copy_header = True -# >>> pad.inputs.copy_header -# True # -# task_name: ImageMath nipype_name: ImageMath nipype_module: nipype.interfaces.ants.utils @@ -76,9 +76,6 @@ inputs: # passed to the field in the automatically generated unittests. op1: medimage/nifti1 # type=file|default=: first operator - output_image: Path - # type=file: output image file - # type=file|default=: output image file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -102,7 +99,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -129,7 +126,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -153,7 +150,7 @@ tests: op2: '"2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -177,7 +174,7 @@ tests: op2: '"1 2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -201,7 +198,7 @@ tests: op2: '"4"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -225,7 +222,7 @@ tests: op2: '"0.005 0.999 256"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import 
statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -240,7 +237,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: ImageMath 3 structural_maths.nii + structural.nii 2 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -253,7 +250,7 @@ doctests: op2: '"2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -270,7 +267,7 @@ doctests: op2: '"1 2"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -287,7 +284,7 @@ doctests: op2: '"4"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -304,7 +301,7 @@ doctests: op2: '"0.005 0.999 256"' # type=traitcompound|default=None: second operator imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/joint_fusion.yaml b/example-specs/interface/nipype/ants/joint_fusion.yaml index 8eb6122a..30cfe406 100644 --- a/example-specs/interface/nipype/ants/joint_fusion.yaml +++ b/example-specs/interface/nipype/ants/joint_fusion.yaml @@ -6,79 +6,79 @@ # Docs # ---- # -# An image fusion algorithm. +# An image fusion algorithm. # -# Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges -# at MICCAI 2012 and MICCAI 2013. -# The original label fusion framework was extended to accommodate intensities by Brian -# Avants. -# This implementation is based on Paul's original ITK-style implementation -# and Brian's ANTsR implementation. +# Developed by Hongzhi Wang and Paul Yushkevich, and it won segmentation challenges +# at MICCAI 2012 and MICCAI 2013. +# The original label fusion framework was extended to accommodate intensities by Brian +# Avants. 
+# This implementation is based on Paul's original ITK-style implementation +# and Brian's ANTsR implementation. # -# References include 1) H. Wang, J. W. Suh, S. -# Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint -# label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3), -# 611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation -# with joint label fusion and corrective learning--an open source implementation, -# Front. Neuroinform., 2013. +# References include 1) H. Wang, J. W. Suh, S. +# Das, J. Pluta, C. Craige, P. Yushkevich, Multi-atlas segmentation with joint +# label fusion IEEE Trans. on Pattern Analysis and Machine Intelligence, 35(3), +# 611-623, 2013. and 2) H. Wang and P. A. Yushkevich, Multi-atlas segmentation +# with joint label fusion and corrective learning--an open source implementation, +# Front. Neuroinform., 2013. # -# Examples -# -------- -# >>> from nipype.interfaces.ants import JointFusion -# >>> jf = JointFusion() -# >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' -# >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] -# >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] -# >>> jf.inputs.target_image = ['im1.nii'] -# >>> jf.cmdline -# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz -# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" +# Examples +# -------- +# >>> from nipype.interfaces.ants import JointFusion +# >>> jf = JointFusion() +# >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' +# >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'] ] +# >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz'] +# >>> jf.inputs.target_image = ['im1.nii'] +# >>> jf.cmdline +# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz +# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii']" # -# >>> jf.inputs.target_image = [ ['im1.nii', 
'im2.nii'] ] -# >>> jf.cmdline -# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz -# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" +# >>> jf.inputs.target_image = [ ['im1.nii', 'im2.nii'] ] +# >>> jf.cmdline +# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -l segmentation0.nii.gz +# -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ['im1.nii', 'im2.nii']" # -# >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], -# ... ['rc2s1.nii','rc2s2.nii'] ] -# >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', -# ... 'segmentation1.nii.gz'] -# >>> jf.cmdline -# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] -# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii -# -s 3x3x3 -t ['im1.nii', 'im2.nii']" +# >>> jf.inputs.atlas_image = [ ['rc1s1.nii','rc1s2.nii'], +# ... ['rc2s1.nii','rc2s2.nii'] ] +# >>> jf.inputs.atlas_segmentation_image = ['segmentation0.nii.gz', +# ... 
'segmentation1.nii.gz'] +# >>> jf.cmdline +# "antsJointFusion -a 0.1 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] +# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 2.0 -o ants_fusion_label_output.nii +# -s 3x3x3 -t ['im1.nii', 'im2.nii']" # -# >>> jf.inputs.dimension = 3 -# >>> jf.inputs.alpha = 0.5 -# >>> jf.inputs.beta = 1.0 -# >>> jf.inputs.patch_radius = [3,2,1] -# >>> jf.inputs.search_radius = [3] -# >>> jf.cmdline -# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] -# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii -# -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" +# >>> jf.inputs.dimension = 3 +# >>> jf.inputs.alpha = 0.5 +# >>> jf.inputs.beta = 1.0 +# >>> jf.inputs.patch_radius = [3,2,1] +# >>> jf.inputs.search_radius = [3] +# >>> jf.cmdline +# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] +# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -o ants_fusion_label_output.nii +# -p 3x2x1 -s 3 -t ['im1.nii', 'im2.nii']" # -# >>> jf.inputs.search_radius = ['mask.nii'] -# >>> jf.inputs.verbose = True -# >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] -# >>> jf.inputs.exclusion_image_label = ['1','2'] -# >>> jf.cmdline -# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] -# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] -# -o ants_fusion_label_output.nii -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" +# >>> jf.inputs.search_radius = ['mask.nii'] +# >>> jf.inputs.verbose = True +# >>> jf.inputs.exclusion_image = ['roi01.nii', 'roi02.nii'] +# >>> jf.inputs.exclusion_image_label = ['1','2'] +# >>> jf.cmdline +# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] +# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] +# -o ants_fusion_label_output.nii -p 3x2x1 -s 
mask.nii -t ['im1.nii', 'im2.nii'] -v" +# +# >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' +# >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' +# >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' +# >>> jf.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz' +# >>> jf.cmdline +# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] +# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] +# -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, +# ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] +# -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" # -# >>> jf.inputs.out_label_fusion = 'ants_fusion_label_output.nii' -# >>> jf.inputs.out_intensity_fusion_name_format = 'ants_joint_fusion_intensity_%d.nii.gz' -# >>> jf.inputs.out_label_post_prob_name_format = 'ants_joint_fusion_posterior_%d.nii.gz' -# >>> jf.inputs.out_atlas_voting_weight_name_format = 'ants_joint_fusion_voting_weight_%d.nii.gz' -# >>> jf.cmdline -# "antsJointFusion -a 0.5 -g ['rc1s1.nii', 'rc1s2.nii'] -g ['rc2s1.nii', 'rc2s2.nii'] -# -l segmentation0.nii.gz -l segmentation1.nii.gz -b 1.0 -d 3 -e 1[roi01.nii] -e 2[roi02.nii] -# -o [ants_fusion_label_output.nii, ants_joint_fusion_intensity_%d.nii.gz, -# ants_joint_fusion_posterior_%d.nii.gz, ants_joint_fusion_voting_weight_%d.nii.gz] -# -p 3x2x1 -s mask.nii -t ['im1.nii', 'im2.nii'] -v" # -# task_name: JointFusion nipype_name: JointFusion nipype_module: nipype.interfaces.ants.segmentation @@ -99,9 +99,6 @@ inputs: # type=list|default=[]: Specify an exclusion region for the given label. mask_image: generic/file # type=file|default=: If a mask image is specified, fusion is only performed in the mask region. - out_label_fusion: Path - # type=file: - # type=file|default=: The output label fusion image. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -131,7 +128,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -186,7 +183,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -206,14 +203,10 @@ tests: out_label_fusion: '"ants_fusion_label_output.nii"' # type=file: # type=file|default=: The output label fusion image. - atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]' - # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. atlas_segmentation_image: # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. - target_image: '["im1.nii"]' - # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -233,7 +226,7 @@ tests: target_image: '[ ["im1.nii", "im2.nii"] ]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -250,12 +243,12 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],["rc2s1.nii","rc2s2.nii"] ]' + atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],' # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. atlas_segmentation_image: # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -274,16 +267,12 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image. - alpha: '0.5' - # type=float|default=0.1: Regularization term added to matrix Mx for calculating the inverse. Default = 0.1 beta: '1.0' # type=float|default=2.0: Exponent for mapping intensity difference to the joint error. Default = 2.0 - patch_radius: '[3,2,1]' - # type=list|default=[]: Patch radius for similarity measures. Default: 2x2x2 search_radius: '[3]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -302,14 +291,10 @@ tests: # (if not specified, will try to choose a sensible value) search_radius: '["mask.nii"]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. 
One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. - verbose: 'True' - # type=bool|default=False: Verbose output. exclusion_image: # type=list|default=[]: Specify an exclusion region for the given label. - exclusion_image_label: '["1","2"]' - # type=list|default=[]: Specify a label for the exclusion region. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -329,14 +314,10 @@ tests: out_label_fusion: '"ants_fusion_label_output.nii"' # type=file: # type=file|default=: The output label fusion image. - out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"' - # type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz") out_label_post_prob_name_format: '"ants_joint_fusion_posterior_%d.nii.gz"' # type=str|default='antsJointFusionPosterior_%d.nii.gz': Optional label posterior probability image file name format. - out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"' - # type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -351,7 +332,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: antsJointFusion -a 0.1 -g ["rc1s1.nii", "rc1s2.nii"] -l segmentation0.nii.gz -b 2.0 -o ants_fusion_label_output.nii -s 3x3x3 -t ["im1.nii"] +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -360,14 +341,10 @@ doctests: out_label_fusion: '"ants_fusion_label_output.nii"' # type=file: # type=file|default=: The output label fusion image. - atlas_image: '[ ["rc1s1.nii","rc1s2.nii"] ]' - # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. atlas_segmentation_image: '["segmentation0.nii.gz"]' # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. - target_image: '["im1.nii"]' - # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -380,7 +357,7 @@ doctests: target_image: '[ ["im1.nii", "im2.nii"] ]' # type=list|default=[]: The target image (or multimodal target images) assumed to be aligned to a common image domain. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -390,12 +367,12 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],["rc2s1.nii","rc2s2.nii"] ]' + atlas_image: '[ ["rc1s1.nii","rc1s2.nii"],' # type=list|default=[]: The atlas image (or multimodal atlas images) assumed to be aligned to a common image domain. - atlas_segmentation_image: '["segmentation0.nii.gz","segmentation1.nii.gz"]' + atlas_segmentation_image: '["segmentation0.nii.gz",' # type=inputmultiobject|default=[]: The atlas segmentation images. For performing label fusion the number of specified segmentations should be identical to the number of atlas image sets. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -407,16 +384,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
dimension: '3' # type=enum|default=3|allowed[2,3,4]: This option forces the image to be treated as a specified-dimensional image. If not specified, the program tries to infer the dimensionality from the input image. - alpha: '0.5' - # type=float|default=0.1: Regularization term added to matrix Mx for calculating the inverse. Default = 0.1 beta: '1.0' # type=float|default=2.0: Exponent for mapping intensity difference to the joint error. Default = 2.0 - patch_radius: '[3,2,1]' - # type=list|default=[]: Patch radius for similarity measures. Default: 2x2x2 search_radius: '[3]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -428,14 +401,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. search_radius: '["mask.nii"]' # type=list|default=[3, 3, 3]: Search radius for similarity measures. Default = 3x3x3. One can also specify an image where the value at the voxel specifies the isotropic search radius at that voxel. - verbose: 'True' - # type=bool|default=False: Verbose output. exclusion_image: '["roi01.nii", "roi02.nii"]' # type=list|default=[]: Specify an exclusion region for the given label. - exclusion_image_label: '["1","2"]' - # type=list|default=[]: Specify a label for the exclusion region. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -448,14 +417,10 @@ doctests: out_label_fusion: '"ants_fusion_label_output.nii"' # type=file: # type=file|default=: The output label fusion image. - out_intensity_fusion_name_format: '"ants_joint_fusion_intensity_%d.nii.gz"' - # type=str|default='': Optional intensity fusion image file name format. (e.g. "antsJointFusionIntensity_%d.nii.gz") out_label_post_prob_name_format: '"ants_joint_fusion_posterior_%d.nii.gz"' # type=str|default='antsJointFusionPosterior_%d.nii.gz': Optional label posterior probability image file name format. - out_atlas_voting_weight_name_format: '"ants_joint_fusion_voting_weight_%d.nii.gz"' - # type=str|default='antsJointFusionVotingWeight_%d.nii.gz': Optional atlas voting weight image file name format. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/kelly_kapowski.yaml b/example-specs/interface/nipype/ants/kelly_kapowski.yaml index 18274e71..cd9ca5c2 100644 --- a/example-specs/interface/nipype/ants/kelly_kapowski.yaml +++ b/example-specs/interface/nipype/ants/kelly_kapowski.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Nipype Interface to ANTs' KellyKapowski, also known as DiReCT. 
+# Nipype Interface to ANTs' KellyKapowski, also known as DiReCT. # -# DiReCT is a registration based estimate of cortical thickness. It was published -# in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based -# cortical thickness measurement, Neuroimage 2009, 45:867--879. +# DiReCT is a registration based estimate of cortical thickness. It was published +# in S. R. Das, B. B. Avants, M. Grossman, and J. C. Gee, Registration based +# cortical thickness measurement, Neuroimage 2009, 45:867--879. +# +# Examples +# -------- +# >>> from nipype.interfaces.ants.segmentation import KellyKapowski +# >>> kk = KellyKapowski() +# >>> kk.inputs.dimension = 3 +# >>> kk.inputs.segmentation_image = "segmentation0.nii.gz" +# >>> kk.inputs.convergence = "[45,0.0,10]" +# >>> kk.inputs.thickness_prior_estimate = 10 +# >>> kk.cmdline +# 'KellyKapowski --convergence "[45,0.0,10]" +# --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" +# --image-dimensionality 3 --gradient-step 0.025000 +# --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 +# --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 +# --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' # -# Examples -# -------- -# >>> from nipype.interfaces.ants.segmentation import KellyKapowski -# >>> kk = KellyKapowski() -# >>> kk.inputs.dimension = 3 -# >>> kk.inputs.segmentation_image = "segmentation0.nii.gz" -# >>> kk.inputs.convergence = "[45,0.0,10]" -# >>> kk.inputs.thickness_prior_estimate = 10 -# >>> kk.cmdline -# 'KellyKapowski --convergence "[45,0.0,10]" -# --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" -# --image-dimensionality 3 --gradient-step 0.025000 -# --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 -# --segmentation-image "[segmentation0.nii.gz,2,3]" 
--smoothing-variance 1.000000 -# --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000' # -# task_name: KellyKapowski nipype_name: KellyKapowski nipype_module: nipype.interfaces.ants.segmentation @@ -43,18 +43,12 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - cortical_thickness: Path - # type=file: A thickness map defined in the segmented gray matter. - # type=file|default=: Filename for the cortical thickness. gray_matter_prob_image: generic/file # type=file|default=: In addition to the segmentation image, a gray matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm. - segmentation_image: medimage/nifti-gz + segmentation_image: generic/file # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. thickness_prior_image: generic/file # type=file|default=: An image containing spatially varying prior thickness values. - warped_white_matter: Path - # type=file: A warped white matter image. - # type=file|default=: Filename for the warped white matter file. white_matter_prob_image: generic/file # type=file|default=: In addition to the segmentation image, a white matter probability image can be used. If no such image is supplied, one is created using the segmentation image and a variance of 1.0 mm. 
callable_defaults: @@ -83,7 +77,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -133,7 +127,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -152,14 +146,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - segmentation_image: - # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. convergence: '"[45,0.0,10]"' # type=str|default='[50,0.001,10]': Convergence is determined by fitting a line to the normalized energy profile of the last N iterations (where N is specified by the window size) and determining the slope which is then compared with the convergence threshold. - thickness_prior_estimate: '10' - # type=float|default=10: Provides a prior constraint on the final thickness measurement in mm. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -174,7 +164,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: KellyKapowski --convergence "[45,0.0,10]" --output "[segmentation0_cortical_thickness.nii.gz,segmentation0_warped_white_matter.nii.gz]" --image-dimensionality 3 --gradient-step 0.025000 --maximum-number-of-invert-displacement-field-iterations 20 --number-of-integration-points 10 --segmentation-image "[segmentation0.nii.gz,2,3]" --smoothing-variance 1.000000 --smoothing-velocity-field-parameter 1.500000 --thickness-prior-estimate 10.000000 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -182,14 +172,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - segmentation_image: '"segmentation0.nii.gz"' - # type=file|default=: A segmentation image must be supplied labeling the gray and white matters. Default values = 2 and 3, respectively. convergence: '"[45,0.0,10]"' # type=str|default='[50,0.001,10]': Convergence is determined by fitting a line to the normalized energy profile of the last N iterations (where N is specified by the window size) and determining the slope which is then compared with the convergence threshold. - thickness_prior_estimate: '10' - # type=float|default=10: Provides a prior constraint on the final thickness measurement in mm. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/label_geometry.yaml b/example-specs/interface/nipype/ants/label_geometry.yaml index 74e485a3..8bcafd59 100644 --- a/example-specs/interface/nipype/ants/label_geometry.yaml +++ b/example-specs/interface/nipype/ants/label_geometry.yaml @@ -6,22 +6,22 @@ # Docs # ---- # -# Extracts geometry measures using a label file and an optional image file +# Extracts geometry measures using a label file and an optional image file # -# Examples -# -------- -# >>> from nipype.interfaces.ants import LabelGeometry -# >>> label_extract = LabelGeometry() -# >>> label_extract.inputs.dimension = 3 -# >>> label_extract.inputs.label_image = 'atlas.nii.gz' -# >>> label_extract.cmdline -# 'LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv' +# Examples +# -------- +# >>> from nipype.interfaces.ants import LabelGeometry +# >>> label_extract = LabelGeometry() +# >>> label_extract.inputs.dimension = 3 +# >>> label_extract.inputs.label_image = 'atlas.nii.gz' +# >>> label_extract.cmdline +# 'LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv' +# +# >>> label_extract.inputs.intensity_image = 'ants_Warp.nii.gz' +# >>> label_extract.cmdline +# 'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv' # -# >>> label_extract.inputs.intensity_image = 'ants_Warp.nii.gz' -# >>> label_extract.cmdline -# 'LabelGeometryMeasures 3 atlas.nii.gz ants_Warp.nii.gz atlas.csv' # -# task_name: LabelGeometry nipype_name: LabelGeometry nipype_module: nipype.interfaces.ants.utils @@ -38,7 +38,7 @@ inputs: # passed to the field in the automatically generated 
unittests. intensity_image: medimage/nifti-gz # type=file|default='[]': Intensity image to extract values from. This is an optional input - label_image: medimage/nifti-gz + label_image: generic/file # type=file|default=: label image to use for extracting geometry measures callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -63,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +86,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -105,10 +105,8 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - label_image: - # type=file|default=: label image to use for extracting geometry measures imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, 
noting that tests will typically @@ -128,7 +126,7 @@ tests: intensity_image: # type=file|default='[]': Intensity image to extract values from. This is an optional input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -143,7 +141,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: LabelGeometryMeasures 3 atlas.nii.gz [] atlas.csv +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -151,10 +149,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - label_image: '"atlas.nii.gz"' - # type=file|default=: label image to use for extracting geometry measures imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -167,7 +163,7 @@ doctests: intensity_image: '"ants_Warp.nii.gz"' # type=file|default='[]': Intensity image to extract values from. 
This is an optional input imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/laplacian_thickness.yaml b/example-specs/interface/nipype/ants/laplacian_thickness.yaml index 68853a30..c21a43cc 100644 --- a/example-specs/interface/nipype/ants/laplacian_thickness.yaml +++ b/example-specs/interface/nipype/ants/laplacian_thickness.yaml @@ -7,21 +7,21 @@ # ---- # Calculates the cortical thickness from an anatomical image # -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import LaplacianThickness -# >>> cort_thick = LaplacianThickness() -# >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz' -# >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz' -# >>> cort_thick.cmdline -# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz' +# >>> from nipype.interfaces.ants import LaplacianThickness +# >>> cort_thick = LaplacianThickness() +# >>> cort_thick.inputs.input_wm = 'white_matter.nii.gz' +# >>> cort_thick.inputs.input_gm = 'gray_matter.nii.gz' +# >>> cort_thick.cmdline +# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz white_matter_thickness.nii.gz' +# +# >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz' +# >>> cort_thick.cmdline +# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz' # -# >>> cort_thick.inputs.output_image = 'output_thickness.nii.gz' -# >>> cort_thick.cmdline -# 'LaplacianThickness white_matter.nii.gz gray_matter.nii.gz output_thickness.nii.gz' # -# task_name: LaplacianThickness nipype_name: LaplacianThickness nipype_module: 
nipype.interfaces.ants.segmentation @@ -36,7 +36,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - input_gm: medimage/nifti-gz + input_gm: generic/file # type=file|default=: gray matter segmentation image input_wm: medimage/nifti-gz # type=file|default=: white matter segmentation image @@ -63,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -94,7 +94,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -113,10 +113,8 @@ tests: # (if not specified, will try to choose a sensible value) input_wm: # type=file|default=: white matter segmentation image - input_gm: - # type=file|default=: gray matter segmentation image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # 
dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -137,7 +135,7 @@ tests: # type=file: Cortical thickness # type=str|default='': name of output file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -160,10 +158,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_wm: '"white_matter.nii.gz"' # type=file|default=: white matter segmentation image - input_gm: '"gray_matter.nii.gz"' - # type=file|default=: gray matter segmentation image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -177,7 +173,7 @@ doctests: # type=file: Cortical thickness # type=str|default='': name of output file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/measure_image_similarity.yaml b/example-specs/interface/nipype/ants/measure_image_similarity.yaml index a146fd7e..d75c10d7 100644 --- a/example-specs/interface/nipype/ants/measure_image_similarity.yaml +++ b/example-specs/interface/nipype/ants/measure_image_similarity.yaml @@ -8,24 +8,24 @@ # # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.ants import MeasureImageSimilarity +# >>> sim = MeasureImageSimilarity() +# >>> sim.inputs.dimension = 3 +# >>> sim.inputs.metric = 'MI' +# >>> sim.inputs.fixed_image = 'T1.nii' +# >>> sim.inputs.moving_image = 'resting.nii' +# >>> sim.inputs.metric_weight = 1.0 +# >>> sim.inputs.radius_or_number_of_bins = 5 +# >>> sim.inputs.sampling_strategy = 'Regular' +# >>> sim.inputs.sampling_percentage = 1.0 +# >>> sim.inputs.fixed_image_mask = 'mask.nii' +# >>> sim.inputs.moving_image_mask = 'mask.nii.gz' +# >>> sim.cmdline +# 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] --metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' # -# >>> from nipype.interfaces.ants import MeasureImageSimilarity -# >>> sim = MeasureImageSimilarity() -# >>> sim.inputs.dimension = 3 -# >>> sim.inputs.metric = 'MI' -# >>> sim.inputs.fixed_image = 'T1.nii' -# >>> sim.inputs.moving_image = 'resting.nii' -# >>> sim.inputs.metric_weight = 1.0 -# >>> sim.inputs.radius_or_number_of_bins = 5 -# >>> sim.inputs.sampling_strategy = 'Regular' -# >>> sim.inputs.sampling_percentage = 1.0 -# >>> sim.inputs.fixed_image_mask = 'mask.nii' -# >>> sim.inputs.moving_image_mask = 'mask.nii.gz' -# >>> sim.cmdline -# 'MeasureImageSimilarity --dimensionality 3 --masks ["mask.nii","mask.nii.gz"] --metric MI["T1.nii","resting.nii",1.0,5,Regular,1.0]' -# task_name: MeasureImageSimilarity nipype_name: MeasureImageSimilarity nipype_module: nipype.interfaces.ants.registration @@ -44,9 +44,9 @@ inputs: # type=file|default=: Image to which the moving image 
is warped fixed_image_mask: medimage/nifti1 # type=file|default=: mask used to limit metric sampling region of the fixed image - moving_image: medimage/nifti1 + moving_image: generic/file # type=file|default=: Image to apply transformation to (generally a coregistered functional) - moving_image_mask: medimage/nifti-gz + moving_image_mask: generic/file # type=file|default=: mask used to limit metric sampling region of the moving image callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -70,7 +70,7 @@ outputs: similarity: similarity_callable # type=float: templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -104,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,26 +123,16 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair - metric: '"MI"' - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: fixed_image: # type=file|default=: Image to which the moving image is warped - moving_image: - # type=file|default=: Image to apply transformation to (generally a coregistered functional) metric_weight: '1.0' # type=float|default=1.0: The "metricWeight" variable is not used. 
- radius_or_number_of_bins: '5' - # type=int|default=0: The number of bins in each stage for the MI and Mattes metric, or the radius for other metrics sampling_strategy: '"Regular"' # type=enum|default='None'|allowed['None','Random','Regular']: Manner of choosing point set over which to optimize the metric. Defaults to "None" (i.e. a dense sampling of one sample per voxel). - sampling_percentage: '1.0' - # type=range|default=None: Percentage of points accessible to the sampling strategy over which to optimize the metric. fixed_image_mask: # type=file|default=: mask used to limit metric sampling region of the fixed image - moving_image_mask: - # type=file|default=: mask used to limit metric sampling region of the moving image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -165,26 +155,16 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=2|allowed[2,3,4]: Dimensionality of the fixed/moving image pair - metric: '"MI"' - # type=enum|default='CC'|allowed['CC','Demons','GC','MI','Mattes','MeanSquares']: fixed_image: '"T1.nii"' # type=file|default=: Image to which the moving image is warped - moving_image: '"resting.nii"' - # type=file|default=: Image to apply transformation to (generally a coregistered functional) metric_weight: '1.0' # type=float|default=1.0: The "metricWeight" variable is not used. 
- radius_or_number_of_bins: '5' - # type=int|default=0: The number of bins in each stage for the MI and Mattes metric, or the radius for other metrics sampling_strategy: '"Regular"' # type=enum|default='None'|allowed['None','Random','Regular']: Manner of choosing point set over which to optimize the metric. Defaults to "None" (i.e. a dense sampling of one sample per voxel). - sampling_percentage: '1.0' - # type=range|default=None: Percentage of points accessible to the sampling strategy over which to optimize the metric. fixed_image_mask: '"mask.nii"' # type=file|default=: mask used to limit metric sampling region of the fixed image - moving_image_mask: '"mask.nii.gz"' - # type=file|default=: mask used to limit metric sampling region of the moving image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/multiply_images.yaml b/example-specs/interface/nipype/ants/multiply_images.yaml index 76dcb835..253fdef3 100644 --- a/example-specs/interface/nipype/ants/multiply_images.yaml +++ b/example-specs/interface/nipype/ants/multiply_images.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# Examples -# -------- -# >>> from nipype.interfaces.ants import MultiplyImages -# >>> test = MultiplyImages() -# >>> test.inputs.dimension = 3 -# >>> test.inputs.first_input = 'moving2.nii' -# >>> test.inputs.second_input = 0.25 -# >>> test.inputs.output_product_image = "out.nii" -# >>> test.cmdline -# 'MultiplyImages 3 moving2.nii 0.25 out.nii' -# +# Examples +# -------- +# >>> from nipype.interfaces.ants import MultiplyImages +# >>> test = MultiplyImages() +# >>> test.inputs.dimension = 3 +# >>> test.inputs.first_input = 'moving2.nii' +# >>> test.inputs.second_input = 0.25 +# >>> test.inputs.output_product_image = "out.nii" +# >>> test.cmdline +# 'MultiplyImages 3 moving2.nii 0.25 out.nii' +# task_name: MultiplyImages nipype_name: MultiplyImages nipype_module: nipype.interfaces.ants.utils @@ -31,11 +31,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - first_input: medimage/nifti1 + first_input: generic/file # type=file|default=: image 1 - output_product_image: Path - # type=file: average image file - # type=file|default=: Outputfname.nii.gz: the name of the resulting image. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -52,14 +49,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_product_image: medimage/nifti1 + output_product_image: generic/file # type=file: average image file # type=file|default=: Outputfname.nii.gz: the name of the resulting image. callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -82,7 +79,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -101,15 +98,10 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - first_input: - # type=file|default=: image 1 second_input: '0.25' # type=traitcompound|default=None: image 2 or multiplication weight - output_product_image: '"out.nii"' - # type=file: average image file - # type=file|default=: Outputfname.nii.gz: the name of the resulting 
image. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +116,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: MultiplyImages 3 moving2.nii 0.25 out.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -132,15 +124,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) - first_input: '"moving2.nii"' - # type=file|default=: image 1 second_input: '0.25' # type=traitcompound|default=None: image 2 or multiplication weight - output_product_image: '"out.nii"' - # type=file: average image file - # type=file|default=: Outputfname.nii.gz: the name of the resulting image. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/n4_bias_field_correction.yaml b/example-specs/interface/nipype/ants/n4_bias_field_correction.yaml index a1e5dd7c..5472666e 100644 --- a/example-specs/interface/nipype/ants/n4_bias_field_correction.yaml +++ b/example-specs/interface/nipype/ants/n4_bias_field_correction.yaml @@ -6,71 +6,71 @@ # Docs # ---- # -# Bias field correction. +# Bias field correction. # -# N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) -# retrospective bias correction algorithm. Based on the assumption that the -# corruption of the low frequency bias field can be modeled as a convolution of -# the intensity histogram by a Gaussian, the basic algorithmic protocol is to -# iterate between deconvolving the intensity histogram by a Gaussian, remapping -# the intensities, and then spatially smoothing this result by a B-spline modeling -# of the bias field itself. The modifications from and improvements obtained over -# the original N3 algorithm are described in [Tustison2010]_. +# N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) +# retrospective bias correction algorithm. Based on the assumption that the +# corruption of the low frequency bias field can be modeled as a convolution of +# the intensity histogram by a Gaussian, the basic algorithmic protocol is to +# iterate between deconvolving the intensity histogram by a Gaussian, remapping +# the intensities, and then spatially smoothing this result by a B-spline modeling +# of the bias field itself. The modifications from and improvements obtained over +# the original N3 algorithm are described in [Tustison2010]_. # -# .. [Tustison2010] N. Tustison et al., -# N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging, -# 29(6):1310-1320, June 2010. +# .. [Tustison2010] N. Tustison et al., +# N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging, +# 29(6):1310-1320, June 2010. 
# -# Examples -# -------- +# Examples +# -------- # -# >>> import copy -# >>> from nipype.interfaces.ants import N4BiasFieldCorrection -# >>> n4 = N4BiasFieldCorrection() -# >>> n4.inputs.dimension = 3 -# >>> n4.inputs.input_image = 'structural.nii' -# >>> n4.inputs.bspline_fitting_distance = 300 -# >>> n4.inputs.shrink_factor = 3 -# >>> n4.inputs.n_iterations = [50,50,30,20] -# >>> n4.cmdline -# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] -# -d 3 --input-image structural.nii -# --convergence [ 50x50x30x20 ] --output structural_corrected.nii -# --shrink-factor 3' +# >>> import copy +# >>> from nipype.interfaces.ants import N4BiasFieldCorrection +# >>> n4 = N4BiasFieldCorrection() +# >>> n4.inputs.dimension = 3 +# >>> n4.inputs.input_image = 'structural.nii' +# >>> n4.inputs.bspline_fitting_distance = 300 +# >>> n4.inputs.shrink_factor = 3 +# >>> n4.inputs.n_iterations = [50,50,30,20] +# >>> n4.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20 ] --output structural_corrected.nii +# --shrink-factor 3' # -# >>> n4_2 = copy.deepcopy(n4) -# >>> n4_2.inputs.convergence_threshold = 1e-6 -# >>> n4_2.cmdline -# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] -# -d 3 --input-image structural.nii -# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii -# --shrink-factor 3' +# >>> n4_2 = copy.deepcopy(n4) +# >>> n4_2.inputs.convergence_threshold = 1e-6 +# >>> n4_2.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii +# --shrink-factor 3' # -# >>> n4_3 = copy.deepcopy(n4_2) -# >>> n4_3.inputs.bspline_order = 5 -# >>> n4_3.cmdline -# 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] -# -d 3 --input-image structural.nii -# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii -# --shrink-factor 3' +# >>> n4_3 = copy.deepcopy(n4_2) +# >>> 
n4_3.inputs.bspline_order = 5 +# >>> n4_3.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii +# --shrink-factor 3' # -# >>> n4_4 = N4BiasFieldCorrection() -# >>> n4_4.inputs.input_image = 'structural.nii' -# >>> n4_4.inputs.save_bias = True -# >>> n4_4.inputs.dimension = 3 -# >>> n4_4.cmdline -# 'N4BiasFieldCorrection -d 3 --input-image structural.nii -# --output [ structural_corrected.nii, structural_bias.nii ]' +# >>> n4_4 = N4BiasFieldCorrection() +# >>> n4_4.inputs.input_image = 'structural.nii' +# >>> n4_4.inputs.save_bias = True +# >>> n4_4.inputs.dimension = 3 +# >>> n4_4.cmdline +# 'N4BiasFieldCorrection -d 3 --input-image structural.nii +# --output [ structural_corrected.nii, structural_bias.nii ]' +# +# >>> n4_5 = N4BiasFieldCorrection() +# >>> n4_5.inputs.input_image = 'structural.nii' +# >>> n4_5.inputs.dimension = 3 +# >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200) +# >>> n4_5.cmdline +# 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] +# --input-image structural.nii --output structural_corrected.nii' # -# >>> n4_5 = N4BiasFieldCorrection() -# >>> n4_5.inputs.input_image = 'structural.nii' -# >>> n4_5.inputs.dimension = 3 -# >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200) -# >>> n4_5.cmdline -# 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] -# --input-image structural.nii --output structural_corrected.nii' # -# task_name: N4BiasFieldCorrection nipype_name: N4BiasFieldCorrection nipype_module: nipype.interfaces.ants.segmentation @@ -85,9 +85,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- bias_image: Path - # type=file: Estimated bias - # type=file|default=: Filename for the estimated bias. input_image: medimage/nifti1 # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction mask_image: generic/file @@ -120,7 +117,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -166,7 +163,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -185,16 +182,12 @@ tests: # (if not specified, will try to choose a sensible value) dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) - input_image: - # type=file|default=: input for bias correction. 
Negative values or values close to zero should be processed prior to correction bspline_fitting_distance: '300' # type=float|default=0.0: - shrink_factor: '3' - # type=int|default=0: n_iterations: '[50,50,30,20]' # type=list|default=[]: imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy expected_outputs: @@ -215,7 +208,7 @@ tests: convergence_threshold: 1e-6 # type=float|default=0.0: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -235,7 +228,7 @@ tests: bspline_order: '5' # type=int|default=0: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -254,12 +247,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction - save_bias: 'True' - # type=bool|default=False: True if the estimated bias should be saved to file. 
dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -278,12 +269,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction - dimension: '3' - # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) histogram_sharpening: (0.12, 0.02, 200) # type=tuple|default=(0.15, 0.01, 200): Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -306,16 +295,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) - input_image: '"structural.nii"' - # type=file|default=: input for bias correction. 
Negative values or values close to zero should be processed prior to correction bspline_fitting_distance: '300' # type=float|default=0.0: - shrink_factor: '3' - # type=int|default=0: n_iterations: '[50,50,30,20]' # type=list|default=[]: imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -328,7 +313,7 @@ doctests: convergence_threshold: 1e-6 # type=float|default=0.0: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -341,7 +326,7 @@ doctests: bspline_order: '5' # type=int|default=0: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -353,12 +338,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction - save_bias: 'True' - # type=bool|default=False: True if the estimated bias should be saved to file. 
dimension: '3' # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -370,12 +353,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: input for bias correction. Negative values or values close to zero should be processed prior to correction - dimension: '3' - # type=enum|default=3|allowed[2,3,4]: image dimension (2, 3 or 4) histogram_sharpening: (0.12, 0.02, 200) # type=tuple|default=(0.15, 0.01, 200): Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/registration.yaml b/example-specs/interface/nipype/ants/registration.yaml index e858cf17..533fa17c 100644 --- a/example-specs/interface/nipype/ants/registration.yaml +++ b/example-specs/interface/nipype/ants/registration.yaml @@ -7,258 +7,258 @@ # ---- # ANTs Registration command for registration of images # -# `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``, -# using a predefined (sequence of) cost function(s) and transformation operations. -# The cost function is defined using one or more 'metrics', specifically -# local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``), -# global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``). +# `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``, +# using a predefined (sequence of) cost function(s) and transformation operations. +# The cost function is defined using one or more 'metrics', specifically +# local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``), +# global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``). +# +# ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``, +# or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``, +# ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``, +# ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple +# *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear +# (Syn)-transformation. +# +# antsRegistration can be initialized using one or more transforms from moving_image +# to fixed_image with the ``initial_moving_transform``-input. 
For example, when you +# already have a warpfield that corrects for geometrical distortions in an EPI (functional) image, +# that you want to apply before an Affine registration to a structural image. +# You could put this transform into 'intial_moving_transform'. +# +# The Registration-interface can output the resulting transform(s) that map moving_image to +# fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform`` +# is set to ``True``), or a list of transforms as ``forwards_transforms``. It can also output +# inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using +# ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural' +# order: the first element should be applied first, the last element should be applied last. +# +# Note, however, that ANTS tools always apply lists of transformations in reverse order (the last +# transformation in the list is applied first). Therefore, if the output forward_transforms +# is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To +# make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``, +# you have to provide the list of transformations in reverse order from ``forward_transforms``. +# ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for +# this purpose. Note also that, because ``composite_transform`` is always a single file, this +# output is preferred for most use-cases. +# +# More information can be found in the `ANTS +# manual `_. +# +# See below for some useful examples. +# +# Examples +# -------- +# +# Set up a Registration node with some default settings. This Node registers +# 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and +# then a non-linear 'SyN' transformation, both using the Mutual Information-cost +# metric. 
+# +# The registration is initialized by first applying the (linear) transform +# trans.mat. +# +# >>> import copy, pprint +# >>> from nipype.interfaces.ants import Registration +# >>> reg = Registration() +# >>> reg.inputs.fixed_image = 'fixed1.nii' +# >>> reg.inputs.moving_image = 'moving1.nii' +# >>> reg.inputs.output_transform_prefix = "output_" +# >>> reg.inputs.initial_moving_transform = 'trans.mat' +# >>> reg.inputs.transforms = ['Affine', 'SyN'] +# >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] +# >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] +# >>> reg.inputs.dimension = 3 +# >>> reg.inputs.write_composite_transform = True +# >>> reg.inputs.collapse_output_transforms = False +# >>> reg.inputs.initialize_transforms_per_stage = False +# >>> reg.inputs.metric = ['Mattes']*2 +# >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) +# >>> reg.inputs.radius_or_number_of_bins = [32]*2 +# >>> reg.inputs.sampling_strategy = ['Random', None] +# >>> reg.inputs.sampling_percentage = [0.05, None] +# >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] +# >>> reg.inputs.convergence_window_size = [20]*2 +# >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]] +# >>> reg.inputs.sigma_units = ['vox'] * 2 +# >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] +# >>> reg.inputs.use_estimate_learning_rate_once = [True, True] +# >>> reg.inputs.use_histogram_matching = [True, True] # This is the default +# >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' +# >>> reg.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 
--transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# >>> reg.run() # doctest: +SKIP +# +# Same as reg1, but first invert the initial transform ('trans.mat') before applying it. +# +# >>> reg.inputs.invert_initial_moving_transform = True +# >>> reg1 = copy.deepcopy(reg) +# >>> reg1.inputs.winsorize_lower_quantile = 0.025 +# >>> reg1.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' +# >>> reg1.run() # doctest: +SKIP +# +# Clip extremely high intensity data points using winsorize_upper_quantile. All data points +# higher than the 0.975 quantile are set to the value of the 0.975 quantile. 
+# +# >>> reg2 = copy.deepcopy(reg) +# >>> reg2.inputs.winsorize_upper_quantile = 0.975 +# >>> reg2.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' +# +# Clip extremely low intensity data points using winsorize_lower_quantile. All data points +# lower than the 0.025 quantile are set to the original value at the 0.025 quantile. +# +# +# >>> reg3 = copy.deepcopy(reg) +# >>> reg3.inputs.winsorize_lower_quantile = 0.025 +# >>> reg3.inputs.winsorize_upper_quantile = 0.975 +# >>> reg3.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' +# +# Use float instead of double for computations (saves memory usage) +# +# >>> reg3a = copy.deepcopy(reg) +# >>> 
reg3a.inputs.float = True +# >>> reg3a.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Force to use double instead of float for computations (more precision and memory usage). +# +# >>> reg3b = copy.deepcopy(reg) +# >>> reg3b.inputs.float = False +# >>> reg3b.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'- +# file. Note that forward_transforms will now be an empty list. 
+# +# >>> # Test collapse transforms flag +# >>> reg4 = copy.deepcopy(reg) +# >>> reg4.inputs.save_state = 'trans.mat' +# >>> reg4.inputs.restore_state = 'trans.mat' +# >>> reg4.inputs.initialize_transforms_per_stage = True +# >>> reg4.inputs.collapse_output_transforms = True +# >>> outputs = reg4._list_outputs() +# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +# {'composite_transform': '...data/output_Composite.h5', +# 'elapsed_time': , +# 'forward_invert_flags': [], +# 'forward_transforms': [], +# 'inverse_composite_transform': '...data/output_InverseComposite.h5', +# 'inverse_warped_image': , +# 'metric_value': , +# 'reverse_forward_invert_flags': [], +# 'reverse_forward_transforms': [], +# 'reverse_invert_flags': [], +# 'reverse_transforms': [], +# 'save_state': '...data/trans.mat', +# 'warped_image': '...data/output_warped_image.nii.gz'} +# >>> reg4.cmdline +# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# +# >>> # Test collapse transforms flag +# >>> reg4b = copy.deepcopy(reg4) +# >>> reg4b.inputs.write_composite_transform = False +# >>> outputs = reg4b._list_outputs() +# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +# {'composite_transform': , +# 'elapsed_time': , +# 'forward_invert_flags': [False, False], +# 'forward_transforms': 
['...data/output_0GenericAffine.mat', +# '...data/output_1Warp.nii.gz'], +# 'inverse_composite_transform': , +# 'inverse_warped_image': , +# 'metric_value': , +# 'reverse_forward_invert_flags': [False, False], +# 'reverse_forward_transforms': ['...data/output_1Warp.nii.gz', +# '...data/output_0GenericAffine.mat'], +# 'reverse_invert_flags': [True, False], +# 'reverse_transforms': ['...data/output_0GenericAffine.mat', '...data/output_1InverseWarp.nii.gz'], +# 'save_state': '...data/trans.mat', +# 'warped_image': '...data/output_warped_image.nii.gz'} +# >>> reg4b.aggregate_outputs() # doctest: +SKIP +# >>> reg4b.cmdline +# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0' +# +# One can use multiple similarity metrics in a single registration stage.The Node below first +# performs a linear registration using only the Mutual Information ('Mattes')-metric. +# In a second stage, it performs a non-linear registration ('Syn') using both a +# Mutual Information and a local cross-correlation ('CC')-metric. Both metrics are weighted +# equally ('metric_weight' is .5 for both). The Mutual Information- metric uses 32 bins. +# The local cross-correlations (correlations between every voxel's neighborhoods) is computed +# with a radius of 4. 
+# +# >>> # Test multiple metrics per stage +# >>> reg5 = copy.deepcopy(reg) +# >>> reg5.inputs.fixed_image = 'fixed1.nii' +# >>> reg5.inputs.moving_image = 'moving1.nii' +# >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']] +# >>> reg5.inputs.metric_weight = [1, [.5,.5]] +# >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] +# >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage +# >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] +# >>> reg5.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed +# that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and +# moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii, +# then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from +# the transformation of the first step. 
+# +# >>> # Test multiple inputS +# >>> reg6 = copy.deepcopy(reg5) +# >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] +# >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] +# >>> reg6.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Different methods can be used for the interpolation when applying transformations. 
+# +# >>> # Test Interpolation Parameters (BSpline) +# >>> reg7a = copy.deepcopy(reg) +# >>> reg7a.inputs.interpolation = 'BSpline' +# >>> reg7a.inputs.interpolation_parameters = (3,) +# >>> reg7a.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# >>> # Test Interpolation Parameters (MultiLabel/Gaussian) +# >>> reg7b = copy.deepcopy(reg) +# >>> reg7b.inputs.interpolation = 'Gaussian' +# >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) +# >>> reg7b.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# BSplineSyN non-linear registration with custom parameters. 
+# +# >>> # Test Extended Transform Parameters +# >>> reg8 = copy.deepcopy(reg) +# >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] +# >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] +# >>> reg8.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Mask the fixed image in the second stage of the registration (but not the first). 
+# +# >>> # Test masking +# >>> reg9 = copy.deepcopy(reg) +# >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] +# >>> reg9.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Here we use both a warpfield and a linear transformation, before registration commences. Note that +# the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of +# 'initial_moving_transform'. 
+# +# >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform) +# >>> reg10 = copy.deepcopy(reg) +# >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] +# >>> reg10.inputs.invert_initial_moving_transform = [False, False] +# >>> reg10.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' # -# ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``, -# or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``, -# ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``, -# ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple -# *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear -# (Syn)-transformation. -# -# antsRegistration can be initialized using one or more transforms from moving_image -# to fixed_image with the ``initial_moving_transform``-input. For example, when you -# already have a warpfield that corrects for geometrical distortions in an EPI (functional) image, -# that you want to apply before an Affine registration to a structural image. -# You could put this transform into 'intial_moving_transform'. 
-# -# The Registration-interface can output the resulting transform(s) that map moving_image to -# fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform`` -# is set to ``True``), or a list of transforms as ``forwards_transforms``. It can also output -# inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using -# ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural' -# order: the first element should be applied first, the last element should be applied last. -# -# Note, however, that ANTS tools always apply lists of transformations in reverse order (the last -# transformation in the list is applied first). Therefore, if the output forward_transforms -# is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To -# make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``, -# you have to provide the list of transformations in reverse order from ``forward_transforms``. -# ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for -# this purpose. Note also that, because ``composite_transform`` is always a single file, this -# output is preferred for most use-cases. -# -# More information can be found in the `ANTS -# manual `_. -# -# See below for some useful examples. -# -# Examples -# -------- -# -# Set up a Registration node with some default settings. This Node registers -# 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and -# then a non-linear 'SyN' transformation, both using the Mutual Information-cost -# metric. -# -# The registration is initialized by first applying the (linear) transform -# trans.mat. 
-# -# >>> import copy, pprint -# >>> from nipype.interfaces.ants import Registration -# >>> reg = Registration() -# >>> reg.inputs.fixed_image = 'fixed1.nii' -# >>> reg.inputs.moving_image = 'moving1.nii' -# >>> reg.inputs.output_transform_prefix = "output_" -# >>> reg.inputs.initial_moving_transform = 'trans.mat' -# >>> reg.inputs.transforms = ['Affine', 'SyN'] -# >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] -# >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] -# >>> reg.inputs.dimension = 3 -# >>> reg.inputs.write_composite_transform = True -# >>> reg.inputs.collapse_output_transforms = False -# >>> reg.inputs.initialize_transforms_per_stage = False -# >>> reg.inputs.metric = ['Mattes']*2 -# >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) -# >>> reg.inputs.radius_or_number_of_bins = [32]*2 -# >>> reg.inputs.sampling_strategy = ['Random', None] -# >>> reg.inputs.sampling_percentage = [0.05, None] -# >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] -# >>> reg.inputs.convergence_window_size = [20]*2 -# >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]] -# >>> reg.inputs.sigma_units = ['vox'] * 2 -# >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] -# >>> reg.inputs.use_estimate_learning_rate_once = [True, True] -# >>> reg.inputs.use_histogram_matching = [True, True] # This is the default -# >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' -# >>> reg.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, 
moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# >>> reg.run() # doctest: +SKIP -# -# Same as reg1, but first invert the initial transform ('trans.mat') before applying it. -# -# >>> reg.inputs.invert_initial_moving_transform = True -# >>> reg1 = copy.deepcopy(reg) -# >>> reg1.inputs.winsorize_lower_quantile = 0.025 -# >>> reg1.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' -# >>> reg1.run() # doctest: +SKIP -# -# Clip extremely high intensity data points using winsorize_upper_quantile. All data points -# higher than the 0.975 quantile are set to the value of the 0.975 quantile. 
-# -# >>> reg2 = copy.deepcopy(reg) -# >>> reg2.inputs.winsorize_upper_quantile = 0.975 -# >>> reg2.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' -# -# Clip extremely low intensity data points using winsorize_lower_quantile. All data points -# lower than the 0.025 quantile are set to the original value at the 0.025 quantile. 
-# -# -# >>> reg3 = copy.deepcopy(reg) -# >>> reg3.inputs.winsorize_lower_quantile = 0.025 -# >>> reg3.inputs.winsorize_upper_quantile = 0.975 -# >>> reg3.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' -# -# Use float instead of double for computations (saves memory usage) -# -# >>> reg3a = copy.deepcopy(reg) -# >>> reg3a.inputs.float = True -# >>> reg3a.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# Force to use double instead of float for computations (more precision and memory usage). 
-# -# >>> reg3b = copy.deepcopy(reg) -# >>> reg3b.inputs.float = False -# >>> reg3b.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'- -# file. Note that forward_transforms will now be an empty list. 
-# -# >>> # Test collapse transforms flag -# >>> reg4 = copy.deepcopy(reg) -# >>> reg4.inputs.save_state = 'trans.mat' -# >>> reg4.inputs.restore_state = 'trans.mat' -# >>> reg4.inputs.initialize_transforms_per_stage = True -# >>> reg4.inputs.collapse_output_transforms = True -# >>> outputs = reg4._list_outputs() -# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, -# {'composite_transform': '...data/output_Composite.h5', -# 'elapsed_time': , -# 'forward_invert_flags': [], -# 'forward_transforms': [], -# 'inverse_composite_transform': '...data/output_InverseComposite.h5', -# 'inverse_warped_image': , -# 'metric_value': , -# 'reverse_forward_invert_flags': [], -# 'reverse_forward_transforms': [], -# 'reverse_invert_flags': [], -# 'reverse_transforms': [], -# 'save_state': '...data/trans.mat', -# 'warped_image': '...data/output_warped_image.nii.gz'} -# >>> reg4.cmdline -# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# -# >>> # Test collapse transforms flag -# >>> reg4b = copy.deepcopy(reg4) -# >>> reg4b.inputs.write_composite_transform = False -# >>> outputs = reg4b._list_outputs() -# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, -# {'composite_transform': , -# 'elapsed_time': , -# 
'forward_invert_flags': [False, False], -# 'forward_transforms': ['...data/output_0GenericAffine.mat', -# '...data/output_1Warp.nii.gz'], -# 'inverse_composite_transform': , -# 'inverse_warped_image': , -# 'metric_value': , -# 'reverse_forward_invert_flags': [False, False], -# 'reverse_forward_transforms': ['...data/output_1Warp.nii.gz', -# '...data/output_0GenericAffine.mat'], -# 'reverse_invert_flags': [True, False], -# 'reverse_transforms': ['...data/output_0GenericAffine.mat', '...data/output_1InverseWarp.nii.gz'], -# 'save_state': '...data/trans.mat', -# 'warped_image': '...data/output_warped_image.nii.gz'} -# >>> reg4b.aggregate_outputs() # doctest: +SKIP -# >>> reg4b.cmdline -# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0' -# -# One can use multiple similarity metrics in a single registration stage.The Node below first -# performs a linear registation using only the Mutual Information ('Mattes')-metric. -# In a second stage, it performs a non-linear registration ('Syn') using both a -# Mutual Information and a local cross-correlation ('CC')-metric. Both metrics are weighted -# equally ('metric_weight' is .5 for both). The Mutual Information- metric uses 32 bins. 
-# The local cross-correlations (correlations between every voxel's neighborhoods) is computed -# with a radius of 4. -# -# >>> # Test multiple metrics per stage -# >>> reg5 = copy.deepcopy(reg) -# >>> reg5.inputs.fixed_image = 'fixed1.nii' -# >>> reg5.inputs.moving_image = 'moving1.nii' -# >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']] -# >>> reg5.inputs.metric_weight = [1, [.5,.5]] -# >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] -# >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage -# >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] -# >>> reg5.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed -# that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and -# moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii, -# then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from -# the transformation of the first step. 
-# -# >>> # Test multiple inputS -# >>> reg6 = copy.deepcopy(reg5) -# >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] -# >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] -# >>> reg6.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# Different methods can be used for the interpolation when applying transformations. 
-# -# >>> # Test Interpolation Parameters (BSpline) -# >>> reg7a = copy.deepcopy(reg) -# >>> reg7a.inputs.interpolation = 'BSpline' -# >>> reg7a.inputs.interpolation_parameters = (3,) -# >>> reg7a.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# >>> # Test Interpolation Parameters (MultiLabel/Gaussian) -# >>> reg7b = copy.deepcopy(reg) -# >>> reg7b.inputs.interpolation = 'Gaussian' -# >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) -# >>> reg7b.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] 
--write-composite-transform 1' -# -# BSplineSyN non-linear registration with custom parameters. -# -# >>> # Test Extended Transform Parameters -# >>> reg8 = copy.deepcopy(reg) -# >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] -# >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] -# >>> reg8.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# Mask the fixed image in the second stage of the registration (but not the first). 
-# -# >>> # Test masking -# >>> reg9 = copy.deepcopy(reg) -# >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] -# >>> reg9.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# -# Here we use both a warpfield and a linear transformation, before registration commences. Note that -# the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of -# 'initial_moving_transform'. 
-# -# >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform) -# >>> reg10 = copy.deepcopy(reg) -# >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] -# >>> reg10.inputs.invert_initial_moving_transform = [False, False] -# >>> reg10.cmdline -# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' -# task_name: Registration nipype_name: Registration nipype_module: nipype.interfaces.ants.registration @@ -279,15 +279,12 @@ inputs: # type=file|default=: Mask used to limit metric sampling region of the fixed imagein all stages initial_moving_transform: datascience/text-matrix+list-of # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - moving_image: medimage/nifti1+list-of + moving_image: generic/file+list-of # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. 
This is theimage on which the transformations will be applied to moving_image_mask: generic/file # type=file|default=: mask used to limit metric sampling region of the moving imagein all stages - restore_state: datascience/text-matrix + restore_state: generic/file # type=file|default=: Filename for restoring the internal restorable state of the registration - save_state: Path - # type=file: The saved registration state to be restored - # type=file|default=: Filename for saving the internal restorable state of the registration callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -329,7 +326,7 @@ outputs: metric_value: metric_value_callable # type=float: the final value of metric templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -446,7 +443,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -465,54 +462,30 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. 
This is theimage on which the transformations will be applied to output_transform_prefix: '"output_"' # type=str|default='transform': - initial_moving_transform: - # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. transforms: '["Affine", "SyN"]' # type=list|default=[]: - transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' - # type=list|default=[]: number_of_iterations: '[[1500, 200], [100, 50, 30]]' # type=list|default=[]: - dimension: '3' - # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) write_composite_transform: 'True' # type=bool|default=False: - collapse_output_transforms: 'False' - # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. initialize_transforms_per_stage: 'False' # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). - metric: '["Mattes"]*2' - # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. 
- radius_or_number_of_bins: '[32]*2' - # type=list|default=[5]: the number of bins in each stage for the MI and Mattes metric, the radius for other metrics sampling_strategy: '["Random", None]' # type=list|default=[]: the metric sampling strategy (strategies) for each stage - sampling_percentage: '[0.05, None]' - # type=list|default=[]: the metric sampling percentage(s) to use for each stage convergence_threshold: '[1.e-8, 1.e-9]' # type=list|default=[1e-06]: - convergence_window_size: '[20]*2' - # type=list|default=[10]: smoothing_sigmas: '[[1,0], [2,1,0]]' # type=list|default=[]: - sigma_units: '["vox"] * 2' - # type=list|default=[]: units for smoothing sigmas shrink_factors: '[[2,1], [3,2,1]]' # type=list|default=[]: - use_estimate_learning_rate_once: '[True, True]' - # type=list|default=[]: use_histogram_matching: '[True, True] # This is the default' # type=traitcompound|default=True: Histogram match the images before registration. - output_warped_image: '"output_warped_image.nii.gz"' - # type=traitcompound|default=None: imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: copy - module: pprint @@ -536,7 +509,7 @@ tests: winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -556,7 +529,7 @@ tests: winsorize_upper_quantile: '0.975' # 
type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -575,10 +548,8 @@ tests: # (if not specified, will try to choose a sensible value) winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges - winsorize_upper_quantile: '0.975' - # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -598,7 +569,7 @@ tests: float: 'True' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -618,7 +589,7 @@ tests: float: 'False' # type=bool|default=False: Use float instead of double for computations. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -638,14 +609,10 @@ tests: save_state: '"trans.mat"' # type=file: The saved registration state to be restored # type=file|default=: Filename for saving the internal restorable state of the registration - restore_state: - # type=file|default=: Filename for restoring the internal restorable state of the registration initialize_transforms_per_stage: 'True' # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). - collapse_output_transforms: 'True' - # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -665,7 +632,7 @@ tests: write_composite_transform: 'False' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -684,20 +651,14 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to metric: '["Mattes", ["Mattes", "CC"]]' # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. - metric_weight: '[1, [.5,.5]]' - # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. 
radius_or_number_of_bins: '[32, [32, 4] ]' # type=list|default=[5]: the number of bins in each stage for the MI and Mattes metric, the radius for other metrics - sampling_strategy: '["Random", None] # use default strategy in second stage' - # type=list|default=[]: the metric sampling strategy (strategies) for each stage sampling_percentage: '[0.05, [0.05, 0.10]]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -716,10 +677,8 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. 
This is theimage on which the transformations will be applied to imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -738,10 +697,8 @@ tests: # (if not specified, will try to choose a sensible value) interpolation: '"BSpline"' # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (3,) - # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -760,10 +717,8 @@ tests: # (if not specified, will try to choose a sensible value) interpolation: '"Gaussian"' # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (1.0, 1.0) - # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -782,10 +737,8 @@ tests: # (if not specified, will try to choose a sensible value) transforms: '["Affine", "BSplineSyN"]' # type=list|default=[]: - transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' - # type=list|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -805,7 +758,7 @@ tests: fixed_image_masks: '["NULL", "fixed1.nii"]' # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -824,10 +777,8 @@ tests: # (if not specified, will try to choose a sensible value) initial_moving_transform: # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - invert_initial_moving_transform: '[False, False]' - # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -842,7 +793,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 
--use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -850,58 +801,34 @@ doctests: # '.mock()' method of the corresponding class is used instead. fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: '"moving1.nii"' - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to output_transform_prefix: '"output_"' # type=str|default='transform': - initial_moving_transform: '"trans.mat"' - # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. transforms: '["Affine", "SyN"]' # type=list|default=[]: - transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' - # type=list|default=[]: number_of_iterations: '[[1500, 200], [100, 50, 30]]' # type=list|default=[]: - dimension: '3' - # type=enum|default=3|allowed[2,3]: image dimension (2 or 3) write_composite_transform: 'True' # type=bool|default=False: - collapse_output_transforms: 'False' - # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. initialize_transforms_per_stage: 'False' # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. 
Translation -> Rigid -> Affine). - metric: '["Mattes"]*2' - # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. - radius_or_number_of_bins: '[32]*2' - # type=list|default=[5]: the number of bins in each stage for the MI and Mattes metric, the radius for other metrics sampling_strategy: '["Random", None]' # type=list|default=[]: the metric sampling strategy (strategies) for each stage - sampling_percentage: '[0.05, None]' - # type=list|default=[]: the metric sampling percentage(s) to use for each stage convergence_threshold: '[1.e-8, 1.e-9]' # type=list|default=[1e-06]: - convergence_window_size: '[20]*2' - # type=list|default=[10]: smoothing_sigmas: '[[1,0], [2,1,0]]' # type=list|default=[]: - sigma_units: '["vox"] * 2' - # type=list|default=[]: units for smoothing sigmas shrink_factors: '[[2,1], [3,2,1]]' # type=list|default=[]: - use_estimate_learning_rate_once: '[True, True]' - # type=list|default=[]: use_histogram_matching: '[True, True] # This is the default' # type=traitcompound|default=True: Histogram match the images before registration. - output_warped_image: '"output_warped_image.nii.gz"' - # type=traitcompound|default=None: imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -912,11 +839,11 @@ doctests: winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 
--winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -925,11 +852,11 @@ doctests: winsorize_upper_quantile: '0.975' # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 
] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -937,14 +864,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. winsorize_lower_quantile: '0.025' # type=range|default=0.0: The Lower quantile to clip image ranges - winsorize_upper_quantile: '0.975' - # type=range|default=1.0: The Upper quantile to clip image ranges imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -953,11 +878,11 @@ doctests: float: 'True' # type=bool|default=False: Use float instead of double for computations. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - 
name-value pairs for inputs to be provided to the doctest. @@ -966,11 +891,11 @@ doctests: float: 'False' # type=bool|default=False: Use float instead of double for computations. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] 
--convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -979,18 +904,14 @@ doctests: save_state: '"trans.mat"' # type=file: The saved registration state to be restored # type=file|default=: Filename for saving the internal restorable state of the registration - restore_state: '"trans.mat"' - # type=file|default=: Filename for restoring the internal restorable state of the registration initialize_transforms_per_stage: 'True' # type=bool|default=False: Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). - collapse_output_transforms: 'True' - # type=bool|default=True: Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0 +- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -999,11 +920,11 @@ doctests: write_composite_transform: 'False' # type=bool|default=False: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] 
--smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1011,24 +932,18 @@ doctests: # '.mock()' method of the corresponding class is used instead. fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: '"moving1.nii"' - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to metric: '["Mattes", ["Mattes", "CC"]]' # type=list|default=[]: the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. - metric_weight: '[1, [.5,.5]]' - # type=list|default=[1.0]: the metric weight(s) for each stage. The weights must sum to 1 per stage. radius_or_number_of_bins: '[32, [32, 4] ]' # type=list|default=[5]: the number of bins in each stage for the MI and Mattes metric, the radius for other metrics - sampling_strategy: '["Random", None] # use default strategy in second stage' - # type=list|default=[]: the metric sampling strategy (strategies) for each stage sampling_percentage: '[0.05, [0.05, 0.10]]' # type=list|default=[]: the metric sampling percentage(s) to use for each stage imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1036,14 +951,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
fixed_image: '["fixed1.nii", "fixed2.nii"]' # type=inputmultiobject|default=[]: Image to which the moving_image should be transformed(usually a structural image) - moving_image: '["moving1.nii", "moving2.nii"]' - # type=inputmultiobject|default=[]: Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 
--use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1051,14 +964,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. interpolation: '"BSpline"' # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (3,) - # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1066,14 +977,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
interpolation: '"Gaussian"' # type=enum|default='Linear'|allowed['BSpline','CosineWindowedSinc','Gaussian','GenericLabel','HammingWindowedSinc','LanczosWindowedSinc','Linear','MultiLabel','NearestNeighbor','WelchWindowedSinc']: - interpolation_parameters: (1.0, 1.0) - # type=traitcompound|default=None: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric 
Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1081,14 +990,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. transforms: '["Affine", "BSplineSyN"]' # type=list|default=[]: - transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' - # type=list|default=[]: imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] 
--initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1097,11 +1004,11 @@ doctests: fixed_image_masks: '["NULL", "fixed1.nii"]' # type=inputmultiobject|default=[]: Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS -- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -1109,10 +1016,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
initial_moving_transform: '["func_to_struct.mat", "ants_Warp.nii.gz"]' # type=inputmultiobject|default=[]: A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. - invert_initial_moving_transform: '[False, False]' - # type=inputmultiobject|default=[]: One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/registration_syn_quick.yaml b/example-specs/interface/nipype/ants/registration_syn_quick.yaml index 5ba7dfd3..83d8e4d8 100644 --- a/example-specs/interface/nipype/ants/registration_syn_quick.yaml +++ b/example-specs/interface/nipype/ants/registration_syn_quick.yaml @@ -6,33 +6,33 @@ # Docs # ---- # -# Registration using a symmetric image normalization method (SyN). -# You can read more in Avants et al.; Med Image Anal., 2008 -# (https://www.ncbi.nlm.nih.gov/pubmed/17659998). +# Registration using a symmetric image normalization method (SyN). +# You can read more in Avants et al.; Med Image Anal., 2008 +# (https://www.ncbi.nlm.nih.gov/pubmed/17659998). 
# -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import RegistrationSynQuick -# >>> reg = RegistrationSynQuick() -# >>> reg.inputs.fixed_image = 'fixed1.nii' -# >>> reg.inputs.moving_image = 'moving1.nii' -# >>> reg.inputs.num_threads = 2 -# >>> reg.cmdline -# 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -r 32 -m moving1.nii -n 2 -o transform -p d -s 26 -t s' -# >>> reg.run() # doctest: +SKIP +# >>> from nipype.interfaces.ants import RegistrationSynQuick +# >>> reg = RegistrationSynQuick() +# >>> reg.inputs.fixed_image = 'fixed1.nii' +# >>> reg.inputs.moving_image = 'moving1.nii' +# >>> reg.inputs.num_threads = 2 +# >>> reg.cmdline +# 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -r 32 -m moving1.nii -n 2 -o transform -p d -s 26 -t s' +# >>> reg.run() # doctest: +SKIP # -# example for multiple images +# example for multiple images +# +# >>> from nipype.interfaces.ants import RegistrationSynQuick +# >>> reg = RegistrationSynQuick() +# >>> reg.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] +# >>> reg.inputs.moving_image = ['moving1.nii', 'moving2.nii'] +# >>> reg.inputs.num_threads = 2 +# >>> reg.cmdline +# 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -f fixed2.nii -r 32 -m moving1.nii -m moving2.nii -n 2 -o transform -p d -s 26 -t s' +# >>> reg.run() # doctest: +SKIP # -# >>> from nipype.interfaces.ants import RegistrationSynQuick -# >>> reg = RegistrationSynQuick() -# >>> reg.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] -# >>> reg.inputs.moving_image = ['moving1.nii', 'moving2.nii'] -# >>> reg.inputs.num_threads = 2 -# >>> reg.cmdline -# 'antsRegistrationSyNQuick.sh -d 3 -f fixed1.nii -f fixed2.nii -r 32 -m moving1.nii -m moving2.nii -n 2 -o transform -p d -s 26 -t s' -# >>> reg.run() # doctest: +SKIP -# task_name: RegistrationSynQuick nipype_name: RegistrationSynQuick nipype_module: nipype.interfaces.ants.registration @@ -49,7 +49,7 @@ inputs: # passed to the field in the automatically generated unittests. 
fixed_image: medimage/nifti1+list-of # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: medimage/nifti1+list-of + moving_image: generic/file+list-of # type=inputmultiobject|default=[]: Moving image or target image callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -81,7 +81,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -115,7 +115,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,12 +134,10 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: - # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,12 +156,10 @@ tests: # (if not specified, will try to choose a sensible value) fixed_image: # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: - # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -186,12 +182,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. fixed_image: '"fixed1.nii"' # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: '"moving1.nii"' - # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -203,12 +197,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
fixed_image: '["fixed1.nii", "fixed2.nii"]' # type=inputmultiobject|default=[]: Fixed image or source image or reference image - moving_image: '["moving1.nii", "moving2.nii"]' - # type=inputmultiobject|default=[]: Moving image or target image num_threads: '2' # type=int|default=1: Number of threads (default = 1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/resample_image_by_spacing.yaml b/example-specs/interface/nipype/ants/resample_image_by_spacing.yaml index 88212944..499a76d8 100644 --- a/example-specs/interface/nipype/ants/resample_image_by_spacing.yaml +++ b/example-specs/interface/nipype/ants/resample_image_by_spacing.yaml @@ -6,36 +6,36 @@ # Docs # ---- # -# Resample an image with a given spacing. +# Resample an image with a given spacing. 
# -# Examples -# -------- -# >>> res = ResampleImageBySpacing(dimension=3) -# >>> res.inputs.input_image = 'structural.nii' -# >>> res.inputs.output_image = 'output.nii.gz' -# >>> res.inputs.out_spacing = (4, 4, 4) -# >>> res.cmdline #doctest: +ELLIPSIS -# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4' +# Examples +# -------- +# >>> res = ResampleImageBySpacing(dimension=3) +# >>> res.inputs.input_image = 'structural.nii' +# >>> res.inputs.output_image = 'output.nii.gz' +# >>> res.inputs.out_spacing = (4, 4, 4) +# >>> res.cmdline #doctest: +ELLIPSIS +# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4' # -# >>> res = ResampleImageBySpacing(dimension=3) -# >>> res.inputs.input_image = 'structural.nii' -# >>> res.inputs.output_image = 'output.nii.gz' -# >>> res.inputs.out_spacing = (4, 4, 4) -# >>> res.inputs.apply_smoothing = True -# >>> res.cmdline #doctest: +ELLIPSIS -# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4 1' +# >>> res = ResampleImageBySpacing(dimension=3) +# >>> res.inputs.input_image = 'structural.nii' +# >>> res.inputs.output_image = 'output.nii.gz' +# >>> res.inputs.out_spacing = (4, 4, 4) +# >>> res.inputs.apply_smoothing = True +# >>> res.cmdline #doctest: +ELLIPSIS +# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 4 4 4 1' +# +# >>> res = ResampleImageBySpacing(dimension=3) +# >>> res.inputs.input_image = 'structural.nii' +# >>> res.inputs.output_image = 'output.nii.gz' +# >>> res.inputs.out_spacing = (0.4, 0.4, 0.4) +# >>> res.inputs.apply_smoothing = True +# >>> res.inputs.addvox = 2 +# >>> res.inputs.nn_interp = False +# >>> res.cmdline #doctest: +ELLIPSIS +# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 0.4 0.4 0.4 1 2 0' # -# >>> res = ResampleImageBySpacing(dimension=3) -# >>> res.inputs.input_image = 'structural.nii' -# >>> res.inputs.output_image = 'output.nii.gz' -# >>> res.inputs.out_spacing = (0.4, 0.4, 0.4) -# >>> res.inputs.apply_smoothing = True -# >>> res.inputs.addvox = 2 -# 
>>> res.inputs.nn_interp = False -# >>> res.cmdline #doctest: +ELLIPSIS -# 'ResampleImageBySpacing 3 structural.nii output.nii.gz 0.4 0.4 0.4 1 2 0' # -# task_name: ResampleImageBySpacing nipype_name: ResampleImageBySpacing nipype_module: nipype.interfaces.ants.utils @@ -52,9 +52,6 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: input image file - output_image: Path - # type=file: resampled file - # type=file|default=: output image file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,14 +68,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_image: medimage/nifti-gz + output_image: generic/file # type=file: resampled file # type=file|default=: output image file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -107,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting 
that tests will typically @@ -126,15 +123,12 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (4, 4, 4) # type=traitcompound|default=None: output spacing dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -153,17 +147,12 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (4, 4, 4) # type=traitcompound|default=None: output spacing - apply_smoothing: 'True' - # type=bool|default=False: smooth before resampling dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -182,21 +171,14 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (0.4, 0.4, 0.4) # 
type=traitcompound|default=None: output spacing - apply_smoothing: 'True' - # type=bool|default=False: smooth before resampling addvox: '2' # type=int|default=0: addvox pads each dimension by addvox - nn_interp: 'False' - # type=bool|default=False: nn interpolation dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -219,15 +201,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (4, 4, 4) # type=traitcompound|default=None: output spacing dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -239,17 +218,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
input_image: '"structural.nii"' # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (4, 4, 4) # type=traitcompound|default=None: output spacing - apply_smoothing: 'True' - # type=bool|default=False: smooth before resampling dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -261,21 +235,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file out_spacing: (0.4, 0.4, 0.4) # type=traitcompound|default=None: output spacing - apply_smoothing: 'True' - # type=bool|default=False: smooth before resampling addvox: '2' # type=int|default=0: addvox pads each dimension by addvox - nn_interp: 'False' - # type=bool|default=False: nn interpolation dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/threshold_image.yaml b/example-specs/interface/nipype/ants/threshold_image.yaml index 4e0b85c9..7f3eda94 100644 --- a/example-specs/interface/nipype/ants/threshold_image.yaml +++ b/example-specs/interface/nipype/ants/threshold_image.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Apply thresholds on images. +# Apply thresholds on images. # -# Examples -# -------- -# >>> thres = ThresholdImage(dimension=3) -# >>> thres.inputs.input_image = 'structural.nii' -# >>> thres.inputs.output_image = 'output.nii.gz' -# >>> thres.inputs.th_low = 0.5 -# >>> thres.inputs.th_high = 1.0 -# >>> thres.inputs.inside_value = 1.0 -# >>> thres.inputs.outside_value = 0.0 -# >>> thres.cmdline #doctest: +ELLIPSIS -# 'ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000' +# Examples +# -------- +# >>> thres = ThresholdImage(dimension=3) +# >>> thres.inputs.input_image = 'structural.nii' +# >>> thres.inputs.output_image = 'output.nii.gz' +# >>> thres.inputs.th_low = 0.5 +# >>> thres.inputs.th_high = 1.0 +# >>> thres.inputs.inside_value = 1.0 +# >>> thres.inputs.outside_value = 0.0 +# >>> thres.cmdline #doctest: +ELLIPSIS +# 'ThresholdImage 3 structural.nii output.nii.gz 0.500000 1.000000 1.000000 0.000000' +# +# >>> thres = ThresholdImage(dimension=3) +# >>> thres.inputs.input_image = 'structural.nii' +# >>> thres.inputs.output_image = 'output.nii.gz' +# >>> thres.inputs.mode = 'Kmeans' +# >>> thres.inputs.num_thresholds = 4 +# >>> thres.cmdline #doctest: +ELLIPSIS +# 'ThresholdImage 3 structural.nii output.nii.gz Kmeans 4' # -# >>> thres = ThresholdImage(dimension=3) -# >>> thres.inputs.input_image = 'structural.nii' -# >>> thres.inputs.output_image = 'output.nii.gz' -# >>> thres.inputs.mode = 'Kmeans' -# >>> thres.inputs.num_thresholds = 4 -# >>> thres.cmdline #doctest: +ELLIPSIS -# 'ThresholdImage 3 structural.nii output.nii.gz Kmeans 4' # -# task_name: ThresholdImage nipype_name: ThresholdImage 
nipype_module: nipype.interfaces.ants.utils @@ -47,9 +47,6 @@ inputs: # type=file|default=: input image file input_mask: generic/file # type=file|default=: input mask for Otsu, Kmeans - output_image: Path - # type=file: resampled file - # type=file|default=: output image file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -66,14 +63,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - output_image: medimage/nifti-gz + output_image: generic/file # type=file: resampled file # type=file|default=: output image file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -110,7 +107,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,21 +126,14 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # 
type=file|default=: output image file th_low: '0.5' # type=float|default=0.0: lower threshold - th_high: '1.0' - # type=float|default=0.0: upper threshold inside_value: '1.0' # type=float|default=1: inside value - outside_value: '0.0' - # type=float|default=0: outside value dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -162,17 +152,12 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file mode: '"Kmeans"' # type=enum|default='Otsu'|allowed['Kmeans','Otsu']: whether to run Otsu / Kmeans thresholding - num_thresholds: '4' - # type=int|default=0: number of thresholds dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -195,21 +180,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
input_image: '"structural.nii"' # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file th_low: '0.5' # type=float|default=0.0: lower threshold - th_high: '1.0' - # type=float|default=0.0: upper threshold inside_value: '1.0' # type=float|default=1: inside value - outside_value: '0.0' - # type=float|default=0: outside value dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -221,17 +199,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: input image file - output_image: '"output.nii.gz"' - # type=file: resampled file - # type=file|default=: output image file mode: '"Kmeans"' # type=enum|default='Otsu'|allowed['Kmeans','Otsu']: whether to run Otsu / Kmeans thresholding - num_thresholds: '4' - # type=int|default=0: number of thresholds dimension: '3' # type=int|default=3: dimension of output image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/warp_image_multi_transform.yaml b/example-specs/interface/nipype/ants/warp_image_multi_transform.yaml index 3e42e171..61ff1c59 100644 --- a/example-specs/interface/nipype/ants/warp_image_multi_transform.yaml +++ b/example-specs/interface/nipype/ants/warp_image_multi_transform.yaml @@ -7,26 +7,26 @@ # ---- # Warps an image from one space to another # -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import WarpImageMultiTransform -# >>> wimt = WarpImageMultiTransform() -# >>> wimt.inputs.input_image = 'structural.nii' -# >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz' -# >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] -# >>> wimt.cmdline -# 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' +# >>> from nipype.interfaces.ants import WarpImageMultiTransform +# >>> wimt = WarpImageMultiTransform() +# >>> wimt.inputs.input_image = 'structural.nii' +# >>> wimt.inputs.reference_image = 'ants_deformed.nii.gz' +# >>> wimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] +# >>> wimt.cmdline +# 'WarpImageMultiTransform 3 structural.nii structural_wimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' +# +# >>> wimt = WarpImageMultiTransform() +# >>> wimt.inputs.input_image = 'diffusion_weighted.nii' +# >>> wimt.inputs.reference_image = 'functional.nii' +# >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', 'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] +# >>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt' +# >>> wimt.cmdline +# 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' 
# -# >>> wimt = WarpImageMultiTransform() -# >>> wimt.inputs.input_image = 'diffusion_weighted.nii' -# >>> wimt.inputs.reference_image = 'functional.nii' -# >>> wimt.inputs.transformation_series = ['func2anat_coreg_Affine.txt','func2anat_InverseWarp.nii.gz', 'dwi2anat_Warp.nii.gz','dwi2anat_coreg_Affine.txt'] -# >>> wimt.inputs.invert_affine = [1] # this will invert the 1st Affine file: 'func2anat_coreg_Affine.txt' -# >>> wimt.cmdline -# 'WarpImageMultiTransform 3 diffusion_weighted.nii diffusion_weighted_wimt.nii -R functional.nii -i func2anat_coreg_Affine.txt func2anat_InverseWarp.nii.gz dwi2anat_Warp.nii.gz dwi2anat_coreg_Affine.txt' # -# task_name: WarpImageMultiTransform nipype_name: WarpImageMultiTransform nipype_module: nipype.interfaces.ants.resampling @@ -45,10 +45,7 @@ inputs: # type=file|default=: image to apply transformation to (generally a coregistered functional) out_postfix: str # type=file|default='_wimt': Postfix that is prepended to all output files (default = _wimt) - output_image: Path - # type=file: Warped image - # type=file|default=: name of the output warped image - reference_image: medimage/nifti1,medimage/nifti-gz + reference_image: generic/file # type=file|default=: reference image space that you wish to warp INTO transformation_series: '[text/text-file,medimage/nifti-gz]+list-of' # type=inputmultiobject|default=[]: transformation file(s) to be applied @@ -75,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields output_image: output_image # type=file: Warped image # type=file|default=: name of the output warped image @@ -115,7 +112,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - 
list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,12 +131,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: - # type=file|default=: reference image space that you wish to warp INTO transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,14 +153,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: - # type=file|default=: reference image space that you wish to warp INTO transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied - invert_affine: '[1] # this will invert the 1st Affine file: "func2anat_coreg_Affine.txt"' - # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -188,12 +179,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"structural.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: '"ants_deformed.nii.gz"' - # type=file|default=: reference image space that you wish to warp INTO transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -205,14 +194,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
input_image: '"diffusion_weighted.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: '"functional.nii"' - # type=file|default=: reference image space that you wish to warp INTO transformation_series: '["func2anat_coreg_Affine.txt","func2anat_InverseWarp.nii.gz", "dwi2anat_Warp.nii.gz","dwi2anat_coreg_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied - invert_affine: '[1] # this will invert the 1st Affine file: "func2anat_coreg_Affine.txt"' - # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/ants/warp_time_series_image_multi_transform.yaml b/example-specs/interface/nipype/ants/warp_time_series_image_multi_transform.yaml index 5ec4697c..afc3ce94 100644 --- a/example-specs/interface/nipype/ants/warp_time_series_image_multi_transform.yaml +++ b/example-specs/interface/nipype/ants/warp_time_series_image_multi_transform.yaml @@ -7,25 +7,25 @@ # ---- # Warps a time-series from one space to another # -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform -# >>> wtsimt = WarpTimeSeriesImageMultiTransform() -# >>> wtsimt.inputs.input_image = 'resting.nii' -# >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' -# >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] -# >>> wtsimt.cmdline -# 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' +# >>> from nipype.interfaces.ants import WarpTimeSeriesImageMultiTransform +# >>> wtsimt = WarpTimeSeriesImageMultiTransform() +# >>> wtsimt.inputs.input_image = 'resting.nii' +# >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' +# >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] +# >>> wtsimt.cmdline +# 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz ants_Affine.txt' +# +# >>> wtsimt = WarpTimeSeriesImageMultiTransform() +# >>> wtsimt.inputs.input_image = 'resting.nii' +# >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' +# >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] +# >>> wtsimt.inputs.invert_affine = [1] # # this will invert the 1st Affine file: ants_Affine.txt +# >>> wtsimt.cmdline +# 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' # -# >>> wtsimt = 
WarpTimeSeriesImageMultiTransform() -# >>> wtsimt.inputs.input_image = 'resting.nii' -# >>> wtsimt.inputs.reference_image = 'ants_deformed.nii.gz' -# >>> wtsimt.inputs.transformation_series = ['ants_Warp.nii.gz','ants_Affine.txt'] -# >>> wtsimt.inputs.invert_affine = [1] # # this will invert the 1st Affine file: ants_Affine.txt -# >>> wtsimt.cmdline -# 'WarpTimeSeriesImageMultiTransform 4 resting.nii resting_wtsimt.nii -R ants_deformed.nii.gz ants_Warp.nii.gz -i ants_Affine.txt' -# task_name: WarpTimeSeriesImageMultiTransform nipype_name: WarpTimeSeriesImageMultiTransform nipype_module: nipype.interfaces.ants.resampling @@ -42,7 +42,7 @@ inputs: # passed to the field in the automatically generated unittests. input_image: medimage/nifti1 # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: medimage/nifti-gz + reference_image: generic/file # type=file|default=: reference image space that you wish to warp INTO transformation_series: medimage/nifti-gz+list-of # type=inputmultiobject|default=[]: transformation file(s) to be applied @@ -68,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -102,7 +102,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, 
str] - expected values for selected outputs, noting that tests will typically @@ -121,12 +121,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: - # type=file|default=: reference image space that you wish to warp INTO transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,14 +143,10 @@ tests: # (if not specified, will try to choose a sensible value) input_image: # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: - # type=file|default=: reference image space that you wish to warp INTO transformation_series: # type=inputmultiobject|default=[]: transformation file(s) to be applied - invert_affine: '[1] # # this will invert the 1st Affine file: ants_Affine.txt' - # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -175,12 +169,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. input_image: '"resting.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: '"ants_deformed.nii.gz"' - # type=file|default=: reference image space that you wish to warp INTO transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -192,14 +184,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
input_image: '"resting.nii"' # type=file|default=: image to apply transformation to (generally a coregistered functional) - reference_image: '"ants_deformed.nii.gz"' - # type=file|default=: reference image space that you wish to warp INTO transformation_series: '["ants_Warp.nii.gz","ants_Affine.txt"]' # type=inputmultiobject|default=[]: transformation file(s) to be applied - invert_affine: '[1] # # this will invert the 1st Affine file: ants_Affine.txt' - # type=list|default=[]: List of Affine transformations to invert.E.g.: [1,4,5] inverts the 1st, 4th, and 5th Affines found in transformation_series. Note that indexing starts with 1 and does not include warp fields. Affine transformations are distinguished from warp fields by the word "affine" included in their filenames. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/add_x_form_to_header.yaml b/example-specs/interface/nipype/freesurfer/add_x_form_to_header.yaml index 1fcb2fad..c5b81459 100644 --- a/example-specs/interface/nipype/freesurfer/add_x_form_to_header.yaml +++ b/example-specs/interface/nipype/freesurfer/add_x_form_to_header.yaml @@ -6,32 +6,32 @@ # Docs # ---- # -# Just adds specified xform to the volume header. +# Just adds specified xform to the volume header. # -# .. danger :: +# .. danger :: # -# Input transform **MUST** be an absolute path to a DataSink'ed transform or -# the output will reference a transform in the workflow cache directory! +# Input transform **MUST** be an absolute path to a DataSink'ed transform or +# the output will reference a transform in the workflow cache directory! 
# -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import AddXFormToHeader -# >>> adder = AddXFormToHeader() -# >>> adder.inputs.in_file = 'norm.mgz' -# >>> adder.inputs.transform = 'trans.mat' -# >>> adder.cmdline -# 'mri_add_xform_to_header trans.mat norm.mgz output.mgz' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import AddXFormToHeader +# >>> adder = AddXFormToHeader() +# >>> adder.inputs.in_file = 'norm.mgz' +# >>> adder.inputs.transform = 'trans.mat' +# >>> adder.cmdline +# 'mri_add_xform_to_header trans.mat norm.mgz output.mgz' # -# >>> adder.inputs.copy_name = True -# >>> adder.cmdline -# 'mri_add_xform_to_header -c trans.mat norm.mgz output.mgz' -# >>> adder.run() # doctest: +SKIP +# >>> adder.inputs.copy_name = True +# >>> adder.cmdline +# 'mri_add_xform_to_header -c trans.mat norm.mgz output.mgz' +# >>> adder.run() # doctest: +SKIP +# +# References +# ---------- +# [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_add_xform_to_header] # -# References -# ---------- -# [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_add_xform_to_header] # -# task_name: AddXFormToHeader nipype_name: AddXFormToHeader nipype_module: nipype.interfaces.freesurfer.utils @@ -48,12 +48,9 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/mgh-gz # type=file|default=: input volume - out_file: Path - # type=file: output volume - # type=file|default='output.mgz': output volume subjects_dir: generic/directory # type=directory|default=: subjects directory - transform: datascience/text-matrix + transform: generic/file # type=file|default=: xfm file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -78,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +100,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,10 +119,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume - transform: - # type=file|default=: xfm file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will 
typically @@ -145,7 +140,7 @@ tests: copy_name: 'True' # type=bool|default=False: do not try to load the xfmfile, just copy name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -160,7 +155,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_add_xform_to_header trans.mat norm.mgz output.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -168,10 +163,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: input volume - transform: '"trans.mat"' - # type=file|default=: xfm file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -184,7 +177,7 @@ doctests: copy_name: 'True' # type=bool|default=False: do not try to load the xfmfile, just copy name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/aparc_2_aseg.yaml b/example-specs/interface/nipype/freesurfer/aparc_2_aseg.yaml index 2a51e675..7ce3308d 100644 --- a/example-specs/interface/nipype/freesurfer/aparc_2_aseg.yaml +++ b/example-specs/interface/nipype/freesurfer/aparc_2_aseg.yaml @@ -6,41 +6,41 @@ # Docs # ---- # -# Maps the cortical labels from the automatic cortical parcellation -# (aparc) to the automatic segmentation volume (aseg). The result can be -# used as the aseg would. The algorithm is to find each aseg voxel -# labeled as cortex (3 and 42) and assign it the label of the closest -# cortical vertex. If the voxel is not in the ribbon (as defined by mri/ -# lh.ribbon and rh.ribbon), then the voxel is marked as unknown (0). -# This can be turned off with ``--noribbon``. The cortical parcellation is -# obtained from subject/label/hemi.aparc.annot which should be based on -# the curvature.buckner40.filled.desikan_killiany.gcs atlas. The aseg is -# obtained from subject/mri/aseg.mgz and should be based on the -# RB40_talairach_2005-07-20.gca atlas. If these atlases are used, then the -# segmentations can be viewed with tkmedit and the -# FreeSurferColorLUT.txt color table found in ``$FREESURFER_HOME``. These -# are the default atlases used by ``recon-all``. +# Maps the cortical labels from the automatic cortical parcellation +# (aparc) to the automatic segmentation volume (aseg). 
The result can be +# used as the aseg would. The algorithm is to find each aseg voxel +# labeled as cortex (3 and 42) and assign it the label of the closest +# cortical vertex. If the voxel is not in the ribbon (as defined by mri/ +# lh.ribbon and rh.ribbon), then the voxel is marked as unknown (0). +# This can be turned off with ``--noribbon``. The cortical parcellation is +# obtained from subject/label/hemi.aparc.annot which should be based on +# the curvature.buckner40.filled.desikan_killiany.gcs atlas. The aseg is +# obtained from subject/mri/aseg.mgz and should be based on the +# RB40_talairach_2005-07-20.gca atlas. If these atlases are used, then the +# segmentations can be viewed with tkmedit and the +# FreeSurferColorLUT.txt color table found in ``$FREESURFER_HOME``. These +# are the default atlases used by ``recon-all``. +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import Aparc2Aseg +# >>> aparc2aseg = Aparc2Aseg() +# >>> aparc2aseg.inputs.lh_white = 'lh.pial' +# >>> aparc2aseg.inputs.rh_white = 'lh.pial' +# >>> aparc2aseg.inputs.lh_pial = 'lh.pial' +# >>> aparc2aseg.inputs.rh_pial = 'lh.pial' +# >>> aparc2aseg.inputs.lh_ribbon = 'label.mgz' +# >>> aparc2aseg.inputs.rh_ribbon = 'label.mgz' +# >>> aparc2aseg.inputs.ribbon = 'label.mgz' +# >>> aparc2aseg.inputs.lh_annotation = 'lh.pial' +# >>> aparc2aseg.inputs.rh_annotation = 'lh.pial' +# >>> aparc2aseg.inputs.out_file = 'aparc+aseg.mgz' +# >>> aparc2aseg.inputs.label_wm = True +# >>> aparc2aseg.inputs.rip_unknown = True +# >>> aparc2aseg.cmdline # doctest: +SKIP +# 'mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import Aparc2Aseg -# >>> aparc2aseg = Aparc2Aseg() -# >>> aparc2aseg.inputs.lh_white = 'lh.pial' -# >>> aparc2aseg.inputs.rh_white = 'lh.pial' -# >>> aparc2aseg.inputs.lh_pial = 'lh.pial' -# >>> aparc2aseg.inputs.rh_pial = 'lh.pial' -# >>> aparc2aseg.inputs.lh_ribbon = 'label.mgz' -# 
>>> aparc2aseg.inputs.rh_ribbon = 'label.mgz' -# >>> aparc2aseg.inputs.ribbon = 'label.mgz' -# >>> aparc2aseg.inputs.lh_annotation = 'lh.pial' -# >>> aparc2aseg.inputs.rh_annotation = 'lh.pial' -# >>> aparc2aseg.inputs.out_file = 'aparc+aseg.mgz' -# >>> aparc2aseg.inputs.label_wm = True -# >>> aparc2aseg.inputs.rip_unknown = True -# >>> aparc2aseg.cmdline # doctest: +SKIP -# 'mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id' # -# task_name: Aparc2Aseg nipype_name: Aparc2Aseg nipype_module: nipype.interfaces.freesurfer.utils @@ -61,24 +61,21 @@ inputs: # type=file|default=: filled: generic/file # type=file|default=: Implicit input filled file. Only required with FS v5.3. - lh_annotation: medimage-freesurfer/pial + lh_annotation: generic/file # type=file|default=: Input file must be /label/lh.aparc.annot - lh_pial: medimage-freesurfer/pial + lh_pial: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /surf/lh.pial lh_ribbon: medimage/mgh-gz # type=file|default=: Input file must be /mri/lh.ribbon.mgz - lh_white: medimage-freesurfer/pial + lh_white: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /surf/lh.white - out_file: Path - # type=file: Output aseg file - # type=file|default=: Full path of file to save the output segmentation in - rh_annotation: medimage-freesurfer/pial + rh_annotation: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /label/rh.aparc.annot - rh_pial: medimage-freesurfer/pial + rh_pial: generic/file # type=file|default=: Input file must be /surf/rh.pial - rh_ribbon: medimage/mgh-gz + rh_ribbon: generic/file # type=file|default=: Input file must be /mri/rh.ribbon.mgz - rh_white: medimage-freesurfer/pial + rh_white: generic/file # type=file|default=: Input file must be /surf/rh.white ribbon: medimage/mgh-gz # type=file|default=: Input file must be /mri/ribbon.mgz @@ -100,14 +97,14 @@ outputs: # from the nipype interface, but you may want to 
be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output aseg file # type=file|default=: Full path of file to save the output segmentation in callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -162,7 +159,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -181,31 +178,18 @@ tests: # (if not specified, will try to choose a sensible value) lh_white: # type=file|default=: Input file must be /surf/lh.white - rh_white: - # type=file|default=: Input file must be /surf/rh.white lh_pial: # type=file|default=: Input file must be /surf/lh.pial - rh_pial: - # type=file|default=: Input file must be /surf/rh.pial lh_ribbon: # type=file|default=: Input file must be /mri/lh.ribbon.mgz - rh_ribbon: - # type=file|default=: Input file must be /mri/rh.ribbon.mgz ribbon: # type=file|default=: Input file must be /mri/ribbon.mgz - lh_annotation: - # type=file|default=: Input file must be /label/lh.aparc.annot rh_annotation: # type=file|default=: Input file must be 
/label/rh.aparc.annot - out_file: '"aparc+aseg.mgz"' - # type=file: Output aseg file - # type=file|default=: Full path of file to save the output segmentation in label_wm: 'True' # type=bool|default=False: For each voxel labeled as white matter in the aseg, re-assign its label to be that of the closest cortical point if its distance is less than dmaxctx. - rip_unknown: 'True' - # type=bool|default=False: Do not label WM based on 'unknown' corical label imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -220,7 +204,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_aparc2aseg --labelwm --o aparc+aseg.mgz --rip-unknown --s subject_id +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -228,31 +212,18 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
lh_white: '"lh.pial"' # type=file|default=: Input file must be /surf/lh.white - rh_white: '"lh.pial"' - # type=file|default=: Input file must be /surf/rh.white lh_pial: '"lh.pial"' # type=file|default=: Input file must be /surf/lh.pial - rh_pial: '"lh.pial"' - # type=file|default=: Input file must be /surf/rh.pial lh_ribbon: '"label.mgz"' # type=file|default=: Input file must be /mri/lh.ribbon.mgz - rh_ribbon: '"label.mgz"' - # type=file|default=: Input file must be /mri/rh.ribbon.mgz ribbon: '"label.mgz"' # type=file|default=: Input file must be /mri/ribbon.mgz - lh_annotation: '"lh.pial"' - # type=file|default=: Input file must be /label/lh.aparc.annot rh_annotation: '"lh.pial"' # type=file|default=: Input file must be /label/rh.aparc.annot - out_file: '"aparc+aseg.mgz"' - # type=file: Output aseg file - # type=file|default=: Full path of file to save the output segmentation in label_wm: 'True' # type=bool|default=False: For each voxel labeled as white matter in the aseg, re-assign its label to be that of the closest cortical point if its distance is less than dmaxctx. - rip_unknown: 'True' - # type=bool|default=False: Do not label WM based on 'unknown' corical label imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/apas_2_aseg.yaml b/example-specs/interface/nipype/freesurfer/apas_2_aseg.yaml index 31df5e7f..9f7a6b18 100644 --- a/example-specs/interface/nipype/freesurfer/apas_2_aseg.yaml +++ b/example-specs/interface/nipype/freesurfer/apas_2_aseg.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# Converts aparc+aseg.mgz into something like aseg.mgz by replacing the -# cortical segmentations 1000-1035 with 3 and 2000-2035 with 42. The -# advantage of this output is that the cortical label conforms to the -# actual surface (this is not the case with aseg.mgz). +# Converts aparc+aseg.mgz into something like aseg.mgz by replacing the +# cortical segmentations 1000-1035 with 3 and 2000-2035 with 42. The +# advantage of this output is that the cortical label conforms to the +# actual surface (this is not the case with aseg.mgz). +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import Apas2Aseg +# >>> apas2aseg = Apas2Aseg() +# >>> apas2aseg.inputs.in_file = 'aseg.mgz' +# >>> apas2aseg.inputs.out_file = 'output.mgz' +# >>> apas2aseg.cmdline +# 'apas2aseg --i aseg.mgz --o output.mgz' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import Apas2Aseg -# >>> apas2aseg = Apas2Aseg() -# >>> apas2aseg.inputs.in_file = 'aseg.mgz' -# >>> apas2aseg.inputs.out_file = 'output.mgz' -# >>> apas2aseg.cmdline -# 'apas2aseg --i aseg.mgz --o output.mgz' # -# task_name: Apas2Aseg nipype_name: Apas2Aseg nipype_module: nipype.interfaces.freesurfer.utils @@ -37,9 +37,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/mgh-gz # type=file|default=: Input aparc+aseg.mgz - out_file: Path - # type=file: Output aseg file - # type=file|default=: Output aseg file subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -58,14 +55,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output aseg file # type=file|default=: Output aseg file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -84,7 +81,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -103,11 +100,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input aparc+aseg.mgz - out_file: '"output.mgz"' - # type=file: Output aseg file - # type=file|default=: Output aseg file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,7 +116,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: apas2aseg --i aseg.mgz --o output.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -130,11 +124,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"aseg.mgz"' # type=file|default=: Input aparc+aseg.mgz - out_file: '"output.mgz"' - # type=file: Output aseg file - # type=file|default=: Output aseg file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/apply_mask.yaml b/example-specs/interface/nipype/freesurfer/apply_mask.yaml index 0c7783f0..fcec10f7 100644 --- a/example-specs/interface/nipype/freesurfer/apply_mask.yaml +++ b/example-specs/interface/nipype/freesurfer/apply_mask.yaml @@ -7,11 +7,11 @@ # ---- # Use Freesurfer's mri_mask to apply a mask to an image. # -# The mask file need not be binarized; it can be thresholded above a given -# value before application. It can also optionally be transformed into input -# space with an LTA matrix. +# The mask file need not be binarized; it can be thresholded above a given +# value before application. 
It can also optionally be transformed into input +# space with an LTA matrix. +# # -# task_name: ApplyMask nipype_name: ApplyMask nipype_module: nipype.interfaces.freesurfer.utils @@ -30,9 +30,6 @@ inputs: # type=file|default=: input image (will be masked) mask_file: generic/file # type=file|default=: image defining mask space - out_file: Path - # type=file: masked image - # type=file|default=: final image to write subjects_dir: generic/directory # type=directory|default=: subjects directory xfm_file: generic/file @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,7 +98,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/apply_vol_transform.yaml b/example-specs/interface/nipype/freesurfer/apply_vol_transform.yaml index bdbb9706..5a0609a4 100644 --- a/example-specs/interface/nipype/freesurfer/apply_vol_transform.yaml +++ b/example-specs/interface/nipype/freesurfer/apply_vol_transform.yaml @@ -7,19 +7,19 @@ # ---- # Use FreeSurfer mri_vol2vol to apply a transform. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import ApplyVolTransform +# >>> applyreg = ApplyVolTransform() +# >>> applyreg.inputs.source_file = 'structural.nii' +# >>> applyreg.inputs.reg_file = 'register.dat' +# >>> applyreg.inputs.transformed_file = 'struct_warped.nii' +# >>> applyreg.inputs.fs_target = True +# >>> applyreg.cmdline +# 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' # -# >>> from nipype.interfaces.freesurfer import ApplyVolTransform -# >>> applyreg = ApplyVolTransform() -# >>> applyreg.inputs.source_file = 'structural.nii' -# >>> applyreg.inputs.reg_file = 'register.dat' -# >>> applyreg.inputs.transformed_file = 'struct_warped.nii' -# >>> applyreg.inputs.fs_target = True -# >>> applyreg.cmdline -# 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' # -# task_name: ApplyVolTransform nipype_name: ApplyVolTransform nipype_module: nipype.interfaces.freesurfer.preprocess @@ -42,7 +42,7 @@ inputs: # type=file|default=: LTA, invert m3z_file: generic/file # type=file|default=: This is the morph to be applied to the volume. Unless the morph is in mri/transforms (eg.: for talairach.m3z computed by reconall), you will need to specify the full path to this morph and use the --noDefM3zPath flag. 
- reg_file: datascience/dat-file + reg_file: generic/file # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) source_file: medimage/nifti1 # type=file|default=: Input volume you wish to transform @@ -50,9 +50,6 @@ inputs: # type=directory|default=: subjects directory target_file: generic/file # type=file|default=: Output template volume - transformed_file: Path - # type=file: Path to output file if used normally - # type=file|default=: Output volume xfm_reg_file: generic/file # type=file|default=: ScannerRAS-to-ScannerRAS matrix (MNI format) callable_defaults: @@ -78,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields transformed_file: '"struct_warped.nii"' # type=file: Path to output file if used normally # type=file|default=: Output volume @@ -136,7 +133,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,15 +152,11 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: Input volume you wish to transform - reg_file: - # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) transformed_file: '"struct_warped.nii"' # type=file: Path to output file if used normally # type=file|default=: Output volume - fs_target: 'True' - # type=bool|default=False: use orig.mgz from subject in regfile 
as target imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -186,15 +179,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_file: '"structural.nii"' # type=file|default=: Input volume you wish to transform - reg_file: '"register.dat"' - # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) transformed_file: '"struct_warped.nii"' # type=file: Path to output file if used normally # type=file|default=: Output volume - fs_target: 'True' - # type=bool|default=False: use orig.mgz from subject in regfile as target imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/bb_register.yaml b/example-specs/interface/nipype/freesurfer/bb_register.yaml index 99e8f145..4e7a6dc2 100644 --- a/example-specs/interface/nipype/freesurfer/bb_register.yaml +++ b/example-specs/interface/nipype/freesurfer/bb_register.yaml @@ -7,19 +7,19 @@ # ---- # Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical. # -# This program performs within-subject, cross-modal registration using a -# boundary-based cost function. It is required that you have an anatomical -# scan of the subject that has already been recon-all-ed using freesurfer. 
+# This program performs within-subject, cross-modal registration using a +# boundary-based cost function. It is required that you have an anatomical +# scan of the subject that has already been recon-all-ed using freesurfer. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import BBRegister +# >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2') +# >>> bbreg.cmdline +# 'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me' # -# >>> from nipype.interfaces.freesurfer import BBRegister -# >>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2') -# >>> bbreg.cmdline -# 'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me' # -# task_name: BBRegister nipype_name: BBRegister nipype_module: nipype.interfaces.freesurfer.preprocess @@ -38,9 +38,6 @@ inputs: # type=file|default=: existing registration file intermediate_file: generic/file # type=file|default=: Intermediate image, e.g. 
in case of partial FOV - out_reg_file: Path - # type=file: Output registration file - # type=file|default=: output registration file source_file: medimage/nifti1 # type=file|default=: source file to be registered subjects_dir: generic/directory @@ -82,7 +79,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_reg_file: out_reg_file # type=file: Output registration file # type=file|default=: output registration file @@ -138,7 +135,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -164,7 +161,7 @@ tests: contrast_type: '"t2"' # type=enum|default='t1'|allowed['bold','dti','t1','t2']: contrast type of image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -194,7 +191,7 @@ doctests: contrast_type: '"t2"' # type=enum|default='t1'|allowed['bold','dti','t1','t2']: contrast type of image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each 
list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/binarize.yaml b/example-specs/interface/nipype/freesurfer/binarize.yaml index bf25ed9f..c2f7c754 100644 --- a/example-specs/interface/nipype/freesurfer/binarize.yaml +++ b/example-specs/interface/nipype/freesurfer/binarize.yaml @@ -7,13 +7,13 @@ # ---- # Use FreeSurfer mri_binarize to threshold an input volume # -# Examples -# -------- -# >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii') -# >>> binvol.cmdline -# 'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000' +# Examples +# -------- +# >>> binvol = Binarize(in_file='structural.nii', min=10, binary_file='foo_out.nii') +# >>> binvol.cmdline +# 'mri_binarize --o foo_out.nii --i structural.nii --min 10.000000' +# # -# task_name: Binarize nipype_name: Binarize nipype_module: nipype.interfaces.freesurfer.model @@ -28,9 +28,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- binary_file: Path - # type=file: binarized output volume - # type=file|default=: binary output volume in_file: medimage/nifti1 # type=file|default=: input volume mask_file: generic/file @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields binary_file: '"foo_out.nii"' # type=file: binarized output volume # type=file|default=: binary output volume @@ -136,7 +133,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -161,7 +158,7 @@ tests: # type=file: binarized output volume # type=file|default=: binary output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -176,7 +173,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_binarize --o foo_out.nii --i structural.nii --min 10.000000 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -190,7 +187,7 @@ doctests: # type=file: binarized output volume # type=file|default=: binary output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/ca_label.yaml b/example-specs/interface/nipype/freesurfer/ca_label.yaml index 321806f2..8ad4c404 100644 --- a/example-specs/interface/nipype/freesurfer/ca_label.yaml +++ b/example-specs/interface/nipype/freesurfer/ca_label.yaml @@ -7,23 +7,23 @@ # ---- # Label subcortical structures based in GCA model. 
# -# See Also -# -------- -# For complete details, see the `FS Documentation -# `__ +# See Also +# -------- +# For complete details, see the `FS Documentation +# `__ +# +# Examples +# -------- +# >>> from nipype.interfaces import freesurfer +# >>> ca_label = freesurfer.CALabel() +# >>> ca_label.inputs.in_file = "norm.mgz" +# >>> ca_label.inputs.out_file = "out.mgz" +# >>> ca_label.inputs.transform = "trans.mat" +# >>> ca_label.inputs.template = "Template_6.nii" # in practice use .gcs extension +# >>> ca_label.cmdline +# 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz' # -# Examples -# -------- -# >>> from nipype.interfaces import freesurfer -# >>> ca_label = freesurfer.CALabel() -# >>> ca_label.inputs.in_file = "norm.mgz" -# >>> ca_label.inputs.out_file = "out.mgz" -# >>> ca_label.inputs.transform = "trans.mat" -# >>> ca_label.inputs.template = "Template_6.nii" # in practice use .gcs extension -# >>> ca_label.cmdline -# 'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz' # -# task_name: CALabel nipype_name: CALabel nipype_module: nipype.interfaces.freesurfer.preprocess @@ -48,12 +48,9 @@ inputs: # type=file|default=: input label intensities file(used in longitudinal processing) label: generic/file # type=file|default=: Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file - out_file: Path - # type=file: Output volume from CALabel - # type=file|default=: Output file for CALabel subjects_dir: generic/directory # type=directory|default=: subjects directory - template: medimage/nifti1 + template: generic/file # type=file|default=: Input template for CALabel transform: datascience/text-matrix # type=file|default=: Input transform for CALabel @@ -73,14 +70,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output volume from CALabel # type=file|default=: Output file for CALabel callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -121,7 +118,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,15 +137,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input volume for CALabel - out_file: '"out.mgz"' - # type=file: Output volume from CALabel - # type=file|default=: Output file for CALabel transform: # type=file|default=: Input transform for CALabel - template: - # type=file|default=: Input template for CALabel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -163,7 +155,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -171,15 +163,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: Input volume for CALabel - out_file: '"out.mgz"' - # type=file: Output volume from CALabel - # type=file|default=: Output file for CALabel transform: '"trans.mat"' # type=file|default=: Input transform for CALabel - template: '"Template_6.nii" # in practice use .gcs extension' - # type=file|default=: Input template for CALabel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/ca_normalize.yaml b/example-specs/interface/nipype/freesurfer/ca_normalize.yaml index b2152c20..cd8156ee 100644 --- a/example-specs/interface/nipype/freesurfer/ca_normalize.yaml +++ b/example-specs/interface/nipype/freesurfer/ca_normalize.yaml @@ -6,24 +6,24 @@ # Docs # ---- # This program creates a normalized volume using the brain volume and an -# input gca file. +# input gca file. # -# See Also -# -------- -# For complete details, see the `FS Documentation -# `__. +# See Also +# -------- +# For complete details, see the `FS Documentation +# `__. 
+# +# Examples +# -------- +# >>> from nipype.interfaces import freesurfer +# >>> ca_normalize = freesurfer.CANormalize() +# >>> ca_normalize.inputs.in_file = "T1.mgz" +# >>> ca_normalize.inputs.atlas = "atlas.nii.gz" # in practice use .gca atlases +# >>> ca_normalize.inputs.transform = "trans.mat" # in practice use .lta transforms +# >>> ca_normalize.cmdline +# 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz' # -# Examples -# -------- -# >>> from nipype.interfaces import freesurfer -# >>> ca_normalize = freesurfer.CANormalize() -# >>> ca_normalize.inputs.in_file = "T1.mgz" -# >>> ca_normalize.inputs.atlas = "atlas.nii.gz" # in practice use .gca atlases -# >>> ca_normalize.inputs.transform = "trans.mat" # in practice use .lta transforms -# >>> ca_normalize.cmdline -# 'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz' # -# task_name: CANormalize nipype_name: CANormalize nipype_module: nipype.interfaces.freesurfer.preprocess @@ -38,20 +38,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- atlas: medimage/nifti-gz + atlas: generic/file # type=file|default=: The atlas file in gca format - control_points: Path - # type=file: The output control points for Normalize - # type=file|default=: File name for the output control points in_file: medimage/mgh-gz # type=file|default=: The input file for CANormalize long_file: generic/file # type=file|default=: undocumented flag used in longitudinal processing mask: generic/file # type=file|default=: Specifies volume to use as mask - out_file: Path - # type=file: The output file for Normalize - # type=file|default=: The output file for CANormalize subjects_dir: generic/directory # type=directory|default=: subjects directory transform: datascience/text-matrix @@ -82,7 +76,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -112,7 +106,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,12 +125,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: The input file for CANormalize - atlas: - # type=file|default=: The atlas file in gca format transform: # type=file|default=: The transform file in lta format imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -151,7 +143,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -159,12 +151,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"T1.mgz"' # type=file|default=: The input file for CANormalize - atlas: '"atlas.nii.gz" # in practice use .gca atlases' - # type=file|default=: The atlas file in gca format transform: '"trans.mat" # in practice use .lta transforms' # type=file|default=: The transform file in lta format imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/ca_register.yaml b/example-specs/interface/nipype/freesurfer/ca_register.yaml index 69d511ba..5a16f0a2 100644 --- a/example-specs/interface/nipype/freesurfer/ca_register.yaml +++ b/example-specs/interface/nipype/freesurfer/ca_register.yaml @@ -7,21 +7,21 @@ # ---- # Generates a multi-dimensional talairach transform from a gca file and talairach.lta file # -# See Also -# -------- -# For complete details, see the `FS Documentation -# `__ +# See Also +# -------- +# For complete details, see the `FS Documentation +# `__ +# +# Examples +# -------- +# >>> from nipype.interfaces import freesurfer +# >>> ca_register = freesurfer.CARegister() +# >>> ca_register.inputs.in_file = "norm.mgz" +# >>> ca_register.inputs.out_file = "talairach.m3z" +# >>> ca_register.cmdline +# 'mri_ca_register norm.mgz talairach.m3z' # -# Examples -# -------- -# >>> from nipype.interfaces import freesurfer -# >>> ca_register = freesurfer.CARegister() -# >>> ca_register.inputs.in_file = "norm.mgz" -# >>> ca_register.inputs.out_file = "talairach.m3z" -# >>> ca_register.cmdline -# 'mri_ca_register norm.mgz talairach.m3z' # -# task_name: CARegister nipype_name: CARegister nipype_module: nipype.interfaces.freesurfer.preprocess @@ -42,9 +42,6 @@ inputs: # type=inputmultiobject|default=[]: undocumented flag used in longitudinal processing mask: generic/file # type=file|default=: Specifies volume to use as mask - out_file: Path - # type=file: The output file for CARegister - # type=file|default=: The output volume for CARegister subjects_dir: generic/directory # type=directory|default=: subjects directory template: generic/file @@ -67,15 +64,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-freesurfer/m3z + out_file: generic/file # type=file: The output file for CARegister # type=file|default=: The output volume for CARegister callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"talairach.m3z"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: The output file for CARegister # type=file|default=: The output volume for CARegister requirements: @@ -116,7 +113,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,11 +132,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: The input volume for CARegister - out_file: '"talairach.m3z"' - # type=file: The output file for CARegister - # type=file|default=: The output volume for CARegister imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -154,7 +148,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_ca_register norm.mgz talairach.m3z +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -162,11 +156,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: The input volume for CARegister - out_file: '"talairach.m3z"' - # type=file: The output file for CARegister - # type=file|default=: The output volume for CARegister imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/check_talairach_alignment.yaml b/example-specs/interface/nipype/freesurfer/check_talairach_alignment.yaml index cabe72a5..d0d07345 100644 --- a/example-specs/interface/nipype/freesurfer/check_talairach_alignment.yaml +++ b/example-specs/interface/nipype/freesurfer/check_talairach_alignment.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# This program detects Talairach alignment failures +# This program detects Talairach alignment failures # -# Examples -# ======== +# Examples +# ======== # -# >>> from nipype.interfaces.freesurfer import CheckTalairachAlignment -# >>> checker = CheckTalairachAlignment() +# >>> from nipype.interfaces.freesurfer import CheckTalairachAlignment +# >>> checker = CheckTalairachAlignment() # -# >>> checker.inputs.in_file = 'trans.mat' -# >>> checker.inputs.threshold = 0.005 -# >>> checker.cmdline -# 'talairach_afd -T 0.005 -xfm trans.mat' +# >>> checker.inputs.in_file = 'trans.mat' +# >>> checker.inputs.threshold = 0.005 +# 
>>> checker.cmdline +# 'talairach_afd -T 0.005 -xfm trans.mat' +# +# >>> checker.run() # doctest: +SKIP # -# >>> checker.run() # doctest: +SKIP -# task_name: CheckTalairachAlignment nipype_name: CheckTalairachAlignment nipype_module: nipype.interfaces.freesurfer.utils @@ -61,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -81,7 +81,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -100,10 +100,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: specify the talairach.xfm file to check - threshold: '0.005' - # type=float|default=0.01: Talairach transforms for subjects with p-values <= T are considered as very unlikely default=0.010 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,10 +124,8 @@ doctests: # 
'.mock()' method of the corresponding class is used instead. in_file: '"trans.mat"' # type=file|default=: specify the talairach.xfm file to check - threshold: '0.005' - # type=float|default=0.01: Talairach transforms for subjects with p-values <= T are considered as very unlikely default=0.010 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/concatenate.yaml b/example-specs/interface/nipype/freesurfer/concatenate.yaml index c3a56076..442d7a3d 100644 --- a/example-specs/interface/nipype/freesurfer/concatenate.yaml +++ b/example-specs/interface/nipype/freesurfer/concatenate.yaml @@ -6,20 +6,20 @@ # Docs # ---- # Use Freesurfer mri_concat to combine several input volumes -# into one output volume. Can concatenate by frames, or compute -# a variety of statistics on the input volumes. +# into one output volume. Can concatenate by frames, or compute +# a variety of statistics on the input volumes. 
# -# Examples -# -------- -# Combine two input volumes into one volume with two frames +# Examples +# -------- +# Combine two input volumes into one volume with two frames +# +# >>> concat = Concatenate() +# >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii'] +# >>> concat.inputs.concatenated_file = 'bar.nii' +# >>> concat.cmdline +# 'mri_concat --o bar.nii --i cont1.nii --i cont2.nii' # -# >>> concat = Concatenate() -# >>> concat.inputs.in_files = ['cont1.nii', 'cont2.nii'] -# >>> concat.inputs.concatenated_file = 'bar.nii' -# >>> concat.cmdline -# 'mri_concat --o bar.nii --i cont1.nii --i cont2.nii' # -# task_name: Concatenate nipype_name: Concatenate nipype_module: nipype.interfaces.freesurfer.model @@ -34,9 +34,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - concatenated_file: Path - # type=file: Path/name of the output volume - # type=file|default=: Output volume in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: Individual volumes to be concatenated mask_file: generic/file @@ -61,15 +58,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- concatenated_file: medimage/nifti1 + concatenated_file: generic/file # type=file: Path/name of the output volume # type=file|default=: Output volume callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - concatenated_file: '"bar.nii"' + # dict[str, str] - `path_template` values to be provided to output fields + concatenated_file: concatenated_file # type=file: Path/name of the output volume # type=file|default=: Output volume requirements: @@ -120,7 +117,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,11 +136,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: Individual volumes to be concatenated - concatenated_file: '"bar.nii"' - # type=file: Path/name of the output volume - # type=file|default=: Output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -166,11 +160,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_files: '["cont1.nii", "cont2.nii"]' # type=inputmultiobject|default=[]: Individual volumes to be concatenated - concatenated_file: '"bar.nii"' - # type=file: Path/name of the output volume - # type=file|default=: Output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/concatenate_lta.yaml b/example-specs/interface/nipype/freesurfer/concatenate_lta.yaml index c1537a30..dd1cb089 100644 --- a/example-specs/interface/nipype/freesurfer/concatenate_lta.yaml +++ b/example-specs/interface/nipype/freesurfer/concatenate_lta.yaml @@ -6,33 +6,33 @@ # Docs # ---- # Concatenates two consecutive LTA transformations into one overall -# transformation +# transformation # -# Out = LTA2*LTA1 +# Out = LTA2*LTA1 # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import ConcatenateLTA -# >>> conc_lta = ConcatenateLTA() -# >>> conc_lta.inputs.in_lta1 = 'lta1.lta' -# >>> conc_lta.inputs.in_lta2 = 'lta2.lta' -# >>> conc_lta.cmdline -# 'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import ConcatenateLTA +# >>> conc_lta = ConcatenateLTA() +# >>> conc_lta.inputs.in_lta1 = 'lta1.lta' +# >>> conc_lta.inputs.in_lta2 = 'lta2.lta' +# >>> conc_lta.cmdline +# 'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta' # -# You can use 'identity.nofile' as the filename for in_lta2, e.g.: +# You can use 'identity.nofile' as the filename for in_lta2, e.g.: # -# >>> conc_lta.inputs.in_lta2 = 'identity.nofile' -# >>> conc_lta.inputs.invert_1 = True -# >>> conc_lta.inputs.out_file = 'inv1.lta' -# >>> 
conc_lta.cmdline -# 'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta' +# >>> conc_lta.inputs.in_lta2 = 'identity.nofile' +# >>> conc_lta.inputs.invert_1 = True +# >>> conc_lta.inputs.out_file = 'inv1.lta' +# >>> conc_lta.cmdline +# 'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta' # -# To create a RAS2RAS transform: +# To create a RAS2RAS transform: +# +# >>> conc_lta.inputs.out_type = 'RAS2RAS' +# >>> conc_lta.cmdline +# 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta' # -# >>> conc_lta.inputs.out_type = 'RAS2RAS' -# >>> conc_lta.cmdline -# 'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta' -# task_name: ConcatenateLTA nipype_name: ConcatenateLTA nipype_module: nipype.interfaces.freesurfer.preprocess @@ -47,11 +47,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_lta1: medimage-freesurfer/lta + in_lta1: fileformats.medimage_freesurfer.Lta # type=file|default=: maps some src1 to dst1 - out_file: Path - # type=file: the combined LTA maps: src1 to dst2 = LTA2*LTA1 - # type=file|default=: the combined LTA maps: src1 to dst2 = LTA2*LTA1 subjects_dir: generic/directory # type=directory|default=: subjects directory tal_source_file: generic/file @@ -74,14 +71,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-freesurfer/lta + out_file: fileformats.medimage_freesurfer.Lta # type=file: the combined LTA maps: src1 to dst2 = LTA2*LTA1 # type=file|default=: the combined LTA maps: src1 to dst2 = LTA2*LTA1 callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -116,7 +113,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,10 +132,8 @@ tests: # (if not specified, will try to choose a sensible value) in_lta1: # type=file|default=: maps some src1 to dst1 - in_lta2: '"lta2.lta"' - # type=traitcompound|default=None: maps dst1(src2) to dst2 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,13 +152,11 @@ tests: # (if not specified, will try to choose a sensible value) in_lta2: '"identity.nofile"' # type=traitcompound|default=None: maps dst1(src2) to dst2 - 
invert_1: 'True' - # type=bool|default=False: invert in_lta1 before applying it out_file: '"inv1.lta"' # type=file: the combined LTA maps: src1 to dst2 = LTA2*LTA1 # type=file|default=: the combined LTA maps: src1 to dst2 = LTA2*LTA1 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -183,7 +176,7 @@ tests: out_type: '"RAS2RAS"' # type=enum|default='VOX2VOX'|allowed['RAS2RAS','VOX2VOX']: set final LTA type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -198,7 +191,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -206,10 +199,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_lta1: '"lta1.lta"' # type=file|default=: maps some src1 to dst1 - in_lta2: '"lta2.lta"' - # type=traitcompound|default=None: maps dst1(src2) to dst2 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -221,13 +212,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_lta2: '"identity.nofile"' # type=traitcompound|default=None: maps dst1(src2) to dst2 - invert_1: 'True' - # type=bool|default=False: invert in_lta1 before applying it out_file: '"inv1.lta"' # type=file: the combined LTA maps: src1 to dst2 = LTA2*LTA1 # type=file|default=: the combined LTA maps: src1 to dst2 = LTA2*LTA1 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -240,7 +229,7 @@ doctests: out_type: '"RAS2RAS"' # type=enum|default='VOX2VOX'|allowed['RAS2RAS','VOX2VOX']: set final LTA type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/contrast.yaml b/example-specs/interface/nipype/freesurfer/contrast.yaml index 63402d44..3c13671b 100644 --- a/example-specs/interface/nipype/freesurfer/contrast.yaml +++ b/example-specs/interface/nipype/freesurfer/contrast.yaml @@ -6,23 +6,23 @@ # Docs # ---- # -# Compute surface-wise gray/white contrast +# Compute surface-wise gray/white contrast +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Contrast +# >>> contrast = Contrast() +# >>> contrast.inputs.subject_id = '10335' +# >>> contrast.inputs.hemisphere = 'lh' +# >>> contrast.inputs.white = 'lh.white' # doctest: +SKIP +# >>> contrast.inputs.thickness = 'lh.thickness' # doctest: +SKIP +# >>> contrast.inputs.annotation = '../label/lh.aparc.annot' # doctest: +SKIP +# >>> contrast.inputs.cortex = '../label/lh.cortex.label' # doctest: +SKIP +# >>> contrast.inputs.rawavg = '../mri/rawavg.mgz' # doctest: +SKIP +# >>> contrast.inputs.orig = '../mri/orig.mgz' # doctest: +SKIP +# >>> contrast.cmdline # doctest: +SKIP +# 'pctsurfcon --lh-only --s 10335' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import Contrast -# >>> contrast = Contrast() -# >>> contrast.inputs.subject_id = '10335' -# >>> contrast.inputs.hemisphere = 'lh' -# >>> contrast.inputs.white = 'lh.white' # doctest: +SKIP -# >>> contrast.inputs.thickness = 'lh.thickness' # doctest: +SKIP -# >>> contrast.inputs.annotation = '../label/lh.aparc.annot' # doctest: +SKIP -# >>> contrast.inputs.cortex = '../label/lh.cortex.label' # doctest: +SKIP -# >>> contrast.inputs.rawavg = '../mri/rawavg.mgz' # doctest: +SKIP -# >>> contrast.inputs.orig = '../mri/orig.mgz' # doctest: +SKIP -# >>> contrast.cmdline # doctest: +SKIP -# 'pctsurfcon --lh-only --s 10335' -# task_name: Contrast nipype_name: Contrast nipype_module: nipype.interfaces.freesurfer.utils @@ -37,19 +37,19 @@ inputs: # from the nipype interface, but you may want to be more specific, 
particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - annotation: medimage-freesurfer/annot + annotation: fileformats.medimage_freesurfer.Annot # type=file|default=: Input annotation file must be /label/.aparc.annot - cortex: medimage-freesurfer/label + cortex: generic/file # type=file|default=: Input cortex label must be /label/.cortex.label - orig: medimage/mgh-gz + orig: generic/file # type=file|default=: Implicit input file mri/orig.mgz rawavg: medimage/mgh-gz # type=file|default=: Implicit input file mri/rawavg.mgz subjects_dir: generic/directory # type=directory|default=: subjects directory - thickness: medimage-freesurfer/thickness + thickness: generic/file # type=file|default=: Input file must be /surf/?h.thickness - white: medimage-freesurfer/white + white: fileformats.medimage_freesurfer.White # type=file|default=: Input file must be /surf/.white callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -77,7 +77,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,7 +101,7 @@ tests: rawavg: # type=file|default=: Implicit input file mri/rawavg.mgz copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. 
subjects_dir: # type=directory|default=: subjects directory args: @@ -109,7 +109,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -128,22 +128,14 @@ tests: # (if not specified, will try to choose a sensible value) subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed white: # type=file|default=: Input file must be /surf/.white - thickness: - # type=file|default=: Input file must be /surf/?h.thickness annotation: # type=file|default=: Input annotation file must be /label/.aparc.annot - cortex: - # type=file|default=: Input cortex label must be /label/.cortex.label rawavg: # type=file|default=: Implicit input file mri/rawavg.mgz - orig: - # type=file|default=: Implicit input file mri/orig.mgz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,7 +150,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: pctsurfcon --lh-only --s 10335 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -166,22 +158,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed white: '"lh.white" # doctest: +SKIP' # type=file|default=: Input file must be /surf/.white - thickness: '"lh.thickness" # doctest: +SKIP' - # type=file|default=: Input file must be /surf/?h.thickness annotation: '"../label/lh.aparc.annot" # doctest: +SKIP' # type=file|default=: Input annotation file must be /label/.aparc.annot - cortex: '"../label/lh.cortex.label" # doctest: +SKIP' - # type=file|default=: Input cortex label must be /label/.cortex.label rawavg: '"../mri/rawavg.mgz" # doctest: +SKIP' # type=file|default=: Implicit input file mri/rawavg.mgz - orig: '"../mri/orig.mgz" # doctest: +SKIP' - # type=file|default=: Implicit input file mri/orig.mgz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/curvature.yaml b/example-specs/interface/nipype/freesurfer/curvature.yaml index badc6f49..748c88b3 100644 --- a/example-specs/interface/nipype/freesurfer/curvature.yaml +++ b/example-specs/interface/nipype/freesurfer/curvature.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# This program will compute the second fundamental form of a cortical -# surface. It will create two new files ..H and -# ..K with the mean and Gaussian curvature respectively. +# This program will compute the second fundamental form of a cortical +# surface. It will create two new files ..H and +# ..K with the mean and Gaussian curvature respectively. +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Curvature +# >>> curv = Curvature() +# >>> curv.inputs.in_file = 'lh.pial' +# >>> curv.inputs.save = True +# >>> curv.cmdline +# 'mris_curvature -w lh.pial' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import Curvature -# >>> curv = Curvature() -# >>> curv.inputs.in_file = 'lh.pial' -# >>> curv.inputs.save = True -# >>> curv.cmdline -# 'mris_curvature -w lh.pial' -# task_name: Curvature nipype_name: Curvature nipype_module: nipype.interfaces.freesurfer.utils @@ -33,7 +33,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for Curvature subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -61,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -89,7 +89,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -108,10 +108,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input file for Curvature - save: 'True' - # type=bool|default=False: Save curvature files (will only generate screen output without this option) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -126,7 +124,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_curvature -w lh.pial +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -134,10 +132,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"lh.pial"' # type=file|default=: Input file for Curvature - save: 'True' - # type=bool|default=False: Save curvature files (will only generate screen output without this option) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/curvature_stats.yaml b/example-specs/interface/nipype/freesurfer/curvature_stats.yaml index f7dc4828..2e93873e 100644 --- a/example-specs/interface/nipype/freesurfer/curvature_stats.yaml +++ b/example-specs/interface/nipype/freesurfer/curvature_stats.yaml @@ -6,42 +6,42 @@ # Docs # ---- # -# In its simplest usage, 'mris_curvature_stats' will compute a set -# of statistics on its input . These statistics are the -# mean and standard deviation of the particular curvature on the -# surface, as well as the results from several surface-based -# integrals. +# In its simplest usage, 'mris_curvature_stats' will compute a set +# of statistics on its input . These statistics are the +# mean and standard deviation of the particular curvature on the +# surface, as well as the results from several surface-based +# integrals. # -# Additionally, 'mris_curvature_stats' can report the max/min -# curvature values, and compute a simple histogram based on -# all curvature values. 
+# Additionally, 'mris_curvature_stats' can report the max/min +# curvature values, and compute a simple histogram based on +# all curvature values. # -# Curvatures can also be normalised and constrained to a given -# range before computation. +# Curvatures can also be normalised and constrained to a given +# range before computation. # -# Principal curvature (K, H, k1 and k2) calculations on a surface -# structure can also be performed, as well as several functions -# derived from k1 and k2. +# Principal curvature (K, H, k1 and k2) calculations on a surface +# structure can also be performed, as well as several functions +# derived from k1 and k2. # -# Finally, all output to the console, as well as any new -# curvatures that result from the above calculations can be -# saved to a series of text and binary-curvature files. +# Finally, all output to the console, as well as any new +# curvatures that result from the above calculations can be +# saved to a series of text and binary-curvature files. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import CurvatureStats +# >>> curvstats = CurvatureStats() +# >>> curvstats.inputs.hemisphere = 'lh' +# >>> curvstats.inputs.curvfile1 = 'lh.pial' +# >>> curvstats.inputs.curvfile2 = 'lh.pial' +# >>> curvstats.inputs.surface = 'lh.pial' +# >>> curvstats.inputs.out_file = 'lh.curv.stats' +# >>> curvstats.inputs.values = True +# >>> curvstats.inputs.min_max = True +# >>> curvstats.inputs.write = True +# >>> curvstats.cmdline +# 'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import CurvatureStats -# >>> curvstats = CurvatureStats() -# >>> curvstats.inputs.hemisphere = 'lh' -# >>> curvstats.inputs.curvfile1 = 'lh.pial' -# >>> curvstats.inputs.curvfile2 = 'lh.pial' -# >>> curvstats.inputs.surface = 'lh.pial' -# >>> curvstats.inputs.out_file = 'lh.curv.stats' -# >>> curvstats.inputs.values = True -# >>> curvstats.inputs.min_max = True -# >>> curvstats.inputs.write = True -# >>> curvstats.cmdline -# 'mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial' -# task_name: CurvatureStats nipype_name: CurvatureStats nipype_module: nipype.interfaces.freesurfer.utils @@ -56,16 +56,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- curvfile1: medimage-freesurfer/pial + curvfile1: generic/file # type=file|default=: Input file for CurvatureStats - curvfile2: medimage-freesurfer/pial + curvfile2: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for CurvatureStats - out_file: Path - # type=file: Output curvature stats file - # type=file|default=: Output curvature stats file subjects_dir: generic/directory # type=directory|default=: subjects directory - surface: medimage-freesurfer/pial + surface: generic/file # type=file|default=: Specify surface file for CurvatureStats callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -83,14 +80,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-freesurfer/stats + out_file: fileformats.medimage_freesurfer.Stats # type=file: Output curvature stats file # type=file|default=: Output curvature stats file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -117,7 +114,7 @@ tests: write: # type=bool|default=False: Write curvature files copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. 
subjects_dir: # type=directory|default=: subjects directory args: @@ -125,7 +122,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,23 +141,15 @@ tests: # (if not specified, will try to choose a sensible value) hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed - curvfile1: - # type=file|default=: Input file for CurvatureStats curvfile2: # type=file|default=: Input file for CurvatureStats - surface: - # type=file|default=: Specify surface file for CurvatureStats out_file: '"lh.curv.stats"' # type=file: Output curvature stats file # type=file|default=: Output curvature stats file - values: 'True' - # type=bool|default=False: Triggers a series of derived curvature values min_max: 'True' # type=bool|default=False: Output min / max information for the processed curvature. - write: 'True' - # type=bool|default=False: Write curvature files imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -175,7 +164,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_curvature_stats -m -o lh.curv.stats -F pial -G --writeCurvatureFiles subject_id lh pial pial +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -183,23 +172,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed - curvfile1: '"lh.pial"' - # type=file|default=: Input file for CurvatureStats curvfile2: '"lh.pial"' # type=file|default=: Input file for CurvatureStats - surface: '"lh.pial"' - # type=file|default=: Specify surface file for CurvatureStats out_file: '"lh.curv.stats"' # type=file: Output curvature stats file # type=file|default=: Output curvature stats file - values: 'True' - # type=bool|default=False: Triggers a series of derived curvature values min_max: 'True' # type=bool|default=False: Output min / max information for the processed curvature. - write: 'True' - # type=bool|default=False: Write curvature files imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/dicom_convert.yaml b/example-specs/interface/nipype/freesurfer/dicom_convert.yaml index 8dca10ce..df7d6788 100644 --- a/example-specs/interface/nipype/freesurfer/dicom_convert.yaml +++ b/example-specs/interface/nipype/freesurfer/dicom_convert.yaml @@ -7,15 +7,15 @@ # ---- # use fs mri_convert to convert dicom files # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import DICOMConvert +# >>> cvt = DICOMConvert() +# >>> cvt.inputs.dicom_dir = 'dicomdir' +# >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')] # -# >>> from nipype.interfaces.freesurfer import DICOMConvert -# >>> cvt = DICOMConvert() -# >>> cvt.inputs.dicom_dir = 'dicomdir' -# >>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')] # -# task_name: DICOMConvert nipype_name: DICOMConvert nipype_module: nipype.interfaces.freesurfer.preprocess @@ -58,7 +58,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -90,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git 
a/example-specs/interface/nipype/freesurfer/edit_w_mwith_aseg.yaml b/example-specs/interface/nipype/freesurfer/edit_w_mwith_aseg.yaml index e285c802..c7c08bed 100644 --- a/example-specs/interface/nipype/freesurfer/edit_w_mwith_aseg.yaml +++ b/example-specs/interface/nipype/freesurfer/edit_w_mwith_aseg.yaml @@ -6,20 +6,20 @@ # Docs # ---- # -# Edits a wm file using a segmentation +# Edits a wm file using a segmentation +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import EditWMwithAseg +# >>> editwm = EditWMwithAseg() +# >>> editwm.inputs.in_file = "T1.mgz" +# >>> editwm.inputs.brain_file = "norm.mgz" +# >>> editwm.inputs.seg_file = "aseg.mgz" +# >>> editwm.inputs.out_file = "wm.asegedit.mgz" +# >>> editwm.inputs.keep_in = True +# >>> editwm.cmdline +# 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import EditWMwithAseg -# >>> editwm = EditWMwithAseg() -# >>> editwm.inputs.in_file = "T1.mgz" -# >>> editwm.inputs.brain_file = "norm.mgz" -# >>> editwm.inputs.seg_file = "aseg.mgz" -# >>> editwm.inputs.out_file = "wm.asegedit.mgz" -# >>> editwm.inputs.keep_in = True -# >>> editwm.cmdline -# 'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz' -# task_name: EditWMwithAseg nipype_name: EditWMwithAseg nipype_module: nipype.interfaces.freesurfer.preprocess @@ -34,13 +34,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- brain_file: medimage/mgh-gz + brain_file: generic/file # type=file|default=: Input brain/T1 file in_file: medimage/mgh-gz # type=file|default=: Input white matter segmentation file - out_file: Path - # type=file: Output edited WM file - # type=file|default=: File to be written as output seg_file: medimage/mgh-gz # type=file|default=: Input presurf segmentation file subjects_dir: generic/directory @@ -61,14 +58,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output edited WM file # type=file|default=: File to be written as output callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,17 +109,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input white matter segmentation file - brain_file: - # type=file|default=: Input brain/T1 file seg_file: # type=file|default=: 
Input presurf segmentation file - out_file: '"wm.asegedit.mgz"' - # type=file: Output edited WM file - # type=file|default=: File to be written as output keep_in: 'True' # type=bool|default=False: Keep edits as found in input volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -137,7 +129,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -145,17 +137,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"T1.mgz"' # type=file|default=: Input white matter segmentation file - brain_file: '"norm.mgz"' - # type=file|default=: Input brain/T1 file seg_file: '"aseg.mgz"' # type=file|default=: Input presurf segmentation file - out_file: '"wm.asegedit.mgz"' - # type=file: Output edited WM file - # type=file|default=: File to be written as output keep_in: 'True' # type=bool|default=False: Keep edits as found in input volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/em_register.yaml b/example-specs/interface/nipype/freesurfer/em_register.yaml index 0c9faa40..8c3ef93b 100644 --- a/example-specs/interface/nipype/freesurfer/em_register.yaml +++ b/example-specs/interface/nipype/freesurfer/em_register.yaml @@ -7,18 +7,18 @@ # ---- # This program creates a transform in lta format # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import EMRegister -# >>> register = EMRegister() -# >>> register.inputs.in_file = 'norm.mgz' -# >>> register.inputs.template = 'aseg.mgz' -# >>> register.inputs.out_file = 'norm_transform.lta' -# >>> register.inputs.skull = True -# >>> register.inputs.nbrspacing = 9 -# >>> register.cmdline -# 'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta' -# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import EMRegister +# >>> register = EMRegister() +# >>> register.inputs.in_file = 'norm.mgz' +# >>> register.inputs.template = 'aseg.mgz' +# >>> register.inputs.out_file = 'norm_transform.lta' +# >>> register.inputs.skull = True +# >>> register.inputs.nbrspacing = 9 +# >>> register.cmdline +# 'mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta' +# task_name: EMRegister nipype_name: EMRegister nipype_module: nipype.interfaces.freesurfer.registration @@ -37,12 +37,9 @@ inputs: # type=file|default=: in brain volume mask: generic/file # type=file|default=: use volume as a mask - out_file: Path - # type=file: output transform - # type=file|default=: output transform subjects_dir: generic/directory # type=directory|default=: subjects directory - template: medimage/mgh-gz + template: generic/file # type=file|default=: template gca transform: generic/file # type=file|default=: Previously computed transform @@ -62,14 +59,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that 
will be # passed to the field in the automatically generated unittests. - out_file: medimage-freesurfer/lta + out_file: fileformats.medimage_freesurfer.Lta # type=file: output transform # type=file|default=: output transform callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -100,7 +97,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,17 +116,13 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: in brain volume - template: - # type=file|default=: template gca out_file: '"norm_transform.lta"' # type=file: output transform # type=file|default=: output transform - skull: 'True' - # type=bool|default=False: align to atlas containing skull (uns=5) nbrspacing: '9' # type=int|default=0: align to atlas containing skull setting unknown_nbr_spacing = nbrspacing imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # 
dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,7 +137,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_em_register -uns 9 -skull norm.mgz aseg.mgz norm_transform.lta +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -152,17 +145,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: in brain volume - template: '"aseg.mgz"' - # type=file|default=: template gca out_file: '"norm_transform.lta"' # type=file: output transform # type=file|default=: output transform - skull: 'True' - # type=bool|default=False: align to atlas containing skull (uns=5) nbrspacing: '9' # type=int|default=0: align to atlas containing skull setting unknown_nbr_spacing = nbrspacing imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/euler_number.yaml b/example-specs/interface/nipype/freesurfer/euler_number.yaml index c0d1ee6f..963344b7 100644 --- a/example-specs/interface/nipype/freesurfer/euler_number.yaml +++ b/example-specs/interface/nipype/freesurfer/euler_number.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# This program computes EulerNumber for a cortical surface +# This program computes EulerNumber for a cortical surface +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import EulerNumber +# >>> ft = EulerNumber() +# >>> ft.inputs.in_file = 'lh.pial' +# >>> ft.cmdline +# 'mris_euler_number lh.pial' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import EulerNumber -# >>> ft = EulerNumber() -# >>> ft.inputs.in_file = 'lh.pial' -# >>> ft.cmdline -# 'mris_euler_number lh.pial' -# task_name: EulerNumber nipype_name: EulerNumber nipype_module: nipype.interfaces.freesurfer.utils @@ -30,7 +30,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for EulerNumber subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -58,7 +58,7 @@ outputs: euler: euler_callable # type=int: Euler number of cortical surface. 
A value of 2 signals a topologically correct surface model with no holes templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -74,7 +74,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -94,7 +94,7 @@ tests: in_file: # type=file|default=: Input file for EulerNumber imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,7 +109,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_euler_number lh.pial +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -118,7 +118,7 @@ doctests: in_file: '"lh.pial"' # type=file|default=: Input file for EulerNumber imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/extract_main_component.yaml b/example-specs/interface/nipype/freesurfer/extract_main_component.yaml index e4f9a7f7..4fc35138 100644 --- a/example-specs/interface/nipype/freesurfer/extract_main_component.yaml +++ b/example-specs/interface/nipype/freesurfer/extract_main_component.yaml @@ -7,15 +7,15 @@ # ---- # Extract the main component of a tessellated surface # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import ExtractMainComponent +# >>> mcmp = ExtractMainComponent(in_file='lh.pial') +# >>> mcmp.cmdline +# 'mris_extract_main_component lh.pial lh.maincmp' # -# >>> from nipype.interfaces.freesurfer import ExtractMainComponent -# >>> mcmp = ExtractMainComponent(in_file='lh.pial') -# >>> mcmp.cmdline -# 'mris_extract_main_component lh.pial lh.maincmp' # -# task_name: ExtractMainComponent nipype_name: ExtractMainComponent nipype_module: nipype.interfaces.freesurfer.utils @@ -30,11 +30,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: input surface file - out_file: Path - # type=file: surface containing main component - # type=file|default=: surface containing main component callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -58,7 +55,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -75,7 +72,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -95,7 +92,7 @@ tests: in_file: # type=file|default=: input surface file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,7 +116,7 @@ doctests: in_file: '"lh.pial"' # type=file|default=: input surface file imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/fit_ms_params.yaml b/example-specs/interface/nipype/freesurfer/fit_ms_params.yaml index 14a4922e..180fc326 100644 --- a/example-specs/interface/nipype/freesurfer/fit_ms_params.yaml +++ b/example-specs/interface/nipype/freesurfer/fit_ms_params.yaml @@ -7,16 +7,16 @@ # ---- # Estimate tissue parameters from a set of FLASH images. # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import FitMSParams -# >>> msfit = FitMSParams() -# >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz'] -# >>> msfit.inputs.out_dir = 'flash_parameters' -# >>> msfit.cmdline -# 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import FitMSParams +# >>> msfit = FitMSParams() +# >>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz'] +# >>> msfit.inputs.out_dir = 'flash_parameters' +# >>> msfit.cmdline +# 'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters' +# # -# task_name: FitMSParams nipype_name: FitMSParams nipype_module: nipype.interfaces.freesurfer.preprocess @@ -33,8 +33,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_files: medimage/mgh-gz+list-of # type=list|default=[]: list of FLASH images (must be in mgh format) - out_dir: Path - # type=directory|default=: directory to store output in subjects_dir: generic/directory # type=directory|default=: subjects directory xfm_list: generic/file+list-of @@ -67,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +91,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,10 +110,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=list|default=[]: list of FLASH images (must be in mgh format) - out_dir: '"flash_parameters"' - # type=directory|default=: directory to store output in imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -130,7 +126,7 @@ tests: # bool - whether the unittest is expected 
to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -138,10 +134,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["flash_05.mgz", "flash_30.mgz"]' # type=list|default=[]: list of FLASH images (must be in mgh format) - out_dir: '"flash_parameters"' - # type=directory|default=: directory to store output in imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/fix_topology.yaml b/example-specs/interface/nipype/freesurfer/fix_topology.yaml index c66b8dc4..3d5addd9 100644 --- a/example-specs/interface/nipype/freesurfer/fix_topology.yaml +++ b/example-specs/interface/nipype/freesurfer/fix_topology.yaml @@ -6,24 +6,24 @@ # Docs # ---- # -# This program computes a mapping from the unit sphere onto the surface -# of the cortex from a previously generated approximation of the -# cortical surface, thus guaranteeing a topologically correct surface. +# This program computes a mapping from the unit sphere onto the surface +# of the cortex from a previously generated approximation of the +# cortical surface, thus guaranteeing a topologically correct surface. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import FixTopology +# >>> ft = FixTopology() +# >>> ft.inputs.in_orig = 'lh.orig' # doctest: +SKIP +# >>> ft.inputs.in_inflated = 'lh.inflated' # doctest: +SKIP +# >>> ft.inputs.sphere = 'lh.qsphere.nofix' # doctest: +SKIP +# >>> ft.inputs.hemisphere = 'lh' +# >>> ft.inputs.subject_id = '10335' +# >>> ft.inputs.mgz = True +# >>> ft.inputs.ga = True +# >>> ft.cmdline # doctest: +SKIP +# 'mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import FixTopology -# >>> ft = FixTopology() -# >>> ft.inputs.in_orig = 'lh.orig' # doctest: +SKIP -# >>> ft.inputs.in_inflated = 'lh.inflated' # doctest: +SKIP -# >>> ft.inputs.sphere = 'lh.qsphere.nofix' # doctest: +SKIP -# >>> ft.inputs.hemisphere = 'lh' -# >>> ft.inputs.subject_id = '10335' -# >>> ft.inputs.mgz = True -# >>> ft.inputs.ga = True -# >>> ft.cmdline # doctest: +SKIP -# 'mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh' -# task_name: FixTopology nipype_name: FixTopology nipype_module: nipype.interfaces.freesurfer.utils @@ -40,13 +40,13 @@ inputs: # passed to the field in the automatically generated unittests. 
in_brain: generic/file # type=file|default=: Implicit input brain.mgz - in_inflated: medimage-freesurfer/inflated + in_inflated: generic/file # type=file|default=: Undocumented input file .inflated - in_orig: medimage-freesurfer/orig + in_orig: fileformats.medimage_freesurfer.Orig # type=file|default=: Undocumented input file .orig in_wm: generic/file # type=file|default=: Implicit input wm.mgz - sphere: medimage-freesurfer/nofix + sphere: fileformats.medimage_freesurfer.Nofix # type=file|default=: Sphere input file subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -72,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -108,7 +108,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,20 +127,14 @@ tests: # (if not specified, will try to choose a sensible value) in_orig: # type=file|default=: Undocumented input file .orig - in_inflated: - # type=file|default=: Undocumented input file .inflated sphere: # type=file|default=: Sphere input file - hemisphere: '"lh"' - # type=string|default='': Hemisphere being processed subject_id: '"10335"' # type=string|default='subject_id': Subject being processed 
- mgz: 'True' - # type=bool|default=False: No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu ga: 'True' # type=bool|default=False: No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,7 +149,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_fix_topology -ga -mgz -sphere qsphere.nofix 10335 lh +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -163,20 +157,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_orig: '"lh.orig" # doctest: +SKIP' # type=file|default=: Undocumented input file .orig - in_inflated: '"lh.inflated" # doctest: +SKIP' - # type=file|default=: Undocumented input file .inflated sphere: '"lh.qsphere.nofix" # doctest: +SKIP' # type=file|default=: Sphere input file - hemisphere: '"lh"' - # type=string|default='': Hemisphere being processed subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - mgz: 'True' - # type=bool|default=False: No documentation. Direct questions to analysis-bugs@nmr.mgh.harvard.edu ga: 'True' # type=bool|default=False: No documentation. 
Direct questions to analysis-bugs@nmr.mgh.harvard.edu imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/fuse_segmentations.yaml b/example-specs/interface/nipype/freesurfer/fuse_segmentations.yaml index 10a30f9a..a5c25f69 100644 --- a/example-specs/interface/nipype/freesurfer/fuse_segmentations.yaml +++ b/example-specs/interface/nipype/freesurfer/fuse_segmentations.yaml @@ -7,19 +7,19 @@ # ---- # fuse segmentations together from multiple timepoints # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import FuseSegmentations -# >>> fuse = FuseSegmentations() -# >>> fuse.inputs.subject_id = 'tp.long.A.template' -# >>> fuse.inputs.timepoints = ['tp1', 'tp2'] -# >>> fuse.inputs.out_file = 'aseg.fused.mgz' -# >>> fuse.inputs.in_segmentations = ['aseg.mgz', 'aseg.mgz'] -# >>> fuse.inputs.in_segmentations_noCC = ['aseg.mgz', 'aseg.mgz'] -# >>> fuse.inputs.in_norms = ['norm.mgz', 'norm.mgz', 'norm.mgz'] -# >>> fuse.cmdline -# 'mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c aseg.mgz tp.long.A.template tp1 tp2' -# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import FuseSegmentations +# >>> fuse = FuseSegmentations() +# >>> fuse.inputs.subject_id = 'tp.long.A.template' +# >>> fuse.inputs.timepoints = ['tp1', 'tp2'] +# >>> fuse.inputs.out_file = 'aseg.fused.mgz' +# >>> fuse.inputs.in_segmentations = ['aseg.mgz', 'aseg.mgz'] +# >>> fuse.inputs.in_segmentations_noCC = ['aseg.mgz', 'aseg.mgz'] +# >>> fuse.inputs.in_norms = ['norm.mgz', 'norm.mgz', 'norm.mgz'] +# >>> fuse.cmdline +# 'mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c 
aseg.mgz tp.long.A.template tp1 tp2' +# task_name: FuseSegmentations nipype_name: FuseSegmentations nipype_module: nipype.interfaces.freesurfer.longitudinal @@ -34,15 +34,12 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_norms: medimage/mgh-gz+list-of + in_norms: generic/file+list-of # type=inputmultiobject|default=[]: -n - name of norm file to use (default: norm.mgs) must include the corresponding norm file for all given timepoints as well as for the current subject - in_segmentations: medimage/mgh-gz+list-of + in_segmentations: generic/file+list-of # type=inputmultiobject|default=[]: name of aseg file to use (default: aseg.mgz) must include the aseg files for all the given timepoints in_segmentations_noCC: medimage/mgh-gz+list-of # type=inputmultiobject|default=[]: name of aseg file w/o CC labels (default: aseg.auto_noCCseg.mgz) must include the corresponding file for all the given timepoints - out_file: Path - # type=file: output fused segmentation file - # type=file|default=: output fused segmentation file subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -68,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,19 +111,13 @@ tests: # (if not specified, will try to choose a sensible value) subject_id: '"tp.long.A.template"' # type=string|default='': subject_id being processed - timepoints: '["tp1", "tp2"]' - # type=inputmultiobject|default=[]: subject_ids or timepoints to be processed out_file: '"aseg.fused.mgz"' # type=file: output fused segmentation file # type=file|default=: output fused segmentation file - in_segmentations: - # type=inputmultiobject|default=[]: name of aseg file to use (default: aseg.mgz) must include the aseg files for all the given timepoints in_segmentations_noCC: # type=inputmultiobject|default=[]: name of aseg file w/o CC labels (default: aseg.auto_noCCseg.mgz) must include the corresponding file for all the given timepoints - in_norms: - # type=inputmultiobject|default=[]: -n - name of norm file to use (default: norm.mgs) must include the corresponding norm file for all given timepoints as well as for the current subject imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -141,7 +132,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_fuse_segmentations -n norm.mgz -a aseg.mgz -c aseg.mgz tp.long.A.template tp1 tp2 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -149,19 +140,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. subject_id: '"tp.long.A.template"' # type=string|default='': subject_id being processed - timepoints: '["tp1", "tp2"]' - # type=inputmultiobject|default=[]: subject_ids or timepoints to be processed out_file: '"aseg.fused.mgz"' # type=file: output fused segmentation file # type=file|default=: output fused segmentation file - in_segmentations: '["aseg.mgz", "aseg.mgz"]' - # type=inputmultiobject|default=[]: name of aseg file to use (default: aseg.mgz) must include the aseg files for all the given timepoints in_segmentations_noCC: '["aseg.mgz", "aseg.mgz"]' # type=inputmultiobject|default=[]: name of aseg file w/o CC labels (default: aseg.auto_noCCseg.mgz) must include the corresponding file for all the given timepoints - in_norms: '["norm.mgz", "norm.mgz", "norm.mgz"]' - # type=inputmultiobject|default=[]: -n - name of norm file to use (default: norm.mgs) must include the corresponding norm file for all given timepoints as well as for the current subject imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/glm_fit.yaml b/example-specs/interface/nipype/freesurfer/glm_fit.yaml index 3d8ffc12..995ff333 100644 --- a/example-specs/interface/nipype/freesurfer/glm_fit.yaml +++ b/example-specs/interface/nipype/freesurfer/glm_fit.yaml @@ -7,15 +7,15 @@ # ---- # Use FreeSurfer's mri_glmfit to specify and estimate a general linear model. # -# Examples -# -------- -# >>> glmfit = GLMFit() -# >>> glmfit.inputs.in_file = 'functional.nii' -# >>> glmfit.inputs.one_sample = True -# >>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd() -# True +# Examples +# -------- +# >>> glmfit = GLMFit() +# >>> glmfit.inputs.in_file = 'functional.nii' +# >>> glmfit.inputs.one_sample = True +# >>> glmfit.cmdline == 'mri_glmfit --glmdir %s --y functional.nii --osgm'%os.getcwd() +# True +# # -# task_name: GLMFit nipype_name: GLMFit nipype_module: nipype.interfaces.freesurfer.model @@ -42,9 +42,6 @@ inputs: # type=file|default=: input 4D file label_file: generic/file # type=file|default=: use label as mask, surfaces only - mask_file: Path - # type=file: map of the mask used in the analysis - # type=file|default=: binary mask per_voxel_reg: generic/file+list-of # type=inputmultiobject|default=[]: per-voxel regressors sim_done_file: generic/file @@ -107,7 +104,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields glm_dir: glm_dir # type=directory: output directory # type=str|default='': save outputs to dir @@ -217,6 +214,10 @@ tests: # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling logan: # type=tuple|default=(, , 0.0): RefTac TimeSec tstar : perform Logan kinetic modeling + bp_clip_neg: 
+ # type=bool|default=False: set negative BP voxels to zero + bp_clip_max: + # type=float|default=0.0: set BP voxels above max to max force_perm: # type=bool|default=False: force perumtation test, even when design matrix is not orthog diag: @@ -244,7 +245,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -263,10 +264,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input 4D file - one_sample: 'True' - # type=bool|default=False: construct X and C as a one-sample group mean imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -289,10 +288,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: input 4D file - one_sample: 'True' - # type=bool|default=False: construct X and C as a one-sample group mean imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/gtm_seg.yaml b/example-specs/interface/nipype/freesurfer/gtm_seg.yaml index 9b533db5..58308967 100644 --- a/example-specs/interface/nipype/freesurfer/gtm_seg.yaml +++ b/example-specs/interface/nipype/freesurfer/gtm_seg.yaml @@ -7,13 +7,13 @@ # ---- # create an anatomical segmentation for the geometric transfer matrix (GTM). # -# Examples -# -------- -# >>> gtmseg = GTMSeg() -# >>> gtmseg.inputs.subject_id = 'subject_id' -# >>> gtmseg.cmdline -# 'gtmseg --o gtmseg.mgz --s subject_id' -# +# Examples +# -------- +# >>> gtmseg = GTMSeg() +# >>> gtmseg.inputs.subject_id = 'subject_id' +# >>> gtmseg.cmdline +# 'gtmseg --o gtmseg.mgz --s subject_id' +# task_name: GTMSeg nipype_name: GTMSeg nipype_module: nipype.interfaces.freesurfer.petsurfer @@ -30,9 +30,6 @@ inputs: # passed to the field in the automatically generated unittests. 
colortable: generic/file # type=file|default=: colortable - out_file: Path - # type=file: GTM segmentation - # type=file|default='gtmseg.mgz': output volume relative to subject/mri subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -58,7 +55,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -107,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +124,7 @@ tests: subject_id: '"subject_id"' # type=string|default='': subject id imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -142,7 +139,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: gtmseg --o gtmseg.mgz --s subject_id +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -151,7 +148,7 @@ doctests: subject_id: '"subject_id"' # type=string|default='': subject id imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/gtmpvc.yaml b/example-specs/interface/nipype/freesurfer/gtmpvc.yaml index 26856dbc..90b84fbc 100644 --- a/example-specs/interface/nipype/freesurfer/gtmpvc.yaml +++ b/example-specs/interface/nipype/freesurfer/gtmpvc.yaml @@ -5,34 +5,34 @@ # # Docs # ---- -# create an anatomical segmentation for the geometric transfer matrix (GTM). +# Perform Partial Volume Correction (PVC) to PET Data. 
# -# Examples -# -------- -# >>> gtmpvc = GTMPVC() -# >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' -# >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' -# >>> gtmpvc.inputs.reg_file = 'sub-01_ses-baseline_pet_mean_reg.lta' -# >>> gtmpvc.inputs.pvc_dir = 'pvc' -# >>> gtmpvc.inputs.psf = 4 -# >>> gtmpvc.inputs.default_seg_merge = True -# >>> gtmpvc.inputs.auto_mask = (1, 0.1) -# >>> gtmpvc.inputs.km_ref = ['8 47'] -# >>> gtmpvc.inputs.km_hb = ['11 12 50 51'] -# >>> gtmpvc.inputs.no_rescale = True -# >>> gtmpvc.inputs.save_input = True -# >>> gtmpvc.cmdline # doctest: +NORMALIZE_WHITESPACE -# 'mri_gtmpvc --auto-mask 1.000000 0.100000 --default-seg-merge --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale --psf 4.000000 --o pvc --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz' +# Examples +# -------- +# >>> gtmpvc = GTMPVC() +# >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' +# >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' +# >>> gtmpvc.inputs.reg_file = 'sub-01_ses-baseline_pet_mean_reg.lta' +# >>> gtmpvc.inputs.pvc_dir = 'pvc' +# >>> gtmpvc.inputs.psf = 4 +# >>> gtmpvc.inputs.default_seg_merge = True +# >>> gtmpvc.inputs.auto_mask = (1, 0.1) +# >>> gtmpvc.inputs.km_ref = ['8 47'] +# >>> gtmpvc.inputs.km_hb = ['11 12 50 51'] +# >>> gtmpvc.inputs.no_rescale = True +# >>> gtmpvc.inputs.save_input = True +# >>> gtmpvc.cmdline # doctest: +NORMALIZE_WHITESPACE +# 'mri_gtmpvc --auto-mask 1.000000 0.100000 --default-seg-merge --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale --psf 4.000000 --o pvc --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz' +# +# >>> gtmpvc = GTMPVC() +# >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' +# >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' +# >>> gtmpvc.inputs.regheader = True +# >>> gtmpvc.inputs.pvc_dir = 'pvc' +# >>> gtmpvc.inputs.mg = (0.5, ["ROI1", "ROI2"]) +# >>> gtmpvc.cmdline # doctest: 
+NORMALIZE_WHITESPACE +# 'mri_gtmpvc --i sub-01_ses-baseline_pet.nii.gz --mg 0.5 ROI1 ROI2 --o pvc --regheader --seg gtmseg.mgz' # -# >>> gtmpvc = GTMPVC() -# >>> gtmpvc.inputs.in_file = 'sub-01_ses-baseline_pet.nii.gz' -# >>> gtmpvc.inputs.segmentation = 'gtmseg.mgz' -# >>> gtmpvc.inputs.regheader = True -# >>> gtmpvc.inputs.pvc_dir = 'pvc' -# >>> gtmpvc.inputs.mg = (0.5, ["ROI1", "ROI2"]) -# >>> gtmpvc.cmdline # doctest: +NORMALIZE_WHITESPACE -# 'mri_gtmpvc --i sub-01_ses-baseline_pet.nii.gz --mg 0.5 ROI1 ROI2 --o pvc --regheader --seg gtmseg.mgz' -# task_name: GTMPVC nipype_name: GTMPVC nipype_module: nipype.interfaces.freesurfer.petsurfer @@ -55,9 +55,9 @@ inputs: # type=file|default=: input volume - source data to pvc mask_file: generic/file # type=file|default=: ignore areas outside of the mask (in input vol space) - reg_file: medimage-freesurfer/lta + reg_file: fileformats.medimage_freesurfer.Lta # type=file|default=: LTA registration file that maps PET to anatomical - segmentation: medimage/mgh-gz + segmentation: generic/file # type=file|default=: segfile : anatomical segmentation to define regions for GTM subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -77,6 +77,8 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
+ eres: generic/file + # type=file: 4D PET file of residual error after PVC (smoothed with PSF) gtm_file: generic/file # type=file: TACs for all regions with GTM PVC gtm_stats: generic/file @@ -113,6 +115,14 @@ outputs: # type=file: Registration file to go from PET to anat reg_rbvpet2anat: generic/file # type=file: Registration file to go from RBV corrected PET to anat + seg: generic/file + # type=file: Segmentation file of regions used for PVC + seg_ctab: generic/file + # type=file: Color table file for segmentation file + tissue_fraction: generic/file + # type=file: 4D PET file of tissue fraction before PVC + tissue_fraction_psf: generic/file + # type=file: 4D PET file of tissue fraction after PVC (smoothed with PSF) yhat: generic/file # type=file: 4D PET file of signal estimate (yhat) after PVC (smoothed with PSF) yhat0: generic/file @@ -125,8 +135,8 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - pvc_dir: '"pvc"' + # dict[str, str] - `path_template` values to be provided to output fields + pvc_dir: pvc_dir # type=directory: output directory # type=str|default='': save outputs to dir requirements: @@ -254,7 +264,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -273,29 +283,18 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume - source data to pvc - segmentation: - # 
type=file|default=: segfile : anatomical segmentation to define regions for GTM reg_file: # type=file|default=: LTA registration file that maps PET to anatomical - pvc_dir: '"pvc"' - # type=directory: output directory - # type=str|default='': save outputs to dir psf: '4' # type=float|default=0.0: scanner PSF FWHM in mm - default_seg_merge: 'True' - # type=bool|default=False: default schema for merging ROIs auto_mask: (1, 0.1) # type=tuple|default=(0.0, 0.0): FWHM thresh : automatically compute mask - km_ref: '["8 47"]' - # type=list|default=[]: RefId1 RefId2 ... : compute reference TAC for KM as mean of given RefIds km_hb: '["11 12 50 51"]' # type=list|default=[]: RefId1 RefId2 ... : compute HiBinding TAC for KM as mean of given RefIds - no_rescale: 'True' - # type=bool|default=False: do not global rescale such that mean of reference region is scaleref save_input: 'True' # type=bool|default=False: saves rescaled input as input.rescaled.nii.gz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -314,17 +313,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume - source data to pvc - segmentation: - # type=file|default=: segfile : anatomical segmentation to define regions for GTM regheader: 'True' # type=bool|default=False: assume input and seg share scanner space - pvc_dir: '"pvc"' - # type=directory: output directory - # type=str|default='': save outputs to dir mg: (0.5, ["ROI1", "ROI2"]) # type=tuple|default=: gmthresh RefId1 RefId2 ...: perform Mueller-Gaertner PVC, gmthresh is min gm pvf bet 0 and 1 imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -339,7 +333,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_gtmpvc --auto-mask 1.000000 0.100000 --default-seg-merge --i sub-01_ses-baseline_pet.nii.gz --km-hb 11 12 50 51 --km-ref 8 47 --no-rescale --psf 4.000000 --o pvc --reg sub-01_ses-baseline_pet_mean_reg.lta --save-input --seg gtmseg.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -347,29 +341,18 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"sub-01_ses-baseline_pet.nii.gz"' # type=file|default=: input volume - source data to pvc - segmentation: '"gtmseg.mgz"' - # type=file|default=: segfile : anatomical segmentation to define regions for GTM reg_file: '"sub-01_ses-baseline_pet_mean_reg.lta"' # type=file|default=: LTA registration file that maps PET to anatomical - pvc_dir: '"pvc"' - # type=directory: output directory - # type=str|default='': save outputs to dir psf: '4' # type=float|default=0.0: scanner PSF FWHM in mm - default_seg_merge: 'True' - # type=bool|default=False: default schema for merging ROIs auto_mask: (1, 0.1) # type=tuple|default=(0.0, 0.0): FWHM thresh : automatically compute mask - km_ref: '["8 47"]' - # type=list|default=[]: RefId1 RefId2 ... : compute reference TAC for KM as mean of given RefIds km_hb: '["11 12 50 51"]' # type=list|default=[]: RefId1 RefId2 ... 
: compute HiBinding TAC for KM as mean of given RefIds - no_rescale: 'True' - # type=bool|default=False: do not global rescale such that mean of reference region is scaleref save_input: 'True' # type=bool|default=False: saves rescaled input as input.rescaled.nii.gz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -381,17 +364,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"sub-01_ses-baseline_pet.nii.gz"' # type=file|default=: input volume - source data to pvc - segmentation: '"gtmseg.mgz"' - # type=file|default=: segfile : anatomical segmentation to define regions for GTM regheader: 'True' # type=bool|default=False: assume input and seg share scanner space - pvc_dir: '"pvc"' - # type=directory: output directory - # type=str|default='': save outputs to dir mg: (0.5, ["ROI1", "ROI2"]) # type=tuple|default=: gmthresh RefId1 RefId2 ...: perform Mueller-Gaertner PVC, gmthresh is min gm pvf bet 0 and 1 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/image_info.yaml b/example-specs/interface/nipype/freesurfer/image_info.yaml index 9e2c025a..a2caa2f7 100644 --- a/example-specs/interface/nipype/freesurfer/image_info.yaml +++ b/example-specs/interface/nipype/freesurfer/image_info.yaml @@ -66,7 +66,7 @@ outputs: vox_sizes: vox_sizes_callable # type=tuple: voxel sizes (mm) templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -82,7 +82,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/jacobian.yaml b/example-specs/interface/nipype/freesurfer/jacobian.yaml index 5407295c..14fc8deb 100644 --- a/example-specs/interface/nipype/freesurfer/jacobian.yaml +++ b/example-specs/interface/nipype/freesurfer/jacobian.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# This program computes the Jacobian of a surface mapping. +# This program computes the Jacobian of a surface mapping. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Jacobian +# >>> jacobian = Jacobian() +# >>> jacobian.inputs.in_origsurf = 'lh.pial' +# >>> jacobian.inputs.in_mappedsurf = 'lh.pial' +# >>> jacobian.cmdline +# 'mris_jacobian lh.pial lh.pial lh.jacobian' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import Jacobian -# >>> jacobian = Jacobian() -# >>> jacobian.inputs.in_origsurf = 'lh.pial' -# >>> jacobian.inputs.in_mappedsurf = 'lh.pial' -# >>> jacobian.cmdline -# 'mris_jacobian lh.pial lh.pial lh.jacobian' -# task_name: Jacobian nipype_name: Jacobian nipype_module: nipype.interfaces.freesurfer.utils @@ -31,13 +31,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_mappedsurf: medimage-freesurfer/pial + in_mappedsurf: generic/file # type=file|default=: Mapped surface - in_origsurf: medimage-freesurfer/pial + in_origsurf: fileformats.medimage_freesurfer.Pial # type=file|default=: Original surface - out_file: Path - # type=file: Output Jacobian of the surface mapping - # type=file|default=: Output Jacobian of the surface mapping subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -63,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -84,7 +81,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list 
import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -103,10 +100,8 @@ tests: # (if not specified, will try to choose a sensible value) in_origsurf: # type=file|default=: Original surface - in_mappedsurf: - # type=file|default=: Mapped surface imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,7 +116,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_jacobian lh.pial lh.pial lh.jacobian +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -129,10 +124,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_origsurf: '"lh.pial"' # type=file|default=: Original surface - in_mappedsurf: '"lh.pial"' - # type=file|default=: Mapped surface imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/label_2_annot.yaml b/example-specs/interface/nipype/freesurfer/label_2_annot.yaml index c036e823..88edcbad 100644 --- a/example-specs/interface/nipype/freesurfer/label_2_annot.yaml +++ b/example-specs/interface/nipype/freesurfer/label_2_annot.yaml @@ -6,20 +6,20 @@ # Docs # ---- # -# Converts a set of surface labels to an annotation file +# Converts a set of surface labels to an annotation file +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import Label2Annot +# >>> l2a = Label2Annot() +# >>> l2a.inputs.hemisphere = 'lh' +# >>> l2a.inputs.subject_id = '10335' +# >>> l2a.inputs.in_labels = ['lh.aparc.label'] +# >>> l2a.inputs.orig = 'lh.pial' +# >>> l2a.inputs.out_annot = 'test' +# >>> l2a.cmdline +# 'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import Label2Annot -# >>> l2a = Label2Annot() -# >>> l2a.inputs.hemisphere = 'lh' -# >>> l2a.inputs.subject_id = '10335' -# >>> l2a.inputs.in_labels = ['lh.aparc.label'] -# >>> l2a.inputs.orig = 'lh.pial' -# >>> l2a.inputs.out_annot = 'test' -# >>> l2a.cmdline -# 'mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335' -# task_name: Label2Annot nipype_name: Label2Annot nipype_module: nipype.interfaces.freesurfer.model @@ -36,7 +36,7 @@ inputs: # passed to the field in the automatically generated unittests. 
color_table: generic/file # type=file|default=: File that defines the structure names, their indices, and their color - orig: medimage-freesurfer/pial + orig: generic/file # type=file|default=: implicit {hemisphere}.orig subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -62,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -94,7 +94,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -113,16 +113,12 @@ tests: # (if not specified, will try to choose a sensible value) hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere - subject_id: '"10335"' - # type=string|default='subject_id': Subject name/ID in_labels: '["lh.aparc.label"]' # type=list|default=[]: List of input label files - orig: - # type=file|default=: implicit {hemisphere}.orig out_annot: '"test"' # type=string|default='': Name of the annotation to create imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting 
of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -137,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_label2annot --hemi lh --l lh.aparc.label --a test --s 10335 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -145,16 +141,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere - subject_id: '"10335"' - # type=string|default='subject_id': Subject name/ID in_labels: '["lh.aparc.label"]' # type=list|default=[]: List of input label files - orig: '"lh.pial"' - # type=file|default=: implicit {hemisphere}.orig out_annot: '"test"' # type=string|default='': Name of the annotation to create imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/label_2_label.yaml b/example-specs/interface/nipype/freesurfer/label_2_label.yaml index c4b87986..934f9f55 100644 --- a/example-specs/interface/nipype/freesurfer/label_2_label.yaml +++ b/example-specs/interface/nipype/freesurfer/label_2_label.yaml @@ -6,31 +6,31 @@ # Docs # ---- # -# Converts a label in one subject's space to a label -# in another subject's space using either talairach or spherical -# as an intermediate registration space. 
+# Converts a label in one subject's space to a label +# in another subject's space using either talairach or spherical +# as an intermediate registration space. # -# If a source mask is used, then the input label must have been -# created from a surface (ie, the vertex numbers are valid). The -# format can be anything supported by mri_convert or curv or paint. -# Vertices in the source label that do not meet threshold in the -# mask will be removed from the label. +# If a source mask is used, then the input label must have been +# created from a surface (ie, the vertex numbers are valid). The +# format can be anything supported by mri_convert or curv or paint. +# Vertices in the source label that do not meet threshold in the +# mask will be removed from the label. +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import Label2Label +# >>> l2l = Label2Label() +# >>> l2l.inputs.hemisphere = 'lh' +# >>> l2l.inputs.subject_id = '10335' +# >>> l2l.inputs.sphere_reg = 'lh.pial' +# >>> l2l.inputs.white = 'lh.pial' +# >>> l2l.inputs.source_subject = 'fsaverage' +# >>> l2l.inputs.source_label = 'lh-pial.stl' +# >>> l2l.inputs.source_white = 'lh.pial' +# >>> l2l.inputs.source_sphere_reg = 'lh.pial' +# >>> l2l.cmdline +# 'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import Label2Label -# >>> l2l = Label2Label() -# >>> l2l.inputs.hemisphere = 'lh' -# >>> l2l.inputs.subject_id = '10335' -# >>> l2l.inputs.sphere_reg = 'lh.pial' -# >>> l2l.inputs.white = 'lh.pial' -# >>> l2l.inputs.source_subject = 'fsaverage' -# >>> l2l.inputs.source_label = 'lh-pial.stl' -# >>> l2l.inputs.source_white = 'lh.pial' -# >>> l2l.inputs.source_sphere_reg = 'lh.pial' -# >>> l2l.cmdline -# 'mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage 
--trgsubject 10335' -# task_name: Label2Label nipype_name: Label2Label nipype_module: nipype.interfaces.freesurfer.model @@ -45,20 +45,17 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: Path - # type=file: Output label - # type=file|default=: Target label - source_label: model/stl + source_label: generic/file # type=file|default=: Source label - source_sphere_reg: medimage-freesurfer/pial + source_sphere_reg: generic/file # type=file|default=: Implicit input .sphere.reg - source_white: medimage-freesurfer/pial + source_white: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input .white - sphere_reg: medimage-freesurfer/pial + sphere_reg: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input .sphere.reg subjects_dir: generic/directory # type=directory|default=: subjects directory - white: medimage-freesurfer/pial + white: generic/file # type=file|default=: Implicit input .white callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -83,7 +80,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -112,7 +109,7 @@ tests: registration_method: # type=enum|default='surface'|allowed['surface','volume']: Registration method copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. 
+ # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. subjects_dir: # type=directory|default=: subjects directory args: @@ -120,7 +117,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,22 +136,14 @@ tests: # (if not specified, will try to choose a sensible value) hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere - subject_id: '"10335"' - # type=string|default='subject_id': Target subject sphere_reg: # type=file|default=: Implicit input .sphere.reg - white: - # type=file|default=: Implicit input .white source_subject: '"fsaverage"' # type=string|default='': Source subject name - source_label: - # type=file|default=: Source label source_white: # type=file|default=: Implicit input .white - source_sphere_reg: - # type=file|default=: Implicit input .sphere.reg imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -169,7 +158,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_label2label --hemi lh --trglabel lh-pial_converted.stl --regmethod surface --srclabel lh-pial.stl --srcsubject fsaverage --trgsubject 10335 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -177,22 +166,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere - subject_id: '"10335"' - # type=string|default='subject_id': Target subject sphere_reg: '"lh.pial"' # type=file|default=: Implicit input .sphere.reg - white: '"lh.pial"' - # type=file|default=: Implicit input .white source_subject: '"fsaverage"' # type=string|default='': Source subject name - source_label: '"lh-pial.stl"' - # type=file|default=: Source label source_white: '"lh.pial"' # type=file|default=: Implicit input .white - source_sphere_reg: '"lh.pial"' - # type=file|default=: Implicit input .sphere.reg imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/label_2_vol.yaml b/example-specs/interface/nipype/freesurfer/label_2_vol.yaml index 3cd323a6..eecb7bd4 100644 --- a/example-specs/interface/nipype/freesurfer/label_2_vol.yaml +++ b/example-specs/interface/nipype/freesurfer/label_2_vol.yaml @@ -7,13 +7,13 @@ # ---- # Make a binary volume from a Freesurfer label # -# Examples -# -------- -# >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii') -# >>> binvol.cmdline -# 'mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' +# Examples +# -------- +# >>> binvol = Label2Vol(label_file='cortex.label', template_file='structural.nii', reg_file='register.dat', fill_thresh=0.5, vol_label_file='foo_out.nii') +# >>> binvol.cmdline +# 'mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii' +# # -# task_name: Label2Vol nipype_name: Label2Vol nipype_module: nipype.interfaces.freesurfer.model @@ -36,7 +36,7 @@ inputs: # type=file|default=: file with each frame is nhits for a label map_label_stat: generic/file # type=file|default=: map the label stats field into the vol - reg_file: datascience/dat-file + reg_file: fileformats.medimage_freesurfer.Dat # type=file|default=: tkregister style matrix VolXYZ = R*LabelXYZ reg_header: generic/file # type=file|default=: label template volume @@ -46,9 +46,6 @@ inputs: # type=directory|default=: subjects directory template_file: medimage/nifti1 # type=file|default=: output template volume - vol_label_file: Path - # type=file: output volume - # type=file|default=: output volume callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -72,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable 
classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields vol_label_file: '"foo_out.nii"' # type=file: output volume # type=file|default=: output volume @@ -128,7 +125,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,7 +154,7 @@ tests: # type=file: output volume # type=file|default=: output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -172,7 +169,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_label2vol --fillthresh 0.5 --label cortex.label --reg register.dat --temp structural.nii --o foo_out.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -190,7 +187,7 @@ doctests: # type=file: output volume # type=file|default=: output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/logan.yaml b/example-specs/interface/nipype/freesurfer/logan.yaml new file mode 100644 index 00000000..7dbb3505 --- /dev/null +++ b/example-specs/interface/nipype/freesurfer/logan.yaml @@ -0,0 +1,300 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.freesurfer.petsurfer.Logan' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Perform Logan kinetic modeling. +# Examples +# -------- +# >>> logan = Logan() +# >>> logan.inputs.in_file = 'tac.nii' +# >>> logan.inputs.logan = ('ref_tac.dat', 'timing.dat', 2600) +# >>> logan.inputs.glm_dir = 'logan' +# >>> logan.cmdline +# 'mri_glmfit --glmdir logan --y tac.nii --logan ref_tac.dat timing.dat 2600' +# +task_name: Logan +nipype_name: Logan +nipype_module: nipype.interfaces.freesurfer.petsurfer +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ contrast: generic/file+list-of + # type=inputmultiobject|default=[]: contrast file + design: generic/file + # type=file|default=: design matrix file + fixed_fx_dof_file: generic/file + # type=file|default=: text file with dof for fixed effects analysis + fixed_fx_var: generic/file + # type=file|default=: for fixed effects analysis + in_file: medimage/nifti1 + # type=file|default=: input 4D file + label_file: generic/file + # type=file|default=: use label as mask, surfaces only + per_voxel_reg: generic/file+list-of + # type=inputmultiobject|default=[]: per-voxel regressors + sim_done_file: generic/file + # type=file|default=: create file when simulation finished + subjects_dir: generic/directory + # type=directory|default=: subjects directory + weight_file: generic/file + # type=file|default=: weight for each input at each voxel + weighted_ls: generic/file + # type=file|default=: weighted least squares + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ beta_file: generic/file + # type=file: map of regression coefficients + bp_file: generic/file + # type=file: Binding potential estimates + dof_file: generic/file + # type=file: text file with effective degrees-of-freedom for the analysis + error_file: generic/file + # type=file: map of residual error + error_stddev_file: generic/file + # type=file: map of residual error standard deviation + error_var_file: generic/file + # type=file: map of residual error variance + estimate_file: generic/file + # type=file: map of the estimated Y values + frame_eigenvectors: generic/file + # type=file: matrix of frame eigenvectors from residual PCA + fwhm_file: generic/file + # type=file: text file with estimated smoothness + glm_dir: generic/directory + # type=directory: output directory + # type=str|default='': save outputs to dir + k2p_file: generic/file + # type=file: estimate of k2p parameter + mask_file: generic/file + # type=file: map of the mask used in the analysis + # type=file|default=: binary mask + singular_values: generic/file + # type=file: matrix singular values from residual PCA + spatial_eigenvectors: generic/file + # type=file: map of spatial eigenvectors from residual PCA + svd_stats_file: generic/file + # type=file: text file summarizing the residual PCA + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `path_template` values to be provided to output fields + glm_dir: '"logan"' + # type=directory: output directory + # type=str|default='': save outputs to dir + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + logan: + # type=tuple|default=(, , 0.0): RefTac TimeSec tstar : perform 
Logan kinetic modeling + glm_dir: + # type=directory: output directory + # type=str|default='': save outputs to dir + in_file: + # type=file|default=: input 4D file + fsgd: + # type=tuple|default=(, 'doss'): freesurfer descriptor file + design: + # type=file|default=: design matrix file + contrast: + # type=inputmultiobject|default=[]: contrast file + one_sample: + # type=bool|default=False: construct X and C as a one-sample group mean + no_contrast_ok: + # type=bool|default=False: do not fail if no contrasts specified + per_voxel_reg: + # type=inputmultiobject|default=[]: per-voxel regressors + self_reg: + # type=tuple|default=(0, 0, 0): self-regressor from index col row slice + weighted_ls: + # type=file|default=: weighted least squares + fixed_fx_var: + # type=file|default=: for fixed effects analysis + fixed_fx_dof: + # type=int|default=0: dof for fixed effects analysis + fixed_fx_dof_file: + # type=file|default=: text file with dof for fixed effects analysis + weight_file: + # type=file|default=: weight for each input at each voxel + weight_inv: + # type=bool|default=False: invert weights + weight_sqrt: + # type=bool|default=False: sqrt of weights + fwhm: + # type=range|default=0.0: smooth input by fwhm + var_fwhm: + # type=range|default=0.0: smooth variance by fwhm + no_mask_smooth: + # type=bool|default=False: do not mask when smoothing + no_est_fwhm: + # type=bool|default=False: turn off FWHM output estimation + mask_file: + # type=file: map of the mask used in the analysis + # type=file|default=: binary mask + label_file: + # type=file|default=: use label as mask, surfaces only + cortex: + # type=bool|default=False: use subjects ?h.cortex.label as label + invert_mask: + # type=bool|default=False: invert mask + prune: + # type=bool|default=False: remove voxels that do not have a non-zero value at each frame (def) + no_prune: + # type=bool|default=False: do not prune + prune_thresh: + # type=float|default=0.0: prune threshold. 
Default is FLT_MIN + compute_log_y: + # type=bool|default=False: compute natural log of y prior to analysis + save_estimate: + # type=bool|default=False: save signal estimate (yhat) + save_residual: + # type=bool|default=False: save residual error (eres) + save_res_corr_mtx: + # type=bool|default=False: save residual error spatial correlation matrix (eres.scm). Big! + surf: + # type=bool|default=False: analysis is on a surface mesh + subject_id: + # type=str|default='': subject id for surface geometry + hemi: + # type=enum|default='lh'|allowed['lh','rh']: surface hemisphere + surf_geo: + # type=str|default='white': surface geometry name (e.g. white, pial) + simulation: + # type=tuple|default=('perm', 0, 0.0, ''): nulltype nsim thresh csdbasename + sim_sign: + # type=enum|default='abs'|allowed['abs','neg','pos']: abs, pos, or neg + uniform: + # type=tuple|default=(0.0, 0.0): use uniform distribution instead of gaussian + pca: + # type=bool|default=False: perform pca/svd analysis on residual + calc_AR1: + # type=bool|default=False: compute and save temporal AR1 of residual + save_cond: + # type=bool|default=False: flag to save design matrix condition at each voxel + vox_dump: + # type=tuple|default=(0, 0, 0): dump voxel GLM and exit + seed: + # type=int|default=0: used for synthesizing noise + synth: + # type=bool|default=False: replace input with gaussian + resynth_test: + # type=int|default=0: test GLM by resynthsis + profile: + # type=int|default=0: niters : test speed + mrtm1: + # type=tuple|default=(, ): RefTac TimeSec : perform MRTM1 kinetic modeling + mrtm2: + # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling + bp_clip_neg: + # type=bool|default=False: set negative BP voxels to zero + bp_clip_max: + # type=float|default=0.0: set BP voxels above max to max + force_perm: + # type=bool|default=False: force perumtation test, even when design matrix is not orthog + diag: + # type=int|default=0: Gdiag_no : set diagnostic level + 
diag_cluster: + # type=bool|default=False: save sig volume and exit from first sim loop + debug: + # type=bool|default=False: turn on debugging + check_opts: + # type=bool|default=False: don't run anything, just check options and exit + allow_repeated_subjects: + # type=bool|default=False: allow subject names to repeat in the fsgd file (must appear before --fsgd + allow_ill_cond: + # type=bool|default=False: allow ill-conditioned design matrices + sim_done_file: + # type=file|default=: create file when simulation finished + nii: + # type=bool|default=False: save outputs as nii + nii_gz: + # type=bool|default=False: save outputs as nii.gz + subjects_dir: + # type=directory|default=: subjects directory + args: + # type=str|default='': Additional parameters to the command + environ: + # type=dict|default={}: Environment variables + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: input 4D file + glm_dir: '"logan"' + # type=directory: output directory + # type=str|default='': save outputs to dir + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + in_file: '"tac.nii"' + # type=file|default=: input 4D file + glm_dir: '"logan"' + # type=directory: output directory + # type=str|default='': save outputs to dir + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/lta_convert.yaml b/example-specs/interface/nipype/freesurfer/lta_convert.yaml index 64dee1bc..80b65ee5 100644 --- a/example-specs/interface/nipype/freesurfer/lta_convert.yaml +++ b/example-specs/interface/nipype/freesurfer/lta_convert.yaml @@ -6,12 +6,12 @@ # Docs # ---- # Convert different transformation formats. -# Some formats may require you to pass an image if the geometry information -# is missing form the transform file format. +# Some formats may require you to pass an image if the geometry information +# is missing form the transform file format. +# +# For complete details, see the `lta_convert documentation. +# `_ # -# For complete details, see the `lta_convert documentation. -# `_ -# task_name: LTAConvert nipype_name: LTAConvert nipype_module: nipype.interfaces.freesurfer.utils @@ -75,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -124,7 +124,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/make_average_subject.yaml b/example-specs/interface/nipype/freesurfer/make_average_subject.yaml 
index 3e10edbc..324ee794 100644 --- a/example-specs/interface/nipype/freesurfer/make_average_subject.yaml +++ b/example-specs/interface/nipype/freesurfer/make_average_subject.yaml @@ -7,15 +7,15 @@ # ---- # Make an average freesurfer subject # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import MakeAverageSubject +# >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2']) +# >>> avg.cmdline +# 'make_average_subject --out average --subjects s1 s2' # -# >>> from nipype.interfaces.freesurfer import MakeAverageSubject -# >>> avg = MakeAverageSubject(subjects_ids=['s1', 's2']) -# >>> avg.cmdline -# 'make_average_subject --out average --subjects s1 s2' # -# task_name: MakeAverageSubject nipype_name: MakeAverageSubject nipype_module: nipype.interfaces.freesurfer.utils @@ -30,8 +30,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_name: Path - # type=file|default='average': name for the average subject subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -56,7 +54,7 @@ outputs: average_subject_name: average_subject_name_callable # type=str: Output registration file templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -74,7 +72,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -94,7 +92,7 @@ tests: subjects_ids: '["s1", "s2"]' # type=list|default=[]: freesurfer subjects ids to average imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,7 +116,7 @@ doctests: subjects_ids: '["s1", "s2"]' # type=list|default=[]: freesurfer subjects ids to average imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting 
of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/make_surfaces.yaml b/example-specs/interface/nipype/freesurfer/make_surfaces.yaml index a1f7cbb7..b61d5b40 100644 --- a/example-specs/interface/nipype/freesurfer/make_surfaces.yaml +++ b/example-specs/interface/nipype/freesurfer/make_surfaces.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# This program positions the tessellation of the cortical surface at the -# white matter surface, then the gray matter surface and generate -# surface files for these surfaces as well as a 'curvature' file for the -# cortical thickness, and a surface file which approximates layer IV of -# the cortical sheet. +# This program positions the tessellation of the cortical surface at the +# white matter surface, then the gray matter surface and generate +# surface files for these surfaces as well as a 'curvature' file for the +# cortical thickness, and a surface file which approximates layer IV of +# the cortical sheet. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MakeSurfaces +# >>> makesurfaces = MakeSurfaces() +# >>> makesurfaces.inputs.hemisphere = 'lh' +# >>> makesurfaces.inputs.subject_id = '10335' +# >>> makesurfaces.inputs.in_orig = 'lh.pial' +# >>> makesurfaces.inputs.in_wm = 'wm.mgz' +# >>> makesurfaces.inputs.in_filled = 'norm.mgz' +# >>> makesurfaces.inputs.in_label = 'aparc+aseg.nii' +# >>> makesurfaces.inputs.in_T1 = 'T1.mgz' +# >>> makesurfaces.inputs.orig_pial = 'lh.pial' +# >>> makesurfaces.cmdline +# 'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MakeSurfaces -# >>> makesurfaces = MakeSurfaces() -# >>> makesurfaces.inputs.hemisphere = 'lh' -# >>> makesurfaces.inputs.subject_id = '10335' -# >>> makesurfaces.inputs.in_orig = 'lh.pial' -# >>> makesurfaces.inputs.in_wm = 'wm.mgz' -# >>> makesurfaces.inputs.in_filled = 'norm.mgz' -# >>> makesurfaces.inputs.in_label = 'aparc+aseg.nii' -# >>> makesurfaces.inputs.in_T1 = 'T1.mgz' -# >>> makesurfaces.inputs.orig_pial = 'lh.pial' -# >>> makesurfaces.cmdline -# 'mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh' -# task_name: MakeSurfaces nipype_name: MakeSurfaces nipype_module: nipype.interfaces.freesurfer.utils @@ -47,15 +47,15 @@ inputs: # type=file|default=: Input segmentation file in_filled: medimage/mgh-gz # type=file|default=: Implicit input file filled.mgz - in_label: medimage/nifti1 + in_label: generic/file # type=file|default=: Implicit input label/.aparc.annot - in_orig: medimage-freesurfer/pial + in_orig: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input file .orig in_white: generic/file # type=file|default=: Implicit input that is sometimes used - in_wm: medimage/mgh-gz + in_wm: generic/file # type=file|default=: Implicit input file wm.mgz - orig_pial: medimage-freesurfer/pial + orig_pial: generic/file # type=file|default=: Specify a pial surface to 
start with orig_white: generic/file # type=file|default=: Specify a white surface to start with @@ -93,7 +93,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -139,7 +139,7 @@ tests: white: # type=string|default='': White surface name copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. subjects_dir: # type=directory|default=: subjects directory args: @@ -147,7 +147,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -166,22 +166,14 @@ tests: # (if not specified, will try to choose a sensible value) hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed - subject_id: '"10335"' - # type=string|default='subject_id': Subject being processed in_orig: # type=file|default=: Implicit input file .orig - in_wm: - # type=file|default=: Implicit input file wm.mgz in_filled: # type=file|default=: Implicit input file filled.mgz - in_label: - # type=file|default=: Implicit input label/.aparc.annot in_T1: # 
type=file|default=: Input brain or T1 file - orig_pial: - # type=file|default=: Specify a pial surface to start with imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -196,7 +188,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_make_surfaces -T1 T1.mgz -orig pial -orig_pial pial 10335 lh +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -204,22 +196,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
hemisphere: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed - subject_id: '"10335"' - # type=string|default='subject_id': Subject being processed in_orig: '"lh.pial"' # type=file|default=: Implicit input file .orig - in_wm: '"wm.mgz"' - # type=file|default=: Implicit input file wm.mgz in_filled: '"norm.mgz"' # type=file|default=: Implicit input file filled.mgz - in_label: '"aparc+aseg.nii"' - # type=file|default=: Implicit input label/.aparc.annot in_T1: '"T1.mgz"' # type=file|default=: Input brain or T1 file - orig_pial: '"lh.pial"' - # type=file|default=: Specify a pial surface to start with imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mni_bias_correction.yaml b/example-specs/interface/nipype/freesurfer/mni_bias_correction.yaml index 93da1006..74c8374c 100644 --- a/example-specs/interface/nipype/freesurfer/mni_bias_correction.yaml +++ b/example-specs/interface/nipype/freesurfer/mni_bias_correction.yaml @@ -5,33 +5,33 @@ # # Docs # ---- -# Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI) -# used for correcting intensity non-uniformity (ie, bias fields). You must have the -# MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3] -# for more info. +# Wrapper for nu_correct, a program from the Montreal Neurological Institute (MNI) +# used for correcting intensity non-uniformity (ie, bias fields). You must have the +# MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3] +# for more info. 
# -# mri_nu_correct.mni uses float internally instead of uchar. It also rescales the output so -# that the global mean is the same as that of the input. These two changes are linked and -# can be turned off with --no-float +# mri_nu_correct.mni uses float internally instead of uchar. It also rescales the output so +# that the global mean is the same as that of the input. These two changes are linked and +# can be turned off with --no-float # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import MNIBiasCorrection -# >>> correct = MNIBiasCorrection() -# >>> correct.inputs.in_file = "norm.mgz" -# >>> correct.inputs.iterations = 6 -# >>> correct.inputs.protocol_iterations = 1000 -# >>> correct.inputs.distance = 50 -# >>> correct.cmdline -# 'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import MNIBiasCorrection +# >>> correct = MNIBiasCorrection() +# >>> correct.inputs.in_file = "norm.mgz" +# >>> correct.inputs.iterations = 6 +# >>> correct.inputs.protocol_iterations = 1000 +# >>> correct.inputs.distance = 50 +# >>> correct.cmdline +# 'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000' +# +# References +# ---------- +# [http://freesurfer.net/fswiki/mri_nu_correct.mni] +# [http://www.bic.mni.mcgill.ca/software/N3] +# [https://github.com/BIC-MNI/N3] # -# References -# ---------- -# [http://freesurfer.net/fswiki/mri_nu_correct.mni] -# [http://www.bic.mni.mcgill.ca/software/N3] -# [https://github.com/BIC-MNI/N3] # -# task_name: MNIBiasCorrection nipype_name: MNIBiasCorrection nipype_module: nipype.interfaces.freesurfer.preprocess @@ -50,9 +50,6 @@ inputs: # type=file|default=: input volume. Input can be any format accepted by mri_convert. mask: generic/file # type=file|default=: brainmask volume. Input can be any format accepted by mri_convert. 
- out_file: Path - # type=file: output volume - # type=file|default=: output volume. Output can be any format accepted by mri_convert. If the output format is COR, then the directory must exist. subjects_dir: generic/directory # type=directory|default=: subjects directory transform: generic/file @@ -80,7 +77,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -115,7 +112,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,14 +131,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume. Input can be any format accepted by mri_convert. - iterations: '6' - # type=int|default=4: Number of iterations to run nu_correct. Default is 4. This is the number of times that nu_correct is repeated (ie, using the output from the previous run as the input for the next). This is different than the -iterations option to nu_correct. protocol_iterations: '1000' # type=int|default=0: Passes Np as argument of the -iterations flag of nu_correct. This is different than the --n flag above. Default is not to pass nu_correct the -iterations flag. 
- distance: '50' - # type=int|default=0: N3 -distance option imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -156,7 +149,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -164,14 +157,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: input volume. Input can be any format accepted by mri_convert. - iterations: '6' - # type=int|default=4: Number of iterations to run nu_correct. Default is 4. This is the number of times that nu_correct is repeated (ie, using the output from the previous run as the input for the next). This is different than the -iterations option to nu_correct. protocol_iterations: '1000' # type=int|default=0: Passes Np as argument of the -iterations flag of nu_correct. This is different than the --n flag above. Default is not to pass nu_correct the -iterations flag. 
- distance: '50' - # type=int|default=0: N3 -distance option imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mp_rto_mni305.yaml b/example-specs/interface/nipype/freesurfer/mp_rto_mni305.yaml index 9ce30dca..9081276d 100644 --- a/example-specs/interface/nipype/freesurfer/mp_rto_mni305.yaml +++ b/example-specs/interface/nipype/freesurfer/mp_rto_mni305.yaml @@ -6,26 +6,26 @@ # Docs # ---- # -# For complete details, see FreeSurfer documentation +# For complete details, see FreeSurfer documentation +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import MPRtoMNI305, Info +# >>> mprtomni305 = MPRtoMNI305() +# >>> mprtomni305.inputs.target = 'structural.nii' +# >>> mprtomni305.inputs.reference_dir = '.' # doctest: +SKIP +# >>> mprtomni305.cmdline # doctest: +SKIP +# 'mpr2mni305 output' +# >>> mprtomni305.inputs.out_file = 'struct_out' # doctest: +SKIP +# >>> mprtomni305.cmdline # doctest: +SKIP +# 'mpr2mni305 struct_out' # doctest: +SKIP +# >>> mprtomni305.inputs.environ['REFDIR'] == os.path.join(Info.home(), 'average') # doctest: +SKIP +# True +# >>> mprtomni305.inputs.environ['MPR2MNI305_TARGET'] # doctest: +SKIP +# 'structural' +# >>> mprtomni305.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import MPRtoMNI305, Info -# >>> mprtomni305 = MPRtoMNI305() -# >>> mprtomni305.inputs.target = 'structural.nii' -# >>> mprtomni305.inputs.reference_dir = '.' 
# doctest: +SKIP -# >>> mprtomni305.cmdline # doctest: +SKIP -# 'mpr2mni305 output' -# >>> mprtomni305.inputs.out_file = 'struct_out' # doctest: +SKIP -# >>> mprtomni305.cmdline # doctest: +SKIP -# 'mpr2mni305 struct_out' # doctest: +SKIP -# >>> mprtomni305.inputs.environ['REFDIR'] == os.path.join(Info.home(), 'average') # doctest: +SKIP -# True -# >>> mprtomni305.inputs.environ['MPR2MNI305_TARGET'] # doctest: +SKIP -# 'structural' -# >>> mprtomni305.run() # doctest: +SKIP # -# task_name: MPRtoMNI305 nipype_name: MPRtoMNI305 nipype_module: nipype.interfaces.freesurfer.registration @@ -70,7 +70,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -90,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,12 +109,10 @@ tests: # (if not specified, will try to choose a sensible value) target: '"structural.nii"' # type=string|default='': input atlas file - reference_dir: '"." 
# doctest: +SKIP' - # type=directory|default='': TODO out_file: '"struct_out" # doctest: +SKIP' # type=file: The output file '_to__t4_vox2vox.txt' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,7 +127,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: structural +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -137,12 +135,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. target: '"structural.nii"' # type=string|default='': input atlas file - reference_dir: '"." # doctest: +SKIP' - # type=directory|default='': TODO out_file: '"struct_out" # doctest: +SKIP' # type=file: The output file '_to__t4_vox2vox.txt' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mr_is_ca_label.yaml b/example-specs/interface/nipype/freesurfer/mr_is_ca_label.yaml index 5f59bc82..1f61ee3f 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_ca_label.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_ca_label.yaml @@ -6,30 +6,30 @@ # Docs # ---- # -# For a single subject, produces an annotation file, in which each -# cortical surface vertex is assigned a neuroanatomical label.This -# automatic procedure employs data from a previously-prepared atlas -# file. An atlas file is created from a training set, capturing region -# data manually drawn by neuroanatomists combined with statistics on -# variability correlated to geometric information derived from the -# cortical model (sulcus and curvature). Besides the atlases provided -# with FreeSurfer, new ones can be prepared using mris_ca_train). +# For a single subject, produces an annotation file, in which each +# cortical surface vertex is assigned a neuroanatomical label.This +# automatic procedure employs data from a previously-prepared atlas +# file. An atlas file is created from a training set, capturing region +# data manually drawn by neuroanatomists combined with statistics on +# variability correlated to geometric information derived from the +# cortical model (sulcus and curvature). Besides the atlases provided +# with FreeSurfer, new ones can be prepared using mris_ca_train). 
# -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces import freesurfer +# >>> ca_label = freesurfer.MRIsCALabel() +# >>> ca_label.inputs.subject_id = "test" +# >>> ca_label.inputs.hemisphere = "lh" +# >>> ca_label.inputs.canonsurf = "lh.pial" +# >>> ca_label.inputs.curv = "lh.pial" +# >>> ca_label.inputs.sulc = "lh.pial" +# >>> ca_label.inputs.classifier = "im1.nii" # in pracice, use .gcs extension +# >>> ca_label.inputs.smoothwm = "lh.pial" +# >>> ca_label.cmdline +# 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot' # -# >>> from nipype.interfaces import freesurfer -# >>> ca_label = freesurfer.MRIsCALabel() -# >>> ca_label.inputs.subject_id = "test" -# >>> ca_label.inputs.hemisphere = "lh" -# >>> ca_label.inputs.canonsurf = "lh.pial" -# >>> ca_label.inputs.curv = "lh.pial" -# >>> ca_label.inputs.sulc = "lh.pial" -# >>> ca_label.inputs.classifier = "im1.nii" # in pracice, use .gcs extension -# >>> ca_label.inputs.smoothwm = "lh.pial" -# >>> ca_label.cmdline -# 'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot' -# task_name: MRIsCALabel nipype_name: MRIsCALabel nipype_module: nipype.interfaces.freesurfer.preprocess @@ -46,22 +46,19 @@ inputs: # passed to the field in the automatically generated unittests. aseg: generic/file # type=file|default=: Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file - canonsurf: medimage-freesurfer/pial + canonsurf: fileformats.medimage_freesurfer.Pial # type=file|default=: Input canonical surface file - classifier: medimage/nifti1 + classifier: generic/file # type=file|default=: Classifier array input file - curv: medimage-freesurfer/pial + curv: generic/file # type=file|default=: implicit input {hemisphere}.curv label: generic/file # type=file|default=: Undocumented flag. 
Autorecon3 uses ../label/{hemisphere}.cortex.label as input file - out_file: Path - # type=file: Output volume from MRIsCALabel - # type=file|default=: Annotated surface output file - smoothwm: medimage-freesurfer/pial + smoothwm: fileformats.medimage_freesurfer.Pial # type=file|default=: implicit input {hemisphere}.smoothwm subjects_dir: generic/directory # type=directory|default=: subjects directory - sulc: medimage-freesurfer/pial + sulc: fileformats.medimage_freesurfer.Pial # type=file|default=: implicit input {hemisphere}.sulc callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -86,7 +83,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -127,7 +124,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -146,20 +143,14 @@ tests: # (if not specified, will try to choose a sensible value) subject_id: '"test"' # type=string|default='subject_id': Subject name or ID - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere ('lh' or 'rh') canonsurf: # type=file|default=: Input canonical surface file - curv: - # type=file|default=: implicit input 
{hemisphere}.curv sulc: # type=file|default=: implicit input {hemisphere}.sulc - classifier: - # type=file|default=: Classifier array input file smoothwm: # type=file|default=: implicit input {hemisphere}.smoothwm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -182,20 +173,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. subject_id: '"test"' # type=string|default='subject_id': Subject name or ID - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere ('lh' or 'rh') canonsurf: '"lh.pial"' # type=file|default=: Input canonical surface file - curv: '"lh.pial"' - # type=file|default=: implicit input {hemisphere}.curv sulc: '"lh.pial"' # type=file|default=: implicit input {hemisphere}.sulc - classifier: '"im1.nii" # in pracice, use .gcs extension' - # type=file|default=: Classifier array input file smoothwm: '"lh.pial"' # type=file|default=: implicit input {hemisphere}.smoothwm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mr_is_calc.yaml b/example-specs/interface/nipype/freesurfer/mr_is_calc.yaml index d641db48..ede76b0f 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_calc.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_calc.yaml @@ -6,28 +6,28 @@ # Docs # ---- # -# 'mris_calc' is a simple calculator that operates on FreeSurfer -# curvatures and volumes. In most cases, the calculator functions with -# three arguments: two inputs and an linking them. Some -# actions, however, operate with only one input . In all cases, -# the first input is the name of a FreeSurfer curvature overlay -# (e.g. rh.curv) or volume file (e.g. orig.mgz). For two inputs, the -# calculator first assumes that the second input is a file. If, however, -# this second input file doesn't exist, the calculator assumes it refers -# to a float number, which is then processed according to .Note: -# and should typically be generated on the same subject. +# 'mris_calc' is a simple calculator that operates on FreeSurfer +# curvatures and volumes. In most cases, the calculator functions with +# three arguments: two inputs and an linking them. Some +# actions, however, operate with only one input . In all cases, +# the first input is the name of a FreeSurfer curvature overlay +# (e.g. rh.curv) or volume file (e.g. orig.mgz). For two inputs, the +# calculator first assumes that the second input is a file. If, however, +# this second input file doesn't exist, the calculator assumes it refers +# to a float number, which is then processed according to .Note: +# and should typically be generated on the same subject. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MRIsCalc +# >>> example = MRIsCalc() +# >>> example.inputs.in_file1 = 'lh.area' # doctest: +SKIP +# >>> example.inputs.in_file2 = 'lh.area.pial' # doctest: +SKIP +# >>> example.inputs.action = 'add' +# >>> example.inputs.out_file = 'area.mid' +# >>> example.cmdline # doctest: +SKIP +# 'mris_calc -o lh.area.mid lh.area add lh.area.pial' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MRIsCalc -# >>> example = MRIsCalc() -# >>> example.inputs.in_file1 = 'lh.area' # doctest: +SKIP -# >>> example.inputs.in_file2 = 'lh.area.pial' # doctest: +SKIP -# >>> example.inputs.action = 'add' -# >>> example.inputs.out_file = 'area.mid' -# >>> example.cmdline # doctest: +SKIP -# 'mris_calc -o lh.area.mid lh.area add lh.area.pial' -# task_name: MRIsCalc nipype_name: MRIsCalc nipype_module: nipype.interfaces.freesurfer.utils @@ -42,13 +42,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file1: medimage-freesurfer/area + in_file1: fileformats.medimage_freesurfer.Area # type=file|default=: Input file 1 - in_file2: medimage-freesurfer/pial + in_file2: generic/file # type=file|default=: Input file 2 - out_file: Path - # type=file: Output file after calculation - # type=file|default=: Output file after calculation subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -67,14 +64,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: audio/sp-midi + out_file: generic/file # type=file: Output file after calculation # type=file|default=: Output file after calculation callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,7 +98,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -120,15 +117,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file1: # type=file|default=: Input file 1 - in_file2: - # type=file|default=: Input file 2 action: '"add"' # type=string|default='': Action to perform on input file(s) - out_file: '"area.mid"' - # type=file: Output file after calculation - # type=file|default=: Output file after calculation imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -143,7 +135,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_calc -o lh.area.mid lh.area add lh.area.pial +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -151,15 +143,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file1: '"lh.area" # doctest: +SKIP' # type=file|default=: Input file 1 - in_file2: '"lh.area.pial" # doctest: +SKIP' - # type=file|default=: Input file 2 action: '"add"' # type=string|default='': Action to perform on input file(s) - out_file: '"area.mid"' - # type=file: Output file after calculation - # type=file|default=: Output file after calculation imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mr_is_combine.yaml b/example-specs/interface/nipype/freesurfer/mr_is_combine.yaml index 1bc45974..9d827600 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_combine.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_combine.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Uses Freesurfer's ``mris_convert`` to combine two surface files into one. +# Uses Freesurfer's ``mris_convert`` to combine two surface files into one. # -# For complete details, see the `mris_convert Documentation. -# `_ +# For complete details, see the `mris_convert Documentation. +# `_ # -# If given an ``out_file`` that does not begin with ``'lh.'`` or ``'rh.'``, -# ``mris_convert`` will prepend ``'lh.'`` to the file name. 
-# To avoid this behavior, consider setting ``out_file = './'``, or -# leaving out_file blank. +# If given an ``out_file`` that does not begin with ``'lh.'`` or ``'rh.'``, +# ``mris_convert`` will prepend ``'lh.'`` to the file name. +# To avoid this behavior, consider setting ``out_file = './'``, or +# leaving out_file blank. # -# In a Node/Workflow, ``out_file`` is interpreted literally. +# In a Node/Workflow, ``out_file`` is interpreted literally. # -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> mris = fs.MRIsCombine() +# >>> mris.inputs.in_files = ['lh.pial', 'rh.pial'] +# >>> mris.inputs.out_file = 'bh.pial' +# >>> mris.cmdline +# 'mris_convert --combinesurfs lh.pial rh.pial bh.pial' +# >>> mris.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> mris = fs.MRIsCombine() -# >>> mris.inputs.in_files = ['lh.pial', 'rh.pial'] -# >>> mris.inputs.out_file = 'bh.pial' -# >>> mris.cmdline -# 'mris_convert --combinesurfs lh.pial rh.pial bh.pial' -# >>> mris.run() # doctest: +SKIP -# task_name: MRIsCombine nipype_name: MRIsCombine nipype_module: nipype.interfaces.freesurfer.utils @@ -45,9 +45,6 @@ inputs: # passed to the field in the automatically generated unittests. in_files: medimage-freesurfer/pial+list-of # type=list|default=[]: Two surfaces to be combined. - out_file: Path - # type=file: Output filename. Combined surfaces from in_files. - # type=file|default=: Output filename. Combined surfaces from in_files. subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -66,15 +63,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-freesurfer/pial + out_file: generic/file # type=file: Output filename. Combined surfaces from in_files. 
# type=file|default=: Output filename. Combined surfaces from in_files. callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"bh.pial"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: Output filename. Combined surfaces from in_files. # type=file|default=: Output filename. Combined surfaces from in_files. requirements: @@ -95,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,11 +111,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=list|default=[]: Two surfaces to be combined. - out_file: '"bh.pial"' - # type=file: Output filename. Combined surfaces from in_files. - # type=file|default=: Output filename. Combined surfaces from in_files. imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.freesurfer as fs expected_outputs: @@ -142,11 +136,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["lh.pial", "rh.pial"]' # type=list|default=[]: Two surfaces to be combined. 
- out_file: '"bh.pial"' - # type=file: Output filename. Combined surfaces from in_files. - # type=file|default=: Output filename. Combined surfaces from in_files. imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mr_is_convert.yaml b/example-specs/interface/nipype/freesurfer/mr_is_convert.yaml index cdf8f3ab..61949380 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_convert.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_convert.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# Uses Freesurfer's mris_convert to convert surface files to various formats +# Uses Freesurfer's mris_convert to convert surface files to various formats # -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> mris = fs.MRIsConvert() +# >>> mris.inputs.in_file = 'lh.pial' +# >>> mris.inputs.out_datatype = 'gii' +# >>> mris.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> mris = fs.MRIsConvert() -# >>> mris.inputs.in_file = 'lh.pial' -# >>> mris.inputs.out_datatype = 'gii' -# >>> mris.run() # doctest: +SKIP -# task_name: MRIsConvert nipype_name: MRIsConvert nipype_module: nipype.interfaces.freesurfer.utils @@ -41,8 +41,6 @@ inputs: # type=file|default=: infile is .label file, label is name of this label labelstats_outfile: generic/file # type=file|default=: outfile is name of gifti file to which label stats will be written - out_file: Path - # type=file|default=: output filename or True to generate one parcstats_file: generic/file # type=file|default=: infile is name of text file 
containing label/val pairs scalarcurv_file: generic/file @@ -73,7 +71,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -127,7 +125,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/mr_is_expand.yaml b/example-specs/interface/nipype/freesurfer/mr_is_expand.yaml index 108e04f5..b86212dd 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_expand.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_expand.yaml @@ -6,20 +6,20 @@ # Docs # ---- # -# Expands a surface (typically ?h.white) outwards while maintaining -# smoothness and self-intersection constraints. +# Expands a surface (typically ?h.white) outwards while maintaining +# smoothness and self-intersection constraints. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MRIsExpand +# >>> mris_expand = MRIsExpand(thickness=True, distance=0.5) +# >>> mris_expand.inputs.in_file = 'lh.white' +# >>> mris_expand.cmdline +# 'mris_expand -thickness lh.white 0.5 expanded' +# >>> mris_expand.inputs.out_name = 'graymid' +# >>> mris_expand.cmdline +# 'mris_expand -thickness lh.white 0.5 graymid' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MRIsExpand -# >>> mris_expand = MRIsExpand(thickness=True, distance=0.5) -# >>> mris_expand.inputs.in_file = 'lh.white' -# >>> mris_expand.cmdline -# 'mris_expand -thickness lh.white 0.5 expanded' -# >>> mris_expand.inputs.out_name = 'graymid' -# >>> mris_expand.cmdline -# 'mris_expand -thickness lh.white 0.5 graymid' -# task_name: MRIsExpand nipype_name: MRIsExpand nipype_module: nipype.interfaces.freesurfer.utils @@ -34,7 +34,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_file: medimage-freesurfer/white + in_file: fileformats.medimage_freesurfer.White # type=file|default=: Surface to expand subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -60,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -98,7 +98,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +124,7 @@ tests: distance: '0.5' # type=float|default=0.0: Distance in mm or fraction of cortical thickness imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,7 +139,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_expand -thickness lh.white 0.5 graymid +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -154,7 +154,7 @@ doctests: distance: '0.5' # type=float|default=0.0: Distance in mm or fraction of cortical thickness imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mr_is_inflate.yaml b/example-specs/interface/nipype/freesurfer/mr_is_inflate.yaml index 7a75efd9..4afb1810 100644 --- a/example-specs/interface/nipype/freesurfer/mr_is_inflate.yaml +++ b/example-specs/interface/nipype/freesurfer/mr_is_inflate.yaml @@ -6,17 +6,17 @@ # Docs # ---- # -# This program will inflate a cortical surface. +# This program will inflate a cortical surface. 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MRIsInflate +# >>> inflate = MRIsInflate() +# >>> inflate.inputs.in_file = 'lh.pial' +# >>> inflate.inputs.no_save_sulc = True +# >>> inflate.cmdline # doctest: +SKIP +# 'mris_inflate -no-save-sulc lh.pial lh.inflated' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MRIsInflate -# >>> inflate = MRIsInflate() -# >>> inflate.inputs.in_file = 'lh.pial' -# >>> inflate.inputs.no_save_sulc = True -# >>> inflate.cmdline # doctest: +SKIP -# 'mris_inflate -no-save-sulc lh.pial lh.inflated' -# task_name: MRIsInflate nipype_name: MRIsInflate nipype_module: nipype.interfaces.freesurfer.utils @@ -31,14 +31,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for MRIsInflate - out_file: Path - # type=file: Output file for MRIsInflate - # type=file|default=: Output file for MRIsInflate - out_sulc: Path - # type=file: Output sulc file - # type=file|default=: Output sulc file subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -67,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -91,7 +85,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import 
statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -110,10 +104,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input file for MRIsInflate - no_save_sulc: 'True' - # type=bool|default=False: Do not save sulc file as output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -128,7 +120,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_inflate -no-save-sulc lh.pial lh.inflated +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -136,10 +128,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"lh.pial"' # type=file|default=: Input file for MRIsInflate - no_save_sulc: 'True' - # type=bool|default=False: Do not save sulc file as output imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mri_convert.yaml b/example-specs/interface/nipype/freesurfer/mri_convert.yaml index 8bf361c9..63de1373 100644 --- a/example-specs/interface/nipype/freesurfer/mri_convert.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_convert.yaml @@ -7,20 +7,20 @@ # ---- # use fs mri_convert to manipulate files # -# .. note:: -# Adds niigz as an output type option +# .. note:: +# Adds niigz as an output type option # -# Examples -# -------- +# Examples +# -------- +# +# >>> mc = MRIConvert() +# >>> mc.inputs.in_file = 'structural.nii' +# >>> mc.inputs.out_file = 'outfile.mgz' +# >>> mc.inputs.out_type = 'mgz' +# >>> mc.cmdline +# 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' # -# >>> mc = MRIConvert() -# >>> mc.inputs.in_file = 'structural.nii' -# >>> mc.inputs.out_file = 'outfile.mgz' -# >>> mc.inputs.out_type = 'mgz' -# >>> mc.cmdline -# 'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz' # -# task_name: MRIConvert nipype_name: MRIConvert nipype_module: nipype.interfaces.freesurfer.preprocess @@ -47,9 +47,6 @@ inputs: # type=file|default=: File to read/convert in_like: generic/file # type=file|default=: input looks like - out_file: Path - # type=outputmultiobject: converted output file - # type=file|default=: output filename or True to generate one reslice_like: generic/file # type=file|default=: reslice output to match file sdcm_list: generic/file @@ -74,15 +71,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/mgh-gz+list-of + out_file: generic/file+list-of # type=outputmultiobject: converted output file # type=file|default=: output filename or True to generate one callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"outfile.mgz"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=outputmultiobject: converted output file # type=file|default=: output filename or True to generate one requirements: @@ -265,7 +262,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -284,13 +281,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: File to read/convert - out_file: '"outfile.mgz"' - # type=outputmultiobject: converted output file - # type=file|default=: output filename or True to generate one out_type: '"mgz"' # type=enum|default='cor'|allowed['afni','analyze','analyze4d','bfloat','brik','bshort','cor','gdf','mgh','mgz','minc','nifti1','nii','niigz','otl','outline','sdt','spm']: output file type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: 
# dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -313,13 +307,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.nii"' # type=file|default=: File to read/convert - out_file: '"outfile.mgz"' - # type=outputmultiobject: converted output file - # type=file|default=: output filename or True to generate one out_type: '"mgz"' # type=enum|default='cor'|allowed['afni','analyze','analyze4d','bfloat','brik','bshort','cor','gdf','mgh','mgz','minc','nifti1','nii','niigz','otl','outline','sdt','spm']: output file type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mri_coreg.yaml b/example-specs/interface/nipype/freesurfer/mri_coreg.yaml index 44e2bb81..264fa18c 100644 --- a/example-specs/interface/nipype/freesurfer/mri_coreg.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_coreg.yaml @@ -7,38 +7,38 @@ # ---- # This program registers one volume to another # -# mri_coreg is a C reimplementation of spm_coreg in FreeSurfer +# mri_coreg is a C reimplementation of spm_coreg in FreeSurfer # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MRICoreg -# >>> coreg = MRICoreg() -# >>> coreg.inputs.source_file = 'moving1.nii' -# >>> coreg.inputs.reference_file = 'fixed1.nii' -# >>> coreg.inputs.subjects_dir = '.' -# >>> coreg.cmdline # doctest: +ELLIPSIS -# 'mri_coreg --lta .../registration.lta --ref fixed1.nii --mov moving1.nii --sd .' 
+# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MRICoreg +# >>> coreg = MRICoreg() +# >>> coreg.inputs.source_file = 'moving1.nii' +# >>> coreg.inputs.reference_file = 'fixed1.nii' +# >>> coreg.inputs.subjects_dir = '.' +# >>> coreg.cmdline # doctest: +ELLIPSIS +# 'mri_coreg --lta .../registration.lta --ref fixed1.nii --mov moving1.nii --sd .' # -# If passing a subject ID, the reference mask may be disabled: +# If passing a subject ID, the reference mask may be disabled: # -# >>> coreg = MRICoreg() -# >>> coreg.inputs.source_file = 'moving1.nii' -# >>> coreg.inputs.subjects_dir = '.' -# >>> coreg.inputs.subject_id = 'fsaverage' -# >>> coreg.inputs.reference_mask = False -# >>> coreg.cmdline # doctest: +ELLIPSIS -# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --mov moving1.nii --sd .' +# >>> coreg = MRICoreg() +# >>> coreg.inputs.source_file = 'moving1.nii' +# >>> coreg.inputs.subjects_dir = '.' +# >>> coreg.inputs.subject_id = 'fsaverage' +# >>> coreg.inputs.reference_mask = False +# >>> coreg.cmdline # doctest: +ELLIPSIS +# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --mov moving1.nii --sd .' # -# Spatial scales may be specified as a list of one or two separations: +# Spatial scales may be specified as a list of one or two separations: # -# >>> coreg.inputs.sep = [4] -# >>> coreg.cmdline # doctest: +ELLIPSIS -# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --mov moving1.nii --sd .' +# >>> coreg.inputs.sep = [4] +# >>> coreg.cmdline # doctest: +ELLIPSIS +# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --mov moving1.nii --sd .' +# +# >>> coreg.inputs.sep = [4, 5] +# >>> coreg.cmdline # doctest: +ELLIPSIS +# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --sep 5 --mov moving1.nii --sd .' 
# -# >>> coreg.inputs.sep = [4, 5] -# >>> coreg.cmdline # doctest: +ELLIPSIS -# 'mri_coreg --s fsaverage --no-ref-mask --lta .../registration.lta --sep 4 --sep 5 --mov moving1.nii --sd .' -# task_name: MRICoreg nipype_name: MRICoreg nipype_module: nipype.interfaces.freesurfer.registration @@ -53,7 +53,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - reference_file: medimage/nifti1 + reference_file: generic/file # type=file|default=: reference (target) file source_file: medimage/nifti1 # type=file|default=: source file to be registered @@ -88,7 +88,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -163,7 +163,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -182,12 +182,10 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: source file to be registered - reference_file: - # type=file|default=: reference (target) file subjects_dir: '"."' # type=directory|default=: FreeSurfer SUBJECTS_DIR 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -206,14 +204,10 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: source file to be registered - subjects_dir: '"."' - # type=directory|default=: FreeSurfer SUBJECTS_DIR subject_id: '"fsaverage"' # type=str|default='': freesurfer subject ID (implies ``reference_mask == aparc+aseg.mgz`` unless otherwise specified) - reference_mask: 'False' - # type=traitcompound|default=None: mask reference volume with given mask, or None if ``False`` imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -233,7 +227,7 @@ tests: sep: '[4]' # type=list|default=[]: set spatial scales, in voxels (default [2, 4]) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -253,7 +247,7 @@ tests: sep: '[4, 5]' # type=list|default=[]: set spatial scales, in voxels (default [2, 4]) imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -268,7 +262,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_coreg --lta .../registration.lta --ref fixed1.nii --mov moving1.nii --sd . +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -276,12 +270,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_file: '"moving1.nii"' # type=file|default=: source file to be registered - reference_file: '"fixed1.nii"' - # type=file|default=: reference (target) file subjects_dir: '"."' # type=directory|default=: FreeSurfer SUBJECTS_DIR imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -293,14 +285,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
source_file: '"moving1.nii"' # type=file|default=: source file to be registered - subjects_dir: '"."' - # type=directory|default=: FreeSurfer SUBJECTS_DIR subject_id: '"fsaverage"' # type=str|default='': freesurfer subject ID (implies ``reference_mask == aparc+aseg.mgz`` unless otherwise specified) - reference_mask: 'False' - # type=traitcompound|default=None: mask reference volume with given mask, or None if ``False`` imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -313,7 +301,7 @@ doctests: sep: '[4]' # type=list|default=[]: set spatial scales, in voxels (default [2, 4]) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -326,7 +314,7 @@ doctests: sep: '[4, 5]' # type=list|default=[]: set spatial scales, in voxels (default [2, 4]) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mri_fill.yaml b/example-specs/interface/nipype/freesurfer/mri_fill.yaml index 5147e808..10242b1d 100644 --- a/example-specs/interface/nipype/freesurfer/mri_fill.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_fill.yaml @@ -6,18 +6,18 @@ # Docs # ---- # -# This program creates hemispheric cutting planes and fills white matter -# with specific values for subsequent surface tessellation. +# This program creates hemispheric cutting planes and fills white matter +# with specific values for subsequent surface tessellation. +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import MRIFill +# >>> fill = MRIFill() +# >>> fill.inputs.in_file = 'wm.mgz' # doctest: +SKIP +# >>> fill.inputs.out_file = 'filled.mgz' # doctest: +SKIP +# >>> fill.cmdline # doctest: +SKIP +# 'mri_fill wm.mgz filled.mgz' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import MRIFill -# >>> fill = MRIFill() -# >>> fill.inputs.in_file = 'wm.mgz' # doctest: +SKIP -# >>> fill.inputs.out_file = 'filled.mgz' # doctest: +SKIP -# >>> fill.cmdline # doctest: +SKIP -# 'mri_fill wm.mgz filled.mgz' -# task_name: MRIFill nipype_name: MRIFill nipype_module: nipype.interfaces.freesurfer.utils @@ -34,12 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/mgh-gz # type=file|default=: Input white matter file - log_file: Path - # type=file: Output log file from MRIFill - # type=file|default=: Output log file for MRIFill - out_file: Path - # type=file: Output file from MRIFill - # type=file|default=: Output filled volume file name for MRIFill segmentation: generic/file # type=file|default=: Input segmentation file for MRIFill subjects_dir: generic/directory @@ -65,14 +59,14 @@ outputs: log_file: generic/file # type=file: Output log file from MRIFill # type=file|default=: Output log file for MRIFill - out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output file from MRIFill # type=file|default=: Output filled volume file name for MRIFill callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -98,7 +92,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -117,11 +111,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input white matter file - out_file: '"filled.mgz" # doctest: +SKIP' - # type=file: Output file from MRIFill - # type=file|default=: Output filled volume file name for MRIFill imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -136,7 +127,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_fill wm.mgz filled.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -144,11 +135,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"wm.mgz" # doctest: +SKIP' # type=file|default=: Input white matter file - out_file: '"filled.mgz" # doctest: +SKIP' - # type=file: Output file from MRIFill - # type=file|default=: Output filled volume file name for MRIFill imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mri_marching_cubes.yaml b/example-specs/interface/nipype/freesurfer/mri_marching_cubes.yaml index a81ebcc2..c32aedae 100644 --- a/example-specs/interface/nipype/freesurfer/mri_marching_cubes.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_marching_cubes.yaml @@ -6,18 +6,18 @@ # Docs # ---- # -# Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume +# Uses Freesurfer's mri_mc to create surfaces by tessellating a given input volume # -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> mc = fs.MRIMarchingCubes() +# >>> mc.inputs.in_file = 'aseg.mgz' +# >>> mc.inputs.label_value = 17 +# >>> mc.inputs.out_file = 'lh.hippocampus' +# >>> mc.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> mc = fs.MRIMarchingCubes() -# >>> mc.inputs.in_file = 'aseg.mgz' -# >>> mc.inputs.label_value = 17 -# >>> mc.inputs.out_file = 'lh.hippocampus' -# >>> mc.run() # doctest: +SKIP -# task_name: MRIMarchingCubes nipype_name: MRIMarchingCubes nipype_module: nipype.interfaces.freesurfer.utils @@ -34,8 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: Input volume to tessellate voxels from. 
- out_file: Path - # type=file|default=: output filename or True to generate one subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -62,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -84,7 +82,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/mri_pretess.yaml b/example-specs/interface/nipype/freesurfer/mri_pretess.yaml index 857e6651..515f5a13 100644 --- a/example-specs/interface/nipype/freesurfer/mri_pretess.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_pretess.yaml @@ -6,24 +6,24 @@ # Docs # ---- # -# Uses Freesurfer's mri_pretess to prepare volumes to be tessellated. +# Uses Freesurfer's mri_pretess to prepare volumes to be tessellated. # -# Changes white matter (WM) segmentation so that the neighbors of all -# voxels labeled as WM have a face in common - no edges or corners -# allowed. +# Changes white matter (WM) segmentation so that the neighbors of all +# voxels labeled as WM have a face in common - no edges or corners +# allowed. 
+# +# Example +# ------- +# >>> import nipype.interfaces.freesurfer as fs +# >>> pretess = fs.MRIPretess() +# >>> pretess.inputs.in_filled = 'wm.mgz' +# >>> pretess.inputs.in_norm = 'norm.mgz' +# >>> pretess.inputs.nocorners = True +# >>> pretess.cmdline +# 'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz' +# >>> pretess.run() # doctest: +SKIP # -# Example -# ------- -# >>> import nipype.interfaces.freesurfer as fs -# >>> pretess = fs.MRIPretess() -# >>> pretess.inputs.in_filled = 'wm.mgz' -# >>> pretess.inputs.in_norm = 'norm.mgz' -# >>> pretess.inputs.nocorners = True -# >>> pretess.cmdline -# 'mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz' -# >>> pretess.run() # doctest: +SKIP # -# task_name: MRIPretess nipype_name: MRIPretess nipype_module: nipype.interfaces.freesurfer.utils @@ -40,11 +40,8 @@ inputs: # passed to the field in the automatically generated unittests. in_filled: medimage/mgh-gz # type=file|default=: filled volume, usually wm.mgz - in_norm: medimage/mgh-gz + in_norm: generic/file # type=file|default=: the normalized, brain-extracted T1w image. Usually norm.mgz - out_file: Path - # type=file: output file after mri_pretess - # type=file|default=: the output file after mri_pretess. 
subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -70,7 +67,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +96,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,12 +115,10 @@ tests: # (if not specified, will try to choose a sensible value) in_filled: # type=file|default=: filled volume, usually wm.mgz - in_norm: - # type=file|default=: the normalized, brain-extracted T1w image. Usually norm.mgz nocorners: 'True' # type=bool|default=False: do not remove corner configurations in addition to edge ones. imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.freesurfer as fs expected_outputs: @@ -139,7 +134,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_pretess -nocorners wm.mgz wm norm.mgz wm_pretesswm.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -147,12 +142,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_filled: '"wm.mgz"' # type=file|default=: filled volume, usually wm.mgz - in_norm: '"norm.mgz"' - # type=file|default=: the normalized, brain-extracted T1w image. Usually norm.mgz nocorners: 'True' # type=bool|default=False: do not remove corner configurations in addition to edge ones. imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mri_tessellate.yaml b/example-specs/interface/nipype/freesurfer/mri_tessellate.yaml index cfec799c..824b9670 100644 --- a/example-specs/interface/nipype/freesurfer/mri_tessellate.yaml +++ b/example-specs/interface/nipype/freesurfer/mri_tessellate.yaml @@ -6,18 +6,18 @@ # Docs # ---- # -# Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume +# Uses Freesurfer's mri_tessellate to create surfaces by tessellating a given input volume # -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> tess = fs.MRITessellate() +# >>> tess.inputs.in_file = 'aseg.mgz' +# >>> tess.inputs.label_value = 17 +# >>> tess.inputs.out_file = 'lh.hippocampus' +# >>> tess.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> tess = fs.MRITessellate() -# >>> tess.inputs.in_file = 'aseg.mgz' -# >>> tess.inputs.label_value = 17 -# >>> tess.inputs.out_file = 'lh.hippocampus' -# >>> tess.run() # doctest: +SKIP -# task_name: MRITessellate nipype_name: MRITessellate nipype_module: nipype.interfaces.freesurfer.utils @@ -34,8 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: Input volume to tessellate voxels from. 
- out_file: Path - # type=file|default=: output filename or True to generate one subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -62,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +84,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/mris_preproc.yaml b/example-specs/interface/nipype/freesurfer/mris_preproc.yaml index 8dba105c..24adcd89 100644 --- a/example-specs/interface/nipype/freesurfer/mris_preproc.yaml +++ b/example-specs/interface/nipype/freesurfer/mris_preproc.yaml @@ -6,19 +6,19 @@ # Docs # ---- # Use FreeSurfer mris_preproc to prepare a group of contrasts for -# a second level analysis +# a second level analysis +# +# Examples +# -------- +# >>> preproc = MRISPreproc() +# >>> preproc.inputs.target = 'fsaverage' +# >>> preproc.inputs.hemi = 'lh' +# >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), ('cont1a.nii', 'register.dat')] +# >>> preproc.inputs.out_file = 'concatenated_file.mgz' +# >>> preproc.cmdline +# 'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv 
cont1.nii register.dat --iv cont1a.nii register.dat' # -# Examples -# -------- -# >>> preproc = MRISPreproc() -# >>> preproc.inputs.target = 'fsaverage' -# >>> preproc.inputs.hemi = 'lh' -# >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), ('cont1a.nii', 'register.dat')] -# >>> preproc.inputs.out_file = 'concatenated_file.mgz' -# >>> preproc.cmdline -# 'mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' # -# task_name: MRISPreproc nipype_name: MRISPreproc nipype_module: nipype.interfaces.freesurfer.model @@ -35,9 +35,6 @@ inputs: # passed to the field in the automatically generated unittests. fsgd_file: generic/file # type=file|default=: specify subjects using fsgd file - out_file: Path - # type=file: preprocessed output file - # type=file|default=: output filename subject_file: generic/file # type=file|default=: file specifying subjects separated by white space subjects_dir: generic/directory @@ -60,15 +57,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/mgh-gz + out_file: generic/file # type=file: preprocessed output file # type=file|default=: output filename callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"concatenated_file.mgz"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: preprocessed output file # type=file|default=: output filename requirements: @@ -121,7 +118,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -140,15 +137,10 @@ tests: # (if not specified, will try to choose a sensible value) target: '"fsaverage"' # type=str|default='': target subject name - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere for source and target vol_measure_file: '[("cont1.nii", "register.dat"), ("cont1a.nii", "register.dat")]' # type=inputmultiobject|default=[]: list of volume measure and reg file tuples - out_file: '"concatenated_file.mgz"' - # type=file: preprocessed output file - # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values 
for selected outputs, noting that tests will typically @@ -163,7 +155,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_preproc --hemi lh --out concatenated_file.mgz --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -171,15 +163,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. target: '"fsaverage"' # type=str|default='': target subject name - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere for source and target vol_measure_file: '[("cont1.nii", "register.dat"), ("cont1a.nii", "register.dat")]' # type=inputmultiobject|default=[]: list of volume measure and reg file tuples - out_file: '"concatenated_file.mgz"' - # type=file: preprocessed output file - # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mris_preproc_recon_all.yaml b/example-specs/interface/nipype/freesurfer/mris_preproc_recon_all.yaml index f475db2e..e2858f65 100644 --- a/example-specs/interface/nipype/freesurfer/mris_preproc_recon_all.yaml +++ b/example-specs/interface/nipype/freesurfer/mris_preproc_recon_all.yaml @@ -7,17 +7,17 @@ # ---- # Extends MRISPreproc to allow it to be used in a recon-all workflow # -# Examples -# -------- -# >>> preproc = MRISPreprocReconAll() -# >>> preproc.inputs.target = 'fsaverage' -# >>> preproc.inputs.hemi = 'lh' -# >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), ('cont1a.nii', 'register.dat')] -# >>> preproc.inputs.out_file = 'concatenated_file.mgz' -# >>> preproc.cmdline -# 'mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' +# Examples +# -------- +# >>> preproc = MRISPreprocReconAll() +# >>> preproc.inputs.target = 'fsaverage' +# >>> preproc.inputs.hemi = 'lh' +# >>> preproc.inputs.vol_measure_file = [('cont1.nii', 'register.dat'), ('cont1a.nii', 'register.dat')] +# >>> preproc.inputs.out_file = 'concatenated_file.mgz' +# >>> preproc.cmdline +# 'mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat' +# # -# task_name: MRISPreprocReconAll nipype_name: MRISPreprocReconAll nipype_module: nipype.interfaces.freesurfer.model @@ -36,9 +36,6 @@ inputs: # type=file|default=: specify subjects using fsgd file lh_surfreg_target: generic/file # type=file|default=: Implicit target surface registration file - out_file: Path - # type=file: preprocessed output file - # type=file|default=: output filename rh_surfreg_target: generic/file # type=file|default=: Implicit target surface registration file subject_file: generic/file @@ -65,15 +62,15 @@ outputs: # from the nipype interface, but you may want to be 
more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage/mgh-gz + out_file: generic/file # type=file: preprocessed output file # type=file|default=: output filename callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"concatenated_file.mgz"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: preprocessed output file # type=file|default=: output filename requirements: @@ -136,7 +133,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,15 +152,10 @@ tests: # (if not specified, will try to choose a sensible value) target: '"fsaverage"' # type=str|default='': target subject name - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere for source and target vol_measure_file: '[("cont1.nii", "register.dat"), ("cont1a.nii", "register.dat")]' # type=inputmultiobject|default=[]: list of volume measure and reg file tuples - out_file: '"concatenated_file.mgz"' - # type=file: preprocessed output file - # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list 
import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -178,7 +170,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_preproc --hemi lh --out concatenated_file.mgz --s subject_id --target fsaverage --iv cont1.nii register.dat --iv cont1a.nii register.dat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -186,15 +178,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. target: '"fsaverage"' # type=str|default='': target subject name - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere for source and target vol_measure_file: '[("cont1.nii", "register.dat"), ("cont1a.nii", "register.dat")]' # type=inputmultiobject|default=[]: list of volume measure and reg file tuples - out_file: '"concatenated_file.mgz"' - # type=file: preprocessed output file - # type=file|default=: output filename imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mrtm1.yaml b/example-specs/interface/nipype/freesurfer/mrtm1.yaml new file mode 100644 index 00000000..cb157d86 --- /dev/null +++ b/example-specs/interface/nipype/freesurfer/mrtm1.yaml @@ -0,0 +1,301 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.freesurfer.petsurfer.MRTM1' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Docs +# ---- +# Perform MRTM1 kinetic modeling. +# +# Examples +# -------- +# >>> mrtm = MRTM1() +# >>> mrtm.inputs.in_file = 'tac.nii' +# >>> mrtm.inputs.mrtm1 = ('ref_tac.dat', 'timing.dat') +# >>> mrtm.inputs.glm_dir = 'mrtm' +# >>> mrtm.cmdline +# 'mri_glmfit --glmdir mrtm --y tac.nii --mrtm1 ref_tac.dat timing.dat' +# +task_name: MRTM1 +nipype_name: MRTM1 +nipype_module: nipype.interfaces.freesurfer.petsurfer +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ contrast: generic/file+list-of + # type=inputmultiobject|default=[]: contrast file + design: generic/file + # type=file|default=: design matrix file + fixed_fx_dof_file: generic/file + # type=file|default=: text file with dof for fixed effects analysis + fixed_fx_var: generic/file + # type=file|default=: for fixed effects analysis + in_file: medimage/nifti1 + # type=file|default=: input 4D file + label_file: generic/file + # type=file|default=: use label as mask, surfaces only + per_voxel_reg: generic/file+list-of + # type=inputmultiobject|default=[]: per-voxel regressors + sim_done_file: generic/file + # type=file|default=: create file when simulation finished + subjects_dir: generic/directory + # type=directory|default=: subjects directory + weight_file: generic/file + # type=file|default=: weight for each input at each voxel + weighted_ls: generic/file + # type=file|default=: weighted least squares + callable_defaults: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set as the `default` method of input fields + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ beta_file: generic/file + # type=file: map of regression coefficients + bp_file: generic/file + # type=file: Binding potential estimates + dof_file: generic/file + # type=file: text file with effective degrees-of-freedom for the analysis + error_file: generic/file + # type=file: map of residual error + error_stddev_file: generic/file + # type=file: map of residual error standard deviation + error_var_file: generic/file + # type=file: map of residual error variance + estimate_file: generic/file + # type=file: map of the estimated Y values + frame_eigenvectors: generic/file + # type=file: matrix of frame eigenvectors from residual PCA + fwhm_file: generic/file + # type=file: text file with estimated smoothness + glm_dir: generic/directory + # type=directory: output directory + # type=str|default='': save outputs to dir + k2p_file: generic/file + # type=file: estimate of k2p parameter + mask_file: generic/file + # type=file: map of the mask used in the analysis + # type=file|default=: binary mask + singular_values: generic/file + # type=file: matrix singular values from residual PCA + spatial_eigenvectors: generic/file + # type=file: map of spatial eigenvectors from residual PCA + svd_stats_file: generic/file + # type=file: text file summarizing the residual PCA + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `path_template` values to be provided to output fields + glm_dir: '"mrtm"' + # type=directory: output directory + # type=str|default='': save outputs to dir + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + mrtm1: + # type=tuple|default=(, ): RefTac TimeSec : perform MRTM1 
kinetic modeling + glm_dir: + # type=directory: output directory + # type=str|default='': save outputs to dir + in_file: + # type=file|default=: input 4D file + fsgd: + # type=tuple|default=(, 'doss'): freesurfer descriptor file + design: + # type=file|default=: design matrix file + contrast: + # type=inputmultiobject|default=[]: contrast file + one_sample: + # type=bool|default=False: construct X and C as a one-sample group mean + no_contrast_ok: + # type=bool|default=False: do not fail if no contrasts specified + per_voxel_reg: + # type=inputmultiobject|default=[]: per-voxel regressors + self_reg: + # type=tuple|default=(0, 0, 0): self-regressor from index col row slice + weighted_ls: + # type=file|default=: weighted least squares + fixed_fx_var: + # type=file|default=: for fixed effects analysis + fixed_fx_dof: + # type=int|default=0: dof for fixed effects analysis + fixed_fx_dof_file: + # type=file|default=: text file with dof for fixed effects analysis + weight_file: + # type=file|default=: weight for each input at each voxel + weight_inv: + # type=bool|default=False: invert weights + weight_sqrt: + # type=bool|default=False: sqrt of weights + fwhm: + # type=range|default=0.0: smooth input by fwhm + var_fwhm: + # type=range|default=0.0: smooth variance by fwhm + no_mask_smooth: + # type=bool|default=False: do not mask when smoothing + no_est_fwhm: + # type=bool|default=False: turn off FWHM output estimation + mask_file: + # type=file: map of the mask used in the analysis + # type=file|default=: binary mask + label_file: + # type=file|default=: use label as mask, surfaces only + cortex: + # type=bool|default=False: use subjects ?h.cortex.label as label + invert_mask: + # type=bool|default=False: invert mask + prune: + # type=bool|default=False: remove voxels that do not have a non-zero value at each frame (def) + no_prune: + # type=bool|default=False: do not prune + prune_thresh: + # type=float|default=0.0: prune threshold. 
Default is FLT_MIN + compute_log_y: + # type=bool|default=False: compute natural log of y prior to analysis + save_estimate: + # type=bool|default=False: save signal estimate (yhat) + save_residual: + # type=bool|default=False: save residual error (eres) + save_res_corr_mtx: + # type=bool|default=False: save residual error spatial correlation matrix (eres.scm). Big! + surf: + # type=bool|default=False: analysis is on a surface mesh + subject_id: + # type=str|default='': subject id for surface geometry + hemi: + # type=enum|default='lh'|allowed['lh','rh']: surface hemisphere + surf_geo: + # type=str|default='white': surface geometry name (e.g. white, pial) + simulation: + # type=tuple|default=('perm', 0, 0.0, ''): nulltype nsim thresh csdbasename + sim_sign: + # type=enum|default='abs'|allowed['abs','neg','pos']: abs, pos, or neg + uniform: + # type=tuple|default=(0.0, 0.0): use uniform distribution instead of gaussian + pca: + # type=bool|default=False: perform pca/svd analysis on residual + calc_AR1: + # type=bool|default=False: compute and save temporal AR1 of residual + save_cond: + # type=bool|default=False: flag to save design matrix condition at each voxel + vox_dump: + # type=tuple|default=(0, 0, 0): dump voxel GLM and exit + seed: + # type=int|default=0: used for synthesizing noise + synth: + # type=bool|default=False: replace input with gaussian + resynth_test: + # type=int|default=0: test GLM by resynthsis + profile: + # type=int|default=0: niters : test speed + mrtm2: + # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling + logan: + # type=tuple|default=(, , 0.0): RefTac TimeSec tstar : perform Logan kinetic modeling + bp_clip_neg: + # type=bool|default=False: set negative BP voxels to zero + bp_clip_max: + # type=float|default=0.0: set BP voxels above max to max + force_perm: + # type=bool|default=False: force perumtation test, even when design matrix is not orthog + diag: + # type=int|default=0: Gdiag_no : set 
diagnostic level + diag_cluster: + # type=bool|default=False: save sig volume and exit from first sim loop + debug: + # type=bool|default=False: turn on debugging + check_opts: + # type=bool|default=False: don't run anything, just check options and exit + allow_repeated_subjects: + # type=bool|default=False: allow subject names to repeat in the fsgd file (must appear before --fsgd + allow_ill_cond: + # type=bool|default=False: allow ill-conditioned design matrices + sim_done_file: + # type=file|default=: create file when simulation finished + nii: + # type=bool|default=False: save outputs as nii + nii_gz: + # type=bool|default=False: save outputs as nii.gz + subjects_dir: + # type=directory|default=: subjects directory + args: + # type=str|default='': Additional parameters to the command + environ: + # type=dict|default={}: Environment variables + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + # type=file|default=: input 4D file + glm_dir: '"mrtm"' + # type=directory: output directory + # type=str|default='': save outputs to dir + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + in_file: '"tac.nii"' + # type=file|default=: input 4D file + glm_dir: '"mrtm"' + # type=directory: output directory + # type=str|default='': save outputs to dir + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/mrtm2.yaml b/example-specs/interface/nipype/freesurfer/mrtm2.yaml index 2a15b1d1..3f8e9a9d 100644 --- a/example-specs/interface/nipype/freesurfer/mrtm2.yaml +++ b/example-specs/interface/nipype/freesurfer/mrtm2.yaml @@ -6,15 +6,15 @@ # Docs # ---- # Perform MRTM2 kinetic modeling. -# Examples -# -------- -# >>> mrtm2 = MRTM2() -# >>> mrtm2.inputs.in_file = 'tac.nii' -# >>> mrtm2.inputs.mrtm2 = ('ref_tac.dat', 'timing.dat', 0.07872) -# >>> mrtm2.inputs.glm_dir = 'mrtm2' -# >>> mrtm2.cmdline -# 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.078720' -# +# Examples +# -------- +# >>> mrtm2 = MRTM2() +# >>> mrtm2.inputs.in_file = 'tac.nii' +# >>> mrtm2.inputs.mrtm2 = ('ref_tac.dat', 'timing.dat', 0.07872) +# >>> mrtm2.inputs.glm_dir = 'mrtm2' +# >>> mrtm2.cmdline +# 'mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.078720' +# task_name: MRTM2 nipype_name: MRTM2 nipype_module: nipype.interfaces.freesurfer.petsurfer @@ -41,9 +41,6 @@ inputs: # type=file|default=: input 4D file label_file: generic/file # type=file|default=: use label as mask, surfaces only - mask_file: Path - # type=file: map of the mask used in the analysis - # type=file|default=: binary mask per_voxel_reg: generic/file+list-of # type=inputmultiobject|default=[]: per-voxel regressors sim_done_file: generic/file @@ -106,7 +103,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields glm_dir: '"mrtm2"' # type=directory: output directory # type=str|default='': save outputs to dir @@ -216,6 +213,10 @@ tests: # type=tuple|default=(, ): RefTac TimeSec : perform MRTM1 kinetic modeling logan: # type=tuple|default=(, , 
0.0): RefTac TimeSec tstar : perform Logan kinetic modeling + bp_clip_neg: + # type=bool|default=False: set negative BP voxels to zero + bp_clip_max: + # type=float|default=0.0: set BP voxels above max to max force_perm: # type=bool|default=False: force perumtation test, even when design matrix is not orthog diag: @@ -243,7 +244,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -262,13 +263,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input 4D file - mrtm2: ("ref_tac.dat", "timing.dat", 0.07872) - # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling glm_dir: '"mrtm2"' # type=directory: output directory # type=str|default='': save outputs to dir imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -283,7 +282,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_glmfit --glmdir mrtm2 --y tac.nii --mrtm2 ref_tac.dat timing.dat 0.078720 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -291,13 +290,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"tac.nii"' # type=file|default=: input 4D file - mrtm2: ("ref_tac.dat", "timing.dat", 0.07872) - # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling glm_dir: '"mrtm2"' # type=directory: output directory # type=str|default='': save outputs to dir imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/ms__lda.yaml b/example-specs/interface/nipype/freesurfer/ms__lda.yaml index 94af270a..da588c6a 100644 --- a/example-specs/interface/nipype/freesurfer/ms__lda.yaml +++ b/example-specs/interface/nipype/freesurfer/ms__lda.yaml @@ -7,16 +7,16 @@ # ---- # Perform LDA reduction on the intensity space of an arbitrary # of FLASH images # -# Examples -# -------- -# >>> grey_label = 2 -# >>> white_label = 3 -# >>> zero_value = 1 -# >>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], label_file='label.mgz', weight_file='weights.txt', shift=zero_value, vol_synth_file='synth_out.mgz', conform=True, use_weights=True, images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz']) -# >>> optimalWeights.cmdline -# 'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz' +# Examples +# -------- +# >>> grey_label = 2 +# >>> white_label = 3 +# >>> zero_value = 1 +# >>> optimalWeights = MS_LDA(lda_labels=[grey_label, white_label], label_file='label.mgz', weight_file='weights.txt', shift=zero_value, vol_synth_file='synth_out.mgz', conform=True, use_weights=True, images=['FLASH1.mgz', 'FLASH2.mgz', 'FLASH3.mgz']) +# >>> optimalWeights.cmdline +# 'mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz' +# # -# task_name: MS_LDA nipype_name: MS_LDA nipype_module: nipype.interfaces.freesurfer.model @@ -39,12 +39,6 @@ inputs: # type=file|default=: filename of the brain mask volume subjects_dir: generic/directory # type=directory|default=: subjects directory - vol_synth_file: Path - # type=file: - # type=file|default=: filename for the synthesized output volume - weight_file: Path - # type=file: - # type=file|default=: filename for the LDA weights (input or output) callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent 
`*_callables.py` # to set as the `default` method of input fields @@ -71,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -105,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -141,7 +135,7 @@ tests: images: # type=inputmultiobject|default=[]: list of input FLASH images imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -156,7 +150,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_ms_LDA -conform -label label.mgz -lda 2 3 -shift 1 -W -synth synth_out.mgz -weight weights.txt FLASH1.mgz FLASH2.mgz FLASH3.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -181,7 +175,7 @@ doctests: images: '["FLASH1.mgz", "FLASH2.mgz", "FLASH3.mgz"]' # type=inputmultiobject|default=[]: list of input FLASH images imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/normalize.yaml b/example-specs/interface/nipype/freesurfer/normalize.yaml index 704a66b1..265ec9a2 100644 --- a/example-specs/interface/nipype/freesurfer/normalize.yaml +++ b/example-specs/interface/nipype/freesurfer/normalize.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# Normalize the white-matter, optionally based on control points. The -# input volume is converted into a new volume where white matter image -# values all range around 110. +# Normalize the white-matter, optionally based on control points. The +# input volume is converted into a new volume where white matter image +# values all range around 110. 
+# +# Examples +# ======== +# >>> from nipype.interfaces import freesurfer +# >>> normalize = freesurfer.Normalize() +# >>> normalize.inputs.in_file = "T1.mgz" +# >>> normalize.inputs.gradient = 1 +# >>> normalize.cmdline +# 'mri_normalize -g 1 T1.mgz T1_norm.mgz' # -# Examples -# ======== -# >>> from nipype.interfaces import freesurfer -# >>> normalize = freesurfer.Normalize() -# >>> normalize.inputs.in_file = "T1.mgz" -# >>> normalize.inputs.gradient = 1 -# >>> normalize.cmdline -# 'mri_normalize -g 1 T1.mgz T1_norm.mgz' -# task_name: Normalize nipype_name: Normalize nipype_module: nipype.interfaces.freesurfer.preprocess @@ -37,9 +37,6 @@ inputs: # type=file|default=: The input file for Normalize mask: generic/file # type=file|default=: The input mask file for Normalize - out_file: Path - # type=file: The output file for Normalize - # type=file|default=: The output file for Normalize segmentation: generic/file # type=file|default=: The input segmentation for Normalize subjects_dir: generic/directory @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,10 
+112,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: The input file for Normalize - gradient: '1' - # type=int|default=0: use max intensity/mm gradient g (default=1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -133,7 +128,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_normalize -g 1 T1.mgz T1_norm.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -141,10 +136,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"T1.mgz"' # type=file|default=: The input file for Normalize - gradient: '1' - # type=int|default=0: use max intensity/mm gradient g (default=1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/one_sample_t_test.yaml b/example-specs/interface/nipype/freesurfer/one_sample_t_test.yaml index a92ef111..45725cef 100644 --- a/example-specs/interface/nipype/freesurfer/one_sample_t_test.yaml +++ b/example-specs/interface/nipype/freesurfer/one_sample_t_test.yaml @@ -32,9 +32,6 @@ inputs: # type=file|default=: input 4D file label_file: generic/file # type=file|default=: use label as mask, surfaces only - mask_file: Path - # type=file: map of the mask used in the analysis - # type=file|default=: binary mask per_voxel_reg: generic/file+list-of # type=inputmultiobject|default=[]: per-voxel regressors sim_done_file: generic/file @@ -97,7 +94,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields glm_dir: glm_dir # type=directory: output directory # type=str|default='': save outputs to dir @@ -207,6 +204,10 @@ tests: # type=tuple|default=(, , 0.0): RefTac TimeSec k2prime : perform MRTM2 kinetic modeling logan: # type=tuple|default=(, , 0.0): RefTac TimeSec tstar : perform Logan kinetic modeling + bp_clip_neg: + # type=bool|default=False: set negative BP voxels to zero + bp_clip_max: + # type=float|default=0.0: set BP voxels above max to max force_perm: # type=bool|default=False: force perumtation test, even when design matrix is not orthog diag: @@ -234,7 +235,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 
'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/paint.yaml b/example-specs/interface/nipype/freesurfer/paint.yaml index b9eb0eac..7859f0e6 100644 --- a/example-specs/interface/nipype/freesurfer/paint.yaml +++ b/example-specs/interface/nipype/freesurfer/paint.yaml @@ -6,23 +6,23 @@ # Docs # ---- # -# This program is useful for extracting one of the arrays ("a variable") -# from a surface-registration template file. The output is a file -# containing a surface-worth of per-vertex values, saved in "curvature" -# format. Because the template data is sampled to a particular surface -# mesh, this conjures the idea of "painting to a surface". +# This program is useful for extracting one of the arrays ("a variable") +# from a surface-registration template file. The output is a file +# containing a surface-worth of per-vertex values, saved in "curvature" +# format. Because the template data is sampled to a particular surface +# mesh, this conjures the idea of "painting to a surface". 
+# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Paint +# >>> paint = Paint() +# >>> paint.inputs.in_surf = 'lh.pial' +# >>> paint.inputs.template = 'aseg.mgz' +# >>> paint.inputs.averages = 5 +# >>> paint.inputs.out_file = 'lh.avg_curv' +# >>> paint.cmdline +# 'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import Paint -# >>> paint = Paint() -# >>> paint.inputs.in_surf = 'lh.pial' -# >>> paint.inputs.template = 'aseg.mgz' -# >>> paint.inputs.averages = 5 -# >>> paint.inputs.out_file = 'lh.avg_curv' -# >>> paint.cmdline -# 'mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv' -# task_name: Paint nipype_name: Paint nipype_module: nipype.interfaces.freesurfer.registration @@ -37,14 +37,11 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_surf: medimage-freesurfer/pial + in_surf: fileformats.medimage_freesurfer.Pial # type=file|default=: Surface file with grid (vertices) onto which the template data is to be sampled or 'painted' - out_file: Path - # type=file: File containing a surface-worth of per-vertex values, saved in 'curvature' format. - # type=file|default=: File containing a surface-worth of per-vertex values, saved in 'curvature' format. 
subjects_dir: generic/directory # type=directory|default=: subjects directory - template: medimage/mgh-gz + template: generic/file # type=file|default=: Template file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -94,7 +91,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -113,15 +110,10 @@ tests: # (if not specified, will try to choose a sensible value) in_surf: # type=file|default=: Surface file with grid (vertices) onto which the template data is to be sampled or 'painted' - template: - # type=file|default=: Template file averages: '5' # type=int|default=0: Average curvature patterns - out_file: '"lh.avg_curv"' - # type=file: File containing a surface-worth of per-vertex values, saved in 'curvature' format. - # type=file|default=: File containing a surface-worth of per-vertex values, saved in 'curvature' format. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -136,7 +128,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mrisp_paint -a 5 aseg.mgz lh.pial lh.avg_curv +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -144,15 +136,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_surf: '"lh.pial"' # type=file|default=: Surface file with grid (vertices) onto which the template data is to be sampled or 'painted' - template: '"aseg.mgz"' - # type=file|default=: Template file averages: '5' # type=int|default=0: Average curvature patterns - out_file: '"lh.avg_curv"' - # type=file: File containing a surface-worth of per-vertex values, saved in 'curvature' format. - # type=file|default=: File containing a surface-worth of per-vertex values, saved in 'curvature' format. imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/parcellation_stats.yaml b/example-specs/interface/nipype/freesurfer/parcellation_stats.yaml index f9d82ab0..3867cf60 100644 --- a/example-specs/interface/nipype/freesurfer/parcellation_stats.yaml +++ b/example-specs/interface/nipype/freesurfer/parcellation_stats.yaml @@ -6,31 +6,31 @@ # Docs # ---- # -# This program computes a number of anatomical properties. +# This program computes a number of anatomical properties. +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import ParcellationStats +# >>> import os +# >>> parcstats = ParcellationStats() +# >>> parcstats.inputs.subject_id = '10335' +# >>> parcstats.inputs.hemisphere = 'lh' +# >>> parcstats.inputs.wm = './../mri/wm.mgz' # doctest: +SKIP +# >>> parcstats.inputs.transform = './../mri/transforms/talairach.xfm' # doctest: +SKIP +# >>> parcstats.inputs.brainmask = './../mri/brainmask.mgz' # doctest: +SKIP +# >>> parcstats.inputs.aseg = './../mri/aseg.presurf.mgz' # doctest: +SKIP +# >>> parcstats.inputs.ribbon = './../mri/ribbon.mgz' # doctest: +SKIP +# >>> parcstats.inputs.lh_pial = 'lh.pial' # doctest: +SKIP +# >>> parcstats.inputs.rh_pial = 'lh.pial' # doctest: +SKIP +# >>> parcstats.inputs.lh_white = 'lh.white' # doctest: +SKIP +# >>> parcstats.inputs.rh_white = 'rh.white' # doctest: +SKIP +# >>> parcstats.inputs.thickness = 'lh.thickness' # doctest: +SKIP +# >>> parcstats.inputs.surface = 'white' +# >>> parcstats.inputs.out_table = 'lh.test.stats' +# >>> parcstats.inputs.out_color = 'test.ctab' +# >>> parcstats.cmdline # doctest: +SKIP +# 'mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import ParcellationStats -# >>> import os -# >>> parcstats = ParcellationStats() -# >>> parcstats.inputs.subject_id = '10335' -# >>> parcstats.inputs.hemisphere = 'lh' -# >>> parcstats.inputs.wm = './../mri/wm.mgz' # doctest: +SKIP -# >>> 
parcstats.inputs.transform = './../mri/transforms/talairach.xfm' # doctest: +SKIP -# >>> parcstats.inputs.brainmask = './../mri/brainmask.mgz' # doctest: +SKIP -# >>> parcstats.inputs.aseg = './../mri/aseg.presurf.mgz' # doctest: +SKIP -# >>> parcstats.inputs.ribbon = './../mri/ribbon.mgz' # doctest: +SKIP -# >>> parcstats.inputs.lh_pial = 'lh.pial' # doctest: +SKIP -# >>> parcstats.inputs.rh_pial = 'lh.pial' # doctest: +SKIP -# >>> parcstats.inputs.lh_white = 'lh.white' # doctest: +SKIP -# >>> parcstats.inputs.rh_white = 'rh.white' # doctest: +SKIP -# >>> parcstats.inputs.thickness = 'lh.thickness' # doctest: +SKIP -# >>> parcstats.inputs.surface = 'white' -# >>> parcstats.inputs.out_table = 'lh.test.stats' -# >>> parcstats.inputs.out_color = 'test.ctab' -# >>> parcstats.cmdline # doctest: +SKIP -# 'mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white' -# task_name: ParcellationStats nipype_name: ParcellationStats nipype_module: nipype.interfaces.freesurfer.utils @@ -45,7 +45,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- aseg: medimage/mgh-gz + aseg: generic/file # type=file|default=: Input file must be /mri/aseg.presurf.mgz brainmask: medimage/mgh-gz # type=file|default=: Input file must be /mri/brainmask.mgz @@ -57,27 +57,21 @@ inputs: # type=file|default=: Input cortex label in_label: generic/file # type=file|default=: limit calculations to specified label - lh_pial: medimage-freesurfer/pial + lh_pial: generic/file # type=file|default=: Input file must be /surf/lh.pial - lh_white: medimage-freesurfer/white + lh_white: generic/file # type=file|default=: Input file must be /surf/lh.white - out_color: Path - # type=file: Output annotation files's colortable to text file - # type=file|default=: Output annotation files's colortable to text file - out_table: Path - # type=file: Table output to tablefile - # type=file|default=: Table output to tablefile - rh_pial: medimage-freesurfer/pial + rh_pial: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /surf/rh.pial - rh_white: medimage-freesurfer/white + rh_white: fileformats.medimage_freesurfer.White # type=file|default=: Input file must be /surf/rh.white ribbon: medimage/mgh-gz # type=file|default=: Input file must be /mri/ribbon.mgz subjects_dir: generic/directory # type=directory|default=: subjects directory - thickness: medimage-freesurfer/thickness + thickness: generic/file # type=file|default=: Input file must be /surf/?h.thickness - transform: medimage-freesurfer/xfm + transform: generic/file # type=file|default=: Input file must be /mri/transforms/talairach.xfm wm: medimage/mgh-gz # type=file|default=: Input file must be /mri/wm.mgz @@ -97,21 +91,21 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_color: medimage-freesurfer/ctab + out_color: fileformats.medimage_freesurfer.Ctab # type=file: Output annotation files's colortable to text file # type=file|default=: Output annotation files's colortable to text file - out_table: medimage-freesurfer/stats + out_table: generic/file # type=file: Table output to tablefile # type=file|default=: Table output to tablefile callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_color: '"test.ctab"' # type=file: Output annotation files's colortable to text file # type=file|default=: Output annotation files's colortable to text file - out_table: '"lh.test.stats"' + out_table: out_table # type=file: Table output to tablefile # type=file|default=: Table output to tablefile requirements: @@ -165,7 +159,7 @@ tests: # type=file: Output annotation files's colortable to text file # type=file|default=: Output annotation files's colortable to text file copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. 
th3: # type=bool|default=False: turns on new vertex-wise volume calc for mris_anat_stats subjects_dir: @@ -175,7 +169,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -194,38 +188,23 @@ tests: # (if not specified, will try to choose a sensible value) subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed wm: # type=file|default=: Input file must be /mri/wm.mgz - transform: - # type=file|default=: Input file must be /mri/transforms/talairach.xfm brainmask: # type=file|default=: Input file must be /mri/brainmask.mgz - aseg: - # type=file|default=: Input file must be /mri/aseg.presurf.mgz ribbon: # type=file|default=: Input file must be /mri/ribbon.mgz - lh_pial: - # type=file|default=: Input file must be /surf/lh.pial rh_pial: # type=file|default=: Input file must be /surf/rh.pial - lh_white: - # type=file|default=: Input file must be /surf/lh.white rh_white: # type=file|default=: Input file must be /surf/rh.white - thickness: - # type=file|default=: Input file must be /surf/?h.thickness surface: '"white"' # type=string|default='': Input surface (e.g. 
'white') - out_table: '"lh.test.stats"' - # type=file: Table output to tablefile - # type=file|default=: Table output to tablefile out_color: '"test.ctab"' # type=file: Output annotation files's colortable to text file # type=file|default=: Output annotation files's colortable to text file imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: os expected_outputs: @@ -241,7 +220,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_anatomical_stats -c test.ctab -f lh.test.stats 10335 lh white +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -249,38 +228,23 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Hemisphere being processed wm: '"./../mri/wm.mgz" # doctest: +SKIP' # type=file|default=: Input file must be /mri/wm.mgz - transform: '"./../mri/transforms/talairach.xfm" # doctest: +SKIP' - # type=file|default=: Input file must be /mri/transforms/talairach.xfm brainmask: '"./../mri/brainmask.mgz" # doctest: +SKIP' # type=file|default=: Input file must be /mri/brainmask.mgz - aseg: '"./../mri/aseg.presurf.mgz" # doctest: +SKIP' - # type=file|default=: Input file must be /mri/aseg.presurf.mgz ribbon: '"./../mri/ribbon.mgz" # doctest: +SKIP' # type=file|default=: Input file must be /mri/ribbon.mgz - lh_pial: '"lh.pial" # doctest: +SKIP' - # type=file|default=: Input file must be /surf/lh.pial rh_pial: '"lh.pial" # doctest: +SKIP' # type=file|default=: Input file must be /surf/rh.pial - lh_white: '"lh.white" # doctest: +SKIP' - # type=file|default=: Input file must be /surf/lh.white rh_white: '"rh.white" # doctest: +SKIP' # type=file|default=: Input file must be /surf/rh.white - thickness: '"lh.thickness" # doctest: +SKIP' - # type=file|default=: Input file must be /surf/?h.thickness surface: '"white"' # type=string|default='': Input surface (e.g. 'white') - out_table: '"lh.test.stats"' - # type=file: Table output to tablefile - # type=file|default=: Table output to tablefile out_color: '"test.ctab"' # type=file: Output annotation files's colortable to text file # type=file|default=: Output annotation files's colortable to text file imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/parse_dicom_dir.yaml b/example-specs/interface/nipype/freesurfer/parse_dicom_dir.yaml index e534cf0a..069ecf0e 100644 --- a/example-specs/interface/nipype/freesurfer/parse_dicom_dir.yaml +++ b/example-specs/interface/nipype/freesurfer/parse_dicom_dir.yaml @@ -7,18 +7,18 @@ # ---- # Uses mri_parse_sdcmdir to get information from dicom directories # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import ParseDICOMDir +# >>> dcminfo = ParseDICOMDir() +# >>> dcminfo.inputs.dicom_dir = '.' +# >>> dcminfo.inputs.sortbyrun = True +# >>> dcminfo.inputs.summarize = True +# >>> dcminfo.cmdline +# 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' # -# >>> from nipype.interfaces.freesurfer import ParseDICOMDir -# >>> dcminfo = ParseDICOMDir() -# >>> dcminfo.inputs.dicom_dir = '.' -# >>> dcminfo.inputs.sortbyrun = True -# >>> dcminfo.inputs.summarize = True -# >>> dcminfo.cmdline -# 'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize' # -# task_name: ParseDICOMDir nipype_name: ParseDICOMDir nipype_module: nipype.interfaces.freesurfer.preprocess @@ -35,9 +35,6 @@ inputs: # passed to the field in the automatically generated unittests. 
dicom_dir: generic/directory # type=directory|default=: path to siemens dicom directory - dicom_info_file: Path - # type=file: text file containing dicom information - # type=file|default='dicominfo.txt': file to which results are written subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -63,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -86,7 +83,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -105,12 +102,10 @@ tests: # (if not specified, will try to choose a sensible value) dicom_dir: '"."' # type=directory|default=: path to siemens dicom directory - sortbyrun: 'True' - # type=bool|default=False: assign run numbers summarize: 'True' # type=bool|default=False: only print out info for run leaders imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected 
outputs, noting that tests will typically @@ -133,12 +128,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. dicom_dir: '"."' # type=directory|default=: path to siemens dicom directory - sortbyrun: 'True' - # type=bool|default=False: assign run numbers summarize: 'True' # type=bool|default=False: only print out info for run leaders imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/recon_all.yaml b/example-specs/interface/nipype/freesurfer/recon_all.yaml index 703edf64..c6a9143d 100644 --- a/example-specs/interface/nipype/freesurfer/recon_all.yaml +++ b/example-specs/interface/nipype/freesurfer/recon_all.yaml @@ -6,58 +6,76 @@ # Docs # ---- # Uses recon-all to generate surfaces and parcellations of structural data -# from anatomical images of a subject. +# from anatomical images of a subject. # -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.freesurfer import ReconAll -# >>> reconall = ReconAll() -# >>> reconall.inputs.subject_id = 'foo' -# >>> reconall.inputs.directive = 'all' -# >>> reconall.inputs.subjects_dir = '.' -# >>> reconall.inputs.T1_files = 'structural.nii' -# >>> reconall.cmdline -# 'recon-all -all -i structural.nii -subjid foo -sd .' -# >>> reconall.inputs.flags = "-qcache" -# >>> reconall.cmdline -# 'recon-all -all -i structural.nii -qcache -subjid foo -sd .' -# >>> reconall.inputs.flags = ["-cw256", "-qcache"] -# >>> reconall.cmdline -# 'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .' 
+# >>> from nipype.interfaces.freesurfer import ReconAll +# >>> reconall = ReconAll() +# >>> reconall.inputs.subject_id = 'foo' +# >>> reconall.inputs.directive = 'all' +# >>> reconall.inputs.subjects_dir = '.' +# >>> reconall.inputs.T1_files = ['structural.nii'] +# >>> reconall.cmdline +# 'recon-all -all -i structural.nii -subjid foo -sd .' +# >>> reconall.inputs.flags = "-qcache" +# >>> reconall.cmdline +# 'recon-all -all -i structural.nii -qcache -subjid foo -sd .' +# >>> reconall.inputs.flags = ["-cw256", "-qcache"] +# >>> reconall.cmdline +# 'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .' # -# Hemisphere may be specified regardless of directive: +# Hemisphere may be specified regardless of directive: # -# >>> reconall.inputs.flags = [] -# >>> reconall.inputs.hemi = 'lh' -# >>> reconall.cmdline -# 'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .' +# >>> reconall.inputs.flags = [] +# >>> reconall.inputs.hemi = 'lh' +# >>> reconall.cmdline +# 'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .' # -# ``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere -# to operate upon: +# ``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere +# to operate upon: # -# >>> reconall.inputs.directive = 'autorecon-hemi' -# >>> reconall.cmdline -# 'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .' +# >>> reconall.inputs.directive = 'autorecon-hemi' +# >>> reconall.cmdline +# 'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .' # -# Hippocampal subfields can accept T1 and T2 images: +# Hippocampal subfields can accept T1 and T2 images: +# +# >>> reconall_subfields = ReconAll() +# >>> reconall_subfields.inputs.subject_id = 'foo' +# >>> reconall_subfields.inputs.directive = 'all' +# >>> reconall_subfields.inputs.subjects_dir = '.' 
+# >>> reconall_subfields.inputs.T1_files = ['structural.nii'] +# >>> reconall_subfields.inputs.hippocampal_subfields_T1 = True +# >>> reconall_subfields.cmdline +# 'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .' +# >>> reconall_subfields.inputs.hippocampal_subfields_T2 = ( +# ... 'structural.nii', 'test') +# >>> reconall_subfields.cmdline +# 'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .' +# >>> reconall_subfields.inputs.hippocampal_subfields_T1 = False +# >>> reconall_subfields.cmdline +# 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .' +# +# Base template creation for longitudinal pipeline: +# >>> baserecon = ReconAll() +# >>> baserecon.inputs.base_template_id = 'sub-template' +# >>> baserecon.inputs.base_timepoint_ids = ['ses-1','ses-2'] +# >>> baserecon.inputs.directive = 'all' +# >>> baserecon.inputs.subjects_dir = '.' +# >>> baserecon.cmdline +# 'recon-all -all -base sub-template -base-tp ses-1 -base-tp ses-2 -sd .' +# +# Longitudinal timepoint run: +# >>> longrecon = ReconAll() +# >>> longrecon.inputs.longitudinal_timepoint_id = 'ses-1' +# >>> longrecon.inputs.longitudinal_template_id = 'sub-template' +# >>> longrecon.inputs.directive = 'all' +# >>> longrecon.inputs.subjects_dir = '.' +# >>> longrecon.cmdline +# 'recon-all -all -long ses-1 sub-template -sd .' # -# >>> reconall_subfields = ReconAll() -# >>> reconall_subfields.inputs.subject_id = 'foo' -# >>> reconall_subfields.inputs.directive = 'all' -# >>> reconall_subfields.inputs.subjects_dir = '.' -# >>> reconall_subfields.inputs.T1_files = 'structural.nii' -# >>> reconall_subfields.inputs.hippocampal_subfields_T1 = True -# >>> reconall_subfields.cmdline -# 'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .' -# >>> reconall_subfields.inputs.hippocampal_subfields_T2 = ( -# ... 
'structural.nii', 'test') -# >>> reconall_subfields.cmdline -# 'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .' -# >>> reconall_subfields.inputs.hippocampal_subfields_T1 = False -# >>> reconall_subfields.cmdline -# 'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .' -# task_name: ReconAll nipype_name: ReconAll nipype_module: nipype.interfaces.freesurfer.preprocess @@ -74,15 +92,12 @@ inputs: # passed to the field in the automatically generated unittests. FLAIR_file: generic/file # type=file|default=: Convert FLAIR image to orig directory - T1_files: medimage/nifti1+list-of + T1_files: generic/file+list-of # type=inputmultiobject|default=[]: name of T1 file to process T2_file: generic/file # type=file|default=: Convert T2 image to orig directory expert: generic/file # type=file|default=: Set parameters using expert file - subjects_dir: Path - # type=directory: Freesurfer subjects directory. - # type=directory|default=: path to subjects directory callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -148,7 +163,7 @@ outputs: orig: generic/file # type=file: Base image conformed to Freesurfer space pial: generic/file+list-of - # type=outputmultiobject: Gray matter/pia mater surface meshes + # type=outputmultiobject: Gray matter/pia matter surface meshes rawavg: generic/file # type=file: Volume formed by averaging input images ribbon: generic/file+list-of @@ -183,7 +198,7 @@ outputs: # type=str: Subject name for whom to retrieve data # type=str|default='recon_all': subject name templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields subjects_dir: '"."' # type=directory: Freesurfer subjects directory. 
# type=directory|default=: path to subjects directory @@ -235,6 +250,14 @@ tests: # type=directory|default=: path to subjects directory flags: # type=inputmultiobject|default=[]: additional parameters + base_template_id: + # type=str|default='': base template id + base_timepoint_ids: + # type=inputmultiobject|default=[]: processed timepoint to use in template + longitudinal_timepoint_id: + # type=str|default='': longitudinal session/timepoint id + longitudinal_template_id: + # type=str|default='': longitudinal base template id talairach: # type=str|default='': Flags to pass to talairach commands mri_normalize: @@ -292,7 +315,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -312,17 +335,13 @@ tests: subject_id: '"foo"' # type=str: Subject name for whom to retrieve data # type=str|default='recon_all': subject name - directive: '"all"' - # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive subjects_dir: '"."' # type=directory: Freesurfer subjects directory. 
# type=directory|default=: path to subjects directory - T1_files: - # type=inputmultiobject|default=[]: name of T1 file to process flags: '["-cw256", "-qcache"]' # type=inputmultiobject|default=[]: additional parameters imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -341,10 +360,8 @@ tests: # (if not specified, will try to choose a sensible value) flags: '[]' # type=inputmultiobject|default=[]: additional parameters - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere to process imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -364,7 +381,7 @@ tests: directive: '"autorecon-hemi"' # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected 
outputs, noting that tests will typically @@ -384,19 +401,59 @@ tests: subject_id: '"foo"' # type=str: Subject name for whom to retrieve data # type=str|default='recon_all': subject name - directive: '"all"' - # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive subjects_dir: '"."' # type=directory: Freesurfer subjects directory. # type=directory|default=: path to subjects directory - T1_files: - # type=inputmultiobject|default=[]: name of T1 file to process hippocampal_subfields_T1: 'False' # type=bool|default=False: segment hippocampal subfields using input T1 scan - hippocampal_subfields_T2: ("structural.nii", "test") + hippocampal_subfields_T2: ( # type=tuple|default=(, ''): segment hippocampal subfields using T2 scan, identified by ID (may be combined with hippocampal_subfields_T1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + base_template_id: '"sub-template"' + # type=str|default='': base template id + directive: '"all"' + # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + longitudinal_timepoint_id: '"ses-1"' + # type=str|default='': longitudinal session/timepoint id + directive: '"all"' + # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -420,17 +477,13 @@ doctests: subject_id: '"foo"' # type=str: Subject name for whom to retrieve data # type=str|default='recon_all': subject name - directive: '"all"' - # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive subjects_dir: '"."' # type=directory: Freesurfer subjects directory. 
# type=directory|default=: path to subjects directory - T1_files: '"structural.nii"' - # type=inputmultiobject|default=[]: name of T1 file to process flags: '["-cw256", "-qcache"]' # type=inputmultiobject|default=[]: additional parameters imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -442,10 +495,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. flags: '[]' # type=inputmultiobject|default=[]: additional parameters - hemi: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: hemisphere to process imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -458,7 +509,7 @@ doctests: directive: '"autorecon-hemi"' # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -471,19 +522,45 @@ doctests: subject_id: '"foo"' # type=str: Subject name for whom to retrieve data # type=str|default='recon_all': subject name - directive: '"all"' - # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive subjects_dir: '"."' # type=directory: Freesurfer subjects directory. 
# type=directory|default=: path to subjects directory - T1_files: '"structural.nii"' - # type=inputmultiobject|default=[]: name of T1 file to process hippocampal_subfields_T1: 'False' # type=bool|default=False: segment hippocampal subfields using input T1 scan - hippocampal_subfields_T2: ("structural.nii", "test") + hippocampal_subfields_T2: ( # type=tuple|default=(, ''): segment hippocampal subfields using T2 scan, identified by ID (may be combined with hippocampal_subfields_T1) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS +- cmdline: + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + base_template_id: '"sub-template"' + # type=str|default='': base template id + directive: '"all"' + # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS +- cmdline: + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
+ # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + longitudinal_timepoint_id: '"ses-1"' + # type=str|default='': longitudinal session/timepoint id + directive: '"all"' + # type=enum|default='all'|allowed['all','autorecon-hemi','autorecon-pial','autorecon1','autorecon2','autorecon2-cp','autorecon2-inflate1','autorecon2-perhemi','autorecon2-volonly','autorecon2-wm','autorecon3','autorecon3-T2pial','localGI','qcache']: process directive + imports: + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/register.yaml b/example-specs/interface/nipype/freesurfer/register.yaml index 1d14a92d..b4caabf5 100644 --- a/example-specs/interface/nipype/freesurfer/register.yaml +++ b/example-specs/interface/nipype/freesurfer/register.yaml @@ -7,19 +7,19 @@ # ---- # This program registers a surface to an average surface template. 
# -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import Register -# >>> register = Register() -# >>> register.inputs.in_surf = 'lh.pial' -# >>> register.inputs.in_smoothwm = 'lh.pial' -# >>> register.inputs.in_sulc = 'lh.pial' -# >>> register.inputs.target = 'aseg.mgz' -# >>> register.inputs.out_file = 'lh.pial.reg' -# >>> register.inputs.curv = True -# >>> register.cmdline -# 'mris_register -curv lh.pial aseg.mgz lh.pial.reg' -# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Register +# >>> register = Register() +# >>> register.inputs.in_surf = 'lh.pial' +# >>> register.inputs.in_smoothwm = 'lh.pial' +# >>> register.inputs.in_sulc = 'lh.pial' +# >>> register.inputs.target = 'aseg.mgz' +# >>> register.inputs.out_file = 'lh.pial.reg' +# >>> register.inputs.curv = True +# >>> register.cmdline +# 'mris_register -curv lh.pial aseg.mgz lh.pial.reg' +# task_name: Register nipype_name: Register nipype_module: nipype.interfaces.freesurfer.registration @@ -34,18 +34,15 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_smoothwm: medimage-freesurfer/pial + in_smoothwm: generic/file # type=file|default=: Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm - in_sulc: medimage-freesurfer/pial + in_sulc: fileformats.medimage_freesurfer.Pial # type=file|default=: Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc - in_surf: medimage-freesurfer/pial + in_surf: fileformats.medimage_freesurfer.Pial # type=file|default=: Surface to register, often {hemi}.sphere - out_file: Path - # type=file: Output surface file to capture registration - # type=file|default=: Output surface file to capture registration subjects_dir: generic/directory # type=directory|default=: subjects directory - target: medimage/mgh-gz + target: generic/file # type=file|default=: The data to register to. In normal recon-all usage, this is a template file for average surface. callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -63,14 +60,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage-freesurfer/reg + out_file: fileformats.medimage_freesurfer.Reg # type=file: Output surface file to capture registration # type=file|default=: Output surface file to capture registration callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"lh.pial.reg"' # type=file: Output surface file to capture registration # type=file|default=: Output surface file to capture registration @@ -100,7 +97,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -119,19 +116,13 @@ tests: # (if not specified, will try to choose a sensible value) in_surf: # type=file|default=: Surface to register, often {hemi}.sphere - in_smoothwm: - # type=file|default=: Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm in_sulc: # type=file|default=: Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc - target: - # type=file|default=: The data to register to. In normal recon-all usage, this is a template file for average surface. 
out_file: '"lh.pial.reg"' # type=file: Output surface file to capture registration # type=file|default=: Output surface file to capture registration - curv: 'True' - # type=bool|default=False: Use smoothwm curvature for final alignment imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -146,7 +137,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_register -curv lh.pial aseg.mgz lh.pial.reg +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -154,19 +145,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_surf: '"lh.pial"' # type=file|default=: Surface to register, often {hemi}.sphere - in_smoothwm: '"lh.pial"' - # type=file|default=: Undocumented input file ${SUBJECTS_DIR}/surf/{hemisphere}.smoothwm in_sulc: '"lh.pial"' # type=file|default=: Undocumented mandatory input file ${SUBJECTS_DIR}/surf/{hemisphere}.sulc - target: '"aseg.mgz"' - # type=file|default=: The data to register to. In normal recon-all usage, this is a template file for average surface. 
out_file: '"lh.pial.reg"' # type=file: Output surface file to capture registration # type=file|default=: Output surface file to capture registration - curv: 'True' - # type=bool|default=False: Use smoothwm curvature for final alignment imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/register_av_ito_talairach.yaml b/example-specs/interface/nipype/freesurfer/register_av_ito_talairach.yaml index 2ea2a019..07f6fc12 100644 --- a/example-specs/interface/nipype/freesurfer/register_av_ito_talairach.yaml +++ b/example-specs/interface/nipype/freesurfer/register_av_ito_talairach.yaml @@ -6,30 +6,30 @@ # Docs # ---- # -# converts the vox2vox from talairach_avi to a talairach.xfm file +# converts the vox2vox from talairach_avi to a talairach.xfm file # -# This is a script that converts the vox2vox from talairach_avi to a -# talairach.xfm file. It is meant to replace the following cmd line: +# This is a script that converts the vox2vox from talairach_avi to a +# talairach.xfm file. 
It is meant to replace the following cmd line: # -# tkregister2_cmdl --mov $InVol --targ $FREESURFER_HOME/average/mni305.cor.mgz --xfmout ${XFM} --vox2vox talsrcimg_to_${target}_t4_vox2vox.txt --noedit --reg talsrcimg.reg.tmp.dat -# set targ = $FREESURFER_HOME/average/mni305.cor.mgz -# set subject = mgh-02407836-v2 -# set InVol = $SUBJECTS_DIR/$subject/mri/orig.mgz -# set vox2vox = $SUBJECTS_DIR/$subject/mri/transforms/talsrcimg_to_711-2C_as_mni_average_305_t4_vox2vox.txt +# tkregister2_cmdl --mov $InVol --targ $FREESURFER_HOME/average/mni305.cor.mgz --xfmout ${XFM} --vox2vox talsrcimg_to_${target}_t4_vox2vox.txt --noedit --reg talsrcimg.reg.tmp.dat +# set targ = $FREESURFER_HOME/average/mni305.cor.mgz +# set subject = mgh-02407836-v2 +# set InVol = $SUBJECTS_DIR/$subject/mri/orig.mgz +# set vox2vox = $SUBJECTS_DIR/$subject/mri/transforms/talsrcimg_to_711-2C_as_mni_average_305_t4_vox2vox.txt # -# Examples -# ======== +# Examples +# ======== # -# >>> from nipype.interfaces.freesurfer import RegisterAVItoTalairach -# >>> register = RegisterAVItoTalairach() -# >>> register.inputs.in_file = 'structural.mgz' # doctest: +SKIP -# >>> register.inputs.target = 'mni305.cor.mgz' # doctest: +SKIP -# >>> register.inputs.vox2vox = 'talsrcimg_to_structural_t4_vox2vox.txt' # doctest: +SKIP -# >>> register.cmdline # doctest: +SKIP -# 'avi2talxfm structural.mgz mni305.cor.mgz talsrcimg_to_structural_t4_vox2vox.txt talairach.auto.xfm' +# >>> from nipype.interfaces.freesurfer import RegisterAVItoTalairach +# >>> register = RegisterAVItoTalairach() +# >>> register.inputs.in_file = 'structural.mgz' # doctest: +SKIP +# >>> register.inputs.target = 'mni305.cor.mgz' # doctest: +SKIP +# >>> register.inputs.vox2vox = 'talsrcimg_to_structural_t4_vox2vox.txt' # doctest: +SKIP +# >>> register.cmdline # doctest: +SKIP +# 'avi2talxfm structural.mgz mni305.cor.mgz talsrcimg_to_structural_t4_vox2vox.txt talairach.auto.xfm' +# +# >>> register.run() # doctest: +SKIP # -# >>> register.run() # doctest: 
+SKIP -# task_name: RegisterAVItoTalairach nipype_name: RegisterAVItoTalairach nipype_module: nipype.interfaces.freesurfer.registration @@ -46,12 +46,9 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/mgh-gz # type=file|default=: The input file - out_file: Path - # type=file: The output file for RegisterAVItoTalairach - # type=file|default='talairach.auto.xfm': The transform output subjects_dir: generic/directory # type=directory|default=: subjects directory - target: medimage/mgh-gz + target: generic/file # type=file|default=: The target file vox2vox: text/text-file # type=file|default=: The vox2vox file @@ -80,7 +77,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,7 +100,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -122,12 +119,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: The input file - target: - # type=file|default=: The target file vox2vox: # type=file|default=: The vox2vox file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -150,12 +145,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"structural.mgz" # doctest: +SKIP' # type=file|default=: The input file - target: '"mni305.cor.mgz" # doctest: +SKIP' - # type=file|default=: The target file vox2vox: '"talsrcimg_to_structural_t4_vox2vox.txt" # doctest: +SKIP' # type=file|default=: The vox2vox file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/relabel_hypointensities.yaml b/example-specs/interface/nipype/freesurfer/relabel_hypointensities.yaml index 00a02be9..8be14235 100644 --- a/example-specs/interface/nipype/freesurfer/relabel_hypointensities.yaml +++ b/example-specs/interface/nipype/freesurfer/relabel_hypointensities.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# Relabel Hypointensities +# Relabel Hypointensities +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import RelabelHypointensities +# >>> relabelhypos = RelabelHypointensities() +# >>> relabelhypos.inputs.lh_white = 'lh.pial' +# >>> relabelhypos.inputs.rh_white = 'lh.pial' +# >>> relabelhypos.inputs.surf_directory = '.' +# >>> relabelhypos.inputs.aseg = 'aseg.mgz' +# >>> relabelhypos.cmdline +# 'mri_relabel_hypointensities aseg.mgz . 
aseg.hypos.mgz' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import RelabelHypointensities -# >>> relabelhypos = RelabelHypointensities() -# >>> relabelhypos.inputs.lh_white = 'lh.pial' -# >>> relabelhypos.inputs.rh_white = 'lh.pial' -# >>> relabelhypos.inputs.surf_directory = '.' -# >>> relabelhypos.inputs.aseg = 'aseg.mgz' -# >>> relabelhypos.cmdline -# 'mri_relabel_hypointensities aseg.mgz . aseg.hypos.mgz' -# task_name: RelabelHypointensities nipype_name: RelabelHypointensities nipype_module: nipype.interfaces.freesurfer.utils @@ -33,14 +33,11 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - aseg: medimage/mgh-gz + aseg: generic/file # type=file|default=: Input aseg file - lh_white: medimage-freesurfer/pial + lh_white: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input file must be lh.white - out_file: Path - # type=file: Output aseg file - # type=file|default=: Output aseg file - rh_white: medimage-freesurfer/pial + rh_white: generic/file # type=file|default=: Implicit input file must be rh.white subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -94,7 +91,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each 
list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -113,14 +110,10 @@ tests: # (if not specified, will try to choose a sensible value) lh_white: # type=file|default=: Implicit input file must be lh.white - rh_white: - # type=file|default=: Implicit input file must be rh.white surf_directory: '"."' # type=directory|default='.': Directory containing lh.white and rh.white - aseg: - # type=file|default=: Input aseg file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,7 +128,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_relabel_hypointensities aseg.mgz . aseg.hypos.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -143,14 +136,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
lh_white: '"lh.pial"' # type=file|default=: Implicit input file must be lh.white - rh_white: '"lh.pial"' - # type=file|default=: Implicit input file must be rh.white surf_directory: '"."' # type=directory|default='.': Directory containing lh.white and rh.white - aseg: '"aseg.mgz"' - # type=file|default=: Input aseg file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/remove_intersection.yaml b/example-specs/interface/nipype/freesurfer/remove_intersection.yaml index 5abc2285..18bbe3e4 100644 --- a/example-specs/interface/nipype/freesurfer/remove_intersection.yaml +++ b/example-specs/interface/nipype/freesurfer/remove_intersection.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# This program removes the intersection of the given MRI +# This program removes the intersection of the given MRI +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import RemoveIntersection +# >>> ri = RemoveIntersection() +# >>> ri.inputs.in_file = 'lh.pial' +# >>> ri.cmdline +# 'mris_remove_intersection lh.pial lh.pial' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import RemoveIntersection -# >>> ri = RemoveIntersection() -# >>> ri.inputs.in_file = 'lh.pial' -# >>> ri.cmdline -# 'mris_remove_intersection lh.pial lh.pial' -# task_name: RemoveIntersection nipype_name: RemoveIntersection nipype_module: nipype.interfaces.freesurfer.utils @@ -30,11 +30,8 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the 
automatically generated unittests. - in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for RemoveIntersection - out_file: Path - # type=file: Output file for RemoveIntersection - # type=file|default=: Output file for RemoveIntersection subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -60,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -79,7 +76,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,7 +96,7 @@ tests: in_file: # type=file|default=: Input file for RemoveIntersection imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,7 +111,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_remove_intersection lh.pial lh.pial +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -123,7 +120,7 @@ doctests: in_file: '"lh.pial"' # type=file|default=: Input file for RemoveIntersection imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/remove_neck.yaml b/example-specs/interface/nipype/freesurfer/remove_neck.yaml index b1678134..9aebb244 100644 --- a/example-specs/interface/nipype/freesurfer/remove_neck.yaml +++ b/example-specs/interface/nipype/freesurfer/remove_neck.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# Crops the neck out of the mri image +# Crops the neck out of the mri image # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces.freesurfer import TalairachQC +# >>> remove_neck = RemoveNeck() +# >>> remove_neck.inputs.in_file = 'norm.mgz' +# >>> remove_neck.inputs.transform = 'trans.mat' +# >>> remove_neck.inputs.template = 'trans.mat' +# >>> remove_neck.cmdline +# 'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz' # -# >>> from nipype.interfaces.freesurfer import TalairachQC -# >>> remove_neck = RemoveNeck() -# >>> remove_neck.inputs.in_file = 'norm.mgz' -# >>> remove_neck.inputs.transform = 'trans.mat' -# >>> remove_neck.inputs.template = 'trans.mat' -# >>> remove_neck.cmdline -# 'mri_remove_neck norm.mgz trans.mat trans.mat norm_noneck.mgz' -# task_name: RemoveNeck nipype_name: RemoveNeck nipype_module: 
nipype.interfaces.freesurfer.utils @@ -35,14 +35,11 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/mgh-gz # type=file|default=: Input file for RemoveNeck - out_file: Path - # type=file: Output file with neck removed - # type=file|default=: Output file for RemoveNeck subjects_dir: generic/directory # type=directory|default=: subjects directory template: datascience/text-matrix # type=file|default=: Input template file for RemoveNeck - transform: datascience/text-matrix + transform: generic/file # type=file|default=: Input transform file for RemoveNeck callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,7 +89,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -111,12 +108,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input file for RemoveNeck - transform: - # type=file|default=: Input transform file for RemoveNeck template: # type=file|default=: Input template file for RemoveNeck imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,12 +134,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: Input file for RemoveNeck - transform: '"trans.mat"' - # type=file|default=: Input transform file for RemoveNeck template: '"trans.mat"' # type=file|default=: Input template file for RemoveNeck imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/resample.yaml b/example-specs/interface/nipype/freesurfer/resample.yaml index 4f716d5c..f9b1ba6e 100644 --- a/example-specs/interface/nipype/freesurfer/resample.yaml +++ b/example-specs/interface/nipype/freesurfer/resample.yaml @@ -7,18 +7,18 @@ # ---- # Use FreeSurfer mri_convert to up or down-sample image files # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import freesurfer +# >>> resampler = freesurfer.Resample() +# >>> resampler.inputs.in_file = 'structural.nii' +# >>> resampler.inputs.resampled_file = 'resampled.nii' +# >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1) +# >>> resampler.cmdline +# 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii' # -# >>> from nipype.interfaces import freesurfer -# >>> resampler = freesurfer.Resample() -# >>> resampler.inputs.in_file = 'structural.nii' -# >>> resampler.inputs.resampled_file = 'resampled.nii' -# >>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1) -# >>> resampler.cmdline -# 'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii' # -# task_name: Resample nipype_name: Resample nipype_module: nipype.interfaces.freesurfer.preprocess @@ -35,9 +35,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: file to resample - resampled_file: Path - # type=file: output filename - # type=file|default=: output filename subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -56,15 +53,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- resampled_file: medimage/nifti1 + resampled_file: generic/file # type=file: output filename # type=file|default=: output filename callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - resampled_file: '"resampled.nii"' + # dict[str, str] - `path_template` values to be provided to output fields + resampled_file: resampled_file # type=file: output filename # type=file|default=: output filename requirements: @@ -87,7 +84,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -106,13 +103,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: file to resample - resampled_file: '"resampled.nii"' - # type=file: output filename - # type=file|default=: output filename voxel_size: (2.1, 2.1, 2.1) # type=tuple|default=(0.0, 0.0, 0.0): triplet of output voxel sizes imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,13 +129,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"structural.nii"' # type=file|default=: file to resample - resampled_file: '"resampled.nii"' - # type=file: output filename - # type=file|default=: output filename voxel_size: (2.1, 2.1, 2.1) # type=tuple|default=(0.0, 0.0, 0.0): triplet of output voxel sizes imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/robust_register.yaml b/example-specs/interface/nipype/freesurfer/robust_register.yaml index 8c7146eb..c1536659 100644 --- a/example-specs/interface/nipype/freesurfer/robust_register.yaml +++ b/example-specs/interface/nipype/freesurfer/robust_register.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Perform intramodal linear registration (translation and rotation) using -# robust statistics. +# robust statistics. 
# -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import RobustRegister -# >>> reg = RobustRegister() -# >>> reg.inputs.source_file = 'structural.nii' -# >>> reg.inputs.target_file = 'T1.nii' -# >>> reg.inputs.auto_sens = True -# >>> reg.inputs.init_orient = True -# >>> reg.cmdline # doctest: +ELLIPSIS -# 'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import RobustRegister +# >>> reg = RobustRegister() +# >>> reg.inputs.source_file = 'structural.nii' +# >>> reg.inputs.target_file = 'T1.nii' +# >>> reg.inputs.auto_sens = True +# >>> reg.inputs.init_orient = True +# >>> reg.cmdline # doctest: +ELLIPSIS +# 'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii' +# +# References +# ---------- +# Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse +# Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96. # -# References -# ---------- -# Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse -# Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96. 
# -# task_name: RobustRegister nipype_name: RobustRegister nipype_module: nipype.interfaces.freesurfer.preprocess @@ -49,7 +49,7 @@ inputs: # type=file|default=: volume to be registered subjects_dir: generic/directory # type=directory|default=: subjects directory - target_file: medimage/nifti1 + target_file: generic/file # type=file|default=: target volume for the registration callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -95,7 +95,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -175,7 +175,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -194,14 +194,10 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: volume to be registered - target_file: - # type=file|default=: target volume for the registration auto_sens: 'True' # type=bool|default=False: auto-detect good sensitivity - init_orient: 'True' - # type=bool|default=False: use moments for initial orient (recommended for stripped brains) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list 
item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -216,7 +212,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -224,14 +220,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_file: '"structural.nii"' # type=file|default=: volume to be registered - target_file: '"T1.nii"' - # type=file|default=: target volume for the registration auto_sens: 'True' # type=bool|default=False: auto-detect good sensitivity - init_orient: 'True' - # type=bool|default=False: use moments for initial orient (recommended for stripped brains) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/robust_template.yaml b/example-specs/interface/nipype/freesurfer/robust_template.yaml index 03224b00..db308e1b 100644 --- a/example-specs/interface/nipype/freesurfer/robust_template.yaml +++ b/example-specs/interface/nipype/freesurfer/robust_template.yaml @@ -7,42 +7,42 @@ # ---- # construct an unbiased robust template for longitudinal volumes # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import RobustTemplate -# >>> template = RobustTemplate() -# >>> template.inputs.in_files = ['structural.nii', 'functional.nii'] -# >>> template.inputs.auto_detect_sensitivity = True -# >>> template.inputs.average_metric = 'mean' -# >>> template.inputs.initial_timepoint = 1 -# >>> template.inputs.fixed_timepoint = True -# >>> template.inputs.no_iteration = True -# >>> template.inputs.subsample_threshold = 200 -# >>> template.cmdline #doctest: -# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template mri_robust_template_out.mgz --subsample 200' -# >>> template.inputs.out_file = 'T1.nii' -# >>> template.cmdline #doctest: -# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --subsample 200' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import RobustTemplate +# >>> template = RobustTemplate() +# >>> template.inputs.in_files = ['structural.nii', 'functional.nii'] +# >>> template.inputs.auto_detect_sensitivity = True +# >>> template.inputs.average_metric = 'mean' +# >>> template.inputs.initial_timepoint = 1 +# >>> template.inputs.fixed_timepoint = True +# >>> template.inputs.no_iteration = True +# >>> template.inputs.subsample_threshold = 200 +# >>> template.cmdline #doctest: +# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template mri_robust_template_out.mgz --subsample 200' +# >>> 
template.inputs.out_file = 'T1.nii' +# >>> template.cmdline #doctest: +# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --subsample 200' # -# >>> template.inputs.transform_outputs = ['structural.lta', -# ... 'functional.lta'] -# >>> template.inputs.scaled_intensity_outputs = ['structural-iscale.txt', -# ... 'functional-iscale.txt'] -# >>> template.cmdline #doctest: +ELLIPSIS -# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../structural-iscale.txt .../functional-iscale.txt --subsample 200 --lta .../structural.lta .../functional.lta' +# >>> template.inputs.transform_outputs = ['structural.lta', +# ... 'functional.lta'] +# >>> template.inputs.scaled_intensity_outputs = ['structural-iscale.txt', +# ... 'functional-iscale.txt'] +# >>> template.cmdline #doctest: +ELLIPSIS +# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../structural-iscale.txt .../functional-iscale.txt --subsample 200 --lta .../structural.lta .../functional.lta' # -# >>> template.inputs.transform_outputs = True -# >>> template.inputs.scaled_intensity_outputs = True -# >>> template.cmdline #doctest: +ELLIPSIS -# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../is1.txt .../is2.txt --subsample 200 --lta .../tp1.lta .../tp2.lta' +# >>> template.inputs.transform_outputs = True +# >>> template.inputs.scaled_intensity_outputs = True +# >>> template.cmdline #doctest: +ELLIPSIS +# 'mri_robust_template --satit --average 0 --fixtp --mov structural.nii functional.nii --inittp 1 --noit --template T1.nii --iscaleout .../is1.txt .../is2.txt --subsample 200 --lta .../tp1.lta .../tp2.lta' # -# >>> template.run() #doctest: +SKIP +# >>> template.run() #doctest: +SKIP +# +# References +# 
---------- +# [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_robust_template] # -# References -# ---------- -# [https://surfer.nmr.mgh.harvard.edu/fswiki/mri_robust_template] # -# task_name: RobustTemplate nipype_name: RobustTemplate nipype_module: nipype.interfaces.freesurfer.longitudinal @@ -63,9 +63,6 @@ inputs: # type=inputmultiobject|default=[]: use initial intensity scales initial_transforms: generic/file+list-of # type=inputmultiobject|default=[]: use initial transforms (lta) on source - out_file: Path - # type=file: output template volume (final mean/median image) - # type=file|default='mri_robust_template_out.mgz': output template volume (final mean/median image) subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -97,7 +94,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -126,7 +123,7 @@ tests: average_metric: # type=enum|default='median'|allowed['mean','median']: construct template from: 0 Mean, 1 Median (default) initial_timepoint: - # type=int|default=0: use TP# for spacial init (default random), 0: no init + # type=int|default=0: use TP# for special init (default random), 0: no init fixed_timepoint: # type=bool|default=False: map everything to init TP# (init TP is not resampled) no_iteration: @@ -144,7 +141,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, 
with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -163,23 +160,17 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: input movable volumes to be aligned to common mean/median template - auto_detect_sensitivity: 'True' - # type=bool|default=False: auto-detect good sensitivity (recommended for head or full brain scans) average_metric: '"mean"' # type=enum|default='median'|allowed['mean','median']: construct template from: 0 Mean, 1 Median (default) - initial_timepoint: '1' - # type=int|default=0: use TP# for spacial init (default random), 0: no init fixed_timepoint: 'True' # type=bool|default=False: map everything to init TP# (init TP is not resampled) - no_iteration: 'True' - # type=bool|default=False: do not iterate, just create first template subsample_threshold: '200' # type=int|default=0: subsample if dim > # on all axes (default no subs.) 
out_file: '"T1.nii"' # type=file: output template volume (final mean/median image) # type=file|default='mri_robust_template_out.mgz': output template volume (final mean/median image) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -196,14 +187,14 @@ tests: - inputs: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) - transform_outputs: '["structural.lta","functional.lta"]' + transform_outputs: '["structural.lta",' # type=outputmultiobject: output xform files from moving to template # type=traitcompound|default=[None]: output xforms to template (for each input) - scaled_intensity_outputs: '["structural-iscale.txt","functional-iscale.txt"]' + scaled_intensity_outputs: '["structural-iscale.txt",' # type=outputmultiobject: output final intensity scales # type=traitcompound|default=[None]: final intensity scales (will activate --iscale) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -223,11 +214,8 @@ tests: transform_outputs: 'True' # type=outputmultiobject: output xform files from moving to template # type=traitcompound|default=[None]: output xforms to template (for each input) - scaled_intensity_outputs: 'True' - # type=outputmultiobject: output 
final intensity scales - # type=traitcompound|default=[None]: final intensity scales (will activate --iscale) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -250,23 +238,17 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["structural.nii", "functional.nii"]' # type=inputmultiobject|default=[]: input movable volumes to be aligned to common mean/median template - auto_detect_sensitivity: 'True' - # type=bool|default=False: auto-detect good sensitivity (recommended for head or full brain scans) average_metric: '"mean"' # type=enum|default='median'|allowed['mean','median']: construct template from: 0 Mean, 1 Median (default) - initial_timepoint: '1' - # type=int|default=0: use TP# for spacial init (default random), 0: no init fixed_timepoint: 'True' # type=bool|default=False: map everything to init TP# (init TP is not resampled) - no_iteration: 'True' - # type=bool|default=False: do not iterate, just create first template subsample_threshold: '200' # type=int|default=0: subsample if dim > # on all axes (default no subs.) 
out_file: '"T1.nii"' # type=file: output template volume (final mean/median image) # type=file|default='mri_robust_template_out.mgz': output template volume (final mean/median image) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -276,14 +258,14 @@ doctests: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. - transform_outputs: '["structural.lta","functional.lta"]' + transform_outputs: '["structural.lta",' # type=outputmultiobject: output xform files from moving to template # type=traitcompound|default=[None]: output xforms to template (for each input) - scaled_intensity_outputs: '["structural-iscale.txt","functional-iscale.txt"]' + scaled_intensity_outputs: '["structural-iscale.txt",' # type=outputmultiobject: output final intensity scales # type=traitcompound|default=[None]: final intensity scales (will activate --iscale) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS @@ -296,11 +278,8 @@ doctests: transform_outputs: 'True' # type=outputmultiobject: output xform files from moving to template # type=traitcompound|default=[None]: output xforms to template (for each input) - scaled_intensity_outputs: 'True' - # type=outputmultiobject: output final intensity scales - # type=traitcompound|default=[None]: final intensity scales (will activate --iscale) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/sample_to_surface.yaml b/example-specs/interface/nipype/freesurfer/sample_to_surface.yaml index d5660f11..3ac0cf23 100644 --- a/example-specs/interface/nipype/freesurfer/sample_to_surface.yaml +++ b/example-specs/interface/nipype/freesurfer/sample_to_surface.yaml @@ -7,33 +7,33 @@ # ---- # Sample a volume to the cortical surface using Freesurfer's mri_vol2surf. # -# You must supply a sampling method, range, and units. You can project -# either a given distance (in mm) or a given fraction of the cortical -# thickness at that vertex along the surface normal from the target surface, -# and then set the value of that vertex to be either the value at that point -# or the average or maximum value found along the projection vector. +# You must supply a sampling method, range, and units. You can project +# either a given distance (in mm) or a given fraction of the cortical +# thickness at that vertex along the surface normal from the target surface, +# and then set the value of that vertex to be either the value at that point +# or the average or maximum value found along the projection vector. 
# -# By default, the surface will be saved as a vector with a length equal to the -# number of vertices on the target surface. This is not a problem for Freesurfer -# programs, but if you intend to use the file with interfaces to another package, -# you must set the ``reshape`` input to True, which will factor the surface vector -# into a matrix with dimensions compatible with proper Nifti files. +# By default, the surface will be saved as a vector with a length equal to the +# number of vertices on the target surface. This is not a problem for Freesurfer +# programs, but if you intend to use the file with interfaces to another package, +# you must set the ``reshape`` input to True, which will factor the surface vector +# into a matrix with dimensions compatible with proper Nifti files. # -# Examples -# -------- +# Examples +# -------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> sampler = fs.SampleToSurface(hemi="lh") +# >>> sampler.inputs.source_file = "cope1.nii.gz" +# >>> sampler.inputs.reg_file = "register.dat" +# >>> sampler.inputs.sampling_method = "average" +# >>> sampler.inputs.sampling_range = 1 +# >>> sampler.inputs.sampling_units = "frac" +# >>> sampler.cmdline # doctest: +ELLIPSIS +# 'mri_vol2surf --hemi lh --o ...lh.cope1.mgz --reg register.dat --projfrac-avg 1.000 --mov cope1.nii.gz' +# >>> res = sampler.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> sampler = fs.SampleToSurface(hemi="lh") -# >>> sampler.inputs.source_file = "cope1.nii.gz" -# >>> sampler.inputs.reg_file = "register.dat" -# >>> sampler.inputs.sampling_method = "average" -# >>> sampler.inputs.sampling_range = 1 -# >>> sampler.inputs.sampling_units = "frac" -# >>> sampler.cmdline # doctest: +ELLIPSIS -# 'mri_vol2surf --hemi lh --o ...lh.cope1.mgz --reg register.dat --projfrac-avg 1.000 --mov cope1.nii.gz' -# >>> res = sampler.run() # doctest: +SKIP # -# task_name: SampleToSurface nipype_name: SampleToSurface nipype_module: 
nipype.interfaces.freesurfer.utils @@ -50,12 +50,9 @@ inputs: # passed to the field in the automatically generated unittests. mask_label: generic/file # type=file|default=: label file to mask output with - out_file: Path - # type=file: surface file - # type=file|default=: surface file to write reference_file: generic/file # type=file|default=: reference volume (default is orig.mgz) - reg_file: datascience/dat-file + reg_file: generic/file # type=file|default=: source-to-reference registration file source_file: medimage/nifti-gz # type=file|default=: volume to sample values from @@ -90,7 +87,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: surface file # type=file|default=: surface file to write @@ -180,7 +177,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -199,18 +196,14 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: volume to sample values from - reg_file: - # type=file|default=: source-to-reference registration file sampling_method: '"average"' # type=enum|default='point'|allowed['average','max','point']: how to sample -- at a point or at the max or average over a range - sampling_range: '1' - # type=traitcompound|default=None: sampling range - a point or a tuple of 
(min, max, step) sampling_units: '"frac"' # type=enum|default='mm'|allowed['frac','mm']: sampling range type -- either 'mm' or 'frac' hemi: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: target hemisphere imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.freesurfer as fs expected_outputs: @@ -234,18 +227,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_file: '"cope1.nii.gz"' # type=file|default=: volume to sample values from - reg_file: '"register.dat"' - # type=file|default=: source-to-reference registration file sampling_method: '"average"' # type=enum|default='point'|allowed['average','max','point']: how to sample -- at a point or at the max or average over a range - sampling_range: '1' - # type=traitcompound|default=None: sampling range - a point or a tuple of (min, max, step) sampling_units: '"frac"' # type=enum|default='mm'|allowed['frac','mm']: sampling range type -- either 'mm' or 'frac' hemi: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: target hemisphere imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/seg_stats.yaml b/example-specs/interface/nipype/freesurfer/seg_stats.yaml index a1c4acd3..d63a5b2f 100644 --- a/example-specs/interface/nipype/freesurfer/seg_stats.yaml +++ b/example-specs/interface/nipype/freesurfer/seg_stats.yaml @@ -7,19 +7,19 @@ # ---- # Use FreeSurfer mri_segstats for ROI analysis # -# Examples -# -------- -# >>> import nipype.interfaces.freesurfer as fs -# >>> ss = fs.SegStats() -# >>> ss.inputs.annot = ('PWS04', 'lh', 'aparc') -# >>> ss.inputs.in_file = 'functional.nii' -# >>> ss.inputs.subjects_dir = '.' -# >>> ss.inputs.avgwf_txt_file = 'avgwf.txt' -# >>> ss.inputs.summary_file = 'summary.stats' -# >>> ss.cmdline -# 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats' +# Examples +# -------- +# >>> import nipype.interfaces.freesurfer as fs +# >>> ss = fs.SegStats() +# >>> ss.inputs.annot = ('PWS04', 'lh', 'aparc') +# >>> ss.inputs.in_file = 'functional.nii' +# >>> ss.inputs.subjects_dir = '.' 
+# >>> ss.inputs.avgwf_txt_file = 'avgwf.txt' +# >>> ss.inputs.summary_file = 'summary.stats' +# >>> ss.cmdline +# 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats' +# # -# task_name: SegStats nipype_name: SegStats nipype_module: nipype.interfaces.freesurfer.model @@ -40,7 +40,7 @@ inputs: # type=file|default=: color table file with seg id names gca_color_table: generic/file # type=file|default=: get color table from GCA (CMA) - in_file: medimage/nifti1 + in_file: generic/file # type=file|default=: Use the segmentation to report stats on this volume in_intensity: generic/file # type=file|default=: Undocumented input norm.mgz file @@ -52,9 +52,6 @@ inputs: # type=file|default=: segmentation volume path subjects_dir: generic/directory # type=directory|default=: subjects directory - summary_file: Path - # type=file: Segmentation summary statistics table - # type=file|default=: Segmentation stats summary table file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -74,20 +71,20 @@ outputs: avgwf_file: generic/file # type=file: Volume with functional statistics averaged over segs # type=traitcompound|default=None: Save as binary volume (bool or filename) - avgwf_txt_file: text/text-file + avgwf_txt_file: generic/file # type=file: Text file with functional statistics averaged over segs # type=traitcompound|default=None: Save average waveform into file (bool or filename) sf_avg_file: generic/file # type=file: Text file with func statistics averaged over segs and framss # type=traitcompound|default=None: Save mean across space and time - summary_file: medimage-freesurfer/stats + summary_file: fileformats.medimage_freesurfer.Stats # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file callables: # dict[str, str] - names of methods/callable classes defined in 
the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file @@ -188,7 +185,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -207,18 +204,13 @@ tests: # (if not specified, will try to choose a sensible value) annot: ("PWS04", "lh", "aparc") # type=tuple|default=('', 'lh', ''): subject hemi parc : use surface parcellation - in_file: - # type=file|default=: Use the segmentation to report stats on this volume subjects_dir: '"."' # type=directory|default=: subjects directory - avgwf_txt_file: '"avgwf.txt"' - # type=file: Text file with functional statistics averaged over segs - # type=traitcompound|default=None: Save average waveform into file (bool or filename) summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.freesurfer as fs expected_outputs: @@ -234,7 +226,7 @@ tests: # 
bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --i functional.nii --sum ./summary.stats +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -242,18 +234,13 @@ doctests: # '.mock()' method of the corresponding class is used instead. annot: ("PWS04", "lh", "aparc") # type=tuple|default=('', 'lh', ''): subject hemi parc : use surface parcellation - in_file: '"functional.nii"' - # type=file|default=: Use the segmentation to report stats on this volume subjects_dir: '"."' # type=directory|default=: subjects directory - avgwf_txt_file: '"avgwf.txt"' - # type=file: Text file with functional statistics averaged over segs - # type=traitcompound|default=None: Save average waveform into file (bool or filename) summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/seg_stats_recon_all.yaml b/example-specs/interface/nipype/freesurfer/seg_stats_recon_all.yaml index f2b969ac..71b4fc10 100644 --- a/example-specs/interface/nipype/freesurfer/seg_stats_recon_all.yaml +++ b/example-specs/interface/nipype/freesurfer/seg_stats_recon_all.yaml @@ -6,42 +6,42 @@ # Docs # ---- # -# This class inherits SegStats and modifies it for use in a recon-all workflow. 
-# This implementation mandates implicit inputs that SegStats. -# To ensure backwards compatibility of SegStats, this class was created. +# This class inherits SegStats and modifies it for use in a recon-all workflow. +# This implementation mandates implicit inputs that SegStats. +# To ensure backwards compatibility of SegStats, this class was created. +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import SegStatsReconAll +# >>> segstatsreconall = SegStatsReconAll() +# >>> segstatsreconall.inputs.annot = ('PWS04', 'lh', 'aparc') +# >>> segstatsreconall.inputs.avgwf_txt_file = 'avgwf.txt' +# >>> segstatsreconall.inputs.summary_file = 'summary.stats' +# >>> segstatsreconall.inputs.subject_id = '10335' +# >>> segstatsreconall.inputs.ribbon = 'wm.mgz' +# >>> segstatsreconall.inputs.transform = 'trans.mat' +# >>> segstatsreconall.inputs.presurf_seg = 'wm.mgz' +# >>> segstatsreconall.inputs.lh_orig_nofix = 'lh.pial' +# >>> segstatsreconall.inputs.rh_orig_nofix = 'lh.pial' +# >>> segstatsreconall.inputs.lh_pial = 'lh.pial' +# >>> segstatsreconall.inputs.rh_pial = 'lh.pial' +# >>> segstatsreconall.inputs.lh_white = 'lh.pial' +# >>> segstatsreconall.inputs.rh_white = 'lh.pial' +# >>> segstatsreconall.inputs.empty = True +# >>> segstatsreconall.inputs.brain_vol = 'brain-vol-from-seg' +# >>> segstatsreconall.inputs.exclude_ctx_gm_wm = True +# >>> segstatsreconall.inputs.supratent = True +# >>> segstatsreconall.inputs.subcort_gm = True +# >>> segstatsreconall.inputs.etiv = True +# >>> segstatsreconall.inputs.wm_vol_from_surf = True +# >>> segstatsreconall.inputs.cortex_vol_from_surf = True +# >>> segstatsreconall.inputs.total_gray = True +# >>> segstatsreconall.inputs.euler = True +# >>> segstatsreconall.inputs.exclude_id = 0 +# >>> segstatsreconall.cmdline +# 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray 
--surf-wm-vol --sum ./summary.stats' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import SegStatsReconAll -# >>> segstatsreconall = SegStatsReconAll() -# >>> segstatsreconall.inputs.annot = ('PWS04', 'lh', 'aparc') -# >>> segstatsreconall.inputs.avgwf_txt_file = 'avgwf.txt' -# >>> segstatsreconall.inputs.summary_file = 'summary.stats' -# >>> segstatsreconall.inputs.subject_id = '10335' -# >>> segstatsreconall.inputs.ribbon = 'wm.mgz' -# >>> segstatsreconall.inputs.transform = 'trans.mat' -# >>> segstatsreconall.inputs.presurf_seg = 'wm.mgz' -# >>> segstatsreconall.inputs.lh_orig_nofix = 'lh.pial' -# >>> segstatsreconall.inputs.rh_orig_nofix = 'lh.pial' -# >>> segstatsreconall.inputs.lh_pial = 'lh.pial' -# >>> segstatsreconall.inputs.rh_pial = 'lh.pial' -# >>> segstatsreconall.inputs.lh_white = 'lh.pial' -# >>> segstatsreconall.inputs.rh_white = 'lh.pial' -# >>> segstatsreconall.inputs.empty = True -# >>> segstatsreconall.inputs.brain_vol = 'brain-vol-from-seg' -# >>> segstatsreconall.inputs.exclude_ctx_gm_wm = True -# >>> segstatsreconall.inputs.supratent = True -# >>> segstatsreconall.inputs.subcort_gm = True -# >>> segstatsreconall.inputs.etiv = True -# >>> segstatsreconall.inputs.wm_vol_from_surf = True -# >>> segstatsreconall.inputs.cortex_vol_from_surf = True -# >>> segstatsreconall.inputs.total_gray = True -# >>> segstatsreconall.inputs.euler = True -# >>> segstatsreconall.inputs.exclude_id = 0 -# >>> segstatsreconall.cmdline -# 'mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats' # -# task_name: SegStatsReconAll nipype_name: SegStatsReconAll nipype_module: nipype.interfaces.freesurfer.model @@ -68,11 +68,11 @@ inputs: # type=file|default=: Use the segmentation to report stats on this volume in_intensity: generic/file # type=file|default=: Undocumented input 
norm.mgz file - lh_orig_nofix: medimage-freesurfer/pial + lh_orig_nofix: generic/file # type=file|default=: Input lh.orig.nofix - lh_pial: medimage-freesurfer/pial + lh_pial: generic/file # type=file|default=: Input file must be /surf/lh.pial - lh_white: medimage-freesurfer/pial + lh_white: generic/file # type=file|default=: Input file must be /surf/lh.white mask_file: generic/file # type=file|default=: Mask volume (same size as seg @@ -80,11 +80,11 @@ inputs: # type=file|default=: Compensate for partial voluming presurf_seg: medimage/mgh-gz # type=file|default=: Input segmentation volume - rh_orig_nofix: medimage-freesurfer/pial + rh_orig_nofix: fileformats.medimage_freesurfer.Pial # type=file|default=: Input rh.orig.nofix - rh_pial: medimage-freesurfer/pial + rh_pial: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /surf/rh.pial - rh_white: medimage-freesurfer/pial + rh_white: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file must be /surf/rh.white ribbon: medimage/mgh-gz # type=file|default=: Input file mri/ribbon.mgz @@ -92,10 +92,7 @@ inputs: # type=file|default=: segmentation volume path subjects_dir: generic/directory # type=directory|default=: subjects directory - summary_file: Path - # type=file: Segmentation summary statistics table - # type=file|default=: Segmentation stats summary table file - transform: datascience/text-matrix + transform: generic/file # type=file|default=: Input transform file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -116,20 +113,20 @@ outputs: avgwf_file: generic/file # type=file: Volume with functional statistics averaged over segs # type=traitcompound|default=None: Save as binary volume (bool or filename) - avgwf_txt_file: text/text-file + avgwf_txt_file: generic/file # type=file: Text file with functional statistics averaged over segs # type=traitcompound|default=None: Save average waveform into file (bool 
or filename) sf_avg_file: generic/file # type=file: Text file with func statistics averaged over segs and framss # type=traitcompound|default=None: Save mean across space and time - summary_file: medimage-freesurfer/stats + summary_file: fileformats.medimage_freesurfer.Stats # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file @@ -254,7 +251,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -273,56 +270,31 @@ tests: # (if not specified, will try to choose a sensible value) annot: ("PWS04", "lh", "aparc") # type=tuple|default=('', 'lh', ''): subject hemi parc : use surface parcellation - avgwf_txt_file: '"avgwf.txt"' - # type=file: Text file with functional statistics averaged over segs - # type=traitcompound|default=None: Save average waveform into file (bool or filename) summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file - subject_id: '"10335"' - # type=string|default='subject_id': Subject id being processed ribbon: # 
type=file|default=: Input file mri/ribbon.mgz - transform: - # type=file|default=: Input transform file presurf_seg: # type=file|default=: Input segmentation volume - lh_orig_nofix: - # type=file|default=: Input lh.orig.nofix rh_orig_nofix: # type=file|default=: Input rh.orig.nofix - lh_pial: - # type=file|default=: Input file must be /surf/lh.pial rh_pial: # type=file|default=: Input file must be /surf/rh.pial - lh_white: - # type=file|default=: Input file must be /surf/lh.white rh_white: # type=file|default=: Input file must be /surf/rh.white - empty: 'True' - # type=bool|default=False: Report on segmentations listed in the color table brain_vol: '"brain-vol-from-seg"' # type=enum|default='brain-vol-from-seg'|allowed['brain-vol-from-seg','brainmask']: Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg`` - exclude_ctx_gm_wm: 'True' - # type=bool|default=False: exclude cortical gray and white matter supratent: 'True' # type=bool|default=False: Undocumented input flag - subcort_gm: 'True' - # type=bool|default=False: Compute volume of subcortical gray matter etiv: 'True' # type=bool|default=False: Compute ICV from talairach transform - wm_vol_from_surf: 'True' - # type=bool|default=False: Compute wm volume from surf cortex_vol_from_surf: 'True' # type=bool|default=False: Compute cortex volume from surf - total_gray: 'True' - # type=bool|default=False: Compute volume of total gray matter euler: 'True' # type=bool|default=False: Write out number of defect holes in orig.nofix based on the euler number - exclude_id: '0' - # type=int|default=0: Exclude seg id from report imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, 
noting that tests will typically @@ -337,7 +309,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_segstats --annot PWS04 lh aparc --avgwf ./avgwf.txt --brain-vol-from-seg --surf-ctx-vol --empty --etiv --euler --excl-ctxgmwm --excludeid 0 --subcortgray --subject 10335 --supratent --totalgray --surf-wm-vol --sum ./summary.stats +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -345,56 +317,31 @@ doctests: # '.mock()' method of the corresponding class is used instead. annot: ("PWS04", "lh", "aparc") # type=tuple|default=('', 'lh', ''): subject hemi parc : use surface parcellation - avgwf_txt_file: '"avgwf.txt"' - # type=file: Text file with functional statistics averaged over segs - # type=traitcompound|default=None: Save average waveform into file (bool or filename) summary_file: '"summary.stats"' # type=file: Segmentation summary statistics table # type=file|default=: Segmentation stats summary table file - subject_id: '"10335"' - # type=string|default='subject_id': Subject id being processed ribbon: '"wm.mgz"' # type=file|default=: Input file mri/ribbon.mgz - transform: '"trans.mat"' - # type=file|default=: Input transform file presurf_seg: '"wm.mgz"' # type=file|default=: Input segmentation volume - lh_orig_nofix: '"lh.pial"' - # type=file|default=: Input lh.orig.nofix rh_orig_nofix: '"lh.pial"' # type=file|default=: Input rh.orig.nofix - lh_pial: '"lh.pial"' - # type=file|default=: Input file must be /surf/lh.pial rh_pial: '"lh.pial"' # type=file|default=: Input file must be /surf/rh.pial - lh_white: '"lh.pial"' - # type=file|default=: Input file must be /surf/lh.white rh_white: '"lh.pial"' # type=file|default=: Input file must be /surf/rh.white - empty: 'True' - # type=bool|default=False: Report on segmentations listed in the color table brain_vol: 
'"brain-vol-from-seg"' # type=enum|default='brain-vol-from-seg'|allowed['brain-vol-from-seg','brainmask']: Compute brain volume either with ``brainmask`` or ``brain-vol-from-seg`` - exclude_ctx_gm_wm: 'True' - # type=bool|default=False: exclude cortical gray and white matter supratent: 'True' # type=bool|default=False: Undocumented input flag - subcort_gm: 'True' - # type=bool|default=False: Compute volume of subcortical gray matter etiv: 'True' # type=bool|default=False: Compute ICV from talairach transform - wm_vol_from_surf: 'True' - # type=bool|default=False: Compute wm volume from surf cortex_vol_from_surf: 'True' # type=bool|default=False: Compute cortex volume from surf - total_gray: 'True' - # type=bool|default=False: Compute volume of total gray matter euler: 'True' # type=bool|default=False: Write out number of defect holes in orig.nofix based on the euler number - exclude_id: '0' - # type=int|default=0: Exclude seg id from report imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/segment_cc.yaml b/example-specs/interface/nipype/freesurfer/segment_cc.yaml index fc4cee03..f7cf1279 100644 --- a/example-specs/interface/nipype/freesurfer/segment_cc.yaml +++ b/example-specs/interface/nipype/freesurfer/segment_cc.yaml @@ -6,26 +6,26 @@ # Docs # ---- # -# This program segments the corpus callosum into five separate labels in -# the subcortical segmentation volume 'aseg.mgz'. The divisions of the -# cc are equally spaced in terms of distance along the primary -# eigendirection (pretty much the long axis) of the cc. 
The lateral -# extent can be changed with the -T parameter, where -# is the distance off the midline (so -T 1 would result in -# the who CC being 3mm thick). The default is 2 so it's 5mm thick. The -# aseg.stats values should be volume. +# This program segments the corpus callosum into five separate labels in +# the subcortical segmentation volume 'aseg.mgz'. The divisions of the +# cc are equally spaced in terms of distance along the primary +# eigendirection (pretty much the long axis) of the cc. The lateral +# extent can be changed with the -T parameter, where +# is the distance off the midline (so -T 1 would result in +# the who CC being 3mm thick). The default is 2 so it's 5mm thick. The +# aseg.stats values should be volume. +# +# Examples +# ======== +# >>> from nipype.interfaces import freesurfer +# >>> SegmentCC_node = freesurfer.SegmentCC() +# >>> SegmentCC_node.inputs.in_file = "aseg.mgz" +# >>> SegmentCC_node.inputs.in_norm = "norm.mgz" +# >>> SegmentCC_node.inputs.out_rotation = "cc.lta" +# >>> SegmentCC_node.inputs.subject_id = "test" +# >>> SegmentCC_node.cmdline +# 'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test' # -# Examples -# ======== -# >>> from nipype.interfaces import freesurfer -# >>> SegmentCC_node = freesurfer.SegmentCC() -# >>> SegmentCC_node.inputs.in_file = "aseg.mgz" -# >>> SegmentCC_node.inputs.in_norm = "norm.mgz" -# >>> SegmentCC_node.inputs.out_rotation = "cc.lta" -# >>> SegmentCC_node.inputs.subject_id = "test" -# >>> SegmentCC_node.cmdline -# 'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test' -# task_name: SegmentCC nipype_name: SegmentCC nipype_module: nipype.interfaces.freesurfer.preprocess @@ -42,14 +42,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/mgh-gz # type=file|default=: Input aseg file to read from subjects directory - in_norm: medimage/mgh-gz + in_norm: generic/file # type=file|default=: Required undocumented input {subject}/mri/norm.mgz - out_file: Path - # type=file: Output segmentation uncluding corpus collosum - # type=file|default=: Filename to write aseg including CC - out_rotation: Path - # type=file: Output lta rotation file - # type=file|default=: Global filepath for writing rotation lta subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -69,16 +63,16 @@ outputs: # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. out_file: generic/file - # type=file: Output segmentation uncluding corpus collosum + # type=file: Output segmentation including corpus collosum # type=file|default=: Filename to write aseg including CC - out_rotation: medimage-freesurfer/lta + out_rotation: fileformats.medimage_freesurfer.Lta # type=file: Output lta rotation file # type=file|default=: Global filepath for writing rotation lta callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -90,7 +84,7 @@ tests: in_norm: # type=file|default=: Required undocumented input {subject}/mri/norm.mgz out_file: - # type=file: Output segmentation uncluding corpus collosum + # type=file: Output segmentation including corpus collosum # type=file|default=: Filename to write aseg including CC out_rotation: # type=file: Output lta rotation file @@ -98,7 +92,7 @@ tests: subject_id: # 
type=string|default='subject_id': Subject name copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the input files to the node directory. subjects_dir: # type=directory|default=: subjects directory args: @@ -106,7 +100,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,15 +119,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input aseg file to read from subjects directory - in_norm: - # type=file|default=: Required undocumented input {subject}/mri/norm.mgz out_rotation: '"cc.lta"' # type=file: Output lta rotation file # type=file|default=: Global filepath for writing rotation lta - subject_id: '"test"' - # type=string|default='subject_id': Subject name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -148,7 +138,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -156,15 +146,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"aseg.mgz"' # type=file|default=: Input aseg file to read from subjects directory - in_norm: '"norm.mgz"' - # type=file|default=: Required undocumented input {subject}/mri/norm.mgz out_rotation: '"cc.lta"' # type=file: Output lta rotation file # type=file|default=: Global filepath for writing rotation lta - subject_id: '"test"' - # type=string|default='subject_id': Subject name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/segment_wm.yaml b/example-specs/interface/nipype/freesurfer/segment_wm.yaml index 6aa21efd..7a1040bc 100644 --- a/example-specs/interface/nipype/freesurfer/segment_wm.yaml +++ b/example-specs/interface/nipype/freesurfer/segment_wm.yaml @@ -6,20 +6,20 @@ # Docs # ---- # -# This program segments white matter from the input volume. The input -# volume should be normalized such that white matter voxels are -# ~110-valued, and the volume is conformed to 256^3. +# This program segments white matter from the input volume. The input +# volume should be normalized such that white matter voxels are +# ~110-valued, and the volume is conformed to 256^3. 
# # -# Examples -# ======== -# >>> from nipype.interfaces import freesurfer -# >>> SegmentWM_node = freesurfer.SegmentWM() -# >>> SegmentWM_node.inputs.in_file = "norm.mgz" -# >>> SegmentWM_node.inputs.out_file = "wm.seg.mgz" -# >>> SegmentWM_node.cmdline -# 'mri_segment norm.mgz wm.seg.mgz' -# +# Examples +# ======== +# >>> from nipype.interfaces import freesurfer +# >>> SegmentWM_node = freesurfer.SegmentWM() +# >>> SegmentWM_node.inputs.in_file = "norm.mgz" +# >>> SegmentWM_node.inputs.out_file = "wm.seg.mgz" +# >>> SegmentWM_node.cmdline +# 'mri_segment norm.mgz wm.seg.mgz' +# task_name: SegmentWM nipype_name: SegmentWM nipype_module: nipype.interfaces.freesurfer.preprocess @@ -36,9 +36,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/mgh-gz # type=file|default=: Input file for SegmentWM - out_file: Path - # type=file: Output white matter segmentation - # type=file|default=: File to be written as output for SegmentWM subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -57,14 +54,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/mgh-gz + out_file: generic/file # type=file: Output white matter segmentation # type=file|default=: File to be written as output for SegmentWM callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -83,7 +80,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -102,11 +99,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Input file for SegmentWM - out_file: '"wm.seg.mgz"' - # type=file: Output white matter segmentation - # type=file|default=: File to be written as output for SegmentWM imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,7 +115,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_segment norm.mgz wm.seg.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -129,11 +123,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"norm.mgz"' # type=file|default=: Input file for SegmentWM - out_file: '"wm.seg.mgz"' - # type=file: Output white matter segmentation - # type=file|default=: File to be written as output for SegmentWM imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/smooth.yaml b/example-specs/interface/nipype/freesurfer/smooth.yaml index e41ed92d..7e2d2cdf 100644 --- a/example-specs/interface/nipype/freesurfer/smooth.yaml +++ b/example-specs/interface/nipype/freesurfer/smooth.yaml @@ -7,24 +7,24 @@ # ---- # Use FreeSurfer mris_volsmooth to smooth a volume # -# This function smoothes cortical regions on a surface and non-cortical -# regions in volume. +# This function smoothes cortical regions on a surface and non-cortical +# regions in volume. # -# .. note:: -# Cortical voxels are mapped to the surface (3D->2D) and then the -# smoothed values from the surface are put back into the volume to fill -# the cortical ribbon. If data is smoothed with this algorithm, one has to -# be careful about how further processing is interpreted. +# .. note:: +# Cortical voxels are mapped to the surface (3D->2D) and then the +# smoothed values from the surface are put back into the volume to fill +# the cortical ribbon. 
If data is smoothed with this algorithm, one has to +# be careful about how further processing is interpreted. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import Smooth +# >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6) +# >>> smoothvol.cmdline +# 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000' # -# >>> from nipype.interfaces.freesurfer import Smooth -# >>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6) -# >>> smoothvol.cmdline -# 'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000' # -# task_name: Smooth nipype_name: Smooth nipype_module: nipype.interfaces.freesurfer.preprocess @@ -41,11 +41,8 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: source volume - reg_file: datascience/dat-file + reg_file: fileformats.medimage_freesurfer.Dat # type=file|default=: registers volume to surface anatomical - smoothed_file: Path - # type=file: smoothed input volume - # type=file|default=: output volume subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields smoothed_file: '"foo_out.nii"' # type=file: smoothed input volume # type=file|default=: output volume @@ -105,7 +102,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -134,7 +131,7 @@ tests: vol_fwhm: '6' # type=range|default=0.0: volume smoothing outside of surface imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -167,7 +164,7 @@ doctests: vol_fwhm: '6' # type=range|default=0.0: volume smoothing outside of surface imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/smooth_tessellation.yaml b/example-specs/interface/nipype/freesurfer/smooth_tessellation.yaml index 408bb92a..4816596b 100644 --- a/example-specs/interface/nipype/freesurfer/smooth_tessellation.yaml +++ b/example-specs/interface/nipype/freesurfer/smooth_tessellation.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# Smooth a tessellated surface. +# Smooth a tessellated surface. # -# See Also -# -------- -# `nipype.interfaces.freesurfer.utils.SurfaceSmooth`_ interface for smoothing a scalar field -# along a surface manifold +# See Also +# -------- +# `nipype.interfaces.freesurfer.utils.SurfaceSmooth`_ interface for smoothing a scalar field +# along a surface manifold +# +# Example +# ------- +# >>> import nipype.interfaces.freesurfer as fs +# >>> smooth = fs.SmoothTessellation() +# >>> smooth.inputs.in_file = 'lh.hippocampus.stl' +# >>> smooth.run() # doctest: +SKIP # -# Example -# ------- -# >>> import nipype.interfaces.freesurfer as fs -# >>> smooth = fs.SmoothTessellation() -# >>> smooth.inputs.in_file = 'lh.hippocampus.stl' -# >>> smooth.run() # doctest: +SKIP # -# task_name: SmoothTessellation nipype_name: SmoothTessellation nipype_module: nipype.interfaces.freesurfer.utils @@ -37,12 +37,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: Input volume to tessellate voxels from. 
- out_area_file: Path - # type=file|default=: Write area to ``?h.areaname`` (default "area") - out_curvature_file: Path - # type=file|default=: Write curvature to ``?h.curvname`` (default "curv") - out_file: Path - # type=file|default=: output filename or True to generate one subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -69,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -111,7 +105,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/sphere.yaml b/example-specs/interface/nipype/freesurfer/sphere.yaml index 372e2600..4fd47875 100644 --- a/example-specs/interface/nipype/freesurfer/sphere.yaml +++ b/example-specs/interface/nipype/freesurfer/sphere.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# This program will add a template into an average surface +# This program will add a template into an average surface +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import Sphere +# >>> sphere = Sphere() +# >>> sphere.inputs.in_file = 'lh.pial' +# >>> sphere.cmdline +# 'mris_sphere lh.pial lh.sphere' # -# Examples -# 
======== -# >>> from nipype.interfaces.freesurfer import Sphere -# >>> sphere = Sphere() -# >>> sphere.inputs.in_file = 'lh.pial' -# >>> sphere.cmdline -# 'mris_sphere lh.pial lh.sphere' -# task_name: Sphere nipype_name: Sphere nipype_module: nipype.interfaces.freesurfer.utils @@ -30,13 +30,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage-freesurfer/pial + in_file: fileformats.medimage_freesurfer.Pial # type=file|default=: Input file for Sphere in_smoothwm: generic/file # type=file|default=: Input surface required when -q flag is not selected - out_file: Path - # type=file: Output file for Sphere - # type=file|default=: Output file for Sphere subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -62,7 +59,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -89,7 +86,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,7 +106,7 @@ tests: in_file: # type=file|default=: Input file 
for Sphere imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +121,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_sphere lh.pial lh.sphere +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -133,7 +130,7 @@ doctests: in_file: '"lh.pial"' # type=file|default=: Input file for Sphere imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/spherical_average.yaml b/example-specs/interface/nipype/freesurfer/spherical_average.yaml index 44ab8d71..9d33d71a 100644 --- a/example-specs/interface/nipype/freesurfer/spherical_average.yaml +++ b/example-specs/interface/nipype/freesurfer/spherical_average.yaml @@ -6,25 +6,25 @@ # Docs # ---- # -# This program will add a template into an average surface. +# This program will add a template into an average surface. +# +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import SphericalAverage +# >>> sphericalavg = SphericalAverage() +# >>> sphericalavg.inputs.out_file = 'test.out' +# >>> sphericalavg.inputs.in_average = '.' 
+# >>> sphericalavg.inputs.in_surf = 'lh.pial' +# >>> sphericalavg.inputs.hemisphere = 'lh' +# >>> sphericalavg.inputs.fname = 'lh.entorhinal' +# >>> sphericalavg.inputs.which = 'label' +# >>> sphericalavg.inputs.subject_id = '10335' +# >>> sphericalavg.inputs.erode = 2 +# >>> sphericalavg.inputs.threshold = 5 +# >>> sphericalavg.cmdline +# 'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out' # -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import SphericalAverage -# >>> sphericalavg = SphericalAverage() -# >>> sphericalavg.inputs.out_file = 'test.out' -# >>> sphericalavg.inputs.in_average = '.' -# >>> sphericalavg.inputs.in_surf = 'lh.pial' -# >>> sphericalavg.inputs.hemisphere = 'lh' -# >>> sphericalavg.inputs.fname = 'lh.entorhinal' -# >>> sphericalavg.inputs.which = 'label' -# >>> sphericalavg.inputs.subject_id = '10335' -# >>> sphericalavg.inputs.erode = 2 -# >>> sphericalavg.inputs.threshold = 5 -# >>> sphericalavg.cmdline -# 'mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out' # -# task_name: SphericalAverage nipype_name: SphericalAverage nipype_module: nipype.interfaces.freesurfer.model @@ -39,15 +39,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_average: Path - # type=directory|default=: Average subject in_orig: generic/file # type=file|default=: Original surface filename - in_surf: medimage-freesurfer/pial + in_surf: fileformats.medimage_freesurfer.Pial # type=file|default=: Input surface file - out_file: Path - # type=file: Output label - # type=file|default=: Output filename subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -68,14 +63,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: medimage-freesurfer/out + out_file: fileformats.medimage_freesurfer.Out # type=file: Output label # type=file|default=: Output filename callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"test.out"' # type=file: Output label # type=file|default=: Output filename @@ -113,7 +108,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -133,24 +128,16 @@ tests: out_file: '"test.out"' # type=file: Output label # type=file|default=: Output filename - in_average: '"."' - # type=directory|default=: Average subject in_surf: # type=file|default=: Input surface file - 
hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere fname: '"lh.entorhinal"' # type=string|default='': Filename from the average subject directory. Example: to use rh.entorhinal.label as the input label filename, set fname to 'rh.entorhinal' and which to 'label'. The program will then search for ``/label/rh.entorhinal.label`` - which: '"label"' - # type=enum|default='coords'|allowed['area','coords','curv','label','vals']: No documentation subject_id: '"10335"' # type=string|default='': Output subject id - erode: '2' - # type=int|default=0: Undocumented threshold: '5' # type=float|default=0.0: Undocumented imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -165,7 +152,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_spherical_average -erode 2 -o 10335 -t 5.0 label lh.entorhinal lh pial . test.out +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -174,24 +161,16 @@ doctests: out_file: '"test.out"' # type=file: Output label # type=file|default=: Output filename - in_average: '"."' - # type=directory|default=: Average subject in_surf: '"lh.pial"' # type=file|default=: Input surface file - hemisphere: '"lh"' - # type=enum|default='lh'|allowed['lh','rh']: Input hemisphere fname: '"lh.entorhinal"' # type=string|default='': Filename from the average subject directory. 
Example: to use rh.entorhinal.label as the input label filename, set fname to 'rh.entorhinal' and which to 'label'. The program will then search for ``/label/rh.entorhinal.label`` - which: '"label"' - # type=enum|default='coords'|allowed['area','coords','curv','label','vals']: No documentation subject_id: '"10335"' # type=string|default='': Output subject id - erode: '2' - # type=int|default=0: Undocumented threshold: '5' # type=float|default=0.0: Undocumented imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/surface_2_vol_transform.yaml b/example-specs/interface/nipype/freesurfer/surface_2_vol_transform.yaml index 60377be7..6f8a4d94 100644 --- a/example-specs/interface/nipype/freesurfer/surface_2_vol_transform.yaml +++ b/example-specs/interface/nipype/freesurfer/surface_2_vol_transform.yaml @@ -7,21 +7,21 @@ # ---- # Use FreeSurfer mri_surf2vol to apply a transform. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import Surface2VolTransform +# >>> xfm2vol = Surface2VolTransform() +# >>> xfm2vol.inputs.source_file = 'lh.cope1.mgz' +# >>> xfm2vol.inputs.reg_file = 'register.mat' +# >>> xfm2vol.inputs.hemi = 'lh' +# >>> xfm2vol.inputs.template_file = 'cope1.nii.gz' +# >>> xfm2vol.inputs.subjects_dir = '.' +# >>> xfm2vol.cmdline +# 'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . 
--template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii' +# >>> res = xfm2vol.run()# doctest: +SKIP # -# >>> from nipype.interfaces.freesurfer import Surface2VolTransform -# >>> xfm2vol = Surface2VolTransform() -# >>> xfm2vol.inputs.source_file = 'lh.cope1.mgz' -# >>> xfm2vol.inputs.reg_file = 'register.mat' -# >>> xfm2vol.inputs.hemi = 'lh' -# >>> xfm2vol.inputs.template_file = 'cope1.nii.gz' -# >>> xfm2vol.inputs.subjects_dir = '.' -# >>> xfm2vol.cmdline -# 'mri_surf2vol --hemi lh --volreg register.mat --surfval lh.cope1.mgz --sd . --template cope1.nii.gz --outvol lh.cope1_asVol.nii --vtxvol lh.cope1_asVol_vertex.nii' -# >>> res = xfm2vol.run()# doctest: +SKIP # -# task_name: Surface2VolTransform nipype_name: Surface2VolTransform nipype_module: nipype.interfaces.freesurfer.utils @@ -36,18 +36,12 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - reg_file: datascience/text-matrix + reg_file: generic/file # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) source_file: medimage/mgh-gz # type=file|default=: This is the source of the surface values - template_file: medimage/nifti-gz + template_file: generic/file # type=file|default=: Output template volume - transformed_file: Path - # type=file: Path to output file if used normally - # type=file|default=: Output volume - vertexvol_file: Path - # type=file: vertex map volume path id. Optional - # type=file|default=: Path name of the vertex output volume, which is the same as output volume except that the value of each voxel is the vertex-id that is mapped to that voxel. 
callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -74,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -110,7 +104,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,16 +123,12 @@ tests: # (if not specified, will try to choose a sensible value) source_file: # type=file|default=: This is the source of the surface values - reg_file: - # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) hemi: '"lh"' # type=str|default='': hemisphere of data - template_file: - # type=file|default=: Output template volume subjects_dir: '"."' # type=str|default='': freesurfer subjects directory defaults to $SUBJECTS_DIR imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests 
will typically @@ -161,16 +151,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_file: '"lh.cope1.mgz"' # type=file|default=: This is the source of the surface values - reg_file: '"register.mat"' - # type=file|default=: tkRAS-to-tkRAS matrix (tkregister2 format) hemi: '"lh"' # type=str|default='': hemisphere of data - template_file: '"cope1.nii.gz"' - # type=file|default=: Output template volume subjects_dir: '"."' # type=str|default='': freesurfer subjects directory defaults to $SUBJECTS_DIR imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/surface_smooth.yaml b/example-specs/interface/nipype/freesurfer/surface_smooth.yaml index de2587ae..1a5d7f6d 100644 --- a/example-specs/interface/nipype/freesurfer/surface_smooth.yaml +++ b/example-specs/interface/nipype/freesurfer/surface_smooth.yaml @@ -7,30 +7,30 @@ # ---- # Smooth a surface image with mri_surf2surf. # -# The surface is smoothed by an iterative process of averaging the -# value at each vertex with those of its adjacent neighbors. You may supply -# either the number of iterations to run or a desired effective FWHM of the -# smoothing process. If the latter, the underlying program will calculate -# the correct number of iterations internally. +# The surface is smoothed by an iterative process of averaging the +# value at each vertex with those of its adjacent neighbors. You may supply +# either the number of iterations to run or a desired effective FWHM of the +# smoothing process. 
If the latter, the underlying program will calculate +# the correct number of iterations internally. # -# See Also -# -------- -# `nipype.interfaces.freesurfer.utils.SmoothTessellation`_ interface for -# smoothing a tessellated surface (e.g. in gifti or .stl) +# See Also +# -------- +# `nipype.interfaces.freesurfer.utils.SmoothTessellation`_ interface for +# smoothing a tessellated surface (e.g. in gifti or .stl) +# +# Examples +# -------- +# >>> import nipype.interfaces.freesurfer as fs +# >>> smoother = fs.SurfaceSmooth() +# >>> smoother.inputs.in_file = "lh.cope1.mgz" +# >>> smoother.inputs.subject_id = "subj_1" +# >>> smoother.inputs.hemi = "lh" +# >>> smoother.inputs.fwhm = 5 +# >>> smoother.cmdline # doctest: +ELLIPSIS +# 'mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1' +# >>> smoother.run() # doctest: +SKIP # -# Examples -# -------- -# >>> import nipype.interfaces.freesurfer as fs -# >>> smoother = fs.SurfaceSmooth() -# >>> smoother.inputs.in_file = "lh.cope1.mgz" -# >>> smoother.inputs.subject_id = "subj_1" -# >>> smoother.inputs.hemi = "lh" -# >>> smoother.inputs.fwhm = 5 -# >>> smoother.cmdline # doctest: +ELLIPSIS -# 'mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1' -# >>> smoother.run() # doctest: +SKIP # -# task_name: SurfaceSmooth nipype_name: SurfaceSmooth nipype_module: nipype.interfaces.freesurfer.utils @@ -47,9 +47,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/mgh-gz # type=file|default=: source surface file - out_file: Path - # type=file: smoothed surface file - # type=file|default=: surface file to write subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -75,7 +72,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: smoothed surface file # type=file|default=: surface file to write @@ -109,7 +106,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -128,14 +125,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: source surface file - subject_id: '"subj_1"' - # type=string|default='': subject id of surface file hemi: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: hemisphere to operate on - fwhm: '5' - # type=float|default=0.0: effective FWHM of the smoothing process imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.freesurfer as fs expected_outputs: @@ -151,7 +144,7 @@ tests: # 
bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_surf2surf --cortex --fwhm 5.0000 --hemi lh --sval lh.cope1.mgz --tval ...lh.cope1_smooth5.mgz --s subj_1 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -159,14 +152,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"lh.cope1.mgz"' # type=file|default=: source surface file - subject_id: '"subj_1"' - # type=string|default='': subject id of surface file hemi: '"lh"' # type=enum|default='lh'|allowed['lh','rh']: hemisphere to operate on - fwhm: '5' - # type=float|default=0.0: effective FWHM of the smoothing process imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/surface_snapshots.yaml b/example-specs/interface/nipype/freesurfer/surface_snapshots.yaml index 31c1f362..4b5f6294 100644 --- a/example-specs/interface/nipype/freesurfer/surface_snapshots.yaml +++ b/example-specs/interface/nipype/freesurfer/surface_snapshots.yaml @@ -7,29 +7,29 @@ # ---- # Use Tksurfer to save pictures of the cortical surface. # -# By default, this takes snapshots of the lateral, medial, ventral, -# and dorsal surfaces. See the ``six_images`` option to add the -# anterior and posterior surfaces. +# By default, this takes snapshots of the lateral, medial, ventral, +# and dorsal surfaces. See the ``six_images`` option to add the +# anterior and posterior surfaces. 
# -# You may also supply your own tcl script (see the Freesurfer wiki for -# information on scripting tksurfer). The screenshot stem is set as the -# environment variable "_SNAPSHOT_STEM", which you can use in your -# own scripts. +# You may also supply your own tcl script (see the Freesurfer wiki for +# information on scripting tksurfer). The screenshot stem is set as the +# environment variable "_SNAPSHOT_STEM", which you can use in your +# own scripts. # -# Node that this interface will not run if you do not have graphics -# enabled on your system. +# Node that this interface will not run if you do not have graphics +# enabled on your system. # -# Examples -# -------- +# Examples +# -------- +# +# >>> import nipype.interfaces.freesurfer as fs +# >>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial") +# >>> shots.inputs.overlay = "zstat1.nii.gz" +# >>> shots.inputs.overlay_range = (2.3, 6) +# >>> shots.inputs.overlay_reg = "register.dat" +# >>> res = shots.run() # doctest: +SKIP # -# >>> import nipype.interfaces.freesurfer as fs -# >>> shots = fs.SurfaceSnapshots(subject_id="fsaverage", hemi="lh", surface="pial") -# >>> shots.inputs.overlay = "zstat1.nii.gz" -# >>> shots.inputs.overlay_range = (2.3, 6) -# >>> shots.inputs.overlay_reg = "register.dat" -# >>> res = shots.run() # doctest: +SKIP # -# task_name: SurfaceSnapshots nipype_name: SurfaceSnapshots nipype_module: nipype.interfaces.freesurfer.utils @@ -58,8 +58,6 @@ inputs: # type=file|default=: load a patch subjects_dir: generic/directory # type=directory|default=: subjects directory - tcl_script: Path - # type=file|default=: override default screenshot script callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -84,7 +82,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of 
output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -160,7 +158,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/surface_transform.yaml b/example-specs/interface/nipype/freesurfer/surface_transform.yaml index bb24de3e..c8fa8540 100644 --- a/example-specs/interface/nipype/freesurfer/surface_transform.yaml +++ b/example-specs/interface/nipype/freesurfer/surface_transform.yaml @@ -7,22 +7,22 @@ # ---- # Transform a surface file from one subject to another via a spherical registration. # -# Both the source and target subject must reside in your Subjects Directory, -# and they must have been processed with recon-all, unless you are transforming -# to one of the icosahedron meshes. +# Both the source and target subject must reside in your Subjects Directory, +# and they must have been processed with recon-all, unless you are transforming +# to one of the icosahedron meshes. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import SurfaceTransform +# >>> sxfm = SurfaceTransform() +# >>> sxfm.inputs.source_file = "lh.cope1.nii.gz" +# >>> sxfm.inputs.source_subject = "my_subject" +# >>> sxfm.inputs.target_subject = "fsaverage" +# >>> sxfm.inputs.hemi = "lh" +# >>> sxfm.run() # doctest: +SKIP # -# >>> from nipype.interfaces.freesurfer import SurfaceTransform -# >>> sxfm = SurfaceTransform() -# >>> sxfm.inputs.source_file = "lh.cope1.nii.gz" -# >>> sxfm.inputs.source_subject = "my_subject" -# >>> sxfm.inputs.target_subject = "fsaverage" -# >>> sxfm.inputs.hemi = "lh" -# >>> sxfm.run() # doctest: +SKIP # -# task_name: SurfaceTransform nipype_name: SurfaceTransform nipype_module: nipype.interfaces.freesurfer.utils @@ -37,9 +37,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: Path - # type=file: transformed surface file - # type=file|default=: surface file to write source_annot_file: generic/file # type=file|default=: surface annotation file source_file: generic/file @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: transformed surface file # type=file|default=: surface file to write @@ -109,7 +106,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/freesurfer/synthesize_flash.yaml b/example-specs/interface/nipype/freesurfer/synthesize_flash.yaml index b33b3f0b..48b0fec7 100644 --- a/example-specs/interface/nipype/freesurfer/synthesize_flash.yaml +++ b/example-specs/interface/nipype/freesurfer/synthesize_flash.yaml @@ -7,17 +7,17 @@ # ---- # Synthesize a FLASH acquisition from T1 and proton density maps. 
# -# Examples -# -------- -# >>> from nipype.interfaces.freesurfer import SynthesizeFLASH -# >>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30) -# >>> syn.inputs.t1_image = 'T1.mgz' -# >>> syn.inputs.pd_image = 'PD.mgz' -# >>> syn.inputs.out_file = 'flash_30syn.mgz' -# >>> syn.cmdline -# 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' +# Examples +# -------- +# >>> from nipype.interfaces.freesurfer import SynthesizeFLASH +# >>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30) +# >>> syn.inputs.t1_image = 'T1.mgz' +# >>> syn.inputs.pd_image = 'PD.mgz' +# >>> syn.inputs.out_file = 'flash_30syn.mgz' +# >>> syn.cmdline +# 'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz' +# # -# task_name: SynthesizeFLASH nipype_name: SynthesizeFLASH nipype_module: nipype.interfaces.freesurfer.preprocess @@ -32,10 +32,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: Path - # type=file: synthesized FLASH acquisition - # type=file|default=: image to write - pd_image: medimage/mgh-gz + pd_image: generic/file # type=file|default=: image of proton density values subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"flash_30syn.mgz"' # type=file: synthesized FLASH acquisition # type=file|default=: image to write @@ -96,7 +93,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -115,8 +112,6 @@ tests: # (if not specified, will try to choose a sensible value) t1_image: # type=file|default=: image of T1 values - pd_image: - # type=file|default=: image of proton density values out_file: '"flash_30syn.mgz"' # type=file: synthesized FLASH acquisition # type=file|default=: image to write @@ -127,7 +122,7 @@ tests: flip_angle: '30' # type=float|default=0.0: flip angle (in degrees) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - 
expected values for selected outputs, noting that tests will typically @@ -142,7 +137,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -150,8 +145,6 @@ doctests: # '.mock()' method of the corresponding class is used instead. t1_image: '"T1.mgz"' # type=file|default=: image of T1 values - pd_image: '"PD.mgz"' - # type=file|default=: image of proton density values out_file: '"flash_30syn.mgz"' # type=file: synthesized FLASH acquisition # type=file|default=: image to write @@ -162,7 +155,7 @@ doctests: flip_angle: '30' # type=float|default=0.0: flip angle (in degrees) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/talairach_avi.yaml b/example-specs/interface/nipype/freesurfer/talairach_avi.yaml index 7cb19425..d9f65f16 100644 --- a/example-specs/interface/nipype/freesurfer/talairach_avi.yaml +++ b/example-specs/interface/nipype/freesurfer/talairach_avi.yaml @@ -6,24 +6,24 @@ # Docs # ---- # -# Front-end for Avi Snyders image registration tool. Computes the -# talairach transform that maps the input volume to the MNI average_305. -# This does not add the xfm to the header of the input file. When called -# by recon-all, the xfm is added to the header after the transform is -# computed. +# Front-end for Avi Snyders image registration tool. 
Computes the +# talairach transform that maps the input volume to the MNI average_305. +# This does not add the xfm to the header of the input file. When called +# by recon-all, the xfm is added to the header after the transform is +# computed. # -# Examples -# ======== +# Examples +# ======== # -# >>> from nipype.interfaces.freesurfer import TalairachAVI -# >>> example = TalairachAVI() -# >>> example.inputs.in_file = 'norm.mgz' -# >>> example.inputs.out_file = 'trans.mat' -# >>> example.cmdline -# 'talairach_avi --i norm.mgz --xfm trans.mat' +# >>> from nipype.interfaces.freesurfer import TalairachAVI +# >>> example = TalairachAVI() +# >>> example.inputs.in_file = 'norm.mgz' +# >>> example.inputs.out_file = 'trans.mat' +# >>> example.cmdline +# 'talairach_avi --i norm.mgz --xfm trans.mat' +# +# >>> example.run() # doctest: +SKIP # -# >>> example.run() # doctest: +SKIP -# task_name: TalairachAVI nipype_name: TalairachAVI nipype_module: nipype.interfaces.freesurfer.utils @@ -40,9 +40,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/mgh-gz # type=file|default=: input volume - out_file: Path - # type=file: The output transform for TalairachAVI - # type=file|default=: output xfm file subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -61,7 +58,7 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: datascience/text-matrix + out_file: generic/file # type=file: The output transform for TalairachAVI # type=file|default=: output xfm file out_log: generic/file @@ -72,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,7 +90,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,11 +109,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume - out_file: '"trans.mat"' - # type=file: The output transform for TalairachAVI - # type=file|default=: output xfm file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,11 +133,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"norm.mgz"' # type=file|default=: input volume - out_file: '"trans.mat"' - # type=file: The output transform for TalairachAVI - # type=file|default=: output xfm file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/talairach_qc.yaml b/example-specs/interface/nipype/freesurfer/talairach_qc.yaml index bf3984d6..2979727b 100644 --- a/example-specs/interface/nipype/freesurfer/talairach_qc.yaml +++ b/example-specs/interface/nipype/freesurfer/talairach_qc.yaml @@ -6,15 +6,15 @@ # Docs # ---- # -# Examples -# ======== +# Examples +# ======== +# +# >>> from nipype.interfaces.freesurfer import TalairachQC +# >>> qc = TalairachQC() +# >>> qc.inputs.log_file = 'dirs.txt' +# >>> qc.cmdline +# 'tal_QC_AZS dirs.txt' # -# >>> from nipype.interfaces.freesurfer import TalairachQC -# >>> qc = TalairachQC() -# >>> qc.inputs.log_file = 'dirs.txt' -# >>> qc.cmdline -# 'tal_QC_AZS dirs.txt' -# task_name: TalairachQC nipype_name: TalairachQC nipype_module: nipype.interfaces.freesurfer.utils @@ -29,9 +29,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- log_file: Path - # type=file: The output log - # type=file|default=: The log file for TalairachQC subjects_dir: generic/directory # type=directory|default=: subjects directory callable_defaults: @@ -57,7 +54,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -74,7 +71,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -95,7 +92,7 @@ tests: # type=file: The output log # type=file|default=: The log file for TalairachQC imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -120,7 +117,7 @@ doctests: # type=file: The output log # type=file|default=: The log file for TalairachQC imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import 
statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/tkregister_2.yaml b/example-specs/interface/nipype/freesurfer/tkregister_2.yaml index 1d91aab2..09eb8a0b 100644 --- a/example-specs/interface/nipype/freesurfer/tkregister_2.yaml +++ b/example-specs/interface/nipype/freesurfer/tkregister_2.yaml @@ -7,34 +7,34 @@ # ---- # # -# Examples -# -------- -# Get transform matrix between orig (*tkRAS*) and native (*scannerRAS*) -# coordinates in Freesurfer. Implements the first step of mapping surfaces -# to native space in `this guide -# `__. +# Examples +# -------- +# Get transform matrix between orig (*tkRAS*) and native (*scannerRAS*) +# coordinates in Freesurfer. Implements the first step of mapping surfaces +# to native space in `this guide +# `__. # -# >>> from nipype.interfaces.freesurfer import Tkregister2 -# >>> tk2 = Tkregister2(reg_file='T1_to_native.dat') -# >>> tk2.inputs.moving_image = 'T1.mgz' -# >>> tk2.inputs.target_image = 'structural.nii' -# >>> tk2.inputs.reg_header = True -# >>> tk2.cmdline -# 'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader --targ structural.nii' -# >>> tk2.run() # doctest: +SKIP +# >>> from nipype.interfaces.freesurfer import Tkregister2 +# >>> tk2 = Tkregister2(reg_file='T1_to_native.dat') +# >>> tk2.inputs.moving_image = 'T1.mgz' +# >>> tk2.inputs.target_image = 'structural.nii' +# >>> tk2.inputs.reg_header = True +# >>> tk2.cmdline +# 'tkregister2 --mov T1.mgz --noedit --reg T1_to_native.dat --regheader --targ structural.nii' +# >>> tk2.run() # doctest: +SKIP # -# The example below uses tkregister2 without the manual editing -# stage to convert FSL-style registration matrix (.mat) to -# FreeSurfer-style registration matrix (.dat) +# The example below uses tkregister2 without the manual editing +# 
stage to convert FSL-style registration matrix (.mat) to +# FreeSurfer-style registration matrix (.dat) +# +# >>> from nipype.interfaces.freesurfer import Tkregister2 +# >>> tk2 = Tkregister2() +# >>> tk2.inputs.moving_image = 'epi.nii' +# >>> tk2.inputs.fsl_in_matrix = 'flirt.mat' +# >>> tk2.cmdline +# 'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat' +# >>> tk2.run() # doctest: +SKIP # -# >>> from nipype.interfaces.freesurfer import Tkregister2 -# >>> tk2 = Tkregister2() -# >>> tk2.inputs.moving_image = 'epi.nii' -# >>> tk2.inputs.fsl_in_matrix = 'flirt.mat' -# >>> tk2.cmdline -# 'tkregister2 --fsl flirt.mat --mov epi.nii --noedit --reg register.dat' -# >>> tk2.run() # doctest: +SKIP -# task_name: Tkregister2 nipype_name: Tkregister2 nipype_module: nipype.interfaces.freesurfer.utils @@ -49,18 +49,15 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- fsl_in_matrix: datascience/text-matrix + fsl_in_matrix: generic/file # type=file|default=: fsl-style registration input matrix lta_in: generic/file # type=file|default=: use a matrix in MNI coordinates as initial registration moving_image: medimage/nifti1,medimage/mgh-gz # type=file|default=: moving volume - reg_file: Path - # type=file: freesurfer-style registration file - # type=file|default='register.dat': freesurfer-style registration file subjects_dir: generic/directory # type=directory|default=: subjects directory - target_image: medimage/nifti1 + target_image: generic/file # type=file|default=: target volume xfm: generic/file # type=file|default=: use a matrix in MNI coordinates as initial registration @@ -84,14 +81,14 @@ outputs: # type=file: FSL-style registration file lta_file: generic/file # type=file: LTA-style registration file - reg_file: datascience/dat-file + reg_file: fileformats.medimage_freesurfer.Dat # type=file: freesurfer-style registration file # type=file|default='register.dat': freesurfer-style registration file callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -126,7 +123,7 @@ tests: # type=file: freesurfer-style registration file # type=file|default='register.dat': freesurfer-style registration file reg_header: - # type=bool|default=False: compute regstration from headers + # type=bool|default=False: compute registration from headers fstal: # type=bool|default=False: set mov to be tal and reg to be tal xfm movscale: @@ -138,7 +135,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,15 +154,13 @@ tests: # (if not specified, will try to choose a sensible value) moving_image: # type=file|default=: moving volume - target_image: - # type=file|default=: target volume reg_header: 'True' - # type=bool|default=False: compute regstration from headers + # type=bool|default=False: compute registration from headers reg_file: '"T1_to_native.dat"' # type=file: freesurfer-style registration file # type=file|default='register.dat': freesurfer-style registration file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -184,10 +179,8 @@ tests: # (if not specified, will try to choose a sensible value) moving_image: # type=file|default=: moving volume - fsl_in_matrix: - # type=file|default=: fsl-style registration input matrix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -210,15 +203,13 @@ doctests: # '.mock()' method of the 
corresponding class is used instead. moving_image: '"T1.mgz"' # type=file|default=: moving volume - target_image: '"structural.nii"' - # type=file|default=: target volume reg_header: 'True' - # type=bool|default=False: compute regstration from headers + # type=bool|default=False: compute registration from headers reg_file: '"T1_to_native.dat"' # type=file: freesurfer-style registration file # type=file|default='register.dat': freesurfer-style registration file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -230,10 +221,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. moving_image: '"epi.nii"' # type=file|default=: moving volume - fsl_in_matrix: '"flirt.mat"' - # type=file|default=: fsl-style registration input matrix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/unpack_sdicom_dir.yaml b/example-specs/interface/nipype/freesurfer/unpack_sdicom_dir.yaml index e3fab34a..0ed49044 100644 --- a/example-specs/interface/nipype/freesurfer/unpack_sdicom_dir.yaml +++ b/example-specs/interface/nipype/freesurfer/unpack_sdicom_dir.yaml @@ -7,21 +7,21 @@ # ---- # Use unpacksdcmdir to convert dicom files # -# Call unpacksdcmdir -help from the command line to see more information on -# using this command. +# Call unpacksdcmdir -help from the command line to see more information on +# using this command. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import UnpackSDICOMDir +# >>> unpack = UnpackSDICOMDir() +# >>> unpack.inputs.source_dir = '.' +# >>> unpack.inputs.output_dir = '.' +# >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct') +# >>> unpack.inputs.dir_structure = 'generic' +# >>> unpack.cmdline +# 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' # -# >>> from nipype.interfaces.freesurfer import UnpackSDICOMDir -# >>> unpack = UnpackSDICOMDir() -# >>> unpack.inputs.source_dir = '.' -# >>> unpack.inputs.output_dir = '.' -# >>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct') -# >>> unpack.inputs.dir_structure = 'generic' -# >>> unpack.cmdline -# 'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .' 
-# task_name: UnpackSDICOMDir nipype_name: UnpackSDICOMDir nipype_module: nipype.interfaces.freesurfer.preprocess @@ -70,7 +70,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +106,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,14 +125,10 @@ tests: # (if not specified, will try to choose a sensible value) source_dir: '"."' # type=directory|default=: directory with the DICOM files - output_dir: '"."' - # type=directory|default=: top directory into which the files will be unpacked run_info: (5, "mprage", "nii", "struct") # type=tuple|default=(0, '', '', ''): runno subdir format name : spec unpacking rules on cmdline - dir_structure: '"generic"' - # type=enum|default='fsfast'|allowed['fsfast','generic']: unpack to specified directory structures imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected 
outputs, noting that tests will typically @@ -155,14 +151,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. source_dir: '"."' # type=directory|default=: directory with the DICOM files - output_dir: '"."' - # type=directory|default=: top directory into which the files will be unpacked run_info: (5, "mprage", "nii", "struct") # type=tuple|default=(0, '', '', ''): runno subdir format name : spec unpacking rules on cmdline - dir_structure: '"generic"' - # type=enum|default='fsfast'|allowed['fsfast','generic']: unpack to specified directory structures imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/volume_mask.yaml b/example-specs/interface/nipype/freesurfer/volume_mask.yaml index 2fea6433..48a4efb8 100644 --- a/example-specs/interface/nipype/freesurfer/volume_mask.yaml +++ b/example-specs/interface/nipype/freesurfer/volume_mask.yaml @@ -6,31 +6,31 @@ # Docs # ---- # -# Computes a volume mask, at the same resolution as the -# /mri/brain.mgz. The volume mask contains 4 values: LH_WM -# (default 10), LH_GM (default 100), RH_WM (default 20), RH_GM (default -# 200). -# The algorithm uses the 4 surfaces situated in /surf/ -# [lh|rh].[white|pial] and labels voxels based on the -# signed-distance function from the surface. +# Computes a volume mask, at the same resolution as the +# /mri/brain.mgz. The volume mask contains 4 values: LH_WM +# (default 10), LH_GM (default 100), RH_WM (default 20), RH_GM (default +# 200). 
+# The algorithm uses the 4 surfaces situated in /surf/ +# [lh|rh].[white|pial] and labels voxels based on the +# signed-distance function from the surface. +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import VolumeMask +# >>> volmask = VolumeMask() +# >>> volmask.inputs.left_whitelabel = 2 +# >>> volmask.inputs.left_ribbonlabel = 3 +# >>> volmask.inputs.right_whitelabel = 41 +# >>> volmask.inputs.right_ribbonlabel = 42 +# >>> volmask.inputs.lh_pial = 'lh.pial' +# >>> volmask.inputs.rh_pial = 'lh.pial' +# >>> volmask.inputs.lh_white = 'lh.pial' +# >>> volmask.inputs.rh_white = 'lh.pial' +# >>> volmask.inputs.subject_id = '10335' +# >>> volmask.inputs.save_ribbon = True +# >>> volmask.cmdline +# 'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import VolumeMask -# >>> volmask = VolumeMask() -# >>> volmask.inputs.left_whitelabel = 2 -# >>> volmask.inputs.left_ribbonlabel = 3 -# >>> volmask.inputs.right_whitelabel = 41 -# >>> volmask.inputs.right_ribbonlabel = 42 -# >>> volmask.inputs.lh_pial = 'lh.pial' -# >>> volmask.inputs.rh_pial = 'lh.pial' -# >>> volmask.inputs.lh_white = 'lh.pial' -# >>> volmask.inputs.rh_white = 'lh.pial' -# >>> volmask.inputs.subject_id = '10335' -# >>> volmask.inputs.save_ribbon = True -# >>> volmask.cmdline -# 'mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335' -# task_name: VolumeMask nipype_name: VolumeMask nipype_module: nipype.interfaces.freesurfer.utils @@ -49,13 +49,13 @@ inputs: # type=file|default=: Implicit aseg.mgz segmentation. Specify a different aseg by using the 'in_aseg' input. 
in_aseg: generic/file # type=file|default=: Input aseg file for VolumeMask - lh_pial: medimage-freesurfer/pial + lh_pial: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input left pial surface - lh_white: medimage-freesurfer/pial + lh_white: fileformats.medimage_freesurfer.Pial # type=file|default=: Implicit input left white matter surface - rh_pial: medimage-freesurfer/pial + rh_pial: generic/file # type=file|default=: Implicit input right pial surface - rh_white: medimage-freesurfer/pial + rh_white: generic/file # type=file|default=: Implicit input right white matter surface subjects_dir: generic/directory # type=directory|default=: subjects directory @@ -85,7 +85,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -117,7 +117,7 @@ tests: save_ribbon: # type=bool|default=False: option to save just the ribbon for the hemispheres in the format ?h.ribbon.mgz copy_inputs: - # type=bool|default=False: If running as a node, set this to True.This will copy the implicit input files to the node directory. + # type=bool|default=False: If running as a node, set this to True. This will copy the implicit input files to the node directory. 
subjects_dir: # type=directory|default=: subjects directory args: @@ -125,7 +125,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,26 +144,16 @@ tests: # (if not specified, will try to choose a sensible value) left_whitelabel: '2' # type=int|default=0: Left white matter label - left_ribbonlabel: '3' - # type=int|default=0: Left cortical ribbon label right_whitelabel: '41' # type=int|default=0: Right white matter label - right_ribbonlabel: '42' - # type=int|default=0: Right cortical ribbon label lh_pial: # type=file|default=: Implicit input left pial surface - rh_pial: - # type=file|default=: Implicit input right pial surface lh_white: # type=file|default=: Implicit input left white matter surface - rh_white: - # type=file|default=: Implicit input right white matter surface subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - save_ribbon: 'True' - # type=bool|default=False: option to save just the ribbon for the hemispheres in the format ?h.ribbon.mgz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -178,7 +168,7 @@ tests: # bool - whether the unittest is expected to fail or not. 
Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mris_volmask --label_left_ribbon 3 --label_left_white 2 --label_right_ribbon 42 --label_right_white 41 --save_ribbon 10335 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -186,26 +176,16 @@ doctests: # '.mock()' method of the corresponding class is used instead. left_whitelabel: '2' # type=int|default=0: Left white matter label - left_ribbonlabel: '3' - # type=int|default=0: Left cortical ribbon label right_whitelabel: '41' # type=int|default=0: Right white matter label - right_ribbonlabel: '42' - # type=int|default=0: Right cortical ribbon label lh_pial: '"lh.pial"' # type=file|default=: Implicit input left pial surface - rh_pial: '"lh.pial"' - # type=file|default=: Implicit input right pial surface lh_white: '"lh.pial"' # type=file|default=: Implicit input left white matter surface - rh_white: '"lh.pial"' - # type=file|default=: Implicit input right white matter surface subject_id: '"10335"' # type=string|default='subject_id': Subject being processed - save_ribbon: 'True' - # type=bool|default=False: option to save just the ribbon for the hemispheres in the format ?h.ribbon.mgz imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/freesurfer/watershed_skull_strip.yaml b/example-specs/interface/nipype/freesurfer/watershed_skull_strip.yaml index c5ff31ff..0d97e979 100644 --- a/example-specs/interface/nipype/freesurfer/watershed_skull_strip.yaml +++ b/example-specs/interface/nipype/freesurfer/watershed_skull_strip.yaml @@ -6,29 +6,29 @@ # Docs # ---- # This program strips skull and other outer non-brain tissue and -# produces the brain volume from T1 volume or the scanned volume. +# produces the brain volume from T1 volume or the scanned volume. # -# The "watershed" segmentation algorithm was used to determine the -# intensity values for white matter, grey matter, and CSF. -# A force field was then used to fit a spherical surface to the brain. -# The shape of the surface fit was then evaluated against a previously -# derived template. +# The "watershed" segmentation algorithm was used to determine the +# intensity values for white matter, grey matter, and CSF. +# A force field was then used to fit a spherical surface to the brain. +# The shape of the surface fit was then evaluated against a previously +# derived template. 
# -# The default parameters are: -w 0.82 -b 0.32 -h 10 -seedpt -ta -wta +# The default parameters are: -w 0.82 -b 0.32 -h 10 -seedpt -ta -wta # -# (Segonne 2004) +# (Segonne 2004) +# +# Examples +# ======== +# >>> from nipype.interfaces.freesurfer import WatershedSkullStrip +# >>> skullstrip = WatershedSkullStrip() +# >>> skullstrip.inputs.in_file = "T1.mgz" +# >>> skullstrip.inputs.t1 = True +# >>> skullstrip.inputs.transform = "transforms/talairach_with_skull.lta" +# >>> skullstrip.inputs.out_file = "brainmask.auto.mgz" +# >>> skullstrip.cmdline +# 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz' # -# Examples -# ======== -# >>> from nipype.interfaces.freesurfer import WatershedSkullStrip -# >>> skullstrip = WatershedSkullStrip() -# >>> skullstrip.inputs.in_file = "T1.mgz" -# >>> skullstrip.inputs.t1 = True -# >>> skullstrip.inputs.transform = "transforms/talairach_with_skull.lta" -# >>> skullstrip.inputs.out_file = "brainmask.auto.mgz" -# >>> skullstrip.cmdline -# 'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz' -# task_name: WatershedSkullStrip nipype_name: WatershedSkullStrip nipype_module: nipype.interfaces.freesurfer.preprocess @@ -47,12 +47,9 @@ inputs: # type=file|default=: in_file: medimage/mgh-gz # type=file|default=: input volume - out_file: Path - # type=file: skull stripped brain volume - # type=file|default='brainmask.auto.mgz': output volume subjects_dir: generic/directory # type=directory|default=: subjects directory - transform: medimage-freesurfer/lta + transform: fileformats.medimage_freesurfer.Lta # type=file|default=: undocumented callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -70,14 +67,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically 
generated unittests. - out_file: medimage/mgh-gz + out_file: generic/file # type=file: skull stripped brain volume # type=file|default='brainmask.auto.mgz': output volume callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -102,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,15 +118,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input volume - t1: 'True' - # type=bool|default=False: specify T1 input volume (T1 grey value = 110) transform: # type=file|default=: undocumented - out_file: '"brainmask.auto.mgz"' - # type=file: skull stripped brain volume - # type=file|default='brainmask.auto.mgz': output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,7 +136,7 @@ tests: # bool - whether the 
unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -152,15 +144,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"T1.mgz"' # type=file|default=: input volume - t1: 'True' - # type=bool|default=False: specify T1 input volume (T1 grey value = 110) transform: '"transforms/talairach_with_skull.lta"' # type=file|default=: undocumented - out_file: '"brainmask.auto.mgz"' - # type=file: skull stripped brain volume - # type=file|default='brainmask.auto.mgz': output volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/accuracy_tester.yaml b/example-specs/interface/nipype/fsl/accuracy_tester.yaml index 54388d14..11126e7a 100644 --- a/example-specs/interface/nipype/fsl/accuracy_tester.yaml +++ b/example-specs/interface/nipype/fsl/accuracy_tester.yaml @@ -6,9 +6,9 @@ # Docs # ---- # -# Test the accuracy of an existing training dataset on a set of hand-labelled subjects. -# Note: This may or may not be working. Couldn't presently not confirm because fix fails on this (even outside of nipype) without leaving an error msg. -# +# Test the accuracy of an existing training dataset on a set of hand-labelled subjects. +# Note: This may or may not be working. 
Couldn't presently not confirm because fix fails on this (even outside of nipype) without leaving an error msg. +# task_name: AccuracyTester nipype_name: AccuracyTester nipype_module: nipype.interfaces.fsl.fix @@ -25,9 +25,6 @@ inputs: # passed to the field in the automatically generated unittests. mel_icas: generic/directory+list-of # type=inputmultiobject|default=[]: Melodic output directories - output_directory: Path - # type=directory: Path to folder in which to store the results of the accuracy test. - # type=directory|default=: Path to folder in which to store the results of the accuracy test. trained_wts_file: generic/file # type=file|default=: trained-weights file callable_defaults: @@ -53,7 +50,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -72,7 +69,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/apply_mask.yaml b/example-specs/interface/nipype/fsl/apply_mask.yaml index 53f7d018..cbef55d8 100644 --- a/example-specs/interface/nipype/fsl/apply_mask.yaml +++ b/example-specs/interface/nipype/fsl/apply_mask.yaml @@ -24,9 +24,6 @@ inputs: # type=file|default=: image to operate 
on mask_file: generic/file # type=file|default=: binary image defining mask space - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -50,7 +47,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -74,13 +71,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/apply_topup.yaml b/example-specs/interface/nipype/fsl/apply_topup.yaml index 07ed94f2..4e5efe88 100644 --- a/example-specs/interface/nipype/fsl/apply_topup.yaml +++ b/example-specs/interface/nipype/fsl/apply_topup.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Interface for FSL 
topup, a tool for estimating and correcting -# susceptibility induced distortions. -# `General reference -# `_ -# and `use example -# `_. +# Interface for FSL topup, a tool for estimating and correcting +# susceptibility induced distortions. +# `General reference +# `_ +# and `use example +# `_. # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import ApplyTOPUP +# >>> applytopup = ApplyTOPUP() +# >>> applytopup.inputs.in_files = ["epi.nii", "epi_rev.nii"] +# >>> applytopup.inputs.encoding_file = "topup_encoding.txt" +# >>> applytopup.inputs.in_topup_fieldcoef = "topup_fieldcoef.nii.gz" +# >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt" +# >>> applytopup.inputs.output_type = "NIFTI_GZ" +# >>> applytopup.cmdline # doctest: +ELLIPSIS +# 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii --inindex=1,2 --topup=topup --out=epi_corrected.nii.gz' +# >>> res = applytopup.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import ApplyTOPUP -# >>> applytopup = ApplyTOPUP() -# >>> applytopup.inputs.in_files = ["epi.nii", "epi_rev.nii"] -# >>> applytopup.inputs.encoding_file = "topup_encoding.txt" -# >>> applytopup.inputs.in_topup_fieldcoef = "topup_fieldcoef.nii.gz" -# >>> applytopup.inputs.in_topup_movpar = "topup_movpar.txt" -# >>> applytopup.inputs.output_type = "NIFTI_GZ" -# >>> applytopup.cmdline # doctest: +ELLIPSIS -# 'applytopup --datain=topup_encoding.txt --imain=epi.nii,epi_rev.nii --inindex=1,2 --topup=topup --out=epi_corrected.nii.gz' -# >>> res = applytopup.run() # doctest: +SKIP # -# task_name: ApplyTOPUP nipype_name: ApplyTOPUP nipype_module: nipype.interfaces.fsl.epi @@ -43,17 +43,14 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- encoding_file: text/text-file + encoding_file: generic/file # type=file|default=: name of text file with PE directions/times in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: name of file with images in_topup_fieldcoef: medimage/nifti-gz # type=file|default=: topup file containing the field coefficients - in_topup_movpar: text/text-file + in_topup_movpar: generic/file # type=file|default=: topup movpar.txt file - out_corrected: Path - # type=file: name of 4D image file with unwarped images - # type=file|default=: output (warped) image callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -77,7 +74,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -104,13 +101,13 @@ tests: datatype: # type=enum|default='char'|allowed['char','double','float','int','short']: force output data type output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -129,16 +126,12 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: name of file with images - encoding_file: - # type=file|default=: name of text file with PE directions/times in_topup_fieldcoef: # type=file|default=: topup file containing the field coefficients - in_topup_movpar: - # type=file|default=: topup movpar.txt file output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -161,16 +154,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_files: '["epi.nii", "epi_rev.nii"]' # type=inputmultiobject|default=[]: name of file with images - encoding_file: '"topup_encoding.txt"' - # type=file|default=: name of text file with PE directions/times in_topup_fieldcoef: '"topup_fieldcoef.nii.gz"' # type=file|default=: topup file containing the field coefficients - in_topup_movpar: '"topup_movpar.txt"' - # type=file|default=: topup movpar.txt file output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/apply_warp.yaml b/example-specs/interface/nipype/fsl/apply_warp.yaml index 08f921ef..dd2acaf1 100644 --- a/example-specs/interface/nipype/fsl/apply_warp.yaml +++ b/example-specs/interface/nipype/fsl/apply_warp.yaml @@ -7,18 +7,18 @@ # ---- # FSL's applywarp wrapper to apply the results of a FNIRT registration # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> aw = fsl.ApplyWarp() -# >>> aw.inputs.in_file = example_data('structural.nii') -# >>> aw.inputs.ref_file = example_data('mni.nii') -# >>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP -# >>> res = aw.run() #doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> aw = fsl.ApplyWarp() +# >>> aw.inputs.in_file = example_data('structural.nii') +# >>> aw.inputs.ref_file = example_data('mni.nii') +# >>> aw.inputs.field_file = 'my_coefficients_filed.nii' #doctest: +SKIP +# >>> res = aw.run() #doctest: +SKIP +# # # -# task_name: ApplyWarp nipype_name: ApplyWarp nipype_module: nipype.interfaces.fsl.preprocess @@ -39,9 +39,6 @@ inputs: # type=file|default=: image to be warped mask_file: generic/file # type=file|default=: filename for mask image (in reference space) - out_file: Path - # type=file: Warped output file - # type=file|default=: output filename postmat: generic/file # type=file|default=: filename for post-transform (affine matrix) premat: generic/file @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: Warped output file # type=file|default=: output filename 
@@ -109,13 +106,13 @@ tests: interp: # type=enum|default='nn'|allowed['nn','sinc','spline','trilinear']: interpolation method output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/apply_xfm.yaml b/example-specs/interface/nipype/fsl/apply_xfm.yaml index ae9ca242..36272152 100644 --- a/example-specs/interface/nipype/fsl/apply_xfm.yaml +++ b/example-specs/interface/nipype/fsl/apply_xfm.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Currently just a light wrapper around FLIRT, -# with no modifications +# with no modifications # -# ApplyXFM is used to apply an existing transform to an image +# ApplyXFM is used to apply an existing transform to an image # # -# Examples -# -------- +# Examples +# -------- +# +# >>> import nipype.interfaces.fsl as fsl +# >>> from nipype.testing import example_data +# >>> applyxfm = fsl.preprocess.ApplyXFM() +# >>> applyxfm.inputs.in_file = example_data('structural.nii') +# >>> applyxfm.inputs.in_matrix_file = example_data('trans.mat') +# >>> applyxfm.inputs.out_file = 'newfile.nii' +# >>> applyxfm.inputs.reference = example_data('mni.nii') +# >>> applyxfm.inputs.apply_xfm = True +# >>> result = applyxfm.run() # doctest: +SKIP # -# >>> import nipype.interfaces.fsl as fsl -# >>> from nipype.testing 
import example_data -# >>> applyxfm = fsl.preprocess.ApplyXFM() -# >>> applyxfm.inputs.in_file = example_data('structural.nii') -# >>> applyxfm.inputs.in_matrix_file = example_data('trans.mat') -# >>> applyxfm.inputs.out_file = 'newfile.nii' -# >>> applyxfm.inputs.reference = example_data('mni.nii') -# >>> applyxfm.inputs.apply_xfm = True -# >>> result = applyxfm.run() # doctest: +SKIP # -# task_name: ApplyXFM nipype_name: ApplyXFM nipype_module: nipype.interfaces.fsl.preprocess @@ -49,15 +49,6 @@ inputs: # type=file|default=: input 4x4 affine matrix in_weight: generic/file # type=file|default=: File for input weighting volume - out_file: Path - # type=file: path/name of registered file (if generated) - # type=file|default=: registered output file - out_log: Path - # type=file: path/name of output log (if generated) - # type=file|default=: output log - out_matrix_file: Path - # type=file: path/name of calculated affine transform (if generated) - # type=file|default=: output affine matrix in 4x4 asciii format ref_weight: generic/file # type=file|default=: File for reference weighting volume reference: generic/file @@ -99,7 +90,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -204,13 +195,13 @@ tests: bbrslope: # type=float|default=0.0: value of bbr slope output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # 
type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/ar1_image.yaml b/example-specs/interface/nipype/fsl/ar1_image.yaml index 722dc9a2..290082ad 100644 --- a/example-specs/interface/nipype/fsl/ar1_image.yaml +++ b/example-specs/interface/nipype/fsl/ar1_image.yaml @@ -6,9 +6,9 @@ # Docs # ---- # Use fslmaths to generate an AR1 coefficient image across a -# given dimension. (Should use -odt float and probably demean first) +# given dimension. (Should use -odt float and probably demean first) +# # -# task_name: AR1Image nipype_name: AR1Image nipype_module: nipype.interfaces.fsl.maths @@ -25,9 +25,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -51,7 +48,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -62,7 +59,7 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) dimension: - # type=enum|default='T'|allowed['T','X','Y','Z']: dimension to find AR(1) coefficientacross + # type=enum|default='T'|allowed['T','X','Y','Z']: dimension to find AR(1) coefficient across in_file: # type=file|default=: image to operate on out_file: @@ -75,13 +72,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # 
dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/av_scale.yaml b/example-specs/interface/nipype/fsl/av_scale.yaml index ca203626..db573883 100644 --- a/example-specs/interface/nipype/fsl/av_scale.yaml +++ b/example-specs/interface/nipype/fsl/av_scale.yaml @@ -7,15 +7,15 @@ # ---- # Use FSL avscale command to extract info from mat file output of FLIRT # -# Examples -# -------- +# Examples +# -------- +# +# >>> avscale = AvScale() +# >>> avscale.inputs.mat_file = 'flirt.mat' +# >>> res = avscale.run() # doctest: +SKIP # -# >>> avscale = AvScale() -# >>> avscale.inputs.mat_file = 'flirt.mat' -# >>> res = avscale.run() # doctest: +SKIP # # -# task_name: AvScale nipype_name: AvScale nipype_module: nipype.interfaces.fsl.utils @@ -60,7 +60,7 @@ outputs: left_right_orientation_preserved: left_right_orientation_preserved_callable # type=bool: True if LR orientation preserved templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -78,7 +78,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/b0_calc.yaml b/example-specs/interface/nipype/fsl/b0_calc.yaml index a277b112..bb96fb95 100644 --- a/example-specs/interface/nipype/fsl/b0_calc.yaml +++ 
b/example-specs/interface/nipype/fsl/b0_calc.yaml @@ -6,23 +6,23 @@ # Docs # ---- # -# B0 inhomogeneities occur at interfaces of materials with different magnetic susceptibilities, -# such as tissue-air interfaces. These differences lead to distortion in the local magnetic field, -# as Maxwell’s equations need to be satisfied. An example of B0 inhomogneity is the first volume -# of the 4D volume ```$FSLDIR/data/possum/b0_ppm.nii.gz```. +# B0 inhomogeneities occur at interfaces of materials with different magnetic susceptibilities, +# such as tissue-air interfaces. These differences lead to distortion in the local magnetic field, +# as Maxwell’s equations need to be satisfied. An example of B0 inhomogneity is the first volume +# of the 4D volume ```$FSLDIR/data/possum/b0_ppm.nii.gz```. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import B0Calc +# >>> b0calc = B0Calc() +# >>> b0calc.inputs.in_file = 'tissue+air_map.nii' +# >>> b0calc.inputs.z_b0 = 3.0 +# >>> b0calc.inputs.output_type = "NIFTI_GZ" +# >>> b0calc.cmdline +# 'b0calc -i tissue+air_map.nii -o tissue+air_map_b0field.nii.gz --chi0=4.000000e-07 -d -9.450000e-06 --extendboundary=1.00 --b0x=0.00 --gx=0.0000 --b0y=0.00 --gy=0.0000 --b0=3.00 --gz=0.0000' # -# >>> from nipype.interfaces.fsl import B0Calc -# >>> b0calc = B0Calc() -# >>> b0calc.inputs.in_file = 'tissue+air_map.nii' -# >>> b0calc.inputs.z_b0 = 3.0 -# >>> b0calc.inputs.output_type = "NIFTI_GZ" -# >>> b0calc.cmdline -# 'b0calc -i tissue+air_map.nii -o tissue+air_map_b0field.nii.gz --chi0=4.000000e-07 -d -9.450000e-06 --extendboundary=1.00 --b0x=0.00 --gx=0.0000 --b0y=0.00 --gy=0.0000 --b0=3.00 --gz=0.0000' # -# task_name: B0Calc nipype_name: B0Calc nipype_module: nipype.interfaces.fsl.possum @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: filename of input image (usually a tissue/air segmentation) - out_file: Path - # type=file: filename of B0 output volume - # type=file|default=: filename of B0 output volume callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +62,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -102,13 +99,13 @@ tests: directconv: # type=bool|default=False: use direct (image space) convolution, not FFT output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,12 +124,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: filename of input image (usually a tissue/air segmentation) - z_b0: '3.0' - # type=float|default=1.0: Value for zeroth-order b0 field 
(z-component), in Tesla output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -155,12 +150,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"tissue+air_map.nii"' # type=file|default=: filename of input image (usually a tissue/air segmentation) - z_b0: '3.0' - # type=float|default=1.0: Value for zeroth-order b0 field (z-component), in Tesla output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/bedpostx5.yaml b/example-specs/interface/nipype/fsl/bedpostx5.yaml index 8708280e..67fb2324 100644 --- a/example-specs/interface/nipype/fsl/bedpostx5.yaml +++ b/example-specs/interface/nipype/fsl/bedpostx5.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# BEDPOSTX stands for Bayesian Estimation of Diffusion Parameters Obtained -# using Sampling Techniques. The X stands for modelling Crossing Fibres. -# bedpostx runs Markov Chain Monte Carlo sampling to build up distributions -# on diffusion parameters at each voxel. It creates all the files necessary -# for running probabilistic tractography. For an overview of the modelling -# carried out within bedpostx see this `technical report -# `_. +# BEDPOSTX stands for Bayesian Estimation of Diffusion Parameters Obtained +# using Sampling Techniques. The X stands for modelling Crossing Fibres. +# bedpostx runs Markov Chain Monte Carlo sampling to build up distributions +# on diffusion parameters at each voxel. It creates all the files necessary +# for running probabilistic tractography. For an overview of the modelling +# carried out within bedpostx see this `technical report +# `_. # # -# .. note:: Consider using -# :func:`niflow.nipype1.workflows.fsl.dmri.create_bedpostx_pipeline` instead. +# .. note:: Consider using +# :func:`niflow.nipype1.workflows.fsl.dmri.create_bedpostx_pipeline` instead. # # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces import fsl +# >>> bedp = fsl.BEDPOSTX5(bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', +# ... mask='mask.nii', n_fibres=1) +# >>> bedp.cmdline +# 'bedpostx bedpostx -b 0 --burnin_noard=0 --forcedir -n 1 -j 5000 -s 1 --updateproposalevery=40' # -# >>> from nipype.interfaces import fsl -# >>> bedp = fsl.BEDPOSTX5(bvecs='bvecs', bvals='bvals', dwi='diffusion.nii', -# ... 
mask='mask.nii', n_fibres=1) -# >>> bedp.cmdline -# 'bedpostx bedpostx -b 0 --burnin_noard=0 --forcedir -n 1 -j 5000 -s 1 --updateproposalevery=40' # -# task_name: BEDPOSTX5 nipype_name: BEDPOSTX5 nipype_module: nipype.interfaces.fsl.dti @@ -78,7 +78,7 @@ outputs: dyads_dispersion: generic/file+list-of # type=outputmultiobject: Dispersion mean_S0samples: generic/file - # type=file: Mean of distribution on T2wbaseline signal intensity S0 + # type=file: Mean of distribution on T2w baseline signal intensity S0 mean_dsamples: generic/file # type=file: Mean of distribution on diffusivity d mean_fsamples: generic/file+list-of @@ -97,7 +97,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -159,13 +159,13 @@ tests: force_dir: # type=bool|default=True: use the actual directory name given (do not add + to make a new directory) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will 
typically @@ -193,7 +193,7 @@ tests: n_fibres: '1' # type=range|default=2: Maximum number of fibres to fit in each voxel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -225,7 +225,7 @@ doctests: n_fibres: '1' # type=range|default=2: Maximum number of fibres to fit in each voxel imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/bet.yaml b/example-specs/interface/nipype/fsl/bet.yaml index 9067cd21..2efca0cf 100644 --- a/example-specs/interface/nipype/fsl/bet.yaml +++ b/example-specs/interface/nipype/fsl/bet.yaml @@ -7,21 +7,21 @@ # ---- # FSL BET wrapper for skull stripping # -# For complete details, see the `BET Documentation. -# `_ +# For complete details, see the `BET Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> btr = fsl.BET() +# >>> btr.inputs.in_file = 'structural.nii' +# >>> btr.inputs.frac = 0.7 +# >>> btr.inputs.out_file = 'brain_anat.nii' +# >>> btr.cmdline +# 'bet structural.nii brain_anat.nii -f 0.70' +# >>> res = btr.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> btr = fsl.BET() -# >>> btr.inputs.in_file = 'structural.nii' -# >>> btr.inputs.frac = 0.7 -# >>> btr.inputs.out_file = 'brain_anat.nii' -# >>> btr.cmdline -# 'bet structural.nii brain_anat.nii -f 0.70' -# >>> res = btr.run() # doctest: +SKIP # -# task_name: BET nipype_name: BET nipype_module: nipype.interfaces.fsl.preprocess @@ -38,9 +38,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: input file to skull strip - out_file: Path - # type=file: path/name of skullstripped file (if generated) - # type=file|default=: name of output skull stripped image t2_guided: generic/file # type=file|default=: as with creating surfaces, when also feeding in non-brain-extracted T2 (includes registrations) callable_defaults: @@ -88,7 +85,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"brain_anat.nii"' # type=file: path/name of skullstripped file (if generated) # type=file|default=: name of output skull stripped image @@ -138,13 +135,13 @@ tests: reduce_bias: # type=bool|default=False: bias field and neck cleanup output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output 
type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -163,13 +160,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file to skull strip - frac: '0.7' - # type=float|default=0.0: fractional intensity threshold out_file: '"brain_anat.nii"' # type=file: path/name of skullstripped file (if generated) # type=file|default=: name of output skull stripped image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -184,7 +179,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: bet structural.nii brain_anat.nii -f 0.70 +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -192,13 +187,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"structural.nii"' # type=file|default=: input file to skull strip - frac: '0.7' - # type=float|default=0.0: fractional intensity threshold out_file: '"brain_anat.nii"' # type=file: path/name of skullstripped file (if generated) # type=file|default=: name of output skull stripped image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/binary_maths.yaml b/example-specs/interface/nipype/fsl/binary_maths.yaml index bd135d34..2cd16f6f 100644 --- a/example-specs/interface/nipype/fsl/binary_maths.yaml +++ b/example-specs/interface/nipype/fsl/binary_maths.yaml @@ -6,9 +6,9 @@ # Docs # ---- # Use fslmaths to perform mathematical operations using a second image or -# a numeric value. +# a numeric value. 
+# # -# task_name: BinaryMaths nipype_name: BinaryMaths nipype_module: nipype.interfaces.fsl.maths @@ -27,9 +27,6 @@ inputs: # type=file|default=: image to operate on operand_file: generic/file # type=file|default=: second image to perform operation with - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -53,7 +50,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -81,13 +78,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/change_data_type.yaml b/example-specs/interface/nipype/fsl/change_data_type.yaml index e1c6460d..c9bd805b 
100644 --- a/example-specs/interface/nipype/fsl/change_data_type.yaml +++ b/example-specs/interface/nipype/fsl/change_data_type.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -70,13 +67,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/classifier.yaml b/example-specs/interface/nipype/fsl/classifier.yaml index 
2b42cb02..a6d7122f 100644 --- a/example-specs/interface/nipype/fsl/classifier.yaml +++ b/example-specs/interface/nipype/fsl/classifier.yaml @@ -6,8 +6,8 @@ # Docs # ---- # -# Classify ICA components using a specific training dataset ( is in the range 0-100, typically 5-20). -# +# Classify ICA components using a specific training dataset ( is in the range 0-100, typically 5-20). +# task_name: Classifier nipype_name: Classifier nipype_module: nipype.interfaces.fsl.fix @@ -22,9 +22,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - artifacts_list_file: Path - # type=file: Text file listing which ICs are artifacts; can be the output from classification or can be created manually - # type=file|default=: Text file listing which ICs are artifacts; can be the output from classification or can be created manually mel_ica: generic/directory # type=directory|default=: Melodic output directory or directories trained_wts_file: generic/file @@ -52,7 +49,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -73,7 +70,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys 
expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/cleaner.yaml b/example-specs/interface/nipype/fsl/cleaner.yaml index 6be0935a..5a5eee46 100644 --- a/example-specs/interface/nipype/fsl/cleaner.yaml +++ b/example-specs/interface/nipype/fsl/cleaner.yaml @@ -6,8 +6,8 @@ # Docs # ---- # -# Extract features (for later training and/or classifying) -# +# Extract features (for later training and/or classifying) +# task_name: Cleaner nipype_name: Cleaner nipype_module: nipype.interfaces.fsl.fix @@ -52,7 +52,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -78,7 +78,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/cluster.yaml b/example-specs/interface/nipype/fsl/cluster.yaml index 0a2b28a1..d0ce81b8 100644 --- a/example-specs/interface/nipype/fsl/cluster.yaml +++ b/example-specs/interface/nipype/fsl/cluster.yaml @@ -7,18 +7,18 @@ # ---- # Uses FSL cluster to perform clustering on statistical output # -# Examples -# -------- +# Examples +# -------- +# +# >>> cl = Cluster() +# >>> 
cl.inputs.threshold = 2.3 +# >>> cl.inputs.in_file = 'zstat1.nii.gz' +# >>> cl.inputs.out_localmax_txt_file = 'stats.txt' +# >>> cl.inputs.use_mm = True +# >>> cl.cmdline +# 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000 --mm' # -# >>> cl = Cluster() -# >>> cl.inputs.threshold = 2.3 -# >>> cl.inputs.in_file = 'zstat1.nii.gz' -# >>> cl.inputs.out_localmax_txt_file = 'stats.txt' -# >>> cl.inputs.use_mm = True -# >>> cl.cmdline -# 'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000 --mm' # -# task_name: Cluster nipype_name: Cluster nipype_module: nipype.interfaces.fsl.model @@ -35,12 +35,12 @@ inputs: # passed to the field in the automatically generated unittests. cope_file: generic/file # type=file|default=: cope volume - in_file: medimage/nifti-gz + in_file: generic/file # type=file|default=: input volume std_space_file: generic/file # type=file|default=: filename for standard-space volume warpfield_file: generic/file - # type=file|default=: file contining warpfield + # type=file|default=: file containing warpfield xfm_file: generic/file # type=file|default=: filename for Linear: input->standard-space transform. 
Non-linear: input->highres transform callable_defaults: @@ -79,7 +79,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -135,15 +135,15 @@ tests: num_maxima: # type=int|default=0: no of local maxima to report warpfield_file: - # type=file|default=: file contining warpfield + # type=file|default=: file containing warpfield output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -162,14 +162,10 @@ tests: # (if not specified, will try to choose a sensible value) threshold: '2.3' # type=float|default=0.0: threshold for input volume - in_file: - # type=file|default=: input volume out_localmax_txt_file: '"stats.txt"' # type=traitcompound|default=None: local maxima text file - use_mm: 'True' - # type=bool|default=False: use mm, not voxel, coordinates imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each 
list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -192,14 +188,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. threshold: '2.3' # type=float|default=0.0: threshold for input volume - in_file: '"zstat1.nii.gz"' - # type=file|default=: input volume out_localmax_txt_file: '"stats.txt"' # type=traitcompound|default=None: local maxima text file - use_mm: 'True' - # type=bool|default=False: use mm, not voxel, coordinates imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/complex.yaml b/example-specs/interface/nipype/fsl/complex.yaml index 19104469..e770894b 100644 --- a/example-specs/interface/nipype/fsl/complex.yaml +++ b/example-specs/interface/nipype/fsl/complex.yaml @@ -7,16 +7,16 @@ # ---- # fslcomplex is a tool for converting complex data # -# Examples -# -------- +# Examples +# -------- +# +# >>> cplx = Complex() +# >>> cplx.inputs.complex_in_file = "complex.nii" +# >>> cplx.real_polar = True +# >>> res = cplx.run() # doctest: +SKIP # -# >>> cplx = Complex() -# >>> cplx.inputs.complex_in_file = "complex.nii" -# >>> cplx.real_polar = True -# >>> res = cplx.run() # doctest: +SKIP # # -# task_name: Complex nipype_name: Complex nipype_module: nipype.interfaces.fsl.utils @@ -35,29 +35,14 @@ inputs: # type=file|default=: complex_in_file2: generic/file # type=file|default=: - complex_out_file: Path - # type=file: - # type=file|default=: imaginary_in_file: generic/file # type=file|default=: - imaginary_out_file: Path - # type=file: - # type=file|default=: magnitude_in_file: generic/file # type=file|default=: - magnitude_out_file: Path - # type=file: - # type=file|default=: phase_in_file: generic/file # type=file|default=: - phase_out_file: Path - # type=file: - # type=file|default=: real_in_file: generic/file # type=file|default=: - real_out_file: Path - # type=file: - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -93,7 +78,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields complex_out_file: complex_out_file # type=file: # type=file|default=: @@ 
-159,13 +144,13 @@ tests: complex_merge: # type=bool|default=False: output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/contrast_mgr.yaml b/example-specs/interface/nipype/fsl/contrast_mgr.yaml index a3f5071f..d4d78552 100644 --- a/example-specs/interface/nipype/fsl/contrast_mgr.yaml +++ b/example-specs/interface/nipype/fsl/contrast_mgr.yaml @@ -7,10 +7,10 @@ # ---- # Use FSL contrast_mgr command to evaluate contrasts # -# In interface mode this file assumes that all the required inputs are in the -# same location. This has deprecated for FSL versions 5.0.7+ as the necessary -# corrections file is no longer generated by FILMGLS. -# +# In interface mode this file assumes that all the required inputs are in the +# same location. This has deprecated for FSL versions 5.0.7+ as the necessary +# corrections file is no longer generated by FILMGLS. 
+# task_name: ContrastMgr nipype_name: ContrastMgr nipype_module: nipype.interfaces.fsl.model @@ -71,7 +71,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -95,13 +95,13 @@ tests: suffix: # type=str|default='': suffix to put on the end of the cope filename before the contrast number, default is nothing output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/convert_warp.yaml b/example-specs/interface/nipype/fsl/convert_warp.yaml index e4d553f6..dac91c50 100644 --- a/example-specs/interface/nipype/fsl/convert_warp.yaml +++ b/example-specs/interface/nipype/fsl/convert_warp.yaml @@ -6,24 +6,24 @@ # Docs # ---- # Use FSL `convertwarp `_ -# for combining multiple transforms into one. +# for combining multiple transforms into one. 
# # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import ConvertWarp +# >>> warputils = ConvertWarp() +# >>> warputils.inputs.warp1 = "warpfield.nii" +# >>> warputils.inputs.reference = "T1.nii" +# >>> warputils.inputs.relwarp = True +# >>> warputils.inputs.output_type = "NIFTI_GZ" +# >>> warputils.cmdline # doctest: +ELLIPSIS +# 'convertwarp --ref=T1.nii --rel --warp1=warpfield.nii --out=T1_concatwarp.nii.gz' +# >>> res = warputils.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import ConvertWarp -# >>> warputils = ConvertWarp() -# >>> warputils.inputs.warp1 = "warpfield.nii" -# >>> warputils.inputs.reference = "T1.nii" -# >>> warputils.inputs.relwarp = True -# >>> warputils.inputs.output_type = "NIFTI_GZ" -# >>> warputils.cmdline # doctest: +ELLIPSIS -# 'convertwarp --ref=T1.nii --rel --warp1=warpfield.nii --out=T1_concatwarp.nii.gz' -# >>> res = warputils.run() # doctest: +SKIP # # -# task_name: ConvertWarp nipype_name: ConvertWarp nipype_module: nipype.interfaces.fsl.utils @@ -40,14 +40,11 @@ inputs: # passed to the field in the automatically generated unittests. midmat: generic/file # type=file|default=: Name of file containing mid-warp-affine transform - out_file: Path - # type=file: Name of output file, containing the warp as field or coefficients. - # type=file|default=: Name of output file, containing warps that are the combination of all those given as arguments. The format of this will be a field-file (rather than spline coefficients) with any affine components included. postmat: generic/file # type=file|default=: Name of file containing an affine transform (applied last). It could e.g. be an affine transform that maps the MNI152-space into a better approximation to the Talairach-space (if indeed there is one). 
premat: generic/file # type=file|default=: filename for pre-transform (affine matrix) - reference: medimage/nifti1 + reference: generic/file # type=file|default=: Name of a file in target space of the full transform. shift_in_file: generic/file # type=file|default=: Name of file containing a "shiftmap", a non-linear transform with displacements only in one direction (applied first, before premat). This would typically be a fieldmap that has been pre-processed using fugue that maps a subjects functional (EPI) data onto an undistorted space (i.e. a space that corresponds to his/her true anatomy). @@ -78,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -119,13 +116,13 @@ tests: out_relwarp: # type=bool|default=False: If set it indicates that the warps in --out should be relative, i.e. the values in --out are displacements from the coordinates in --ref. 
output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,14 +141,10 @@ tests: # (if not specified, will try to choose a sensible value) warp1: # type=file|default=: Name of file containing initial warp-fields/coefficients (follows premat). This could e.g. be a fnirt-transform from a subjects structural scan to an average of a group of subjects. - reference: - # type=file|default=: Name of a file in target space of the full transform. relwarp: 'True' # type=bool|default=False: If set it indicates that the warps in --warp1/2 should be interpreted as relative. I.e. the values in --warp1/2 are displacements from the coordinates in the next space. 
- output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -174,14 +167,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. warp1: '"warpfield.nii"' # type=file|default=: Name of file containing initial warp-fields/coefficients (follows premat). This could e.g. be a fnirt-transform from a subjects structural scan to an average of a group of subjects. - reference: '"T1.nii"' - # type=file|default=: Name of a file in target space of the full transform. relwarp: 'True' # type=bool|default=False: If set it indicates that the warps in --warp1/2 should be interpreted as relative. I.e. the values in --warp1/2 are displacements from the coordinates in the next space. - output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/convert_xfm.yaml b/example-specs/interface/nipype/fsl/convert_xfm.yaml index 6cc22d8f..b1f6e353 100644 --- a/example-specs/interface/nipype/fsl/convert_xfm.yaml +++ b/example-specs/interface/nipype/fsl/convert_xfm.yaml @@ -7,19 +7,19 @@ # ---- # Use the FSL utility convert_xfm to modify FLIRT transformation matrices. # -# Examples -# -------- +# Examples +# -------- +# +# >>> import nipype.interfaces.fsl as fsl +# >>> invt = fsl.ConvertXFM() +# >>> invt.inputs.in_file = "flirt.mat" +# >>> invt.inputs.invert_xfm = True +# >>> invt.inputs.out_file = 'flirt_inv.mat' +# >>> invt.cmdline +# 'convert_xfm -omat flirt_inv.mat -inverse flirt.mat' # -# >>> import nipype.interfaces.fsl as fsl -# >>> invt = fsl.ConvertXFM() -# >>> invt.inputs.in_file = "flirt.mat" -# >>> invt.inputs.invert_xfm = True -# >>> invt.inputs.out_file = 'flirt_inv.mat' -# >>> invt.cmdline -# 'convert_xfm -omat flirt_inv.mat -inverse flirt.mat' # # -# task_name: ConvertXFM nipype_name: ConvertXFM nipype_module: nipype.interfaces.fsl.utils @@ -38,9 +38,6 @@ inputs: # type=file|default=: input transformation matrix in_file2: generic/file # type=file|default=: second input matrix (for use with fix_scale_skew or concat_xfm) - out_file: Path - # type=file: output transformation matrix - # type=file|default=: final transformation matrix callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"flirt_inv.mat"' # type=file: output transformation matrix # type=file|default=: final 
transformation matrix @@ -88,13 +85,13 @@ tests: # type=file: output transformation matrix # type=file|default=: final transformation matrix output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -113,13 +110,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input transformation matrix - invert_xfm: 'True' - # type=bool|default=False: invert input transformation out_file: '"flirt_inv.mat"' # type=file: output transformation matrix # type=file|default=: final transformation matrix imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.fsl as fsl expected_outputs: @@ -143,13 +138,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"flirt.mat"' # type=file|default=: input transformation matrix - invert_xfm: 'True' - # type=bool|default=False: invert input transformation out_file: '"flirt_inv.mat"' # type=file: output transformation matrix # type=file|default=: final transformation matrix imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/copy_geom.yaml b/example-specs/interface/nipype/fsl/copy_geom.yaml index 677859c6..44453174 100644 --- a/example-specs/interface/nipype/fsl/copy_geom.yaml +++ b/example-specs/interface/nipype/fsl/copy_geom.yaml @@ -6,13 +6,13 @@ # Docs # ---- # Use fslcpgeom to copy the header geometry information to another image. -# Copy certain parts of the header information (image dimensions, voxel -# dimensions, voxel dimensions units string, image orientation/origin or -# qform/sform info) from one image to another. Note that only copies from -# Analyze to Analyze or Nifti to Nifti will work properly. Copying from -# different files will result in loss of information or potentially incorrect -# settings. -# +# Copy certain parts of the header information (image dimensions, voxel +# dimensions, voxel dimensions units string, image orientation/origin or +# qform/sform info) from one image to another. Note that only copies from +# Analyze to Analyze or Nifti to Nifti will work properly. Copying from +# different files will result in loss of information or potentially incorrect +# settings. 
+# task_name: CopyGeom nipype_name: CopyGeom nipype_module: nipype.interfaces.fsl.utils @@ -53,7 +53,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -67,13 +67,13 @@ tests: ignore_dims: # type=bool|default=False: Do not copy image dimensions output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/dilate_image.yaml b/example-specs/interface/nipype/fsl/dilate_image.yaml index 5f29bf3c..e135ccbd 100644 --- a/example-specs/interface/nipype/fsl/dilate_image.yaml +++ b/example-specs/interface/nipype/fsl/dilate_image.yaml @@ -24,9 +24,6 @@ inputs: # type=file|default=: image to operate on kernel_file: generic/file # type=file|default=: use external file for kernel - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of 
methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -50,7 +47,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -80,13 +77,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/distance_map.yaml b/example-specs/interface/nipype/fsl/distance_map.yaml index d027bbdb..ccb3a66b 100644 --- a/example-specs/interface/nipype/fsl/distance_map.yaml +++ b/example-specs/interface/nipype/fsl/distance_map.yaml @@ -6,17 +6,17 @@ # Docs # ---- # Use FSL's distancemap to generate a map of the distance to the nearest -# nonzero voxel. +# nonzero voxel. 
# -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.fsl as fsl +# >>> mapper = fsl.DistanceMap() +# >>> mapper.inputs.in_file = "skeleton_mask.nii.gz" +# >>> mapper.run() # doctest: +SKIP # -# >>> import nipype.interfaces.fsl as fsl -# >>> mapper = fsl.DistanceMap() -# >>> mapper.inputs.in_file = "skeleton_mask.nii.gz" -# >>> mapper.run() # doctest: +SKIP # -# task_name: DistanceMap nipype_name: DistanceMap nipype_module: nipype.interfaces.fsl.dti @@ -31,9 +31,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - distance_map: Path - # type=file: value is distance to nearest nonzero voxels - # type=file|default=: distance map to write in_file: generic/file # type=file|default=: image to calculate distance values for mask_file: generic/file @@ -64,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields distance_map: distance_map # type=file: value is distance to nearest nonzero voxels # type=file|default=: distance map to write @@ -87,13 +84,13 @@ tests: # type=file: value is distance to nearest nonzero voxels # type=file|default=: distance map to write output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import 
statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/dti_fit.yaml b/example-specs/interface/nipype/fsl/dti_fit.yaml index 19a0a2e4..d89462d7 100644 --- a/example-specs/interface/nipype/fsl/dti_fit.yaml +++ b/example-specs/interface/nipype/fsl/dti_fit.yaml @@ -6,22 +6,22 @@ # Docs # ---- # Use FSL dtifit command for fitting a diffusion tensor model at each -# voxel +# voxel # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces import fsl +# >>> dti = fsl.DTIFit() +# >>> dti.inputs.dwi = 'diffusion.nii' +# >>> dti.inputs.bvecs = 'bvecs' +# >>> dti.inputs.bvals = 'bvals' +# >>> dti.inputs.base_name = 'TP' +# >>> dti.inputs.mask = 'mask.nii' +# >>> dti.cmdline +# 'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals' # -# >>> from nipype.interfaces import fsl -# >>> dti = fsl.DTIFit() -# >>> dti.inputs.dwi = 'diffusion.nii' -# >>> dti.inputs.bvecs = 'bvecs' -# >>> dti.inputs.bvals = 'bvals' -# >>> dti.inputs.base_name = 'TP' -# >>> dti.inputs.mask = 'mask.nii' -# >>> dti.cmdline -# 'dtifit -k diffusion.nii -o TP -m mask.nii -r bvecs -b bvals' # -# task_name: DTIFit nipype_name: DTIFit nipype_module: nipype.interfaces.fsl.dti @@ -38,7 +38,7 @@ inputs: # passed to the field in the automatically generated unittests. 
bvals: medimage/bval # type=file|default=: b values file - bvecs: medimage/bvec + bvecs: generic/file # type=file|default=: b vectors file cni: generic/file # type=file|default=: input counfound regressors @@ -93,7 +93,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -134,13 +134,13 @@ tests: gradnonlin: # type=file|default=: gradient non linearities output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -159,16 +159,12 @@ tests: # (if not specified, will try to choose a sensible value) dwi: # type=file|default=: diffusion weighted image data file - bvecs: - # type=file|default=: b vectors file bvals: # type=file|default=: b values file - base_name: '"TP"' - # type=str|default='dtifit_': base_name that all output files will start with mask: # type=file|default=: bet binary mask file imports: - # list[nipype2pydra.task.base.importstatement] - list import 
statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -191,16 +187,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. dwi: '"diffusion.nii"' # type=file|default=: diffusion weighted image data file - bvecs: '"bvecs"' - # type=file|default=: b vectors file bvals: '"bvals"' # type=file|default=: b values file - base_name: '"TP"' - # type=str|default='dtifit_': base_name that all output files will start with mask: '"mask.nii"' # type=file|default=: bet binary mask file imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/dual_regression.yaml b/example-specs/interface/nipype/fsl/dual_regression.yaml index 06b866e5..dc1978c0 100644 --- a/example-specs/interface/nipype/fsl/dual_regression.yaml +++ b/example-specs/interface/nipype/fsl/dual_regression.yaml @@ -7,21 +7,21 @@ # ---- # Wrapper Script for Dual Regression Workflow # -# Examples -# -------- +# Examples +# -------- +# +# >>> dual_regression = DualRegression() +# >>> dual_regression.inputs.in_files = ["functional.nii", "functional2.nii", "functional3.nii"] +# >>> dual_regression.inputs.group_IC_maps_4D = "allFA.nii" +# >>> dual_regression.inputs.des_norm = False +# >>> dual_regression.inputs.one_sample_group_mean = True +# >>> dual_regression.inputs.n_perm = 10 +# >>> dual_regression.inputs.out_dir = "my_output_directory" +# >>> dual_regression.cmdline +# 'dual_regression allFA.nii 0 -1 10 my_output_directory functional.nii functional2.nii functional3.nii' +# >>> dual_regression.run() # doctest: +SKIP # -# >>> dual_regression = DualRegression() -# >>> dual_regression.inputs.in_files = ["functional.nii", "functional2.nii", "functional3.nii"] -# >>> dual_regression.inputs.group_IC_maps_4D = "allFA.nii" -# >>> dual_regression.inputs.des_norm = False -# >>> dual_regression.inputs.one_sample_group_mean = True -# >>> dual_regression.inputs.n_perm = 10 -# >>> dual_regression.inputs.out_dir = "my_output_directory" -# >>> dual_regression.cmdline -# 'dual_regression allFA.nii 0 -1 10 my_output_directory functional.nii functional2.nii functional3.nii' -# >>> dual_regression.run() # doctest: +SKIP # -# task_name: DualRegression nipype_name: DualRegression nipype_module: nipype.interfaces.fsl.model @@ -40,13 +40,10 @@ inputs: # type=file|default=: Design contrasts for final cross-subject modelling with randomise design_file: generic/file # type=file|default=: Design matrix for final cross-subject modelling with randomise - group_IC_maps_4D: medimage/nifti1 + 
group_IC_maps_4D: generic/file # type=file|default=: 4D image containing spatial IC maps (melodic_IC) from the whole-group ICA analysis in_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: List all subjects' preprocessed, standard-space 4D datasets - out_dir: Path - # type=directory: - # type=directory|default='output': This directory will be created to hold all output and logfiles callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -70,8 +67,8 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_dir: '"my_output_directory"' + # dict[str, str] - `path_template` values to be provided to output fields + out_dir: out_dir # type=directory: # type=directory|default='output': This directory will be created to hold all output and logfiles requirements: @@ -98,13 +95,13 @@ tests: # type=directory: # type=directory|default='output': This directory will be created to hold all output and logfiles output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will 
typically @@ -123,19 +120,12 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: List all subjects' preprocessed, standard-space 4D datasets - group_IC_maps_4D: - # type=file|default=: 4D image containing spatial IC maps (melodic_IC) from the whole-group ICA analysis des_norm: 'False' # type=bool|default=True: Whether to variance-normalise the timecourses used as the stage-2 regressors; True is default and recommended - one_sample_group_mean: 'True' - # type=bool|default=False: perform 1-sample group-mean test instead of generic permutation test n_perm: '10' # type=int|default=0: Number of permutations for randomise; set to 1 for just raw tstat output, set to 0 to not run randomise at all. - out_dir: '"my_output_directory"' - # type=directory: - # type=directory|default='output': This directory will be created to hold all output and logfiles imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -158,19 +148,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_files: '["functional.nii", "functional2.nii", "functional3.nii"]' # type=inputmultiobject|default=[]: List all subjects' preprocessed, standard-space 4D datasets - group_IC_maps_4D: '"allFA.nii"' - # type=file|default=: 4D image containing spatial IC maps (melodic_IC) from the whole-group ICA analysis des_norm: 'False' # type=bool|default=True: Whether to variance-normalise the timecourses used as the stage-2 regressors; True is default and recommended - one_sample_group_mean: 'True' - # type=bool|default=False: perform 1-sample group-mean test instead of generic permutation test n_perm: '10' # type=int|default=0: Number of permutations for randomise; set to 1 for just raw tstat output, set to 0 to not run randomise at all. - out_dir: '"my_output_directory"' - # type=directory: - # type=directory|default='output': This directory will be created to hold all output and logfiles imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/eddy.yaml b/example-specs/interface/nipype/fsl/eddy.yaml index 368ad6bc..fa2a097a 100644 --- a/example-specs/interface/nipype/fsl/eddy.yaml +++ b/example-specs/interface/nipype/fsl/eddy.yaml @@ -6,44 +6,44 @@ # Docs # ---- # -# Interface for FSL eddy, a tool for estimating and correcting eddy -# currents induced distortions. `User guide -# `__ and -# `more info regarding acqp file -# `_. +# Interface for FSL eddy, a tool for estimating and correcting eddy +# currents induced distortions. `User guide +# `__ and +# `more info regarding acqp file +# `_. 
# -# Examples -# -------- +# Examples +# -------- # -# >>> from nipype.interfaces.fsl import Eddy +# >>> from nipype.interfaces.fsl import Eddy # -# Running eddy on a CPU using OpenMP: -# >>> eddy = Eddy() -# >>> eddy.inputs.in_file = 'epi.nii' -# >>> eddy.inputs.in_mask = 'epi_mask.nii' -# >>> eddy.inputs.in_index = 'epi_index.txt' -# >>> eddy.inputs.in_acqp = 'epi_acqp.txt' -# >>> eddy.inputs.in_bvec = 'bvecs.scheme' -# >>> eddy.inputs.in_bval = 'bvals.scheme' -# >>> eddy.cmdline # doctest: +ELLIPSIS -# 'eddy_openmp --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --slm=none' +# Running eddy on a CPU using OpenMP: +# >>> eddy = Eddy() +# >>> eddy.inputs.in_file = 'epi.nii' +# >>> eddy.inputs.in_mask = 'epi_mask.nii' +# >>> eddy.inputs.in_index = 'epi_index.txt' +# >>> eddy.inputs.in_acqp = 'epi_acqp.txt' +# >>> eddy.inputs.in_bvec = 'bvecs.scheme' +# >>> eddy.inputs.in_bval = 'bvals.scheme' +# >>> eddy.cmdline # doctest: +ELLIPSIS +# 'eddy_openmp --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --slm=none' # -# Running eddy on an Nvidia GPU using cuda: -# >>> eddy.inputs.use_cuda = True -# >>> eddy.cmdline # doctest: +ELLIPSIS -# 'eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --slm=none' +# Running eddy on an Nvidia GPU using cuda: +# >>> eddy.inputs.use_cuda = True +# >>> eddy.cmdline # doctest: +ELLIPSIS +# 'eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii 
--index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --slm=none' +# +# Running eddy with slice-to-volume motion correction: +# >>> eddy.inputs.mporder = 6 +# >>> eddy.inputs.slice2vol_niter = 5 +# >>> eddy.inputs.slice2vol_lambda = 1 +# >>> eddy.inputs.slice2vol_interp = 'trilinear' +# >>> eddy.inputs.slice_order = 'epi_slspec.txt' +# >>> eddy.cmdline # doctest: +ELLIPSIS +# 'eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --mporder=6 --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --s2v_interp=trilinear --s2v_lambda=1 --s2v_niter=5 --slspec=epi_slspec.txt --slm=none' +# >>> res = eddy.run() # doctest: +SKIP # -# Running eddy with slice-to-volume motion correction: -# >>> eddy.inputs.mporder = 6 -# >>> eddy.inputs.slice2vol_niter = 5 -# >>> eddy.inputs.slice2vol_lambda = 1 -# >>> eddy.inputs.slice2vol_interp = 'trilinear' -# >>> eddy.inputs.slice_order = 'epi_slspec.txt' -# >>> eddy.cmdline # doctest: +ELLIPSIS -# 'eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --mporder=6 --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --s2v_interp=trilinear --s2v_lambda=1 --s2v_niter=5 --slspec=epi_slspec.txt --slm=none' -# >>> res = eddy.run() # doctest: +SKIP # -# task_name: Eddy nipype_name: Eddy nipype_module: nipype.interfaces.fsl.epi @@ -134,7 +134,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are 
required to be provided for the output field to be present tests: @@ -236,13 +236,13 @@ tests: residuals: # type=bool|default=False: Output Residuals output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -264,7 +264,7 @@ tests: in_index: # type=file|default=: File containing indices for all volumes in --imain into --acqp and --topup imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -284,7 +284,7 @@ tests: use_cuda: 'True' # type=bool|default=False: Run eddy using cuda gpu imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -303,16 
+303,12 @@ tests: # (if not specified, will try to choose a sensible value) mporder: '6' # type=int|default=0: Order of slice-to-vol movement model - slice2vol_niter: '5' - # type=int|default=0: Number of iterations for slice-to-vol slice2vol_lambda: '1' # type=int|default=0: Regularisation weight for slice-to-vol movement (reasonable range 1-10) - slice2vol_interp: '"trilinear"' - # type=enum|default='trilinear'|allowed['spline','trilinear']: Slice-to-vol interpolation model for estimation step slice_order: # type=file|default='': Name of text file completely specifying slice/group acquisition imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -338,11 +334,11 @@ doctests: in_index: '"epi_index.txt"' # type=file|default=: File containing indices for all volumes in --imain into --acqp and --topup imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --slm=none +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -351,11 +347,11 @@ doctests: use_cuda: 'True' # type=bool|default=False: Run eddy using cuda gpu imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS -- cmdline: eddy_cuda --flm=quadratic --ff=10.0 --acqp=epi_acqp.txt --bvals=bvals.scheme --bvecs=bvecs.scheme --imain=epi.nii --index=epi_index.txt --mask=epi_mask.nii --interp=spline --resamp=jac --mporder=6 --niter=5 --nvoxhp=1000 --out=.../eddy_corrected --s2v_interp=trilinear --s2v_lambda=1 --s2v_niter=5 --slspec=epi_slspec.txt --slm=none +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -363,16 +359,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
mporder: '6' # type=int|default=0: Order of slice-to-vol movement model - slice2vol_niter: '5' - # type=int|default=0: Number of iterations for slice-to-vol slice2vol_lambda: '1' # type=int|default=0: Regularisation weight for slice-to-vol movement (reasonable range 1-10) - slice2vol_interp: '"trilinear"' - # type=enum|default='trilinear'|allowed['spline','trilinear']: Slice-to-vol interpolation model for estimation step slice_order: '"epi_slspec.txt"' # type=file|default='': Name of text file completely specifying slice/group acquisition imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/eddy_correct.yaml b/example-specs/interface/nipype/fsl/eddy_correct.yaml index 6094f537..a03479dd 100644 --- a/example-specs/interface/nipype/fsl/eddy_correct.yaml +++ b/example-specs/interface/nipype/fsl/eddy_correct.yaml @@ -7,19 +7,19 @@ # ---- # # -# .. warning:: Deprecated in FSL. Please use -# :class:`nipype.interfaces.fsl.epi.Eddy` instead +# .. warning:: Deprecated in FSL. Please use +# :class:`nipype.interfaces.fsl.epi.Eddy` instead # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces.fsl import EddyCorrect +# >>> eddyc = EddyCorrect(in_file='diffusion.nii', +# ... out_file="diffusion_edc.nii", ref_num=0) +# >>> eddyc.cmdline +# 'eddy_correct diffusion.nii diffusion_edc.nii 0' # -# >>> from nipype.interfaces.fsl import EddyCorrect -# >>> eddyc = EddyCorrect(in_file='diffusion.nii', -# ... 
out_file="diffusion_edc.nii", ref_num=0) -# >>> eddyc.cmdline -# 'eddy_correct diffusion.nii diffusion_edc.nii 0' # -# task_name: EddyCorrect nipype_name: EddyCorrect nipype_module: nipype.interfaces.fsl.epi @@ -36,8 +36,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: 4D input file - out_file: Path - # type=file|default=: 4D output file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,7 +58,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -74,13 +72,13 @@ tests: ref_num: # type=int|default=0: reference number output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -104,7 +102,7 @@ tests: ref_num: '0' # type=int|default=0: reference number imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -132,7 +130,7 @@ doctests: ref_num: '0' # type=int|default=0: reference number imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/eddy_quad.yaml b/example-specs/interface/nipype/fsl/eddy_quad.yaml index 916b9fd5..b90b8ca4 100644 --- a/example-specs/interface/nipype/fsl/eddy_quad.yaml +++ b/example-specs/interface/nipype/fsl/eddy_quad.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Interface for FSL eddy_quad, a tool for generating single subject reports -# and storing the quality assessment indices for each subject. -# `User guide `__ +# Interface for FSL eddy_quad, a tool for generating single subject reports +# and storing the quality assessment indices for each subject. 
+# `User guide `__ # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import EddyQuad +# >>> quad = EddyQuad() +# >>> quad.inputs.base_name = 'eddy_corrected' +# >>> quad.inputs.idx_file = 'epi_index.txt' +# >>> quad.inputs.param_file = 'epi_acqp.txt' +# >>> quad.inputs.mask_file = 'epi_mask.nii' +# >>> quad.inputs.bval_file = 'bvals.scheme' +# >>> quad.inputs.bvec_file = 'bvecs.scheme' +# >>> quad.inputs.output_dir = 'eddy_corrected.qc' +# >>> quad.inputs.field = 'fieldmap_phase_fslprepared.nii' +# >>> quad.inputs.verbose = True +# >>> quad.cmdline +# 'eddy_quad eddy_corrected --bvals bvals.scheme --bvecs bvecs.scheme --field fieldmap_phase_fslprepared.nii --eddyIdx epi_index.txt --mask epi_mask.nii --output-dir eddy_corrected.qc --eddyParams epi_acqp.txt --verbose' +# >>> res = quad.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import EddyQuad -# >>> quad = EddyQuad() -# >>> quad.inputs.base_name = 'eddy_corrected' -# >>> quad.inputs.idx_file = 'epi_index.txt' -# >>> quad.inputs.param_file = 'epi_acqp.txt' -# >>> quad.inputs.mask_file = 'epi_mask.nii' -# >>> quad.inputs.bval_file = 'bvals.scheme' -# >>> quad.inputs.bvec_file = 'bvecs.scheme' -# >>> quad.inputs.output_dir = 'eddy_corrected.qc' -# >>> quad.inputs.field = 'fieldmap_phase_fslprepared.nii' -# >>> quad.inputs.verbose = True -# >>> quad.cmdline -# 'eddy_quad eddy_corrected --bvals bvals.scheme --bvecs bvecs.scheme --field fieldmap_phase_fslprepared.nii --eddyIdx epi_index.txt --mask epi_mask.nii --output-dir eddy_corrected.qc --eddyParams epi_acqp.txt --verbose' -# >>> res = quad.run() # doctest: +SKIP # -# task_name: EddyQuad nipype_name: EddyQuad nipype_module: nipype.interfaces.fsl.epi @@ -93,7 +93,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + 
# dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -121,13 +121,13 @@ tests: verbose: # type=bool|default=False: Display debug messages output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -149,7 +149,7 @@ tests: output_dir: '"eddy_corrected.qc"' # type=str|default='': Output directory - default = '.qc' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -175,7 +175,7 @@ doctests: output_dir: '"eddy_corrected.qc"' # type=str|default='': Output directory - default = '.qc' imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 
'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/epi_de_warp.yaml b/example-specs/interface/nipype/fsl/epi_de_warp.yaml index fd2d31c2..7ea369de 100644 --- a/example-specs/interface/nipype/fsl/epi_de_warp.yaml +++ b/example-specs/interface/nipype/fsl/epi_de_warp.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# Wraps the unwarping script `epidewarp.fsl -# `_. +# Wraps the unwarping script `epidewarp.fsl +# `_. # -# .. warning:: deprecated in FSL, please use -# :func:`niflow.nipype1.workflows.dmri.preprocess.epi.sdc_fmb` instead. +# .. warning:: deprecated in FSL, please use +# :func:`niflow.nipype1.workflows.dmri.preprocess.epi.sdc_fmb` instead. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import EPIDeWarp +# >>> dewarp = EPIDeWarp() +# >>> dewarp.inputs.epi_file = "functional.nii" +# >>> dewarp.inputs.mag_file = "magnitude.nii" +# >>> dewarp.inputs.dph_file = "phase.nii" +# >>> dewarp.inputs.output_type = "NIFTI_GZ" +# >>> dewarp.cmdline # doctest: +ELLIPSIS +# 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii --esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 --tmpdir .../temp --vsm .../vsm.nii.gz' +# >>> res = dewarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import EPIDeWarp -# >>> dewarp = EPIDeWarp() -# >>> dewarp.inputs.epi_file = "functional.nii" -# >>> dewarp.inputs.mag_file = "magnitude.nii" -# >>> dewarp.inputs.dph_file = "phase.nii" -# >>> dewarp.inputs.output_type = "NIFTI_GZ" -# >>> dewarp.cmdline # doctest: +ELLIPSIS -# 'epidewarp.fsl --mag magnitude.nii --dph phase.nii --epi functional.nii --esp 0.58 --exfdw .../exfdw.nii.gz --nocleanup --sigma 2 --tediff 2.46 --tmpdir .../temp --vsm .../vsm.nii.gz' -# >>> res = dewarp.run() # doctest: +SKIP # # -# task_name: EPIDeWarp nipype_name: EPIDeWarp nipype_module: 
nipype.interfaces.fsl.epi @@ -47,7 +47,7 @@ inputs: # type=file|default=: EPI volume to unwarp exf_file: generic/file # type=file|default=: example func volume (or use epi) - mag_file: medimage/nifti1 + mag_file: generic/file # type=file|default=: Magnitude file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -82,7 +82,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields exfdw: exfdw # type=file: dewarped functional volume example # type=string|default='': dewarped example func volume @@ -120,13 +120,13 @@ tests: cleanup: # type=bool|default=False: cleanup output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,14 +145,10 @@ tests: # (if not specified, will try to choose a sensible value) epi_file: # type=file|default=: EPI volume to unwarp - mag_file: - # type=file|default=: Magnitude file dph_file: # type=file|default=: Phase file assumed to be scaled from 0 to 4095 - output_type: '"NIFTI_GZ"' - # 
type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -175,14 +171,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. epi_file: '"functional.nii"' # type=file|default=: EPI volume to unwarp - mag_file: '"magnitude.nii"' - # type=file|default=: Magnitude file dph_file: '"phase.nii"' # type=file|default=: Phase file assumed to be scaled from 0 to 4095 - output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/epi_reg.yaml b/example-specs/interface/nipype/fsl/epi_reg.yaml index c187a6aa..dc1737b6 100644 --- a/example-specs/interface/nipype/fsl/epi_reg.yaml +++ b/example-specs/interface/nipype/fsl/epi_reg.yaml @@ -7,28 +7,28 @@ # ---- # # -# Runs FSL epi_reg script for simultaneous coregistration and fieldmap -# unwarping. +# Runs FSL epi_reg script for simultaneous coregistration and fieldmap +# unwarping. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import EpiReg +# >>> epireg = EpiReg() +# >>> epireg.inputs.epi='epi.nii' +# >>> epireg.inputs.t1_head='T1.nii' +# >>> epireg.inputs.t1_brain='T1_brain.nii' +# >>> epireg.inputs.out_base='epi2struct' +# >>> epireg.inputs.fmap='fieldmap_phase_fslprepared.nii' +# >>> epireg.inputs.fmapmag='fieldmap_mag.nii' +# >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii' +# >>> epireg.inputs.echospacing=0.00067 +# >>> epireg.inputs.pedir='y' +# >>> epireg.cmdline # doctest: +ELLIPSIS +# 'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii --fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --noclean --pedir=y --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct' +# >>> epireg.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import EpiReg -# >>> epireg = EpiReg() -# >>> epireg.inputs.epi='epi.nii' -# >>> epireg.inputs.t1_head='T1.nii' -# >>> epireg.inputs.t1_brain='T1_brain.nii' -# >>> epireg.inputs.out_base='epi2struct' -# >>> epireg.inputs.fmap='fieldmap_phase_fslprepared.nii' -# >>> epireg.inputs.fmapmag='fieldmap_mag.nii' -# >>> epireg.inputs.fmapmagbrain='fieldmap_mag_brain.nii' -# >>> epireg.inputs.echospacing=0.00067 -# >>> epireg.inputs.pedir='y' -# >>> epireg.cmdline # doctest: +ELLIPSIS -# 'epi_reg --echospacing=0.000670 --fmap=fieldmap_phase_fslprepared.nii --fmapmag=fieldmap_mag.nii --fmapmagbrain=fieldmap_mag_brain.nii --noclean --pedir=y --epi=epi.nii --t1=T1.nii --t1brain=T1_brain.nii --out=epi2struct' -# >>> epireg.run() # doctest: +SKIP # -# task_name: EpiReg nipype_name: EpiReg nipype_module: nipype.interfaces.fsl.epi @@ -47,19 +47,16 @@ inputs: # type=file|default=: EPI image fmap: medimage/nifti1 # type=file|default=: fieldmap image (in rad/s) - fmapmag: medimage/nifti1 + fmapmag: generic/file # type=file|default=: fieldmap magnitude image - wholehead fmapmagbrain: medimage/nifti1 # type=file|default=: fieldmap 
magnitude image - brain extracted t1_brain: medimage/nifti1 # type=file|default=: brain extracted T1 image - t1_head: medimage/nifti1 + t1_head: generic/file # type=file|default=: wholehead T1 image weight_image: generic/file # type=file|default=: weighting image (in T1 space) - wmseg: Path - # type=file: white matter segmentation used in flirt bbr - # type=file|default=: white matter segmentation of T1 image, has to be named like the t1brain and end on _wmseg callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -109,7 +106,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -144,13 +141,13 @@ tests: no_clean: # type=bool|default=True: do not clean up intermediate files output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -169,24 +166,16 @@ tests: 
# (if not specified, will try to choose a sensible value) epi: # type=file|default=: EPI image - t1_head: - # type=file|default=: wholehead T1 image t1_brain: # type=file|default=: brain extracted T1 image - out_base: '"epi2struct"' - # type=string|default='epi2struct': output base name fmap: # type=file|default=: fieldmap image (in rad/s) - fmapmag: - # type=file|default=: fieldmap magnitude image - wholehead fmapmagbrain: # type=file|default=: fieldmap magnitude image - brain extracted - echospacing: '0.00067' - # type=float|default=0.0: Effective EPI echo spacing (sometimes called dwell time) - in seconds pedir: '"y"' # type=enum|default='x'|allowed['-x','-y','-z','x','y','z']: phase encoding direction, dir = x/y/z/-x/-y/-z imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -209,24 +198,16 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
epi: '"epi.nii"' # type=file|default=: EPI image - t1_head: '"T1.nii"' - # type=file|default=: wholehead T1 image t1_brain: '"T1_brain.nii"' # type=file|default=: brain extracted T1 image - out_base: '"epi2struct"' - # type=string|default='epi2struct': output base name fmap: '"fieldmap_phase_fslprepared.nii"' # type=file|default=: fieldmap image (in rad/s) - fmapmag: '"fieldmap_mag.nii"' - # type=file|default=: fieldmap magnitude image - wholehead fmapmagbrain: '"fieldmap_mag_brain.nii"' # type=file|default=: fieldmap magnitude image - brain extracted - echospacing: '0.00067' - # type=float|default=0.0: Effective EPI echo spacing (sometimes called dwell time) - in seconds pedir: '"y"' # type=enum|default='x'|allowed['-x','-y','-z','x','y','z']: phase encoding direction, dir = x/y/z/-x/-y/-z imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/erode_image.yaml b/example-specs/interface/nipype/fsl/erode_image.yaml index 9bcb725e..202d8436 100644 --- a/example-specs/interface/nipype/fsl/erode_image.yaml +++ b/example-specs/interface/nipype/fsl/erode_image.yaml @@ -24,9 +24,6 @@ inputs: # type=file|default=: image to operate on kernel_file: generic/file # type=file|default=: use external file for kernel - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -50,7 +47,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -80,13 +77,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting 
that tests will typically diff --git a/example-specs/interface/nipype/fsl/extract_roi.yaml b/example-specs/interface/nipype/fsl/extract_roi.yaml index a58d7400..c4d456fb 100644 --- a/example-specs/interface/nipype/fsl/extract_roi.yaml +++ b/example-specs/interface/nipype/fsl/extract_roi.yaml @@ -6,29 +6,29 @@ # Docs # ---- # Uses FSL Fslroi command to extract region of interest (ROI) -# from an image. +# from an image. # -# You can a) take a 3D ROI from a 3D data set (or if it is 4D, the -# same ROI is taken from each time point and a new 4D data set is -# created), b) extract just some time points from a 4D data set, or -# c) control time and space limits to the ROI. Note that the -# arguments are minimum index and size (not maximum index). So to -# extract voxels 10 to 12 inclusive you would specify 10 and 3 (not -# 10 and 12). +# You can a) take a 3D ROI from a 3D data set (or if it is 4D, the +# same ROI is taken from each time point and a new 4D data set is +# created), b) extract just some time points from a 4D data set, or +# c) control time and space limits to the ROI. Note that the +# arguments are minimum index and size (not maximum index). So to +# extract voxels 10 to 12 inclusive you would specify 10 and 3 (not +# 10 and 12). # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import ExtractROI +# >>> from nipype.testing import anatfile +# >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0, +# ... t_size=1) +# >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile +# True # -# >>> from nipype.interfaces.fsl import ExtractROI -# >>> from nipype.testing import anatfile -# >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0, -# ... 
t_size=1) -# >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile -# True # # -# task_name: ExtractROI nipype_name: ExtractROI nipype_module: nipype.interfaces.fsl.utils @@ -45,9 +45,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: input file - roi_file: Path - # type=file: - # type=file|default=: output file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields roi_file: '"bar.nii"' # type=file: # type=file|default=: output file @@ -105,13 +102,13 @@ tests: crop_list: # type=list|default=[]: list of two tuples specifying crop options output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -138,7 +135,7 @@ tests: t_size: '1' # type=int|default=0: imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required 
by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: anatfile @@ -172,7 +169,7 @@ doctests: t_size: '1' # type=int|default=0: imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/fast.yaml b/example-specs/interface/nipype/fsl/fast.yaml index 007930a3..c692625a 100644 --- a/example-specs/interface/nipype/fsl/fast.yaml +++ b/example-specs/interface/nipype/fsl/fast.yaml @@ -7,20 +7,20 @@ # ---- # FSL FAST wrapper for segmentation and bias correction # -# For complete details, see the `FAST Documentation. -# `_ +# For complete details, see the `FAST Documentation. 
+# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> fast = fsl.FAST() +# >>> fast.inputs.in_files = 'structural.nii' +# >>> fast.inputs.out_basename = 'fast_' +# >>> fast.cmdline +# 'fast -o fast_ -S 1 structural.nii' +# >>> out = fast.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> fastr = fsl.FAST() -# >>> fastr.inputs.in_files = 'structural.nii' -# >>> fastr.inputs.out_basename = 'fast_' -# >>> fastr.cmdline -# 'fast -o fast_ -S 1 structural.nii' -# >>> out = fastr.run() # doctest: +SKIP # -# task_name: FAST nipype_name: FAST nipype_module: nipype.interfaces.fsl.preprocess @@ -43,8 +43,6 @@ inputs: # type=file|default=: Filename containing intensities other_priors: generic/file+list-of # type=inputmultiobject|default=[]: alternative prior images - out_basename: Path - # type=file|default=: base name of output files callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -82,7 +80,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -135,13 +133,13 @@ tests: # type=outputmultiobject: # type=bool|default=False: outputs individual probability maps output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment 
variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -160,10 +158,8 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=inputmultiobject|default=[]: image, or multi-channel set of images, to be segmented - out_basename: '"fast_"' - # type=file|default=: base name of output files imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -178,7 +174,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fast -o fast_ -S 1 structural.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -186,10 +182,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_files: '"structural.nii"' # type=inputmultiobject|default=[]: image, or multi-channel set of images, to be segmented - out_basename: '"fast_"' - # type=file|default=: base name of output files imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/feat.yaml b/example-specs/interface/nipype/fsl/feat.yaml index 9bde3588..f34b5732 100644 --- a/example-specs/interface/nipype/fsl/feat.yaml +++ b/example-specs/interface/nipype/fsl/feat.yaml @@ -44,7 +44,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -54,13 +54,13 @@ tests: fsf_file: # type=file|default=: File specifying the feat design spec file output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with 
each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/feat_model.yaml b/example-specs/interface/nipype/fsl/feat_model.yaml index 430e8cea..abdd671f 100644 --- a/example-specs/interface/nipype/fsl/feat_model.yaml +++ b/example-specs/interface/nipype/fsl/feat_model.yaml @@ -54,7 +54,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -66,13 +66,13 @@ tests: ev_files: # type=list|default=[]: Event spec files generated by level1design output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/feature_extractor.yaml b/example-specs/interface/nipype/fsl/feature_extractor.yaml index fdd69d31..5588be0f 100644 --- 
a/example-specs/interface/nipype/fsl/feature_extractor.yaml +++ b/example-specs/interface/nipype/fsl/feature_extractor.yaml @@ -6,8 +6,8 @@ # Docs # ---- # -# Extract features (for later training and/or classifying) -# +# Extract features (for later training and/or classifying) +# task_name: FeatureExtractor nipype_name: FeatureExtractor nipype_module: nipype.interfaces.fsl.fix @@ -22,9 +22,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - mel_ica: Path - # type=directory: Melodic output directory or directories - # type=directory|default=: Melodic output directory or directories callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -63,7 +60,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/filmgls.yaml 
b/example-specs/interface/nipype/fsl/filmgls.yaml index 2e15e916..a0a90f9f 100644 --- a/example-specs/interface/nipype/fsl/filmgls.yaml +++ b/example-specs/interface/nipype/fsl/filmgls.yaml @@ -7,30 +7,30 @@ # ---- # Use FSL film_gls command to fit a design matrix to voxel timeseries # -# Examples -# -------- +# Examples +# -------- # -# Initialize with no options, assigning them when calling run: +# Initialize with no options, assigning them when calling run: # -# >>> from nipype.interfaces import fsl -# >>> fgls = fsl.FILMGLS() -# >>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP +# >>> from nipype.interfaces import fsl +# >>> fgls = fsl.FILMGLS() +# >>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP # -# Assign options through the ``inputs`` attribute: +# Assign options through the ``inputs`` attribute: # -# >>> fgls = fsl.FILMGLS() -# >>> fgls.inputs.in_file = 'functional.nii' -# >>> fgls.inputs.design_file = 'design.mat' -# >>> fgls.inputs.threshold = 10 -# >>> fgls.inputs.results_dir = 'stats' -# >>> res = fgls.run() #doctest: +SKIP +# >>> fgls = fsl.FILMGLS() +# >>> fgls.inputs.in_file = 'functional.nii' +# >>> fgls.inputs.design_file = 'design.mat' +# >>> fgls.inputs.threshold = 10 +# >>> fgls.inputs.results_dir = 'stats' +# >>> res = fgls.run() #doctest: +SKIP # -# Specify options when creating an instance: +# Specify options when creating an instance: +# +# >>> fgls = fsl.FILMGLS(in_file='functional.nii', design_file='design.mat', threshold=10, results_dir='stats') +# >>> res = fgls.run() #doctest: +SKIP # -# >>> fgls = fsl.FILMGLS(in_file='functional.nii', design_file='design.mat', threshold=10, results_dir='stats') -# >>> res = fgls.run() #doctest: +SKIP # -# task_name: FILMGLS nipype_name: FILMGLS nipype_module: nipype.interfaces.fsl.model @@ -51,9 +51,6 @@ inputs: # type=file|default=: contrast file containing F-contrasts in_file: generic/file # type=file|default=: input data file - 
results_dir: Path - # type=directory: directory storing model estimation output - # type=directory|default='results': directory to store results in surface: generic/file # type=file|default=: input surface for autocorr smoothing in surface-based analyses tcon_file: generic/file @@ -105,7 +102,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -152,13 +149,13 @@ tests: # type=directory: directory storing model estimation output # type=directory|default='results': directory to store results in output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/filter_regressor.yaml b/example-specs/interface/nipype/fsl/filter_regressor.yaml index f3af84ba..fbbdae49 100644 --- a/example-specs/interface/nipype/fsl/filter_regressor.yaml +++ b/example-specs/interface/nipype/fsl/filter_regressor.yaml @@ -7,8 +7,8 @@ # ---- # Data 
de-noising by regressing out part of a design matrix # -# Uses simple OLS regression on 4D images -# +# Uses simple OLS regression on 4D images +# task_name: FilterRegressor nipype_name: FilterRegressor nipype_module: nipype.interfaces.fsl.utils @@ -29,9 +29,6 @@ inputs: # type=file|default=: input file name (4D image) mask: generic/file # type=file|default=: mask image file name - out_file: Path - # type=file: output file name for the filtered data - # type=file|default=: output file name for the filtered data callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -55,7 +52,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: output file name for the filtered data # type=file|default=: output file name for the filtered data @@ -83,13 +80,13 @@ tests: out_vnscales: # type=bool|default=False: output scaling factors for variance normalization output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for 
selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/find_the_biggest.yaml b/example-specs/interface/nipype/fsl/find_the_biggest.yaml index 5955dcbb..9afad823 100644 --- a/example-specs/interface/nipype/fsl/find_the_biggest.yaml +++ b/example-specs/interface/nipype/fsl/find_the_biggest.yaml @@ -6,21 +6,21 @@ # Docs # ---- # -# Use FSL find_the_biggest for performing hard segmentation on -# the outputs of connectivity-based thresholding in probtrack. -# For complete details, see the `FDT -# Documentation. `_ +# Use FSL find_the_biggest for performing hard segmentation on +# the outputs of connectivity-based thresholding in probtrack. +# For complete details, see the `FDT +# Documentation. `_ # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces import fsl +# >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] +# >>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation') +# >>> fBig.cmdline +# 'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation' # -# >>> from nipype.interfaces import fsl -# >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] -# >>> fBig = fsl.FindTheBiggest(in_files=ldir, out_file='biggestSegmentation') -# >>> fBig.cmdline -# 'find_the_biggest seeds_to_M1.nii seeds_to_M2.nii biggestSegmentation' # -# task_name: FindTheBiggest nipype_name: FindTheBiggest nipype_module: nipype.interfaces.fsl.dti @@ -37,9 +37,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_files: generic/file+list-of # type=list|default=[]: a list of input volumes or a singleMatrixFile - out_file: Path - # type=file: output file indexed in order of input files - # type=file|default=: file with the resulting segmentation callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -63,7 +60,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"biggestSegmentation"' # type=file: output file indexed in order of input files # type=file|default=: file with the resulting segmentation @@ -79,13 +76,13 @@ tests: # type=file: output file indexed in order of input files # type=file|default=: file with the resulting segmentation output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -108,7 +105,7 @@ tests: # type=file: output file indexed in order of input files # type=file|default=: file with the resulting segmentation imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -135,7 +132,7 @@ doctests: # type=file: output file indexed in order of input files # type=file|default=: file with the resulting segmentation imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/first.yaml b/example-specs/interface/nipype/fsl/first.yaml index bb3e4307..60b459cd 100644 --- a/example-specs/interface/nipype/fsl/first.yaml +++ b/example-specs/interface/nipype/fsl/first.yaml @@ -7,18 +7,18 @@ # ---- # FSL run_first_all wrapper for segmentation of subcortical volumes # -# http://www.fmrib.ox.ac.uk/fsl/first/index.html +# http://www.fmrib.ox.ac.uk/fsl/first/index.html # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> first = fsl.FIRST() +# >>> first.inputs.in_file = 'structural.nii' +# >>> first.inputs.out_file = 'segmented.nii' +# >>> res = first.run() #doctest: +SKIP # -# >>> from nipype.interfaces import fsl -# >>> first = fsl.FIRST() -# >>> first.inputs.in_file = 'structural.nii' -# >>> first.inputs.out_file = 'segmented.nii' -# >>> res = first.run() #doctest: +SKIP # -# task_name: FIRST nipype_name: FIRST nipype_module: nipype.interfaces.fsl.preprocess @@ -37,8 +37,6 @@ inputs: # type=file|default=: 
Affine matrix to use (e.g. img2std.mat) (does not re-run registration) in_file: generic/file # type=file|default=: input data file - out_file: Path - # type=file|default='segmented': output data file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +65,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -93,13 +91,13 @@ tests: affine_file: # type=file|default=: Affine matrix to use (e.g. img2std.mat) (does not re-run registration) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/flameo.yaml b/example-specs/interface/nipype/fsl/flameo.yaml index 2b735aaa..d651e82b 100644 --- a/example-specs/interface/nipype/fsl/flameo.yaml +++ b/example-specs/interface/nipype/fsl/flameo.yaml @@ -7,24 +7,24 
@@ # ---- # Use FSL flameo command to perform higher level model fits # -# Examples -# -------- +# Examples +# -------- # -# Initialize FLAMEO with no options, assigning them when calling run: +# Initialize FLAMEO with no options, assigning them when calling run: +# +# >>> from nipype.interfaces import fsl +# >>> flameo = fsl.FLAMEO() +# >>> flameo.inputs.cope_file = 'cope.nii.gz' +# >>> flameo.inputs.var_cope_file = 'varcope.nii.gz' +# >>> flameo.inputs.cov_split_file = 'cov_split.mat' +# >>> flameo.inputs.design_file = 'design.mat' +# >>> flameo.inputs.t_con_file = 'design.con' +# >>> flameo.inputs.mask_file = 'mask.nii' +# >>> flameo.inputs.run_mode = 'fe' +# >>> flameo.cmdline +# 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' # -# >>> from nipype.interfaces import fsl -# >>> flameo = fsl.FLAMEO() -# >>> flameo.inputs.cope_file = 'cope.nii.gz' -# >>> flameo.inputs.var_cope_file = 'varcope.nii.gz' -# >>> flameo.inputs.cov_split_file = 'cov_split.mat' -# >>> flameo.inputs.design_file = 'design.mat' -# >>> flameo.inputs.t_con_file = 'design.con' -# >>> flameo.inputs.mask_file = 'mask.nii' -# >>> flameo.inputs.run_mode = 'fe' -# >>> flameo.cmdline -# 'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz' # -# task_name: FLAMEO nipype_name: FLAMEO nipype_module: nipype.interfaces.fsl.model @@ -43,7 +43,7 @@ inputs: # type=file|default=: cope regressor data file cov_split_file: datascience/text-matrix # type=file|default=: ascii matrix specifying the groups the covariance is split into - design_file: datascience/text-matrix + design_file: generic/file # type=file|default=: design matrix file dof_var_cope_file: generic/file # type=file|default=: dof data file for varcope data @@ -51,11 +51,11 @@ inputs: 
# type=file|default=: ascii matrix specifying f-contrasts log_dir: generic/directory # type=directory|default='stats': - mask_file: medimage/nifti1 + mask_file: generic/file # type=file|default=: mask file - t_con_file: medimage-fsl/con + t_con_file: fileformats.medimage_fsl.Con # type=file|default=: ascii matrix specifying t-contrasts - var_cope_file: medimage/nifti-gz + var_cope_file: generic/file # type=file|default=: varcope weightings data file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -101,7 +101,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -145,13 +145,13 @@ tests: log_dir: # type=directory|default='stats': output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -170,20 +170,14 @@ tests: # (if not specified, will try to choose a sensible value) cope_file: # 
type=file|default=: cope regressor data file - var_cope_file: - # type=file|default=: varcope weightings data file cov_split_file: # type=file|default=: ascii matrix specifying the groups the covariance is split into - design_file: - # type=file|default=: design matrix file t_con_file: # type=file|default=: ascii matrix specifying t-contrasts - mask_file: - # type=file|default=: mask file run_mode: '"fe"' # type=enum|default='fe'|allowed['fe','flame1','flame12','ols']: inference to perform imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -206,20 +200,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. cope_file: '"cope.nii.gz"' # type=file|default=: cope regressor data file - var_cope_file: '"varcope.nii.gz"' - # type=file|default=: varcope weightings data file cov_split_file: '"cov_split.mat"' # type=file|default=: ascii matrix specifying the groups the covariance is split into - design_file: '"design.mat"' - # type=file|default=: design matrix file t_con_file: '"design.con"' # type=file|default=: ascii matrix specifying t-contrasts - mask_file: '"mask.nii"' - # type=file|default=: mask file run_mode: '"fe"' # type=enum|default='fe'|allowed['fe','flame1','flame12','ols']: inference to perform imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/flirt.yaml b/example-specs/interface/nipype/fsl/flirt.yaml index 5e39a3a1..8326eae1 100644 --- a/example-specs/interface/nipype/fsl/flirt.yaml +++ b/example-specs/interface/nipype/fsl/flirt.yaml @@ -7,25 +7,25 @@ # ---- # FSL FLIRT wrapper for coregistration # -# For complete details, see the `FLIRT Documentation. -# `_ +# For complete details, see the `FLIRT Documentation. +# `_ # -# To print out the command line help, use: -# fsl.FLIRT().inputs_help() +# To print out the command line help, use: +# fsl.FLIRT().inputs_help() +# +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo') +# >>> flt.inputs.in_file = 'structural.nii' +# >>> flt.inputs.reference = 'mni.nii' +# >>> flt.inputs.output_type = "NIFTI_GZ" +# >>> flt.cmdline # doctest: +ELLIPSIS +# 'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' +# >>> res = flt.run() #doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> flt = fsl.FLIRT(bins=640, cost_func='mutualinfo') -# >>> flt.inputs.in_file = 'structural.nii' -# >>> flt.inputs.reference = 'mni.nii' -# >>> flt.inputs.output_type = "NIFTI_GZ" -# >>> flt.cmdline # doctest: +ELLIPSIS -# 'flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo' -# >>> res = flt.run() #doctest: +SKIP # -# task_name: FLIRT nipype_name: FLIRT nipype_module: nipype.interfaces.fsl.preprocess @@ -50,18 +50,9 @@ inputs: # type=file|default=: input 4x4 affine matrix in_weight: generic/file # type=file|default=: File for input weighting volume - out_file: Path - # type=file: path/name of registered file (if generated) - # type=file|default=: registered output file - out_log: Path 
- # type=file: path/name of output log (if generated) - # type=file|default=: output log - out_matrix_file: Path - # type=file: path/name of calculated affine transform (if generated) - # type=file|default=: output affine matrix in 4x4 asciii format ref_weight: generic/file # type=file|default=: File for reference weighting volume - reference: medimage/nifti1 + reference: generic/file # type=file|default=: reference file schedule: generic/file # type=file|default=: replaces default schedule @@ -100,7 +91,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -205,13 +196,13 @@ tests: bbrslope: # type=float|default=0.0: value of bbr slope output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -230,16 +221,14 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: input file - reference: - # type=file|default=: reference 
file output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type bins: '640' # type=int|default=0: number of histogram bins cost_func: '"mutualinfo"' # type=enum|default='mutualinfo'|allowed['bbr','corratio','labeldiff','leastsq','mutualinfo','normcorr','normmi']: cost function imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: example_data @@ -257,7 +246,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: flirt -in structural.nii -ref mni.nii -out structural_flirt.nii.gz -omat structural_flirt.mat -bins 640 -searchcost mutualinfo +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -265,16 +254,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"structural.nii"' # type=file|default=: input file - reference: '"mni.nii"' - # type=file|default=: reference file output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type bins: '640' # type=int|default=0: number of histogram bins cost_func: '"mutualinfo"' # type=enum|default='mutualinfo'|allowed['bbr','corratio','labeldiff','leastsq','mutualinfo','normcorr','normmi']: cost function imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/fnirt.yaml b/example-specs/interface/nipype/fsl/fnirt.yaml index 84d3b6d1..a952b56b 100644 --- a/example-specs/interface/nipype/fsl/fnirt.yaml +++ b/example-specs/interface/nipype/fsl/fnirt.yaml @@ -7,34 +7,34 @@ # ---- # FSL FNIRT wrapper for non-linear registration # -# For complete details, see the `FNIRT Documentation. -# `_ +# For complete details, see the `FNIRT Documentation. 
+# `_ # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat')) -# >>> res = fnt.run(ref_file=example_data('mni.nii', in_file=example_data('structural.nii')) #doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> fnt = fsl.FNIRT(affine_file=example_data('trans.mat')) +# >>> res = fnt.run(ref_file=example_data('mni.nii', in_file=example_data('structural.nii')) #doctest: +SKIP # -# T1 -> Mni153 +# T1 -> Mni153 # -# >>> from nipype.interfaces import fsl -# >>> fnirt_mprage = fsl.FNIRT() -# >>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2] -# >>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1] +# >>> from nipype.interfaces import fsl +# >>> fnirt_mprage = fsl.FNIRT() +# >>> fnirt_mprage.inputs.in_fwhm = [8, 4, 2, 2] +# >>> fnirt_mprage.inputs.subsampling_scheme = [4, 2, 1, 1] # -# Specify the resolution of the warps +# Specify the resolution of the warps # -# >>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6) -# >>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP +# >>> fnirt_mprage.inputs.warp_resolution = (6, 6, 6) +# >>> res = fnirt_mprage.run(in_file='structural.nii', ref_file='mni.nii', warped_file='warped.nii', fieldcoeff_file='fieldcoeff.nii')#doctest: +SKIP # -# We can check the command line and confirm that it's what we expect. +# We can check the command line and confirm that it's what we expect. 
+# +# >>> fnirt_mprage.cmdline #doctest: +SKIP +# 'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii' # -# >>> fnirt_mprage.cmdline #doctest: +SKIP -# 'fnirt --cout=fieldcoeff.nii --in=structural.nii --infwhm=8,4,2,2 --ref=mni.nii --subsamp=4,2,1,1 --warpres=6,6,6 --iout=warped.nii' # -# task_name: FNIRT nipype_name: FNIRT nipype_module: nipype.interfaces.fsl.preprocess @@ -59,16 +59,10 @@ inputs: # type=file|default=: name of file with mask in input image space inwarp_file: generic/file # type=file|default=: name of file containing initial non-linear warps - log_file: Path - # type=file: Name of log-file - # type=file|default=: Name of log-file ref_file: generic/file # type=file|default=: name of reference image refmask_file: generic/file # type=file|default=: name of file with mask in reference space - warped_file: Path - # type=file: warped image - # type=file|default=: name of output image callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -110,7 +104,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields log_file: log_file # type=file: Name of log-file # type=file|default=: Name of log-file @@ -213,13 +207,13 @@ tests: hessian_precision: # type=enum|default='double'|allowed['double','float']: Precision for representing Hessian, double or float. 
Default double output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -238,12 +232,10 @@ tests: # (if not specified, will try to choose a sensible value) in_fwhm: '[8, 4, 2, 2]' # type=list|default=[]: FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2] - subsampling_scheme: '[4, 2, 1, 1]' - # type=list|default=[]: sub-sampling scheme, list, default [4, 2, 1, 1] warp_resolution: (6, 6, 6) # type=tuple|default=(0, 0, 0): (approximate) resolution (in mm) of warp basis in x-, y- and z-direction, default 10, 10, 10 imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: example_data @@ -269,12 +261,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_fwhm: '[8, 4, 2, 2]' # type=list|default=[]: FWHM (in mm) of gaussian smoothing kernel for input volume, default [6, 4, 2, 2] - subsampling_scheme: '[4, 2, 1, 1]' - # type=list|default=[]: sub-sampling scheme, list, default [4, 2, 1, 1] warp_resolution: (6, 6, 6) # type=tuple|default=(0, 0, 0): (approximate) resolution (in mm) of warp basis in x-, y- and z-direction, default 10, 10, 10 imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/fugue.yaml b/example-specs/interface/nipype/fsl/fugue.yaml index 20d9554f..8a92f886 100644 --- a/example-specs/interface/nipype/fsl/fugue.yaml +++ b/example-specs/interface/nipype/fsl/fugue.yaml @@ -7,67 +7,67 @@ # ---- # FSL FUGUE set of tools for EPI distortion correction # -# `FUGUE `_ is, most generally, -# a set of tools for EPI distortion correction. +# `FUGUE `_ is, most generally, +# a set of tools for EPI distortion correction. # -# Distortions may be corrected for -# 1. improving registration with non-distorted images (e.g. structurals), -# or -# 2. dealing with motion-dependent changes. +# Distortions may be corrected for +# 1. improving registration with non-distorted images (e.g. structurals), +# or +# 2. dealing with motion-dependent changes. # -# FUGUE is designed to deal only with the first case - -# improving registration. +# FUGUE is designed to deal only with the first case - +# improving registration. 
# # -# Examples -# -------- +# Examples +# -------- # # -# Unwarping an input image (shift map is known): +# Unwarping an input image (shift map is known): # -# >>> from nipype.interfaces.fsl.preprocess import FUGUE -# >>> fugue = FUGUE() -# >>> fugue.inputs.in_file = 'epi.nii' -# >>> fugue.inputs.mask_file = 'epi_mask.nii' -# >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well -# >>> fugue.inputs.unwarp_direction = 'y' -# >>> fugue.inputs.output_type = "NIFTI_GZ" -# >>> fugue.cmdline # doctest: +ELLIPSIS -# 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz' -# >>> fugue.run() #doctest: +SKIP +# >>> from nipype.interfaces.fsl.preprocess import FUGUE +# >>> fugue = FUGUE() +# >>> fugue.inputs.in_file = 'epi.nii' +# >>> fugue.inputs.mask_file = 'epi_mask.nii' +# >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well +# >>> fugue.inputs.unwarp_direction = 'y' +# >>> fugue.inputs.output_type = "NIFTI_GZ" +# >>> fugue.cmdline # doctest: +ELLIPSIS +# 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --unwarp=epi_unwarped.nii.gz' +# >>> fugue.run() #doctest: +SKIP # # -# Warping an input image (shift map is known): +# Warping an input image (shift map is known): # -# >>> from nipype.interfaces.fsl.preprocess import FUGUE -# >>> fugue = FUGUE() -# >>> fugue.inputs.in_file = 'epi.nii' -# >>> fugue.inputs.forward_warping = True -# >>> fugue.inputs.mask_file = 'epi_mask.nii' -# >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well -# >>> fugue.inputs.unwarp_direction = 'y' -# >>> fugue.inputs.output_type = "NIFTI_GZ" -# >>> fugue.cmdline # doctest: +ELLIPSIS -# 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz' -# >>> fugue.run() #doctest: +SKIP +# >>> from nipype.interfaces.fsl.preprocess import FUGUE +# >>> fugue = FUGUE() +# >>> fugue.inputs.in_file = 
'epi.nii' +# >>> fugue.inputs.forward_warping = True +# >>> fugue.inputs.mask_file = 'epi_mask.nii' +# >>> fugue.inputs.shift_in_file = 'vsm.nii' # Previously computed with fugue as well +# >>> fugue.inputs.unwarp_direction = 'y' +# >>> fugue.inputs.output_type = "NIFTI_GZ" +# >>> fugue.cmdline # doctest: +ELLIPSIS +# 'fugue --in=epi.nii --mask=epi_mask.nii --loadshift=vsm.nii --unwarpdir=y --warp=epi_warped.nii.gz' +# >>> fugue.run() #doctest: +SKIP # # -# Computing the vsm (unwrapped phase map is known): +# Computing the vsm (unwrapped phase map is known): +# +# >>> from nipype.interfaces.fsl.preprocess import FUGUE +# >>> fugue = FUGUE() +# >>> fugue.inputs.phasemap_in_file = 'epi_phasediff.nii' +# >>> fugue.inputs.mask_file = 'epi_mask.nii' +# >>> fugue.inputs.dwell_to_asym_ratio = (0.77e-3 * 3) / 2.46e-3 +# >>> fugue.inputs.unwarp_direction = 'y' +# >>> fugue.inputs.save_shift = True +# >>> fugue.inputs.output_type = "NIFTI_GZ" +# >>> fugue.cmdline # doctest: +ELLIPSIS +# 'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y' +# >>> fugue.run() #doctest: +SKIP # -# >>> from nipype.interfaces.fsl.preprocess import FUGUE -# >>> fugue = FUGUE() -# >>> fugue.inputs.phasemap_in_file = 'epi_phasediff.nii' -# >>> fugue.inputs.mask_file = 'epi_mask.nii' -# >>> fugue.inputs.dwell_to_asym_ratio = (0.77e-3 * 3) / 2.46e-3 -# >>> fugue.inputs.unwarp_direction = 'y' -# >>> fugue.inputs.save_shift = True -# >>> fugue.inputs.output_type = "NIFTI_GZ" -# >>> fugue.cmdline # doctest: +ELLIPSIS -# 'fugue --dwelltoasym=0.9390243902 --mask=epi_mask.nii --phasemap=epi_phasediff.nii --saveshift=epi_phasediff_vsm.nii.gz --unwarpdir=y' -# >>> fugue.run() #doctest: +SKIP # # -# task_name: FUGUE nipype_name: FUGUE nipype_module: nipype.interfaces.fsl.preprocess @@ -84,9 +84,6 @@ inputs: # passed to the field in the automatically generated unittests. 
fmap_in_file: generic/file # type=file|default=: filename for loading fieldmap (rad/s) - fmap_out_file: Path - # type=file: fieldmap file - # type=file|default=: filename for saving fieldmap (rad/s) in_file: medimage/nifti1 # type=file|default=: filename of input volume mask_file: medimage/nifti1 @@ -95,15 +92,6 @@ inputs: # type=file|default=: filename for input phase image shift_in_file: medimage/nifti1 # type=file|default=: filename for reading pixel shift volume - shift_out_file: Path - # type=file: voxel shift map file - # type=file|default=: filename for saving pixel shift volume - unwarped_file: Path - # type=file: unwarped file - # type=file|default=: apply unwarping and save as filename - warped_file: Path - # type=file: forward warped file - # type=file|default=: apply forward warping and save as filename callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -136,7 +124,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -212,13 +200,13 @@ tests: save_unmasked_fmap: # type=bool|default=False: saves the unmasked fieldmap when using --savefmap output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - 
list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -237,16 +225,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: filename of input volume - mask_file: - # type=file|default=: filename for loading valid mask shift_in_file: # type=file|default=: filename for reading pixel shift volume - unwarp_direction: '"y"' - # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -265,18 +249,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: filename of input volume - forward_warping: 'True' - # type=bool|default=False: apply forward warping instead of unwarping mask_file: # type=file|default=: filename for loading valid mask - shift_in_file: - # type=file|default=: filename for reading pixel shift volume unwarp_direction: '"y"' # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) - output_type: '"NIFTI_GZ"' - # 
type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -295,18 +273,12 @@ tests: # (if not specified, will try to choose a sensible value) phasemap_in_file: # type=file|default=: filename for input phase image - mask_file: - # type=file|default=: filename for loading valid mask dwell_to_asym_ratio: (0.77e-3 * 3) / 2.46e-3 # type=float|default=0.0: set the dwell to asym time ratio - unwarp_direction: '"y"' - # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) save_shift: 'True' # type=bool|default=False: write pixel shift volume - output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -329,16 +301,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"epi.nii"' # type=file|default=: filename of input volume - mask_file: '"epi_mask.nii"' - # type=file|default=: filename for loading valid mask shift_in_file: '"vsm.nii" # Previously computed with fugue as well' # type=file|default=: filename for reading pixel shift volume - unwarp_direction: '"y"' - # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -350,18 +318,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"epi.nii"' # type=file|default=: filename of input volume - forward_warping: 'True' - # type=bool|default=False: apply forward warping instead of unwarping mask_file: '"epi_mask.nii"' # type=file|default=: filename for loading valid mask - shift_in_file: '"vsm.nii" # Previously computed with fugue as well' - # type=file|default=: filename for reading pixel shift volume unwarp_direction: '"y"' # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) - output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -373,18 +335,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
phasemap_in_file: '"epi_phasediff.nii"' # type=file|default=: filename for input phase image - mask_file: '"epi_mask.nii"' - # type=file|default=: filename for loading valid mask dwell_to_asym_ratio: (0.77e-3 * 3) / 2.46e-3 # type=float|default=0.0: set the dwell to asym time ratio - unwarp_direction: '"y"' - # type=enum|default='x'|allowed['x','x-','y','y-','z','z-']: specifies direction of warping (default y) save_shift: 'True' # type=bool|default=False: write pixel shift volume - output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/glm.yaml b/example-specs/interface/nipype/fsl/glm.yaml index 532b6d85..efe15f33 100644 --- a/example-specs/interface/nipype/fsl/glm.yaml +++ b/example-specs/interface/nipype/fsl/glm.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# FSL GLM: +# FSL GLM: +# +# Example +# ------- +# >>> import nipype.interfaces.fsl as fsl +# >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') +# >>> glm.cmdline +# 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' # -# Example -# ------- -# >>> import nipype.interfaces.fsl as fsl -# >>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI') -# >>> glm.cmdline -# 'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii' # -# task_name: GLM nipype_name: GLM nipype_module: nipype.interfaces.fsl.model @@ -38,32 +38,6 @@ inputs: # type=file|default=: input file name (text matrix or 3D/4D image file) mask: generic/file # type=file|default=: mask image file name if input is image - out_cope: Path - # type=outputmultiobject: output file name for COPEs (either as text file or image) - # type=file|default=: output file name for COPE (either as txt or image - out_data_name: Path - # type=file|default=: output file name for pre-processed data - out_f_name: Path - # type=file|default=: output file name for F-value of full model fit - out_file: Path - # type=file: file name of GLM parameters (if generated) - # type=file|default=: filename for GLM parameter estimates (GLM betas) - out_p_name: Path - # type=file|default=: output file name for p-values of Z-stats (either as text file or image) - out_pf_name: Path - # type=file|default=: output file name for p-value for full model fit - out_res_name: Path - # type=file|default=: output file name for residuals - out_sigsq_name: Path - # type=file|default=: output file name for residual noise variance sigma-square - out_t_name: Path - # type=file|default=: output file name 
for t-stats (either as txt or image - out_varcb_name: Path - # type=file|default=: output file name for variance of COPEs - out_vnscales_name: Path - # type=file|default=: output file name for scaling factors for variance normalisation - out_z_name: Path - # type=file|default=: output file name for Z-stats (either as txt or image callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -110,7 +84,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -162,13 +136,13 @@ tests: out_vnscales_name: # type=file|default=: output file name for scaling factors for variance normalisation output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -190,9 +164,9 @@ tests: design: # type=file|default=: file name of the GLM design matrix (text time courses for temporal 
regression or an image file for spatial regression) output_type: '"NIFTI"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.fsl as fsl expected_outputs: @@ -208,7 +182,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -219,9 +193,9 @@ doctests: design: '"maps.nii"' # type=file|default=: file name of the GLM design matrix (text time courses for temporal regression or an image file for spatial regression) output_type: '"NIFTI"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/ica__aroma.yaml b/example-specs/interface/nipype/fsl/ica__aroma.yaml index 2a980089..e50aaa74 100644 --- a/example-specs/interface/nipype/fsl/ica__aroma.yaml +++ b/example-specs/interface/nipype/fsl/ica__aroma.yaml @@ -6,32 +6,32 @@ # Docs # ---- # -# Interface for the ICA_AROMA.py script. +# Interface for the ICA_AROMA.py script. # -# ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns -# a data-driven method to identify and remove motion-related independent -# components from fMRI data. To that end it exploits a small, but robust -# set of theoretically motivated features, preventing the need for classifier -# re-training and therefore providing direct and easy applicability. +# ICA-AROMA (i.e. 'ICA-based Automatic Removal Of Motion Artifacts') concerns +# a data-driven method to identify and remove motion-related independent +# components from fMRI data. To that end it exploits a small, but robust +# set of theoretically motivated features, preventing the need for classifier +# re-training and therefore providing direct and easy applicability. 
# -# See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA +# See link for further documentation: https://github.com/rhr-pruim/ICA-AROMA # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces.fsl import ICA_AROMA +# >>> from nipype.testing import example_data +# >>> AROMA_obj = ICA_AROMA() +# >>> AROMA_obj.inputs.in_file = 'functional.nii' +# >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat' +# >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii' +# >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt' +# >>> AROMA_obj.inputs.mask = 'mask.nii.gz' +# >>> AROMA_obj.inputs.denoise_type = 'both' +# >>> AROMA_obj.inputs.out_dir = 'ICA_testout' +# >>> AROMA_obj.cmdline # doctest: +ELLIPSIS +# 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o .../ICA_testout' # -# >>> from nipype.interfaces.fsl import ICA_AROMA -# >>> from nipype.testing import example_data -# >>> AROMA_obj = ICA_AROMA() -# >>> AROMA_obj.inputs.in_file = 'functional.nii' -# >>> AROMA_obj.inputs.mat_file = 'func_to_struct.mat' -# >>> AROMA_obj.inputs.fnirt_warp_file = 'warpfield.nii' -# >>> AROMA_obj.inputs.motion_parameters = 'fsl_mcflirt_movpar.txt' -# >>> AROMA_obj.inputs.mask = 'mask.nii.gz' -# >>> AROMA_obj.inputs.denoise_type = 'both' -# >>> AROMA_obj.inputs.out_dir = 'ICA_testout' -# >>> AROMA_obj.cmdline # doctest: +ELLIPSIS -# 'ICA_AROMA.py -den both -warp warpfield.nii -i functional.nii -m mask.nii.gz -affmat func_to_struct.mat -mc fsl_mcflirt_movpar.txt -o .../ICA_testout' -# task_name: ICA_AROMA nipype_name: ICA_AROMA nipype_module: nipype.interfaces.fsl.aroma @@ -54,15 +54,12 @@ inputs: # type=file|default=: volume to be denoised mask: medimage/nifti-gz # type=file|default=: path/name volume mask - mat_file: datascience/text-matrix + mat_file: generic/file # type=file|default=: path/name of the mat-file describing the 
affine registration (e.g. FSL FLIRT) of the functional data to structural space (.mat file) melodic_dir: generic/directory # type=directory|default=: path to MELODIC directory if MELODIC has already been run - motion_parameters: text/text-file + motion_parameters: generic/file # type=file|default=: motion parameters file - out_dir: Path - # type=directory: directory contains (in addition to the denoised files): melodic.ica + classified_motion_components + classification_overview + feature_scores + melodic_ic_mni) - # type=directory|default='out': output directory callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -90,7 +87,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -125,7 +122,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,21 +141,15 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: volume to be denoised - mat_file: - # type=file|default=: path/name of the mat-file describing the affine registration (e.g. 
FSL FLIRT) of the functional data to structural space (.mat file) fnirt_warp_file: # type=file|default=: File name of the warp-file describing the non-linear registration (e.g. FSL FNIRT) of the structural data to MNI152 space (.nii.gz) - motion_parameters: - # type=file|default=: motion parameters file mask: # type=file|default=: path/name volume mask - denoise_type: '"both"' - # type=enum|default='nonaggr'|allowed['aggr','both','no','nonaggr']: Type of denoising strategy: -no: only classification, no denoising -nonaggr (default): non-aggresssive denoising, i.e. partial component regression -aggr: aggressive denoising, i.e. full component regression -both: both aggressive and non-aggressive denoising (two outputs) out_dir: '"ICA_testout"' # type=directory: directory contains (in addition to the denoised files): melodic.ica + classified_motion_components + classification_overview + feature_scores + melodic_ic_mni) # type=directory|default='out': output directory imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: example_data @@ -184,21 +175,15 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: volume to be denoised - mat_file: '"func_to_struct.mat"' - # type=file|default=: path/name of the mat-file describing the affine registration (e.g. FSL FLIRT) of the functional data to structural space (.mat file) fnirt_warp_file: '"warpfield.nii"' # type=file|default=: File name of the warp-file describing the non-linear registration (e.g. 
FSL FNIRT) of the structural data to MNI152 space (.nii.gz) - motion_parameters: '"fsl_mcflirt_movpar.txt"' - # type=file|default=: motion parameters file mask: '"mask.nii.gz"' # type=file|default=: path/name volume mask - denoise_type: '"both"' - # type=enum|default='nonaggr'|allowed['aggr','both','no','nonaggr']: Type of denoising strategy: -no: only classification, no denoising -nonaggr (default): non-aggresssive denoising, i.e. partial component regression -aggr: aggressive denoising, i.e. full component regression -both: both aggressive and non-aggressive denoising (two outputs) out_dir: '"ICA_testout"' # type=directory: directory contains (in addition to the denoised files): melodic.ica + classified_motion_components + classification_overview + feature_scores + melodic_ic_mni) # type=directory|default='out': output directory imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/image_maths.yaml b/example-specs/interface/nipype/fsl/image_maths.yaml index f156853a..766ee44d 100644 --- a/example-specs/interface/nipype/fsl/image_maths.yaml +++ b/example-specs/interface/nipype/fsl/image_maths.yaml @@ -6,21 +6,21 @@ # Docs # ---- # Use FSL fslmaths command to allow mathematical manipulation of images -# `FSL info `_ +# `FSL info `_ # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import anatfile +# >>> maths = fsl.ImageMaths(in_file=anatfile, op_string= '-add 5', +# ... 
out_file='foo_maths.nii') +# >>> maths.cmdline == 'fslmaths %s -add 5 foo_maths.nii' % anatfile +# True # -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import anatfile -# >>> maths = fsl.ImageMaths(in_file=anatfile, op_string= '-add 5', -# ... out_file='foo_maths.nii') -# >>> maths.cmdline == 'fslmaths %s -add 5 foo_maths.nii' % anatfile -# True # # -# task_name: ImageMaths nipype_name: ImageMaths nipype_module: nipype.interfaces.fsl.utils @@ -41,9 +41,6 @@ inputs: # type=file|default=: mask_file: generic/file # type=file|default=: use (following image>0) to mask current image - out_file: Path - # type=file: - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"foo_maths.nii"' # type=file: # type=file|default=: @@ -93,13 +90,13 @@ tests: out_data_type: # type=enum|default='char'|allowed['char','double','float','input','int','short']: output datatype, one of (char, short, int, float, double, input) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the 
test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +121,7 @@ tests: # type=file: # type=file|default=: imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: anatfile @@ -156,7 +153,7 @@ doctests: # type=file: # type=file|default=: imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/image_meants.yaml b/example-specs/interface/nipype/fsl/image_meants.yaml index 37df8536..9a5d5485 100644 --- a/example-specs/interface/nipype/fsl/image_meants.yaml +++ b/example-specs/interface/nipype/fsl/image_meants.yaml @@ -6,10 +6,10 @@ # Docs # ---- # Use fslmeants for printing the average timeseries (intensities) to -# the screen (or saves to a file). The average is taken over all voxels -# in the mask (or all voxels in the image if no mask is specified) +# the screen (or saves to a file). 
The average is taken over all voxels +# in the mask (or all voxels in the image if no mask is specified) +# # -# task_name: ImageMeants nipype_name: ImageMeants nipype_module: nipype.interfaces.fsl.utils @@ -28,9 +28,6 @@ inputs: # type=file|default=: input file for computing the average timeseries mask: generic/file # type=file|default=: input 3D mask - out_file: Path - # type=file: path/name of output text matrix - # type=file|default=: name of output text matrix callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -54,7 +51,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: path/name of output text matrix # type=file|default=: name of output text matrix @@ -86,13 +83,13 @@ tests: transpose: # type=bool|default=False: output results in transpose format (one row per voxel/mean) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git 
a/example-specs/interface/nipype/fsl/image_stats.yaml b/example-specs/interface/nipype/fsl/image_stats.yaml index 8c2f3fcb..be0fa57f 100644 --- a/example-specs/interface/nipype/fsl/image_stats.yaml +++ b/example-specs/interface/nipype/fsl/image_stats.yaml @@ -6,21 +6,21 @@ # Docs # ---- # Use FSL fslstats command to calculate stats from images -# `FSL info -# `_ +# `FSL info +# `_ # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import ImageStats +# >>> from nipype.testing import funcfile +# >>> stats = ImageStats(in_file=funcfile, op_string= '-M') +# >>> stats.cmdline == 'fslstats %s -M'%funcfile +# True # -# >>> from nipype.interfaces.fsl import ImageStats -# >>> from nipype.testing import funcfile -# >>> stats = ImageStats(in_file=funcfile, op_string= '-M') -# >>> stats.cmdline == 'fslstats %s -M'%funcfile -# True # # -# task_name: ImageStats nipype_name: ImageStats nipype_module: nipype.interfaces.fsl.utils @@ -63,7 +63,7 @@ outputs: out_stat: out_stat_callable # type=any: stats output templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -81,13 +81,13 @@ tests: index_mask_file: # type=file|default=: generate separate n submasks from indexMask, for indexvalues 1..n where n is the maximum index value in indexMask, and generate statistics for each submask output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements 
required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,7 +109,7 @@ tests: op_string: '"-M"' # type=str|default='': string defining the operation, options are applied in order, e.g. -M -l 10 -M will report the non-zero mean, apply a threshold and then report the new nonzero mean imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.testing name: funcfile @@ -138,7 +138,7 @@ doctests: op_string: '"-M"' # type=str|default='': string defining the operation, options are applied in order, e.g. -M -l 10 -M will report the non-zero mean, apply a threshold and then report the new nonzero mean imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/inv_warp.yaml b/example-specs/interface/nipype/fsl/inv_warp.yaml index 9151af0e..5ad529d2 100644 --- a/example-specs/interface/nipype/fsl/inv_warp.yaml +++ b/example-specs/interface/nipype/fsl/inv_warp.yaml @@ -6,23 +6,23 @@ # Docs # ---- # -# Use FSL Invwarp to invert a FNIRT warp +# Use FSL Invwarp to invert a FNIRT warp # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import InvWarp +# >>> invwarp = InvWarp() +# >>> invwarp.inputs.warp = "struct2mni.nii" +# >>> invwarp.inputs.reference = "anatomical.nii" +# >>> invwarp.inputs.output_type = "NIFTI_GZ" +# >>> invwarp.cmdline +# 'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii' +# >>> res = invwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import InvWarp -# >>> invwarp = InvWarp() -# >>> invwarp.inputs.warp = "struct2mni.nii" -# >>> invwarp.inputs.reference = "anatomical.nii" -# >>> invwarp.inputs.output_type = "NIFTI_GZ" -# >>> invwarp.cmdline -# 'invwarp --out=struct2mni_inverse.nii.gz --ref=anatomical.nii --warp=struct2mni.nii' -# >>> res = invwarp.run() # doctest: +SKIP # # -# task_name: InvWarp nipype_name: InvWarp nipype_module: nipype.interfaces.fsl.utils @@ -37,10 +37,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - inverse_warp: Path - # type=file: Name of output file, containing warps that are the "reverse" of those in --warp. - # type=file|default=: Name of output file, containing warps that are the "reverse" of those in --warp. This will be a field-file (rather than a file of spline coefficients), and it will have any affine component included as part of the displacements. 
- reference: medimage/nifti1 + reference: generic/file # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. warp: medimage/nifti1 # type=file|default=: Name of file containing warp-coefficients/fields. This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -96,13 +93,13 @@ tests: jacobian_max: # type=float|default=0.0: Maximum acceptable Jacobian value for constraint (default 100.0) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,12 +118,10 @@ tests: # (if not specified, will try to choose a sensible value) warp: # 
type=file|default=: Name of file containing warp-coefficients/fields. This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). - reference: - # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -149,12 +144,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. warp: '"struct2mni.nii"' # type=file|default=: Name of file containing warp-coefficients/fields. This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). - reference: '"anatomical.nii"' - # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. 
output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/isotropic_smooth.yaml b/example-specs/interface/nipype/fsl/isotropic_smooth.yaml index 6ffdabde..5b7041f7 100644 --- a/example-specs/interface/nipype/fsl/isotropic_smooth.yaml +++ b/example-specs/interface/nipype/fsl/isotropic_smooth.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -74,13 +71,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/l2_model.yaml b/example-specs/interface/nipype/fsl/l2_model.yaml index fb142c54..68695ae9 100644 --- a/example-specs/interface/nipype/fsl/l2_model.yaml +++ b/example-specs/interface/nipype/fsl/l2_model.yaml @@ -7,13 +7,13 @@ # ---- # Generate subject specific second level model # -# Examples -# 
-------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import L2Model +# >>> model = L2Model(num_copes=3) # 3 sessions # -# >>> from nipype.interfaces.fsl import L2Model -# >>> model = L2Model(num_copes=3) # 3 sessions # -# task_name: L2Model nipype_name: L2Model nipype_module: nipype.interfaces.fsl.model @@ -54,7 +54,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -64,7 +64,7 @@ tests: num_copes: # type=range|default=1: number of copes to be combined imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/level_1_design.yaml b/example-specs/interface/nipype/fsl/level_1_design.yaml index a85f6a94..8d366132 100644 --- a/example-specs/interface/nipype/fsl/level_1_design.yaml +++ b/example-specs/interface/nipype/fsl/level_1_design.yaml @@ -7,16 +7,16 @@ # ---- # Generate FEAT specific files # -# Examples -# -------- +# Examples +# -------- +# +# >>> level1design = Level1Design() +# >>> level1design.inputs.interscan_interval = 2.5 +# >>> level1design.inputs.bases = {'dgamma':{'derivs': False}} +# >>> level1design.inputs.session_info = 'session_info.npz' +# >>> level1design.run() # doctest: +SKIP # -# >>> level1design = Level1Design() -# >>> 
level1design.inputs.interscan_interval = 2.5 -# >>> level1design.inputs.bases = {'dgamma':{'derivs': False}} -# >>> level1design.inputs.session_info = 'session_info.npz' -# >>> level1design.run() # doctest: +SKIP # -# task_name: Level1Design nipype_name: Level1Design nipype_module: nipype.interfaces.fsl.model @@ -53,7 +53,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -73,7 +73,7 @@ tests: contrasts: # type=list|default=[]: List of contrasts with each contrast being a list of the form - [('name', 'stat', [condition list], [weight list], [session list])]. if session list is None or not provided, all sessions are used. For F contrasts, the condition list should contain previously defined T-contrasts. 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/make_dyadic_vectors.yaml b/example-specs/interface/nipype/fsl/make_dyadic_vectors.yaml index e41d3cd0..e5761851 100644 --- a/example-specs/interface/nipype/fsl/make_dyadic_vectors.yaml +++ b/example-specs/interface/nipype/fsl/make_dyadic_vectors.yaml @@ -6,7 +6,7 @@ # Docs # ---- # Create vector volume representing mean principal diffusion direction -# and its uncertainty (dispersion) +# and its uncertainty (dispersion) task_name: MakeDyadicVectors nipype_name: MakeDyadicVectors nipype_module: nipype.interfaces.fsl.dti @@ -53,7 +53,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -71,13 +71,13 @@ tests: perc: # type=float|default=0.0: the {perc}% angle of the output cone of uncertainty (output will be in degrees) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/maths_command.yaml b/example-specs/interface/nipype/fsl/maths_command.yaml index e9acb762..2e3da19c 100644 --- a/example-specs/interface/nipype/fsl/maths_command.yaml +++ b/example-specs/interface/nipype/fsl/maths_command.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -70,13 +67,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment 
variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/max_image.yaml b/example-specs/interface/nipype/fsl/max_image.yaml index 1d0e2d02..0573e615 100644 --- a/example-specs/interface/nipype/fsl/max_image.yaml +++ b/example-specs/interface/nipype/fsl/max_image.yaml @@ -7,16 +7,16 @@ # ---- # Use fslmaths to generate a max image across a given dimension. # -# Examples -# -------- -# >>> from nipype.interfaces.fsl.maths import MaxImage -# >>> maxer = MaxImage() -# >>> maxer.inputs.in_file = "functional.nii" # doctest: +SKIP -# >>> maxer.dimension = "T" -# >>> maxer.cmdline # doctest: +SKIP -# 'fslmaths functional.nii -Tmax functional_max.nii' +# Examples +# -------- +# >>> from nipype.interfaces.fsl.maths import MaxImage +# >>> maxer = MaxImage() +# >>> maxer.inputs.in_file = "functional.nii" # doctest: +SKIP +# >>> maxer.dimension = "T" +# >>> maxer.cmdline # doctest: +SKIP +# 'fslmaths functional.nii -Tmax functional_max.nii' +# # -# task_name: MaxImage nipype_name: MaxImage nipype_module: nipype.interfaces.fsl.maths @@ -33,9 +33,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: medimage/nifti1 # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -59,7 +56,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -83,13 +80,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -109,7 +106,7 @@ tests: in_file: # type=file|default=: image to operate on imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 
'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,7 +121,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fslmaths functional.nii -Tmax functional_max.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -133,7 +130,7 @@ doctests: in_file: '"functional.nii" # doctest: +SKIP' # type=file|default=: image to operate on imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/maxn_image.yaml b/example-specs/interface/nipype/fsl/maxn_image.yaml index ac326f39..9df306f7 100644 --- a/example-specs/interface/nipype/fsl/maxn_image.yaml +++ b/example-specs/interface/nipype/fsl/maxn_image.yaml @@ -6,9 +6,9 @@ # Docs # ---- # Use fslmaths to generate an image of index of max across -# a given dimension. +# a given dimension. +# # -# task_name: MaxnImage nipype_name: MaxnImage nipype_module: nipype.interfaces.fsl.maths @@ -25,9 +25,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -51,7 +48,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -75,13 +72,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/mcflirt.yaml b/example-specs/interface/nipype/fsl/mcflirt.yaml index f1ea002e..f62d1a07 100644 --- a/example-specs/interface/nipype/fsl/mcflirt.yaml +++ b/example-specs/interface/nipype/fsl/mcflirt.yaml @@ -7,21 +7,21 @@ # ---- # FSL MCFLIRT wrapper for within-modality motion correction # -# For 
complete details, see the `MCFLIRT Documentation. -# `_ +# For complete details, see the `MCFLIRT Documentation. +# `_ +# +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> mcflt = fsl.MCFLIRT() +# >>> mcflt.inputs.in_file = 'functional.nii' +# >>> mcflt.inputs.cost = 'mutualinfo' +# >>> mcflt.inputs.out_file = 'moco.nii' +# >>> mcflt.cmdline +# 'mcflirt -in functional.nii -cost mutualinfo -out moco.nii' +# >>> res = mcflt.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> mcflt = fsl.MCFLIRT() -# >>> mcflt.inputs.in_file = 'functional.nii' -# >>> mcflt.inputs.cost = 'mutualinfo' -# >>> mcflt.inputs.out_file = 'moco.nii' -# >>> mcflt.cmdline -# 'mcflirt -in functional.nii -cost mutualinfo -out moco.nii' -# >>> res = mcflt.run() # doctest: +SKIP # -# task_name: MCFLIRT nipype_name: MCFLIRT nipype_module: nipype.interfaces.fsl.preprocess @@ -40,9 +40,6 @@ inputs: # type=file|default=: timeseries to motion-correct init: generic/file # type=file|default=: initial transformation matrix - out_file: Path - # type=file: motion-corrected timeseries - # type=file|default=: file to write ref_file: generic/file # type=file|default=: target image for motion correction callable_defaults: @@ -80,7 +77,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"moco.nii"' # type=file: motion-corrected timeseries # type=file|default=: file to write @@ -132,13 +129,13 @@ tests: ref_file: # type=file|default=: target image for motion correction output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # 
type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,13 +154,11 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: timeseries to motion-correct - cost: '"mutualinfo"' - # type=enum|default='mutualinfo'|allowed['corratio','leastsquares','mutualinfo','normcorr','normmi','woods']: cost function to optimize out_file: '"moco.nii"' # type=file: motion-corrected timeseries # type=file|default=: file to write imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -178,7 +173,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: mcflirt -in functional.nii -cost mutualinfo -out moco.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -186,13 +181,11 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"functional.nii"' # type=file|default=: timeseries to motion-correct - cost: '"mutualinfo"' - # type=enum|default='mutualinfo'|allowed['corratio','leastsquares','mutualinfo','normcorr','normmi','woods']: cost function to optimize out_file: '"moco.nii"' # type=file: motion-corrected timeseries # type=file|default=: file to write imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/mean_image.yaml b/example-specs/interface/nipype/fsl/mean_image.yaml index c30b5413..c84d94e7 100644 --- a/example-specs/interface/nipype/fsl/mean_image.yaml +++ b/example-specs/interface/nipype/fsl/mean_image.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -72,13 +69,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/median_image.yaml b/example-specs/interface/nipype/fsl/median_image.yaml index 9b40b414..92d047e0 100644 --- a/example-specs/interface/nipype/fsl/median_image.yaml +++ b/example-specs/interface/nipype/fsl/median_image.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically 
generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -72,13 +69,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/melodic.yaml b/example-specs/interface/nipype/fsl/melodic.yaml index 05f40e83..37627ea6 100644 --- a/example-specs/interface/nipype/fsl/melodic.yaml +++ b/example-specs/interface/nipype/fsl/melodic.yaml @@ -6,30 +6,30 @@ # Docs # ---- # Multivariate Exploratory Linear 
Optimised Decomposition into Independent -# Components +# Components # -# Examples -# -------- +# Examples +# -------- +# +# >>> melodic_setup = MELODIC() +# >>> melodic_setup.inputs.approach = 'tica' +# >>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii'] +# >>> melodic_setup.inputs.no_bet = True +# >>> melodic_setup.inputs.bg_threshold = 10 +# >>> melodic_setup.inputs.tr_sec = 1.5 +# >>> melodic_setup.inputs.mm_thresh = 0.5 +# >>> melodic_setup.inputs.out_stats = True +# >>> melodic_setup.inputs.t_des = 'timeDesign.mat' +# >>> melodic_setup.inputs.t_con = 'timeDesign.con' +# >>> melodic_setup.inputs.s_des = 'subjectDesign.mat' +# >>> melodic_setup.inputs.s_con = 'subjectDesign.con' +# >>> melodic_setup.inputs.out_dir = 'groupICA.out' +# >>> melodic_setup.cmdline +# 'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con --Sdes=subjectDesign.mat --Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000' +# >>> melodic_setup.run() # doctest: +SKIP # -# >>> melodic_setup = MELODIC() -# >>> melodic_setup.inputs.approach = 'tica' -# >>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii'] -# >>> melodic_setup.inputs.no_bet = True -# >>> melodic_setup.inputs.bg_threshold = 10 -# >>> melodic_setup.inputs.tr_sec = 1.5 -# >>> melodic_setup.inputs.mm_thresh = 0.5 -# >>> melodic_setup.inputs.out_stats = True -# >>> melodic_setup.inputs.t_des = 'timeDesign.mat' -# >>> melodic_setup.inputs.t_con = 'timeDesign.con' -# >>> melodic_setup.inputs.s_des = 'subjectDesign.mat' -# >>> melodic_setup.inputs.s_con = 'subjectDesign.con' -# >>> melodic_setup.inputs.out_dir = 'groupICA.out' -# >>> melodic_setup.cmdline -# 'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con 
--Sdes=subjectDesign.mat --Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000' -# >>> melodic_setup.run() # doctest: +SKIP # # -# task_name: MELODIC nipype_name: MELODIC nipype_module: nipype.interfaces.fsl.model @@ -48,24 +48,21 @@ inputs: # type=file|default=: filename of the IC components file for mixture modelling bg_image: generic/file # type=file|default=: specify background image for report (default: mean image) - in_files: medimage/nifti1+list-of + in_files: generic/file+list-of # type=inputmultiobject|default=[]: input file names (either single file name or a list) mask: generic/file # type=file|default=: file name of mask for thresholding mix: generic/file # type=file|default=: mixing matrix for mixture modelling / filtering - out_dir: Path - # type=directory: - # type=directory|default=: output directory name - s_con: medimage-fsl/con + s_con: fileformats.medimage_fsl.Con # type=file|default=: t-contrast matrix across subject-domain - s_des: datascience/text-matrix + s_des: generic/file # type=file|default=: design matrix across subject-domain smode: generic/file # type=file|default=: matrix of session modes for report generation - t_con: medimage-fsl/con + t_con: fileformats.medimage_fsl.Con # type=file|default=: t-contrast matrix across time-domain - t_des: datascience/text-matrix + t_des: generic/file # type=file|default=: design matrix across time-domain callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -92,8 +89,8 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_dir: '"groupICA.out"' + # dict[str, str] - `path_template` values to be provided to output fields + out_dir: out_dir # type=directory: # type=directory|default=: output directory name requirements: @@ -200,13 
+197,13 @@ tests: remove_deriv: # type=bool|default=False: removes every second entry in paradigm file (EV derivatives) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -225,31 +222,18 @@ tests: # (if not specified, will try to choose a sensible value) approach: '"tica"' # type=str|default='': approach for decomposition, 2D: defl, symm (default), 3D: tica (default), concat - in_files: - # type=inputmultiobject|default=[]: input file names (either single file name or a list) no_bet: 'True' # type=bool|default=False: switch off BET - bg_threshold: '10' - # type=float|default=0.0: brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected) tr_sec: '1.5' # type=float|default=0.0: TR in seconds - mm_thresh: '0.5' - # type=float|default=0.0: threshold for Mixture Model based inference out_stats: 'True' # type=bool|default=False: output thresholded maps and probability maps - t_des: - # type=file|default=: design matrix across time-domain t_con: # type=file|default=: t-contrast matrix across time-domain - s_des: - # type=file|default=: design matrix across subject-domain s_con: # type=file|default=: t-contrast matrix across subject-domain - out_dir: '"groupICA.out"' - # type=directory: - # type=directory|default=: output 
directory name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -272,31 +256,18 @@ doctests: # '.mock()' method of the corresponding class is used instead. approach: '"tica"' # type=str|default='': approach for decomposition, 2D: defl, symm (default), 3D: tica (default), concat - in_files: '["functional.nii", "functional2.nii", "functional3.nii"]' - # type=inputmultiobject|default=[]: input file names (either single file name or a list) no_bet: 'True' # type=bool|default=False: switch off BET - bg_threshold: '10' - # type=float|default=0.0: brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected) tr_sec: '1.5' # type=float|default=0.0: TR in seconds - mm_thresh: '0.5' - # type=float|default=0.0: threshold for Mixture Model based inference out_stats: 'True' # type=bool|default=False: output thresholded maps and probability maps - t_des: '"timeDesign.mat"' - # type=file|default=: design matrix across time-domain t_con: '"timeDesign.con"' # type=file|default=: t-contrast matrix across time-domain - s_des: '"subjectDesign.mat"' - # type=file|default=: design matrix across subject-domain s_con: '"subjectDesign.con"' # type=file|default=: t-contrast matrix across subject-domain - out_dir: '"groupICA.out"' - # type=directory: - # type=directory|default=: output directory name imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and 
optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/merge.yaml b/example-specs/interface/nipype/fsl/merge.yaml index dcf85517..4b957720 100644 --- a/example-specs/interface/nipype/fsl/merge.yaml +++ b/example-specs/interface/nipype/fsl/merge.yaml @@ -7,29 +7,29 @@ # ---- # Use fslmerge to concatenate images # -# Images can be concatenated across time, x, y, or z dimensions. Across the -# time (t) dimension the TR is set by default to 1 sec. +# Images can be concatenated across time, x, y, or z dimensions. Across the +# time (t) dimension the TR is set by default to 1 sec. # -# Note: to set the TR to a different value, specify 't' for dimension and -# specify the TR value in seconds for the tr input. The dimension will be -# automatically updated to 'tr'. +# Note: to set the TR to a different value, specify 't' for dimension and +# specify the TR value in seconds for the tr input. The dimension will be +# automatically updated to 'tr'. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import Merge +# >>> merger = Merge() +# >>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii'] +# >>> merger.inputs.dimension = 't' +# >>> merger.inputs.output_type = 'NIFTI_GZ' +# >>> merger.cmdline +# 'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii' +# >>> merger.inputs.tr = 2.25 +# >>> merger.cmdline +# 'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25' # -# >>> from nipype.interfaces.fsl import Merge -# >>> merger = Merge() -# >>> merger.inputs.in_files = ['functional2.nii', 'functional3.nii'] -# >>> merger.inputs.dimension = 't' -# >>> merger.inputs.output_type = 'NIFTI_GZ' -# >>> merger.cmdline -# 'fslmerge -t functional2_merged.nii.gz functional2.nii functional3.nii' -# >>> merger.inputs.tr = 2.25 -# >>> merger.cmdline -# 'fslmerge -tr functional2_merged.nii.gz functional2.nii functional3.nii 2.25' # # -# task_name: Merge nipype_name: Merge nipype_module: nipype.interfaces.fsl.utils @@ -46,9 +46,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_files: medimage/nifti1+list-of # type=list|default=[]: - merged_file: Path - # type=file: - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -72,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -89,13 +86,13 @@ tests: # type=file: # type=file|default=: output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,14 +111,12 @@ tests: # (if not specified, will try to choose a sensible value) in_files: # type=list|default=[]: - dimension: '"t"' - # type=enum|default='t'|allowed['a','t','x','y','z']: dimension along which to merge, optionally set tr input when dimension is t output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # 
type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type tr: '2.25' # type=float|default=0.0: use to specify TR in seconds (default is 1.00 sec), overrides dimension and sets it to tr imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -144,14 +139,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_files: '["functional2.nii", "functional3.nii"]' # type=list|default=[]: - dimension: '"t"' - # type=enum|default='t'|allowed['a','t','x','y','z']: dimension along which to merge, optionally set tr input when dimension is t output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type tr: '2.25' # type=float|default=0.0: use to specify TR in seconds (default is 1.00 sec), overrides dimension and sets it to tr imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/min_image.yaml b/example-specs/interface/nipype/fsl/min_image.yaml index d6c07210..4a474c4e 100644 --- a/example-specs/interface/nipype/fsl/min_image.yaml +++ b/example-specs/interface/nipype/fsl/min_image.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -72,13 +69,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting 
that tests will typically diff --git a/example-specs/interface/nipype/fsl/motion_outliers.yaml b/example-specs/interface/nipype/fsl/motion_outliers.yaml index cca7ee4a..85affe20 100644 --- a/example-specs/interface/nipype/fsl/motion_outliers.yaml +++ b/example-specs/interface/nipype/fsl/motion_outliers.yaml @@ -6,16 +6,16 @@ # Docs # ---- # -# Use FSL fsl_motion_outliers`http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FSLMotionOutliers`_ to find outliers in timeseries (4d) data. -# Examples -# -------- -# >>> from nipype.interfaces.fsl import MotionOutliers -# >>> mo = MotionOutliers() -# >>> mo.inputs.in_file = "epi.nii" -# >>> mo.cmdline # doctest: +ELLIPSIS -# 'fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt' -# >>> res = mo.run() # doctest: +SKIP -# +# Use FSL fsl_motion_outliers`http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FSLMotionOutliers`_ to find outliers in timeseries (4d) data. +# Examples +# -------- +# >>> from nipype.interfaces.fsl import MotionOutliers +# >>> mo = MotionOutliers() +# >>> mo.inputs.in_file = "epi.nii" +# >>> mo.cmdline # doctest: +ELLIPSIS +# 'fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt' +# >>> res = mo.run() # doctest: +SKIP +# task_name: MotionOutliers nipype_name: MotionOutliers nipype_module: nipype.interfaces.fsl.utils @@ -34,15 +34,6 @@ inputs: # type=file|default=: unfiltered 4D image mask: generic/file # type=file|default=: mask image for calculating metric - out_file: Path - # type=file: - # type=file|default=: output outlier file name - out_metric_plot: Path - # type=file: - # type=file|default=: output metric values plot (DVARS etc.) file name - out_metric_values: Path - # type=file: - # type=file|default=: output metric values (DVARS etc.) 
file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -72,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -101,13 +92,13 @@ tests: # type=file: # type=file|default=: output metric values plot (DVARS etc.) file name output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +118,7 @@ tests: in_file: # type=file|default=: unfiltered 4D image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting 
that tests will typically @@ -142,7 +133,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fsl_motion_outliers -i epi.nii -o epi_outliers.txt -p epi_metrics.png -s epi_metrics.txt +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -151,7 +142,7 @@ doctests: in_file: '"epi.nii"' # type=file|default=: unfiltered 4D image imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/multi_image_maths.yaml b/example-specs/interface/nipype/fsl/multi_image_maths.yaml index 72a24c1b..040a0014 100644 --- a/example-specs/interface/nipype/fsl/multi_image_maths.yaml +++ b/example-specs/interface/nipype/fsl/multi_image_maths.yaml @@ -7,18 +7,18 @@ # ---- # Use fslmaths to perform a sequence of mathematical operations. 
# -# Examples -# -------- -# >>> from nipype.interfaces.fsl import MultiImageMaths -# >>> maths = MultiImageMaths() -# >>> maths.inputs.in_file = "functional.nii" -# >>> maths.inputs.op_string = "-add %s -mul -1 -div %s" -# >>> maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] -# >>> maths.inputs.out_file = "functional4.nii" -# >>> maths.cmdline -# 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii' +# Examples +# -------- +# >>> from nipype.interfaces.fsl import MultiImageMaths +# >>> maths = MultiImageMaths() +# >>> maths.inputs.in_file = "functional.nii" +# >>> maths.inputs.op_string = "-add %s -mul -1 -div %s" +# >>> maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] +# >>> maths.inputs.out_file = "functional4.nii" +# >>> maths.cmdline +# 'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii' +# # -# task_name: MultiImageMaths nipype_name: MultiImageMaths nipype_module: nipype.interfaces.fsl.maths @@ -37,9 +37,6 @@ inputs: # type=file|default=: image to operate on operand_files: medimage/nifti1+list-of # type=inputmultiobject|default=[]: list of file names to plug into op string - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -56,15 +53,15 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- out_file: medimage/nifti1 + out_file: generic/file # type=file: image written after calculations # type=file|default=: image to write callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields - out_file: '"functional4.nii"' + # dict[str, str] - `path_template` values to be provided to output fields + out_file: out_file # type=file: image written after calculations # type=file|default=: image to write requirements: @@ -89,13 +86,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,15 +111,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: image to operate on - op_string: '"-add %s -mul -1 -div %s"' - # type=string|default='': python formatted string of operations to perform operand_files: # type=inputmultiobject|default=[]: list of file names to plug into op string - out_file: '"functional4.nii"' - # type=file: image written after calculations - # type=file|default=: image to write imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -137,7 +129,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -145,15 +137,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: image to operate on - op_string: '"-add %s -mul -1 -div %s"' - # type=string|default='': python formatted string of operations to perform operand_files: '["functional2.nii", "functional3.nii"]' # type=inputmultiobject|default=[]: list of file names to plug into op string - out_file: '"functional4.nii"' - # type=file: image written after calculations - # type=file|default=: image to write imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/multiple_regress_design.yaml b/example-specs/interface/nipype/fsl/multiple_regress_design.yaml index bd39d051..1f59c347 100644 --- a/example-specs/interface/nipype/fsl/multiple_regress_design.yaml +++ b/example-specs/interface/nipype/fsl/multiple_regress_design.yaml @@ -7,23 +7,23 @@ # ---- # Generate multiple regression design # -# .. note:: -# FSL does not demean columns for higher level analysis. +# .. note:: +# FSL does not demean columns for higher level analysis. # -# Please see `FSL documentation -# `_ -# for more details on model specification for higher level analysis. +# Please see `FSL documentation +# `_ +# for more details on model specification for higher level analysis. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import MultipleRegressDesign +# >>> model = MultipleRegressDesign() +# >>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]] +# >>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3]) +# >>> model.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import MultipleRegressDesign -# >>> model = MultipleRegressDesign() -# >>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]] -# >>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3]) -# >>> model.run() # doctest: +SKIP # -# task_name: MultipleRegressDesign nipype_name: MultipleRegressDesign nipype_module: nipype.interfaces.fsl.model @@ -66,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -80,7 +80,7 @@ tests: groups: # 
type=list|default=[]: list of group identifiers (defaults to single group) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/overlay.yaml b/example-specs/interface/nipype/fsl/overlay.yaml index 155e80a0..e37f1b1b 100644 --- a/example-specs/interface/nipype/fsl/overlay.yaml +++ b/example-specs/interface/nipype/fsl/overlay.yaml @@ -6,23 +6,23 @@ # Docs # ---- # Use FSL's overlay command to combine background and statistical images -# into one volume +# into one volume # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> combine = fsl.Overlay() +# >>> combine.inputs.background_image = 'mean_func.nii.gz' +# >>> combine.inputs.auto_thresh_bg = True +# >>> combine.inputs.stat_image = 'zstat1.nii.gz' +# >>> combine.inputs.stat_thresh = (3.5, 10) +# >>> combine.inputs.show_negative_stats = True +# >>> res = combine.run() #doctest: +SKIP # -# >>> from nipype.interfaces import fsl -# >>> combine = fsl.Overlay() -# >>> combine.inputs.background_image = 'mean_func.nii.gz' -# >>> combine.inputs.auto_thresh_bg = True -# >>> combine.inputs.stat_image = 'zstat1.nii.gz' -# >>> combine.inputs.stat_thresh = (3.5, 10) -# >>> combine.inputs.show_negative_stats = True -# >>> res = combine.run() #doctest: +SKIP # # -# task_name: Overlay nipype_name: Overlay nipype_module: nipype.interfaces.fsl.utils @@ -39,9 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. 
background_image: generic/file # type=file|default=: image to use as background - out_file: Path - # type=file: combined image volume - # type=file|default=: combined image volume stat_image: generic/file # type=file|default=: statistical image to overlay in color stat_image2: generic/file @@ -69,7 +66,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: combined image volume # type=file|default=: combined image volume @@ -107,13 +104,13 @@ tests: # type=file: combined image volume # type=file|default=: combined image volume output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/percentile_image.yaml b/example-specs/interface/nipype/fsl/percentile_image.yaml index 01a51566..8cc4a4ac 100644 --- a/example-specs/interface/nipype/fsl/percentile_image.yaml +++ b/example-specs/interface/nipype/fsl/percentile_image.yaml @@ -7,17 +7,17 @@ # ---- # Use fslmaths to generate a percentile image across a given dimension. 
# -# Examples -# -------- -# >>> from nipype.interfaces.fsl.maths import MaxImage -# >>> percer = PercentileImage() -# >>> percer.inputs.in_file = "functional.nii" # doctest: +SKIP -# >>> percer.dimension = "T" -# >>> percer.perc = 90 -# >>> percer.cmdline # doctest: +SKIP -# 'fslmaths functional.nii -Tperc 90 functional_perc.nii' +# Examples +# -------- +# >>> from nipype.interfaces.fsl.maths import MaxImage +# >>> percer = PercentileImage() +# >>> percer.inputs.in_file = "functional.nii" # doctest: +SKIP +# >>> percer.dimension = "T" +# >>> percer.perc = 90 +# >>> percer.cmdline # doctest: +SKIP +# 'fslmaths functional.nii -Tperc 90 functional_perc.nii' +# # -# task_name: PercentileImage nipype_name: PercentileImage nipype_module: nipype.interfaces.fsl.maths @@ -34,9 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -86,13 +83,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # 
type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -112,7 +109,7 @@ tests: in_file: # type=file|default=: image to operate on imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -127,7 +124,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: fslmaths functional.nii -Tperc 90 functional_perc.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -136,7 +133,7 @@ doctests: in_file: '"functional.nii" # doctest: +SKIP' # type=file|default=: image to operate on imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/plot_motion_params.yaml b/example-specs/interface/nipype/fsl/plot_motion_params.yaml index 997f47b1..4b59e61b 100644 --- a/example-specs/interface/nipype/fsl/plot_motion_params.yaml +++ b/example-specs/interface/nipype/fsl/plot_motion_params.yaml @@ -6,31 +6,31 @@ # Docs # ---- # Use fsl_tsplot to plot the estimated motion parameters from a -# realignment program. +# realignment program. # # -# Examples -# -------- +# Examples +# -------- # -# >>> import nipype.interfaces.fsl as fsl -# >>> plotter = fsl.PlotMotionParams() -# >>> plotter.inputs.in_file = 'functional.par' -# >>> plotter.inputs.in_source = 'fsl' -# >>> plotter.inputs.plot_type = 'rotations' -# >>> res = plotter.run() #doctest: +SKIP +# >>> import nipype.interfaces.fsl as fsl +# >>> plotter = fsl.PlotMotionParams() +# >>> plotter.inputs.in_file = 'functional.par' +# >>> plotter.inputs.in_source = 'fsl' +# >>> plotter.inputs.plot_type = 'rotations' +# >>> res = plotter.run() #doctest: +SKIP # # -# Notes -# ----- +# Notes +# ----- +# +# The 'in_source' attribute determines the order of columns that are expected +# in the source file. FSL prints motion parameters in the order rotations, +# translations, while SPM prints them in the opposite order. This interface +# should be able to plot timecourses of motion parameters generated from +# other sources as long as they fall under one of these two patterns. For +# more flexibility, see the :class:`fsl.PlotTimeSeries` interface. # -# The 'in_source' attribute determines the order of columns that are expected -# in the source file. FSL prints motion parameters in the order rotations, -# translations, while SPM prints them in the opposite order. This interface -# should be able to plot timecourses of motion parameters generated from -# other sources as long as they fall under one of these two patterns. For -# more flexibility, see the :class:`fsl.PlotTimeSeries` interface. 
# -# task_name: PlotMotionParams nipype_name: PlotMotionParams nipype_module: nipype.interfaces.fsl.utils @@ -45,9 +45,6 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: Path - # type=file: image to write - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image to write # type=file|default=: image to write @@ -93,13 +90,13 @@ tests: # type=file: image to write # type=file|default=: image to write output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/plot_time_series.yaml 
b/example-specs/interface/nipype/fsl/plot_time_series.yaml index 8d0c3cd8..bbc6d8e3 100644 --- a/example-specs/interface/nipype/fsl/plot_time_series.yaml +++ b/example-specs/interface/nipype/fsl/plot_time_series.yaml @@ -7,18 +7,18 @@ # ---- # Use fsl_tsplot to create images of time course plots. # -# Examples -# -------- +# Examples +# -------- +# +# >>> import nipype.interfaces.fsl as fsl +# >>> plotter = fsl.PlotTimeSeries() +# >>> plotter.inputs.in_file = 'functional.par' +# >>> plotter.inputs.title = 'Functional timeseries' +# >>> plotter.inputs.labels = ['run1', 'run2'] +# >>> plotter.run() #doctest: +SKIP # -# >>> import nipype.interfaces.fsl as fsl -# >>> plotter = fsl.PlotTimeSeries() -# >>> plotter.inputs.in_file = 'functional.par' -# >>> plotter.inputs.title = 'Functional timeseries' -# >>> plotter.inputs.labels = ['run1', 'run2'] -# >>> plotter.run() #doctest: +SKIP # # -# task_name: PlotTimeSeries nipype_name: PlotTimeSeries nipype_module: nipype.interfaces.fsl.utils @@ -35,9 +35,6 @@ inputs: # passed to the field in the automatically generated unittests. 
legend_file: generic/file # type=file|default=: legend file - out_file: Path - # type=file: image to write - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -61,7 +58,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image to write # type=file|default=: image to write @@ -103,13 +100,13 @@ tests: # type=file: image to write # type=file|default=: image to write output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/power_spectrum.yaml b/example-specs/interface/nipype/fsl/power_spectrum.yaml index 770bf1da..1c37758a 100644 --- a/example-specs/interface/nipype/fsl/power_spectrum.yaml +++ b/example-specs/interface/nipype/fsl/power_spectrum.yaml @@ -7,16 +7,16 @@ # ---- # Use FSL PowerSpectrum command for power spectrum estimation. 
# -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> pspec = fsl.PowerSpectrum() +# >>> pspec.inputs.in_file = 'functional.nii' +# >>> res = pspec.run() # doctest: +SKIP # -# >>> from nipype.interfaces import fsl -# >>> pspec = fsl.PowerSpectrum() -# >>> pspec.inputs.in_file = 'functional.nii' -# >>> res = pspec.run() # doctest: +SKIP # # -# task_name: PowerSpectrum nipype_name: PowerSpectrum nipype_module: nipype.interfaces.fsl.utils @@ -33,9 +33,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: input 4D file to estimate the power spectrum - out_file: Path - # type=file: path/name of the output 4D power spectrum file - # type=file|default=: name of output 4D file for power spectrum callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -59,7 +56,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: path/name of the output 4D power spectrum file # type=file|default=: name of output 4D file for power spectrum @@ -75,13 +72,13 @@ tests: # type=file: path/name of the output 4D power spectrum file # type=file|default=: name of output 4D file for power spectrum output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # 
list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/prelude.yaml b/example-specs/interface/nipype/fsl/prelude.yaml index ac6a82c3..bcbe4c25 100644 --- a/example-specs/interface/nipype/fsl/prelude.yaml +++ b/example-specs/interface/nipype/fsl/prelude.yaml @@ -7,12 +7,12 @@ # ---- # FSL prelude wrapper for phase unwrapping # -# Examples -# -------- +# Examples +# -------- +# +# Please insert examples for use of this command # -# Please insert examples for use of this command # -# task_name: PRELUDE nipype_name: PRELUDE nipype_module: nipype.interfaces.fsl.preprocess @@ -41,9 +41,6 @@ inputs: # type=file|default=: saving the raw phase output savemask_file: generic/file # type=file|default=: saving the mask volume - unwrapped_phase_file: Path - # type=file: unwrapped phase file - # type=file|default=: file containing unwrapepd phase callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields unwrapped_phase_file: unwrapped_phase_file # type=file: unwrapped phase file # type=file|default=: file containing unwrapepd phase @@ -111,13 +108,13 @@ tests: removeramps: # type=bool|default=False: remove phase ramps during unwrapping output_type: 
- # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/prepare_fieldmap.yaml b/example-specs/interface/nipype/fsl/prepare_fieldmap.yaml index 90a0e12f..adfee998 100644 --- a/example-specs/interface/nipype/fsl/prepare_fieldmap.yaml +++ b/example-specs/interface/nipype/fsl/prepare_fieldmap.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# Interface for the fsl_prepare_fieldmap script (FSL 5.0) +# Interface for the fsl_prepare_fieldmap script (FSL 5.0) # -# Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in -# rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS -# images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```). +# Prepares a fieldmap suitable for FEAT from SIEMENS data - saves output in +# rad/s format (e.g. ```fsl_prepare_fieldmap SIEMENS +# images_3_gre_field_mapping images_4_gre_field_mapping fmap_rads 2.65```). 
# # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import PrepareFieldmap +# >>> prepare = PrepareFieldmap() +# >>> prepare.inputs.in_phase = "phase.nii" +# >>> prepare.inputs.in_magnitude = "magnitude.nii" +# >>> prepare.inputs.output_type = "NIFTI_GZ" +# >>> prepare.cmdline # doctest: +ELLIPSIS +# 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii .../phase_fslprepared.nii.gz 2.460000' +# >>> res = prepare.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import PrepareFieldmap -# >>> prepare = PrepareFieldmap() -# >>> prepare.inputs.in_phase = "phase.nii" -# >>> prepare.inputs.in_magnitude = "magnitude.nii" -# >>> prepare.inputs.output_type = "NIFTI_GZ" -# >>> prepare.cmdline # doctest: +ELLIPSIS -# 'fsl_prepare_fieldmap SIEMENS phase.nii magnitude.nii .../phase_fslprepared.nii.gz 2.460000' -# >>> res = prepare.run() # doctest: +SKIP # # -# task_name: PrepareFieldmap nipype_name: PrepareFieldmap nipype_module: nipype.interfaces.fsl.epi @@ -41,13 +41,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- in_magnitude: medimage/nifti1 + in_magnitude: generic/file # type=file|default=: Magnitude difference map, brain extracted in_phase: medimage/nifti1 # type=file|default=: Phase difference map, in SIEMENS format range from 0-4096 or 0-8192) - out_fieldmap: Path - # type=file: output name for prepared fieldmap - # type=file|default=: output name for prepared fieldmap callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -71,7 +68,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -92,13 +89,13 @@ tests: # type=file: output name for prepared fieldmap # type=file|default=: output name for prepared fieldmap output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -117,12 +114,10 @@ tests: # (if not specified, will try to choose a sensible value) in_phase: 
# type=file|default=: Phase difference map, in SIEMENS format range from 0-4096 or 0-8192) - in_magnitude: - # type=file|default=: Magnitude difference map, brain extracted output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -145,12 +140,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_phase: '"phase.nii"' # type=file|default=: Phase difference map, in SIEMENS format range from 0-4096 or 0-8192) - in_magnitude: '"magnitude.nii"' - # type=file|default=: Magnitude difference map, brain extracted output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/prob_track_x.yaml b/example-specs/interface/nipype/fsl/prob_track_x.yaml index 03071e9a..87d4f1df 100644 --- a/example-specs/interface/nipype/fsl/prob_track_x.yaml +++ b/example-specs/interface/nipype/fsl/prob_track_x.yaml @@ -5,17 +5,17 @@ # # Docs # ---- -# Use FSL probtrackx for tractography on bedpostx results +# Use FSL probtrackx for tractography on bedpostx results # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> pbx = fsl.ProbTrackX(samples_base_name='merged', mask='mask.nii', seed='MASK_average_thal_right.nii', mode='seedmask', xfm='trans.mat', n_samples=3, n_steps=10, force_dir=True, opd=True, os2t=True, target_masks = ['targets_MASK1.nii', 'targets_MASK2.nii'], thsamples='merged_thsamples.nii', fsamples='merged_fsamples.nii', phsamples='merged_phsamples.nii', out_dir='.') +# >>> pbx.cmdline +# 'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. --samples=merged --seed=MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat' # -# >>> from nipype.interfaces import fsl -# >>> pbx = fsl.ProbTrackX(samples_base_name='merged', mask='mask.nii', seed='MASK_average_thal_right.nii', mode='seedmask', xfm='trans.mat', n_samples=3, n_steps=10, force_dir=True, opd=True, os2t=True, target_masks = ['targets_MASK1.nii', 'targets_MASK2.nii'], thsamples='merged_thsamples.nii', fsamples='merged_fsamples.nii', phsamples='merged_phsamples.nii', out_dir='.') -# >>> pbx.cmdline -# 'probtrackx --forcedir -m mask.nii --mode=seedmask --nsamples=3 --nsteps=10 --opd --os2t --dir=. 
--samples=merged --seed=MASK_average_thal_right.nii --targetmasks=targets.txt --xfm=trans.mat' # -# task_name: ProbTrackX nipype_name: ProbTrackX nipype_module: nipype.interfaces.fsl.dti @@ -42,8 +42,6 @@ inputs: # type=file|default=: second bet binary mask (in diffusion space) in twomask_symm mode mesh: generic/file # type=file|default=: Freesurfer-type surface descriptor (in ascii format) - out_dir: Path - # type=directory|default=: directory to put the final volumes in phsamples: medimage/nifti1+list-of # type=inputmultiobject|default=[]: seed_ref: generic/file @@ -92,7 +90,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -152,7 +150,7 @@ tests: c_thresh: # type=float|default=0.0: curvature threshold - default=0.2 sample_random_points: - # type=bool|default=False: sample random points within seed voxels + # type=float|default=0.0: sample random points within seed voxels step_length: # type=float|default=0.0: step_length in mm - default=0.5 loop_check: @@ -166,19 +164,19 @@ tests: mod_euler: # type=bool|default=False: use modified euler streamlining random_seed: - # type=bool|default=False: random seed + # type=int|default=0: random seed s2tastext: # type=bool|default=False: output seed-to-target counts as a text file (useful when seeding from a mesh) verbose: # type=enum|default=0|allowed[0,1,2]: Verbose level, [0-2]. Level 2 is required to output particle files. 
output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -226,7 +224,7 @@ tests: out_dir: '"."' # type=directory|default=: directory to put the final volumes in imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -278,7 +276,7 @@ doctests: out_dir: '"."' # type=directory|default=: directory to put the final volumes in imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/prob_track_x2.yaml b/example-specs/interface/nipype/fsl/prob_track_x2.yaml index 5b8140db..736d563f 100644 --- a/example-specs/interface/nipype/fsl/prob_track_x2.yaml +++ b/example-specs/interface/nipype/fsl/prob_track_x2.yaml @@ -7,22 +7,22 @@ # ---- # Use FSL probtrackx2 for tractography on bedpostx results # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> pbx2 = fsl.ProbTrackX2() +# >>> pbx2.inputs.seed = 'seed_source.nii.gz' +# >>> pbx2.inputs.thsamples = 'merged_th1samples.nii.gz' +# >>> pbx2.inputs.fsamples = 'merged_f1samples.nii.gz' +# >>> pbx2.inputs.phsamples = 'merged_ph1samples.nii.gz' +# >>> pbx2.inputs.mask = 'nodif_brain_mask.nii.gz' +# >>> pbx2.inputs.out_dir = '.' +# >>> pbx2.inputs.n_samples = 3 +# >>> pbx2.inputs.n_steps = 10 +# >>> pbx2.cmdline +# 'probtrackx2 --forcedir -m nodif_brain_mask.nii.gz --nsamples=3 --nsteps=10 --opd --dir=. --samples=merged --seed=seed_source.nii.gz' # -# >>> from nipype.interfaces import fsl -# >>> pbx2 = fsl.ProbTrackX2() -# >>> pbx2.inputs.seed = 'seed_source.nii.gz' -# >>> pbx2.inputs.thsamples = 'merged_th1samples.nii.gz' -# >>> pbx2.inputs.fsamples = 'merged_f1samples.nii.gz' -# >>> pbx2.inputs.phsamples = 'merged_ph1samples.nii.gz' -# >>> pbx2.inputs.mask = 'nodif_brain_mask.nii.gz' -# >>> pbx2.inputs.out_dir = '.' -# >>> pbx2.inputs.n_samples = 3 -# >>> pbx2.inputs.n_steps = 10 -# >>> pbx2.cmdline -# 'probtrackx2 --forcedir -m nodif_brain_mask.nii.gz --nsamples=3 --nsteps=10 --opd --dir=. 
--samples=merged --seed=seed_source.nii.gz' -# task_name: ProbTrackX2 nipype_name: ProbTrackX2 nipype_module: nipype.interfaces.fsl.dti @@ -51,9 +51,7 @@ inputs: # type=file|default=: Column-space mask used for Nxn connectivity matrix mask: medimage/nifti-gz # type=file|default=: bet binary mask file in diffusion space - out_dir: Path - # type=directory|default=: directory to put the final volumes in - phsamples: medimage/nifti-gz+list-of + phsamples: generic/file+list-of # type=inputmultiobject|default=[]: seed_ref: generic/file # type=file|default=: reference vol to define seed space in simple mode - diffusion space assumed if absent @@ -67,7 +65,7 @@ inputs: # type=file|default=: Brain mask in DTI space target_masks: generic/file+list-of # type=inputmultiobject|default=[]: list of target masks - required for seeds_to_targets classification - thsamples: medimage/nifti-gz+list-of + thsamples: generic/file+list-of # type=inputmultiobject|default=[]: waypoints: generic/file # type=file|default=: waypoint mask or ascii list of waypoint masks - only keep paths going through ALL the masks @@ -115,7 +113,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -203,7 +201,7 @@ tests: c_thresh: # type=float|default=0.0: curvature threshold - default=0.2 sample_random_points: - # type=bool|default=False: sample random points within seed voxels + # type=float|default=0.0: sample random points within seed voxels step_length: # type=float|default=0.0: step_length in mm - default=0.5 loop_check: @@ -217,19 +215,19 @@ tests: mod_euler: # type=bool|default=False: use modified 
euler streamlining random_seed: - # type=bool|default=False: random seed + # type=int|default=0: random seed s2tastext: # type=bool|default=False: output seed-to-target counts as a text file (useful when seeding from a mesh) verbose: # type=enum|default=0|allowed[0,1,2]: Verbose level, [0-2]. Level 2 is required to output particle files. output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -248,22 +246,14 @@ tests: # (if not specified, will try to choose a sensible value) seed: '"seed_source.nii.gz"' # type=traitcompound|default=None: seed volume(s), or voxel(s) or freesurfer label file - thsamples: - # type=inputmultiobject|default=[]: fsamples: # type=inputmultiobject|default=[]: - phsamples: - # type=inputmultiobject|default=[]: mask: # type=file|default=: bet binary mask file in diffusion space - out_dir: '"."' - # type=directory|default=: directory to put the final volumes in n_samples: '3' # type=int|default=5000: number of samples - default=5000 - n_steps: '10' - # type=int|default=0: number of steps per sample - default=2000 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, 
with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -286,22 +276,14 @@ doctests: # '.mock()' method of the corresponding class is used instead. seed: '"seed_source.nii.gz"' # type=traitcompound|default=None: seed volume(s), or voxel(s) or freesurfer label file - thsamples: '"merged_th1samples.nii.gz"' - # type=inputmultiobject|default=[]: fsamples: '"merged_f1samples.nii.gz"' # type=inputmultiobject|default=[]: - phsamples: '"merged_ph1samples.nii.gz"' - # type=inputmultiobject|default=[]: mask: '"nodif_brain_mask.nii.gz"' # type=file|default=: bet binary mask file in diffusion space - out_dir: '"."' - # type=directory|default=: directory to put the final volumes in n_samples: '3' # type=int|default=5000: number of samples - default=5000 - n_steps: '10' - # type=int|default=0: number of steps per sample - default=2000 imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/proj_thresh.yaml b/example-specs/interface/nipype/fsl/proj_thresh.yaml index 5ad55028..f9deb51f 100644 --- a/example-specs/interface/nipype/fsl/proj_thresh.yaml +++ b/example-specs/interface/nipype/fsl/proj_thresh.yaml @@ -6,19 +6,19 @@ # Docs # ---- # Use FSL proj_thresh for thresholding some outputs of probtrack -# For complete details, see the FDT Documentation -# +# For complete details, see the FDT Documentation +# # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces import fsl +# >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] +# >>> pThresh = fsl.ProjThresh(in_files=ldir, threshold=3) +# >>> pThresh.cmdline +# 'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3' # -# >>> from nipype.interfaces import fsl -# >>> ldir = ['seeds_to_M1.nii', 'seeds_to_M2.nii'] -# >>> pThresh = fsl.ProjThresh(in_files=ldir, threshold=3) -# >>> pThresh.cmdline -# 'proj_thresh seeds_to_M1.nii seeds_to_M2.nii 3' # -# task_name: ProjThresh nipype_name: ProjThresh nipype_module: nipype.interfaces.fsl.dti @@ -57,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -69,13 +69,13 @@ tests: threshold: # type=int|default=0: threshold indicating minimum number of seed voxels entering this mask region output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command 
environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -97,7 +97,7 @@ tests: threshold: '3' # type=int|default=0: threshold indicating minimum number of seed voxels entering this mask region imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -123,7 +123,7 @@ doctests: threshold: '3' # type=int|default=0: threshold indicating minimum number of seed voxels entering this mask region imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/randomise.yaml b/example-specs/interface/nipype/fsl/randomise.yaml index 6b7ee93e..f1d1032f 100644 --- a/example-specs/interface/nipype/fsl/randomise.yaml +++ b/example-specs/interface/nipype/fsl/randomise.yaml @@ -6,17 +6,17 @@ # Docs # ---- # FSL Randomise: feeds the 4D projected FA data into GLM -# modelling and thresholding -# in order to find voxels which correlate with your model +# modelling and thresholding +# in order to find voxels which correlate with your model +# +# Example +# ------- +# >>> import nipype.interfaces.fsl as fsl +# >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') +# >>> rand.cmdline +# 'randomise -i allFA.nii -o "randomise" -d design.mat -t design.con -m mask.nii' # -# Example -# ------- -# >>> import nipype.interfaces.fsl as fsl -# >>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat') -# >>> rand.cmdline -# 'randomise -i allFA.nii -o "randomise" -d design.mat -t design.con -m mask.nii' # -# task_name: Randomise nipype_name: Randomise nipype_module: nipype.interfaces.fsl.model @@ -39,7 +39,7 @@ inputs: # type=file|default=: 4D input file mask: medimage/nifti1 # type=file|default=: mask image - tcon: medimage-fsl/con + tcon: fileformats.medimage_fsl.Con # type=file|default=: t contrasts file x_block_labels: generic/file # type=file|default=: exchangeability block labels file @@ -75,7 +75,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -137,13 +137,13 
@@ tests: tfce_C: # type=float|default=0.0: TFCE connectivity (6 or 26; default=6) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -169,7 +169,7 @@ tests: design_mat: # type=file|default=: design matrix file imports: &id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys - module: nipype.interfaces.fsl as fsl expected_outputs: @@ -185,7 +185,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: randomise -i allFA.nii -o "randomise" -d design.mat -t design.con -m mask.nii +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. 
@@ -200,7 +200,7 @@ doctests: design_mat: '"design.mat"' # type=file|default=: design matrix file imports: *id001 - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/reorient_2_std.yaml b/example-specs/interface/nipype/fsl/reorient_2_std.yaml index 17ce6e8e..2fb5e285 100644 --- a/example-specs/interface/nipype/fsl/reorient_2_std.yaml +++ b/example-specs/interface/nipype/fsl/reorient_2_std.yaml @@ -6,18 +6,18 @@ # Docs # ---- # fslreorient2std is a tool for reorienting the image to match the -# approximate orientation of the standard template images (MNI152). +# approximate orientation of the standard template images (MNI152). # # -# Examples -# -------- +# Examples +# -------- +# +# >>> reorient = Reorient2Std() +# >>> reorient.inputs.in_file = "functional.nii" +# >>> res = reorient.run() # doctest: +SKIP # -# >>> reorient = Reorient2Std() -# >>> reorient.inputs.in_file = "functional.nii" -# >>> res = reorient.run() # doctest: +SKIP # # -# task_name: Reorient2Std nipype_name: Reorient2Std nipype_module: nipype.interfaces.fsl.utils @@ -34,9 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: - out_file: Path - # type=file: - # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: # type=file|default=: @@ -76,13 +73,13 @@ tests: # type=file: # type=file|default=: output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/robust_fov.yaml b/example-specs/interface/nipype/fsl/robust_fov.yaml index 151fb656..462199e1 100644 --- a/example-specs/interface/nipype/fsl/robust_fov.yaml +++ b/example-specs/interface/nipype/fsl/robust_fov.yaml @@ -7,9 +7,9 @@ # ---- # Automatically crops an image removing lower head and neck. # -# Interface is stable 5.0.0 to 5.0.9, but default brainsize changed from -# 150mm to 170mm. 
-# +# Interface is stable 5.0.0 to 5.0.9, but default brainsize changed from +# 150mm to 170mm. +# task_name: RobustFOV nipype_name: RobustFOV nipype_module: nipype.interfaces.fsl.utils @@ -26,12 +26,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: input filename - out_roi: Path - # type=file: ROI volume output name - # type=file|default=: ROI volume output name - out_transform: Path - # type=file: Transformation matrix in_file to out_roi output name - # type=file|default=: Transformation matrix in_file to out_roi output name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -58,7 +52,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -76,13 +70,13 @@ tests: # type=file: Transformation matrix in_file to out_roi output name # type=file|default=: Transformation matrix in_file to out_roi output name output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list 
item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/sig_loss.yaml b/example-specs/interface/nipype/fsl/sig_loss.yaml index 67dcec9f..2d594137 100644 --- a/example-specs/interface/nipype/fsl/sig_loss.yaml +++ b/example-specs/interface/nipype/fsl/sig_loss.yaml @@ -7,16 +7,16 @@ # ---- # Estimates signal loss from a field map (in rad/s) # -# Examples -# -------- +# Examples +# -------- +# +# >>> sigloss = SigLoss() +# >>> sigloss.inputs.in_file = "phase.nii" +# >>> sigloss.inputs.echo_time = 0.03 +# >>> res = sigloss.run() # doctest: +SKIP # -# >>> sigloss = SigLoss() -# >>> sigloss.inputs.in_file = "phase.nii" -# >>> sigloss.inputs.echo_time = 0.03 -# >>> res = sigloss.run() # doctest: +SKIP # # -# task_name: SigLoss nipype_name: SigLoss nipype_module: nipype.interfaces.fsl.utils @@ -35,9 +35,6 @@ inputs: # type=file|default=: b0 fieldmap file mask_file: generic/file # type=file|default=: brain mask file - out_file: Path - # type=file: signal loss estimate file - # type=file|default=: output signal loss estimate file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -61,7 +58,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: signal loss estimate file # type=file|default=: output signal loss estimate file @@ -83,13 +80,13 @@ tests: slice_direction: # type=enum|default='x'|allowed['x','y','z']: slicing direction output_type: - # 
type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/slice.yaml b/example-specs/interface/nipype/fsl/slice.yaml index 92408dd8..c9317de6 100644 --- a/example-specs/interface/nipype/fsl/slice.yaml +++ b/example-specs/interface/nipype/fsl/slice.yaml @@ -8,18 +8,18 @@ # Use fslslice to split a 3D file into lots of 2D files (along z-axis). 
# # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import Slice +# >>> slice = Slice() +# >>> slice.inputs.in_file = 'functional.nii' +# >>> slice.inputs.out_base_name = 'sl' +# >>> slice.cmdline +# 'fslslice functional.nii sl' # -# >>> from nipype.interfaces.fsl import Slice -# >>> slice = Slice() -# >>> slice.inputs.in_file = 'functional.nii' -# >>> slice.inputs.out_base_name = 'sl' -# >>> slice.cmdline -# 'fslslice functional.nii sl' # # -# task_name: Slice nipype_name: Slice nipype_module: nipype.interfaces.fsl.utils @@ -58,7 +58,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -70,13 +70,13 @@ tests: out_base_name: # type=str|default='': outputs prefix output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -95,10 +95,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # 
type=file|default=: input filename - out_base_name: '"sl"' - # type=str|default='': outputs prefix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -121,10 +119,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"functional.nii"' # type=file|default=: input filename - out_base_name: '"sl"' - # type=str|default='': outputs prefix imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/slice_timer.yaml b/example-specs/interface/nipype/fsl/slice_timer.yaml index dff7e082..a8529a80 100644 --- a/example-specs/interface/nipype/fsl/slice_timer.yaml +++ b/example-specs/interface/nipype/fsl/slice_timer.yaml @@ -7,16 +7,16 @@ # ---- # FSL slicetimer wrapper to perform slice timing correction # -# Examples -# -------- -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> st = fsl.SliceTimer() -# >>> st.inputs.in_file = example_data('functional.nii') -# >>> st.inputs.interleaved = True -# >>> result = st.run() #doctest: +SKIP +# Examples +# -------- +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> st = fsl.SliceTimer() +# >>> st.inputs.in_file = example_data('functional.nii') +# >>> st.inputs.interleaved = True +# >>> result = st.run() #doctest: +SKIP +# # -# task_name: SliceTimer nipype_name: SliceTimer nipype_module: nipype.interfaces.fsl.preprocess @@ -37,8 +37,6 @@ inputs: # type=file|default=: slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift) in_file: generic/file # type=file|default=: filename of input timeseries - out_file: Path - # type=file|default=: filename of output timeseries callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -63,7 +61,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -89,13 +87,13 @@ tests: custom_order: # type=file|default=: filename 
of single-column custom interleave order file (first slice is referred to as 1 not 0) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/slicer.yaml b/example-specs/interface/nipype/fsl/slicer.yaml index da04f77e..18404afc 100644 --- a/example-specs/interface/nipype/fsl/slicer.yaml +++ b/example-specs/interface/nipype/fsl/slicer.yaml @@ -8,19 +8,19 @@ # Use FSL's slicer command to output a png image from a volume. 
# # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> slice = fsl.Slicer() +# >>> slice.inputs.in_file = example_data('functional.nii') +# >>> slice.inputs.all_axial = True +# >>> slice.inputs.image_width = 750 +# >>> res = slice.run() #doctest: +SKIP # -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> slice = fsl.Slicer() -# >>> slice.inputs.in_file = example_data('functional.nii') -# >>> slice.inputs.all_axial = True -# >>> slice.inputs.image_width = 750 -# >>> res = slice.run() #doctest: +SKIP # # -# task_name: Slicer nipype_name: Slicer nipype_module: nipype.interfaces.fsl.utils @@ -41,9 +41,6 @@ inputs: # type=file|default=: volume to display edge overlay for (useful for checking registration in_file: generic/file # type=file|default=: input volume - out_file: Path - # type=file: picture to write - # type=file|default=: picture to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -67,7 +64,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: picture to write # type=file|default=: picture to write @@ -113,13 +110,13 @@ tests: scaling: # type=float|default=0.0: image scale output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: 
Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/smm.yaml b/example-specs/interface/nipype/fsl/smm.yaml index 04aad0b3..9434d25d 100644 --- a/example-specs/interface/nipype/fsl/smm.yaml +++ b/example-specs/interface/nipype/fsl/smm.yaml @@ -6,11 +6,11 @@ # Docs # ---- # -# Spatial Mixture Modelling. For more detail on the spatial mixture modelling -# see Mixture Models with Adaptive Spatial Regularisation for Segmentation -# with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., -# and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. -# +# Spatial Mixture Modelling. For more detail on the spatial mixture modelling +# see Mixture Models with Adaptive Spatial Regularisation for Segmentation +# with an Application to FMRI Data; Woolrich, M., Behrens, T., Beckmann, C., +# and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005. 
+# task_name: SMM nipype_name: SMM nipype_module: nipype.interfaces.fsl.model @@ -55,7 +55,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -69,13 +69,13 @@ tests: no_deactivation_class: # type=bool|default=False: enforces no deactivation class output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/smooth.yaml b/example-specs/interface/nipype/fsl/smooth.yaml index fd618dbe..4b8599ab 100644 --- a/example-specs/interface/nipype/fsl/smooth.yaml +++ b/example-specs/interface/nipype/fsl/smooth.yaml @@ -6,41 +6,41 @@ # Docs # ---- # -# Use fslmaths to smooth the image +# Use fslmaths to smooth the image # -# Examples -# -------- +# Examples +# -------- # -# Setting the kernel width using sigma: +# Setting the kernel width using sigma: # -# >>> sm = Smooth() -# >>> sm.inputs.output_type = 'NIFTI_GZ' -# >>> 
sm.inputs.in_file = 'functional2.nii' -# >>> sm.inputs.sigma = 8.0 -# >>> sm.cmdline # doctest: +ELLIPSIS -# 'fslmaths functional2.nii -kernel gauss 8.000 -fmean functional2_smooth.nii.gz' +# >>> sm = Smooth() +# >>> sm.inputs.output_type = 'NIFTI_GZ' +# >>> sm.inputs.in_file = 'functional2.nii' +# >>> sm.inputs.sigma = 8.0 +# >>> sm.cmdline # doctest: +ELLIPSIS +# 'fslmaths functional2.nii -kernel gauss 8.000 -fmean functional2_smooth.nii.gz' # -# Setting the kernel width using fwhm: +# Setting the kernel width using fwhm: # -# >>> sm = Smooth() -# >>> sm.inputs.output_type = 'NIFTI_GZ' -# >>> sm.inputs.in_file = 'functional2.nii' -# >>> sm.inputs.fwhm = 8.0 -# >>> sm.cmdline # doctest: +ELLIPSIS -# 'fslmaths functional2.nii -kernel gauss 3.397 -fmean functional2_smooth.nii.gz' +# >>> sm = Smooth() +# >>> sm.inputs.output_type = 'NIFTI_GZ' +# >>> sm.inputs.in_file = 'functional2.nii' +# >>> sm.inputs.fwhm = 8.0 +# >>> sm.cmdline # doctest: +ELLIPSIS +# 'fslmaths functional2.nii -kernel gauss 3.397 -fmean functional2_smooth.nii.gz' # -# One of sigma or fwhm must be set: +# One of sigma or fwhm must be set: +# +# >>> from nipype.interfaces.fsl import Smooth +# >>> sm = Smooth() +# >>> sm.inputs.output_type = 'NIFTI_GZ' +# >>> sm.inputs.in_file = 'functional2.nii' +# >>> sm.cmdline #doctest: +ELLIPSIS +# Traceback (most recent call last): +# ... +# ValueError: Smooth requires a value for one of the inputs ... # -# >>> from nipype.interfaces.fsl import Smooth -# >>> sm = Smooth() -# >>> sm.inputs.output_type = 'NIFTI_GZ' -# >>> sm.inputs.in_file = 'functional2.nii' -# >>> sm.cmdline #doctest: +ELLIPSIS -# Traceback (most recent call last): -# ... -# ValueError: Smooth requires a value for one of the inputs ... 
# -# task_name: Smooth nipype_name: Smooth nipype_module: nipype.interfaces.fsl.utils @@ -55,10 +55,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - in_file: medimage/nifti1 - # type=file|default=: - smoothed_file: Path - # type=file: + in_file: generic/file # type=file|default=: callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -83,7 +80,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -100,13 +97,13 @@ tests: # type=file: # type=file|default=: output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -124,13 +121,11 @@ tests: # dict[str, str] - values to provide to inputs fields in the task 
initialisation # (if not specified, will try to choose a sensible value) output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type sigma: '8.0' # type=float|default=0.0: gaussian kernel sigma in mm (not voxels) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -148,13 +143,11 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type fwhm: '8.0' # type=float|default=0.0: gaussian kernel fwhm, will be converted to sigma in mm (not voxels) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -172,11 +165,9 @@ tests: # dict[str, str] - values to provide to inputs fields in the task initialisation # (if not specified, will try to choose a sensible value) output_type: 
'"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -198,13 +189,11 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: '"functional2.nii"' - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type sigma: '8.0' # type=float|default=0.0: gaussian kernel sigma in mm (not voxels) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -215,13 +204,11 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. 
output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: '"functional2.nii"' - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type fwhm: '8.0' # type=float|default=0.0: gaussian kernel fwhm, will be converted to sigma in mm (not voxels) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS @@ -232,11 +219,9 @@ doctests: # If the field is of file-format type and the value is None, then the # '.mock()' method of the corresponding class is used instead. output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type - in_file: '"functional2.nii"' - # type=file|default=: + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/smooth_estimate.yaml b/example-specs/interface/nipype/fsl/smooth_estimate.yaml index fdea86af..bf5b0d23 100644 --- a/example-specs/interface/nipype/fsl/smooth_estimate.yaml +++ b/example-specs/interface/nipype/fsl/smooth_estimate.yaml @@ -7,16 +7,16 @@ # ---- # Estimates the smoothness of an image # -# Examples -# -------- +# Examples +# -------- +# +# >>> est = SmoothEstimate() +# >>> est.inputs.zstat_file = 'zstat1.nii.gz' +# >>> est.inputs.mask_file = 'mask.nii' +# >>> est.cmdline +# 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' # -# >>> est = SmoothEstimate() -# >>> est.inputs.zstat_file = 'zstat1.nii.gz' -# >>> est.inputs.mask_file = 'mask.nii' -# >>> est.cmdline -# 'smoothest --mask=mask.nii --zstat=zstat1.nii.gz' # -# task_name: SmoothEstimate nipype_name: SmoothEstimate nipype_module: nipype.interfaces.fsl.model @@ -31,7 +31,7 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- mask_file: medimage/nifti1 + mask_file: generic/file # type=file|default=: brain mask volume residual_fit_file: generic/file # type=file|default=: residual-fit image file @@ -63,7 +63,7 @@ outputs: volume: volume_callable # type=int: number of voxels in mask templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -79,13 +79,13 @@ tests: zstat_file: # type=file|default=: zstat image file output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -104,10 +104,8 @@ tests: # (if not specified, will try to choose a sensible value) zstat_file: # type=file|default=: zstat image file - mask_file: - # type=file|default=: brain mask volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that 
tests will typically @@ -130,10 +128,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. zstat_file: '"zstat1.nii.gz"' # type=file|default=: zstat image file - mask_file: '"mask.nii"' - # type=file|default=: brain mask volume imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/spatial_filter.yaml b/example-specs/interface/nipype/fsl/spatial_filter.yaml index 2f09a218..5d65c711 100644 --- a/example-specs/interface/nipype/fsl/spatial_filter.yaml +++ b/example-specs/interface/nipype/fsl/spatial_filter.yaml @@ -24,9 +24,6 @@ inputs: # type=file|default=: image to operate on kernel_file: generic/file # type=file|default=: use external file for kernel - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -50,7 +47,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -80,13 +77,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # 
type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/split.yaml b/example-specs/interface/nipype/fsl/split.yaml index afa9aeaf..0e7489a2 100644 --- a/example-specs/interface/nipype/fsl/split.yaml +++ b/example-specs/interface/nipype/fsl/split.yaml @@ -6,8 +6,8 @@ # Docs # ---- # Uses FSL Fslsplit command to separate a volume into images in -# time, x, y or z dimension. -# +# time, x, y or z dimension. 
+# task_name: Split nipype_name: Split nipype_module: nipype.interfaces.fsl.utils @@ -46,7 +46,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -60,13 +60,13 @@ tests: dimension: # type=enum|default='t'|allowed['t','x','y','z']: dimension along which the file will be split output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/std_image.yaml b/example-specs/interface/nipype/fsl/std_image.yaml index 7e77e98b..997a41f7 100644 --- a/example-specs/interface/nipype/fsl/std_image.yaml +++ b/example-specs/interface/nipype/fsl/std_image.yaml @@ -6,8 +6,8 @@ # Docs # ---- # Use fslmaths to generate a standard deviation in an image across a given -# dimension. -# +# dimension. 
+# task_name: StdImage nipype_name: StdImage nipype_module: nipype.interfaces.fsl.maths @@ -24,9 +24,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -50,7 +47,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -74,13 +71,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/susan.yaml b/example-specs/interface/nipype/fsl/susan.yaml index 527da306..63e950a1 100644 --- 
a/example-specs/interface/nipype/fsl/susan.yaml +++ b/example-specs/interface/nipype/fsl/susan.yaml @@ -7,22 +7,22 @@ # ---- # FSL SUSAN wrapper to perform smoothing # -# For complete details, see the `SUSAN Documentation. -# `_ +# For complete details, see the `SUSAN Documentation. +# `_ # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces import fsl +# >>> from nipype.testing import example_data +# >>> anatfile # doctest: +SKIP +# anatomical.nii # doctest: +SKIP +# >>> sus = fsl.SUSAN() +# >>> sus.inputs.in_file = example_data('structural.nii') +# >>> sus.inputs.brightness_threshold = 2000.0 +# >>> sus.inputs.fwhm = 8.0 +# >>> result = sus.run() # doctest: +SKIP # -# >>> from nipype.interfaces import fsl -# >>> from nipype.testing import example_data -# >>> anatfile # doctest: +SKIP -# anatomical.nii # doctest: +SKIP -# >>> sus = fsl.SUSAN() -# >>> sus.inputs.in_file = example_data('structural.nii') -# >>> sus.inputs.brightness_threshold = 2000.0 -# >>> sus.inputs.fwhm = 8.0 -# >>> result = sus.run() # doctest: +SKIP -# task_name: SUSAN nipype_name: SUSAN nipype_module: nipype.interfaces.fsl.preprocess @@ -39,8 +39,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: filename of input timeseries - out_file: Path - # type=file|default=: output file name callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -65,7 +63,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -87,13 +85,13 @@ tests: out_file: # type=file|default=: output file name output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/swap_dimensions.yaml b/example-specs/interface/nipype/fsl/swap_dimensions.yaml index f0be3678..bd36947b 100644 --- a/example-specs/interface/nipype/fsl/swap_dimensions.yaml +++ b/example-specs/interface/nipype/fsl/swap_dimensions.yaml @@ -7,12 +7,12 @@ # ---- # Use fslswapdim to alter the orientation of an image. 
# -# This interface accepts a three-tuple corresponding to the new -# orientation. You may either provide dimension ids in the form of -# (-)x, (-)y, or (-z), or nifti-syle dimension codes -# (RL, LR, AP, PA, IS, SI). +# This interface accepts a three-tuple corresponding to the new +# orientation. You may either provide dimension ids in the form of +# (-)x, (-)y, or (-z), or nifti-syle dimension codes +# (RL, LR, AP, PA, IS, SI). +# # -# task_name: SwapDimensions nipype_name: SwapDimensions nipype_module: nipype.interfaces.fsl.utils @@ -29,9 +29,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: input image - out_file: Path - # type=file: image with new dimensions - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -55,7 +52,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image with new dimensions # type=file|default=: image to write @@ -73,13 +70,13 @@ tests: # type=file: image with new dimensions # type=file|default=: image to write output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # 
list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/temporal_filter.yaml b/example-specs/interface/nipype/fsl/temporal_filter.yaml index 0534a5be..0d56579e 100644 --- a/example-specs/interface/nipype/fsl/temporal_filter.yaml +++ b/example-specs/interface/nipype/fsl/temporal_filter.yaml @@ -6,9 +6,9 @@ # Docs # ---- # Use fslmaths to apply a low, high, or bandpass temporal filter to a -# timeseries. +# timeseries. +# # -# task_name: TemporalFilter nipype_name: TemporalFilter nipype_module: nipype.interfaces.fsl.maths @@ -25,9 +25,6 @@ inputs: # passed to the field in the automatically generated unittests. in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -51,7 +48,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -77,13 +74,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL 
output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/text_2_vest.yaml b/example-specs/interface/nipype/fsl/text_2_vest.yaml index 05073070..7dc4b3b2 100644 --- a/example-specs/interface/nipype/fsl/text_2_vest.yaml +++ b/example-specs/interface/nipype/fsl/text_2_vest.yaml @@ -6,19 +6,19 @@ # Docs # ---- # -# Use FSL Text2Vest`https://web.mit.edu/fsl_v5.0.10/fsl/doc/wiki/GLM(2f)CreatingDesignMatricesByHand.html`_ -# to convert your plain text design matrix data into the format used by the FSL tools. +# Use FSL Text2Vest`https://web.mit.edu/fsl_v5.0.10/fsl/doc/wiki/GLM(2f)CreatingDesignMatricesByHand.html`_ +# to convert your plain text design matrix data into the format used by the FSL tools. +# +# Examples +# -------- +# >>> from nipype.interfaces.fsl import Text2Vest +# >>> t2v = Text2Vest() +# >>> t2v.inputs.in_file = "design.txt" +# >>> t2v.inputs.out_file = "design.mat" +# >>> t2v.cmdline +# 'Text2Vest design.txt design.mat' +# >>> res = t2v.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces.fsl import Text2Vest -# >>> t2v = Text2Vest() -# >>> t2v.inputs.in_file = "design.txt" -# >>> t2v.inputs.out_file = "design.mat" -# >>> t2v.cmdline -# 'Text2Vest design.txt design.mat' -# >>> res = t2v.run() # doctest: +SKIP -# task_name: Text2Vest nipype_name: Text2Vest nipype_module: nipype.interfaces.fsl.utils @@ -35,9 +35,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: text/text-file # type=file|default=: plain text file representing your design, contrast, or f-test matrix - out_file: Path - # type=file: matrix data in the format used by FSL tools - # type=file|default=: file name to store matrix data in the format used by FSL tools (e.g., design.mat, design.con design.fts) callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -54,14 +51,14 @@ outputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - out_file: datascience/text-matrix + out_file: generic/file # type=file: matrix data in the format used by FSL tools # type=file|default=: file name to store matrix data in the format used by FSL tools (e.g., design.mat, design.con design.fts) callables: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -74,13 +71,13 @@ tests: # type=file: matrix data in the format used by FSL tools # type=file|default=: file name to store matrix data in the format used by FSL tools (e.g., design.mat, design.con design.fts) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables 
imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,11 +96,8 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: plain text file representing your design, contrast, or f-test matrix - out_file: '"design.mat"' - # type=file: matrix data in the format used by FSL tools - # type=file|default=: file name to store matrix data in the format used by FSL tools (e.g., design.mat, design.con design.fts) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,7 +112,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: Text2Vest design.txt design.mat +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -126,11 +120,8 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"design.txt"' # type=file|default=: plain text file representing your design, contrast, or f-test matrix - out_file: '"design.mat"' - # type=file: matrix data in the format used by FSL tools - # type=file|default=: file name to store matrix data in the format used by FSL tools (e.g., design.mat, design.con design.fts) imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/threshold.yaml b/example-specs/interface/nipype/fsl/threshold.yaml index a9c7d62e..8c950c75 100644 --- a/example-specs/interface/nipype/fsl/threshold.yaml +++ b/example-specs/interface/nipype/fsl/threshold.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -78,13 +75,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/topup.yaml b/example-specs/interface/nipype/fsl/topup.yaml index 128a7536..b4331133 100644 --- a/example-specs/interface/nipype/fsl/topup.yaml +++ b/example-specs/interface/nipype/fsl/topup.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# Interface for FSL topup, a tool for estimating and correcting -# 
susceptibility induced distortions. See FSL documentation for -# `reference `_, -# `usage examples -# `_, -# and `exemplary config files -# `_. +# Interface for FSL topup, a tool for estimating and correcting +# susceptibility induced distortions. See FSL documentation for +# `reference `_, +# `usage examples +# `_, +# and `exemplary config files +# `_. # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import TOPUP +# >>> topup = TOPUP() +# >>> topup.inputs.in_file = "b0_b0rev.nii" +# >>> topup.inputs.encoding_file = "topup_encoding.txt" +# >>> topup.inputs.output_type = "NIFTI_GZ" +# >>> topup.cmdline # doctest: +ELLIPSIS +# 'topup --config=b02b0.cnf --datain=topup_encoding.txt --imain=b0_b0rev.nii --out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz --fout=b0_b0rev_field.nii.gz --jacout=jac --logout=b0_b0rev_topup.log --rbmout=xfm --dfout=warpfield' +# >>> res = topup.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import TOPUP -# >>> topup = TOPUP() -# >>> topup.inputs.in_file = "b0_b0rev.nii" -# >>> topup.inputs.encoding_file = "topup_encoding.txt" -# >>> topup.inputs.output_type = "NIFTI_GZ" -# >>> topup.cmdline # doctest: +ELLIPSIS -# 'topup --config=b02b0.cnf --datain=topup_encoding.txt --imain=b0_b0rev.nii --out=b0_b0rev_base --iout=b0_b0rev_corrected.nii.gz --fout=b0_b0rev_field.nii.gz --jacout=jac --logout=b0_b0rev_topup.log --rbmout=xfm --dfout=warpfield' -# >>> res = topup.run() # doctest: +SKIP # -# task_name: TOPUP nipype_name: TOPUP nipype_module: nipype.interfaces.fsl.epi @@ -41,21 +41,10 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. 
- encoding_file: text/text-file + encoding_file: generic/file # type=file|default=: name of text file with PE directions/times in_file: medimage/nifti1 # type=file|default=: name of 4D file with images - out_base: Path - # type=file|default=: base-name of output files (spline coefficients (Hz) and movement parameters) - out_corrected: Path - # type=file: name of 4D image file with unwarped images - # type=file|default=: name of 4D image file with unwarped images - out_field: Path - # type=file: name of image file with field (Hz) - # type=file|default=: name of image file with field (Hz) - out_logfile: Path - # type=file: name of log-file - # type=file|default=: name of log-file callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -97,7 +86,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -160,13 +149,13 @@ tests: regrid: # type=enum|default=1|allowed[0,1]: If set (=1), the calculations are done in a different grid output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import 
statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -185,12 +174,10 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: name of 4D file with images - encoding_file: - # type=file|default=: name of text file with PE directions/times output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -213,12 +200,10 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_file: '"b0_b0rev.nii"' # type=file|default=: name of 4D file with images - encoding_file: '"topup_encoding.txt"' - # type=file|default=: name of text file with PE directions/times output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/tract_skeleton.yaml b/example-specs/interface/nipype/fsl/tract_skeleton.yaml index 05335385..cd4dd73e 100644 --- a/example-specs/interface/nipype/fsl/tract_skeleton.yaml +++ b/example-specs/interface/nipype/fsl/tract_skeleton.yaml @@ -6,28 +6,28 @@ # Docs # ---- # Use FSL's tbss_skeleton to skeletonise an FA image or project arbitrary -# values onto a skeleton. +# values onto a skeleton. # -# There are two ways to use this interface. To create a skeleton from an FA -# image, just supply the ``in_file`` and set ``skeleton_file`` to True (or -# specify a skeleton filename. To project values onto a skeleton, you must -# set ``project_data`` to True, and then also supply values for -# ``threshold``, ``distance_map``, and ``data_file``. The -# ``search_mask_file`` and ``use_cingulum_mask`` inputs are also used in data -# projection, but ``use_cingulum_mask`` is set to True by default. This mask -# controls where the projection algorithm searches within a circular space -# around a tract, rather than in a single perpendicular direction. +# There are two ways to use this interface. 
To create a skeleton from an FA +# image, just supply the ``in_file`` and set ``skeleton_file`` to True (or +# specify a skeleton filename. To project values onto a skeleton, you must +# set ``project_data`` to True, and then also supply values for +# ``threshold``, ``distance_map``, and ``data_file``. The +# ``search_mask_file`` and ``use_cingulum_mask`` inputs are also used in data +# projection, but ``use_cingulum_mask`` is set to True by default. This mask +# controls where the projection algorithm searches within a circular space +# around a tract, rather than in a single perpendicular direction. # -# Example -# ------- +# Example +# ------- +# +# >>> import nipype.interfaces.fsl as fsl +# >>> skeletor = fsl.TractSkeleton() +# >>> skeletor.inputs.in_file = "all_FA.nii.gz" +# >>> skeletor.inputs.skeleton_file = True +# >>> skeletor.run() # doctest: +SKIP # -# >>> import nipype.interfaces.fsl as fsl -# >>> skeletor = fsl.TractSkeleton() -# >>> skeletor.inputs.in_file = "all_FA.nii.gz" -# >>> skeletor.inputs.skeleton_file = True -# >>> skeletor.run() # doctest: +SKIP # -# task_name: TractSkeleton nipype_name: TractSkeleton nipype_module: nipype.interfaces.fsl.dti @@ -52,9 +52,6 @@ inputs: # type=file|default=: distance map image in_file: generic/file # type=file|default=: input image (typically mean FA volume) - projected_data: Path - # type=file: input data projected onto skeleton - # type=file|default=: input data projected onto skeleton search_mask_file: generic/file # type=file|default=: mask in which to use alternate search rule callable_defaults: @@ -83,7 +80,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to 
be provided for the output field to be present tests: @@ -115,13 +112,13 @@ tests: # type=file: tract skeleton image # type=traitcompound|default=None: write out skeleton image output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/training.yaml b/example-specs/interface/nipype/fsl/training.yaml index 52ac3028..6c08dba5 100644 --- a/example-specs/interface/nipype/fsl/training.yaml +++ b/example-specs/interface/nipype/fsl/training.yaml @@ -6,8 +6,8 @@ # Docs # ---- # -# Train the classifier based on your own FEAT/MELODIC output directory. -# +# Train the classifier based on your own FEAT/MELODIC output directory. 
+# task_name: Training nipype_name: Training nipype_module: nipype.interfaces.fsl.fix @@ -46,7 +46,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -64,7 +64,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/training_set_creator.yaml b/example-specs/interface/nipype/fsl/training_set_creator.yaml index af7716c7..3123f079 100644 --- a/example-specs/interface/nipype/fsl/training_set_creator.yaml +++ b/example-specs/interface/nipype/fsl/training_set_creator.yaml @@ -6,13 +6,13 @@ # Docs # ---- # Goes through set of provided melodic output directories, to find all -# the ones that have a hand_labels_noise.txt file in them. +# the ones that have a hand_labels_noise.txt file in them. +# +# This is outsourced as a separate class, so that the pipeline is +# rerun every time a handlabeled file has been changed, or a new one +# created. # -# This is outsourced as a separate class, so that the pipeline is -# rerun every time a handlabeled file has been changed, or a new one -# created. 
# -# task_name: TrainingSetCreator nipype_name: TrainingSetCreator nipype_module: nipype.interfaces.fsl.fix @@ -51,7 +51,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -61,7 +61,7 @@ tests: mel_icas_in: # type=inputmultiobject|default=[]: Melodic output directories imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/unary_maths.yaml b/example-specs/interface/nipype/fsl/unary_maths.yaml index 2c0829b0..2ded9dba 100644 --- a/example-specs/interface/nipype/fsl/unary_maths.yaml +++ b/example-specs/interface/nipype/fsl/unary_maths.yaml @@ -22,9 +22,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: generic/file # type=file|default=: image to operate on - out_file: Path - # type=file: image written after calculations - # type=file|default=: image to write callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -48,7 +45,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: out_file # type=file: image written after calculations # type=file|default=: image to write @@ -72,13 +69,13 @@ tests: nan2zeros: # type=bool|default=False: change NaNs to zeros before doing anything output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/interface/nipype/fsl/vec_reg.yaml b/example-specs/interface/nipype/fsl/vec_reg.yaml index 58dd74e1..f1c337ed 100644 --- a/example-specs/interface/nipype/fsl/vec_reg.yaml +++ b/example-specs/interface/nipype/fsl/vec_reg.yaml @@ -6,18 +6,18 @@ # Docs # ---- # Use FSL vecreg for registering vector data -# For complete 
details, see the FDT Documentation -# +# For complete details, see the FDT Documentation +# # -# Example -# ------- +# Example +# ------- +# +# >>> from nipype.interfaces import fsl +# >>> vreg = fsl.VecReg(in_file='diffusion.nii', affine_mat='trans.mat', ref_vol='mni.nii', out_file='diffusion_vreg.nii') +# >>> vreg.cmdline +# 'vecreg -t trans.mat -i diffusion.nii -o diffusion_vreg.nii -r mni.nii' # -# >>> from nipype.interfaces import fsl -# >>> vreg = fsl.VecReg(in_file='diffusion.nii', affine_mat='trans.mat', ref_vol='mni.nii', out_file='diffusion_vreg.nii') -# >>> vreg.cmdline -# 'vecreg -t trans.mat -i diffusion.nii -o diffusion_vreg.nii -r mni.nii' # -# task_name: VecReg nipype_name: VecReg nipype_module: nipype.interfaces.fsl.dti @@ -38,9 +38,6 @@ inputs: # type=file|default=: filename for input vector or tensor field mask: generic/file # type=file|default=: brain mask in input space - out_file: Path - # type=file: path/name of filename for the registered vector or tensor field - # type=file|default=: filename for output registered vector or tensor field ref_mask: generic/file # type=file|default=: brain mask in output space (useful for speed up of nonlinear reg) ref_vol: medimage/nifti1 @@ -74,7 +71,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields out_file: '"diffusion_vreg.nii"' # type=file: path/name of filename for the registered vector or tensor field # type=file|default=: filename for output registered vector or tensor field @@ -106,13 +103,13 @@ tests: ref_mask: # type=file|default=: brain mask in output space (useful for speed up of nonlinear reg) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # 
type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -139,7 +136,7 @@ tests: # type=file: path/name of filename for the registered vector or tensor field # type=file|default=: filename for output registered vector or tensor field imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -170,7 +167,7 @@ doctests: # type=file: path/name of filename for the registered vector or tensor field # type=file|default=: filename for output registered vector or tensor field imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/vest_2_text.yaml b/example-specs/interface/nipype/fsl/vest_2_text.yaml index 599cedc7..4dc852b1 100644 --- a/example-specs/interface/nipype/fsl/vest_2_text.yaml +++ b/example-specs/interface/nipype/fsl/vest_2_text.yaml @@ -6,18 +6,18 @@ # Docs # ---- # -# Use FSL Vest2Text`https://web.mit.edu/fsl_v5.0.10/fsl/doc/wiki/GLM(2f)CreatingDesignMatricesByHand.html`_ -# to convert your design.mat design.con and design.fts files into plain text. +# Use FSL Vest2Text`https://web.mit.edu/fsl_v5.0.10/fsl/doc/wiki/GLM(2f)CreatingDesignMatricesByHand.html`_ +# to convert your design.mat design.con and design.fts files into plain text. +# +# Examples +# -------- +# >>> from nipype.interfaces.fsl import Vest2Text +# >>> v2t = Vest2Text() +# >>> v2t.inputs.in_file = "design.mat" +# >>> v2t.cmdline +# 'Vest2Text design.mat design.txt' +# >>> res = v2t.run() # doctest: +SKIP # -# Examples -# -------- -# >>> from nipype.interfaces.fsl import Vest2Text -# >>> v2t = Vest2Text() -# >>> v2t.inputs.in_file = "design.mat" -# >>> v2t.cmdline -# 'Vest2Text design.mat design.txt' -# >>> res = v2t.run() # doctest: +SKIP -# task_name: Vest2Text nipype_name: Vest2Text nipype_module: nipype.interfaces.fsl.utils @@ -34,9 +34,6 @@ inputs: # passed to the field in the automatically generated unittests. 
in_file: datascience/text-matrix # type=file|default=: matrix data stored in the format used by FSL tools - out_file: Path - # type=file: plain text representation of FSL matrix - # type=file|default='design.txt': file name to store text output from matrix callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set as the `default` method of input fields @@ -60,7 +57,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -73,13 +70,13 @@ tests: # type=file: plain text representation of FSL matrix # type=file|default='design.txt': file name to store text output from matrix output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -99,7 +96,7 @@ tests: in_file: # type=file|default=: matrix data stored in the format used by FSL tools imports: - # list[nipype2pydra.task.base.importstatement] - list import 
statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -114,7 +111,7 @@ tests: # bool - whether the unittest is expected to fail or not. Set to false # when you are satisfied with the edits you have made to this file doctests: -- cmdline: Vest2Text design.mat design.txt +- cmdline: # str - the expected cmdline output inputs: # dict[str, str] - name-value pairs for inputs to be provided to the doctest. @@ -123,7 +120,7 @@ doctests: in_file: '"design.mat"' # type=file|default=: matrix data stored in the format used by FSL tools imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/warp_points.yaml b/example-specs/interface/nipype/fsl/warp_points.yaml index 2bb497cf..cae998ef 100644 --- a/example-specs/interface/nipype/fsl/warp_points.yaml +++ b/example-specs/interface/nipype/fsl/warp_points.yaml @@ -6,27 +6,27 @@ # Docs # ---- # Use FSL `img2imgcoord `_ -# to transform point sets. Accepts plain text files and vtk files. +# to transform point sets. Accepts plain text files and vtk files. # -# .. Note:: transformation of TrackVis trk files is not yet implemented +# .. 
Note:: transformation of TrackVis trk files is not yet implemented # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import WarpPoints +# >>> warppoints = WarpPoints() +# >>> warppoints.inputs.in_coords = 'surf.txt' +# >>> warppoints.inputs.src_file = 'epi.nii' +# >>> warppoints.inputs.dest_file = 'T1.nii' +# >>> warppoints.inputs.warp_file = 'warpfield.nii' +# >>> warppoints.inputs.coord_mm = True +# >>> warppoints.cmdline # doctest: +ELLIPSIS +# 'img2imgcoord -mm -dest T1.nii -src epi.nii -warp warpfield.nii surf.txt' +# >>> res = warppoints.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import WarpPoints -# >>> warppoints = WarpPoints() -# >>> warppoints.inputs.in_coords = 'surf.txt' -# >>> warppoints.inputs.src_file = 'epi.nii' -# >>> warppoints.inputs.dest_file = 'T1.nii' -# >>> warppoints.inputs.warp_file = 'warpfield.nii' -# >>> warppoints.inputs.coord_mm = True -# >>> warppoints.cmdline # doctest: +ELLIPSIS -# 'img2imgcoord -mm -dest T1.nii -src epi.nii -warp warpfield.nii surf.txt' -# >>> res = warppoints.run() # doctest: +SKIP # # -# task_name: WarpPoints nipype_name: WarpPoints nipype_module: nipype.interfaces.fsl.utils @@ -45,12 +45,9 @@ inputs: # type=file|default=: filename of destination image in_coords: text/text-file # type=file|default=: filename of file containing coordinates - out_file: Path - # type=file: Name of output file, containing the warp as field or coefficients. - # type=file|default=: output file name - src_file: medimage/nifti1 + src_file: generic/file # type=file|default=: filename of source image - warp_file: medimage/nifti1 + warp_file: generic/file # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) xfm_file: generic/file # type=file|default=: filename of affine transform (e.g. 
source2dest.mat) @@ -77,7 +74,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -106,7 +103,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -125,16 +122,12 @@ tests: # (if not specified, will try to choose a sensible value) in_coords: # type=file|default=: filename of file containing coordinates - src_file: - # type=file|default=: filename of source image dest_file: # type=file|default=: filename of destination image - warp_file: - # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -157,16 +150,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_coords: '"surf.txt"' # type=file|default=: filename of file containing coordinates - src_file: '"epi.nii"' - # type=file|default=: filename of source image dest_file: '"T1.nii"' # type=file|default=: filename of destination image - warp_file: '"warpfield.nii"' - # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/warp_points_from_std.yaml b/example-specs/interface/nipype/fsl/warp_points_from_std.yaml index fbf7d17c..62a27b33 100644 --- a/example-specs/interface/nipype/fsl/warp_points_from_std.yaml +++ b/example-specs/interface/nipype/fsl/warp_points_from_std.yaml @@ -6,27 +6,27 @@ # Docs # ---- # -# Use FSL `std2imgcoord `_ -# to transform point sets to standard space coordinates. Accepts plain text coordinates -# files. +# Use FSL `std2imgcoord `_ +# to transform point sets to standard space coordinates. Accepts plain text coordinates +# files. 
# # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import WarpPointsFromStd +# >>> warppoints = WarpPointsFromStd() +# >>> warppoints.inputs.in_coords = 'surf.txt' +# >>> warppoints.inputs.img_file = 'T1.nii' +# >>> warppoints.inputs.std_file = 'mni.nii' +# >>> warppoints.inputs.warp_file = 'warpfield.nii' +# >>> warppoints.inputs.coord_mm = True +# >>> warppoints.cmdline # doctest: +ELLIPSIS +# 'std2imgcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' +# >>> res = warppoints.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import WarpPointsFromStd -# >>> warppoints = WarpPointsFromStd() -# >>> warppoints.inputs.in_coords = 'surf.txt' -# >>> warppoints.inputs.img_file = 'T1.nii' -# >>> warppoints.inputs.std_file = 'mni.nii' -# >>> warppoints.inputs.warp_file = 'warpfield.nii' -# >>> warppoints.inputs.coord_mm = True -# >>> warppoints.cmdline # doctest: +ELLIPSIS -# 'std2imgcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' -# >>> res = warppoints.run() # doctest: +SKIP # # -# task_name: WarpPointsFromStd nipype_name: WarpPointsFromStd nipype_module: nipype.interfaces.fsl.utils @@ -41,13 +41,13 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - img_file: medimage/nifti1 + img_file: generic/file # type=file|default=: filename of a destination image in_coords: text/text-file # type=file|default=: filename of file containing coordinates std_file: medimage/nifti1 # type=file|default=: filename of the image in standard space - warp_file: medimage/nifti1 + warp_file: generic/file # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) xfm_file: generic/file # type=file|default=: filename of affine transform (e.g. 
source2dest.mat) @@ -73,7 +73,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -99,7 +99,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -118,16 +118,12 @@ tests: # (if not specified, will try to choose a sensible value) in_coords: # type=file|default=: filename of file containing coordinates - img_file: - # type=file|default=: filename of a destination image std_file: # type=file|default=: filename of the image in standard space - warp_file: - # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -150,16 +146,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. 
in_coords: '"surf.txt"' # type=file|default=: filename of file containing coordinates - img_file: '"T1.nii"' - # type=file|default=: filename of a destination image std_file: '"mni.nii"' # type=file|default=: filename of the image in standard space - warp_file: '"warpfield.nii"' - # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/warp_points_to_std.yaml b/example-specs/interface/nipype/fsl/warp_points_to_std.yaml index 395843ca..b35c75d5 100644 --- a/example-specs/interface/nipype/fsl/warp_points_to_std.yaml +++ b/example-specs/interface/nipype/fsl/warp_points_to_std.yaml @@ -6,29 +6,29 @@ # Docs # ---- # -# Use FSL `img2stdcoord `_ -# to transform point sets to standard space coordinates. Accepts plain text -# files and vtk files. +# Use FSL `img2stdcoord `_ +# to transform point sets to standard space coordinates. Accepts plain text +# files and vtk files. # -# .. Note:: transformation of TrackVis trk files is not yet implemented +# .. 
Note:: transformation of TrackVis trk files is not yet implemented # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import WarpPointsToStd +# >>> warppoints = WarpPointsToStd() +# >>> warppoints.inputs.in_coords = 'surf.txt' +# >>> warppoints.inputs.img_file = 'T1.nii' +# >>> warppoints.inputs.std_file = 'mni.nii' +# >>> warppoints.inputs.warp_file = 'warpfield.nii' +# >>> warppoints.inputs.coord_mm = True +# >>> warppoints.cmdline # doctest: +ELLIPSIS +# 'img2stdcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' +# >>> res = warppoints.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import WarpPointsToStd -# >>> warppoints = WarpPointsToStd() -# >>> warppoints.inputs.in_coords = 'surf.txt' -# >>> warppoints.inputs.img_file = 'T1.nii' -# >>> warppoints.inputs.std_file = 'mni.nii' -# >>> warppoints.inputs.warp_file = 'warpfield.nii' -# >>> warppoints.inputs.coord_mm = True -# >>> warppoints.cmdline # doctest: +ELLIPSIS -# 'img2stdcoord -mm -img T1.nii -std mni.nii -warp warpfield.nii surf.txt' -# >>> res = warppoints.run() # doctest: +SKIP # # -# task_name: WarpPointsToStd nipype_name: WarpPointsToStd nipype_module: nipype.interfaces.fsl.utils @@ -43,18 +43,15 @@ inputs: # from the nipype interface, but you may want to be more specific, particularly # for file types, where specifying the format also specifies the file that will be # passed to the field in the automatically generated unittests. - img_file: medimage/nifti1 + img_file: generic/file # type=file|default=: filename of input image in_coords: text/text-file # type=file|default=: filename of file containing coordinates - out_file: Path - # type=file: Name of output file, containing the warp as field or coefficients. - # type=file|default=: output file name premat_file: generic/file # type=file|default=: filename of pre-warp affine transform (e.g. 
example_func2highres.mat) std_file: medimage/nifti1 # type=file|default=: filename of destination image - warp_file: medimage/nifti1 + warp_file: generic/file # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) xfm_file: generic/file # type=file|default=: filename of affine transform (e.g. source2dest.mat) @@ -81,7 +78,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -112,7 +109,7 @@ tests: environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -131,16 +128,12 @@ tests: # (if not specified, will try to choose a sensible value) in_coords: # type=file|default=: filename of file containing coordinates - img_file: - # type=file|default=: filename of input image std_file: # type=file|default=: filename of destination image - warp_file: - # type=file|default=: filename of warpfield (e.g. 
intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -163,16 +156,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_coords: '"surf.txt"' # type=file|default=: filename of file containing coordinates - img_file: '"T1.nii"' - # type=file|default=: filename of input image std_file: '"mni.nii"' # type=file|default=: filename of destination image - warp_file: '"warpfield.nii"' - # type=file|default=: filename of warpfield (e.g. intermediate2dest_warp.nii.gz) coord_mm: 'True' # type=bool|default=False: all coordinates in mm imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/warp_utils.yaml b/example-specs/interface/nipype/fsl/warp_utils.yaml index 3132d259..69b99297 100644 --- a/example-specs/interface/nipype/fsl/warp_utils.yaml +++ b/example-specs/interface/nipype/fsl/warp_utils.yaml @@ -6,25 +6,25 @@ # Docs # ---- # Use FSL `fnirtfileutils `_ -# to convert field->coefficients, coefficients->field, coefficients->other_coefficients etc +# to convert field->coefficients, coefficients->field, coefficients->other_coefficients etc # # -# Examples -# -------- +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import WarpUtils +# >>> warputils = WarpUtils() +# >>> warputils.inputs.in_file = "warpfield.nii" +# >>> warputils.inputs.reference = "T1.nii" +# >>> warputils.inputs.out_format = 'spline' +# >>> warputils.inputs.warp_resolution = (10,10,10) +# >>> warputils.inputs.output_type = "NIFTI_GZ" +# >>> warputils.cmdline # doctest: +ELLIPSIS +# 'fnirtfileutils --in=warpfield.nii --outformat=spline --ref=T1.nii --warpres=10.0000,10.0000,10.0000 --out=warpfield_coeffs.nii.gz' +# >>> res = invwarp.run() # doctest: +SKIP # -# >>> from nipype.interfaces.fsl import WarpUtils -# >>> warputils = WarpUtils() -# >>> warputils.inputs.in_file = "warpfield.nii" -# >>> warputils.inputs.reference = "T1.nii" -# >>> warputils.inputs.out_format = 'spline' -# >>> warputils.inputs.warp_resolution = (10,10,10) -# >>> warputils.inputs.output_type = "NIFTI_GZ" -# >>> warputils.cmdline # doctest: +ELLIPSIS -# 'fnirtfileutils --in=warpfield.nii --outformat=spline --ref=T1.nii --warpres=10.0000,10.0000,10.0000 --out=warpfield_coeffs.nii.gz' -# >>> res = invwarp.run() # doctest: +SKIP # # -# task_name: WarpUtils nipype_name: WarpUtils nipype_module: nipype.interfaces.fsl.utils @@ -41,13 +41,7 @@ inputs: # passed to the field in the automatically generated unittests. in_file: medimage/nifti1 # type=file|default=: Name of file containing warp-coefficients/fields. 
This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). - out_file: Path - # type=file: Name of output file, containing the warp as field or coefficients. - # type=file|default=: Name of output file. The format of the output depends on what other parameters are set. The default format is a (4D) field-file. If the --outformat is set to spline the format will be a (4D) file of spline coefficients. - out_jacobian: Path - # type=file: Name of output file, containing the map of the determinant of the Jacobian - # type=file|default=: Specifies that a (3D) file of Jacobian determinants corresponding to --in should be produced and written to filename. - reference: medimage/nifti1 + reference: generic/file # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. callable_defaults: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` @@ -75,7 +69,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -103,13 +97,13 @@ tests: with_affine: # type=bool|default=False: Specifies that the affine transform (i.e. that which was specified for the --aff parameter in fnirt) should be included as displacements in the --out file. 
That can be useful for interfacing with software that cannot decode FSL/fnirt coefficient-files (where the affine transform is stored separately from the displacements). output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -128,16 +122,12 @@ tests: # (if not specified, will try to choose a sensible value) in_file: # type=file|default=: Name of file containing warp-coefficients/fields. This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). - reference: - # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. out_format: '"spline"' # type=enum|default='spline'|allowed['field','spline']: Specifies the output format. If set to field (default) the output will be a (4D) field-file. If set to spline the format will be a (4D) file of spline coefficients. - warp_resolution: (10,10,10) - # type=tuple|default=(0.0, 0.0, 0.0): Specifies the resolution/knot-spacing of the splines pertaining to the coefficients in the --out file. This parameter is only relevant if --outformat is set to spline. 
It should be noted that if the --in file has a higher resolution, the resulting coefficients will pertain to the closest (in a least-squares sense) file in the space of fields with the --warpres resolution. It should also be noted that the resolution will always be an integer multiple of the voxel size. output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically @@ -160,16 +150,12 @@ doctests: # '.mock()' method of the corresponding class is used instead. in_file: '"warpfield.nii"' # type=file|default=: Name of file containing warp-coefficients/fields. This would typically be the output from the --cout switch of fnirt (but can also use fields, like the output from --fout). - reference: '"T1.nii"' - # type=file|default=: Name of a file in target space. Note that the target space is now different from the target space that was used to create the --warp file. It would typically be the file that was specified with the --in argument when running fnirt. out_format: '"spline"' # type=enum|default='spline'|allowed['field','spline']: Specifies the output format. If set to field (default) the output will be a (4D) field-file. If set to spline the format will be a (4D) file of spline coefficients. - warp_resolution: (10,10,10) - # type=tuple|default=(0.0, 0.0, 0.0): Specifies the resolution/knot-spacing of the splines pertaining to the coefficients in the --out file. 
This parameter is only relevant if --outformat is set to spline. It should be noted that if the --in file has a higher resolution, the resulting coefficients will pertain to the closest (in a least-squares sense) file in the space of fields with the --warpres resolution. It should also be noted that the resolution will always be an integer multiple of the voxel size. output_type: '"NIFTI_GZ"' - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys directive: # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/interface/nipype/fsl/x_fibres_5.yaml b/example-specs/interface/nipype/fsl/x_fibres_5.yaml index 9ea798fb..26b39475 100644 --- a/example-specs/interface/nipype/fsl/x_fibres_5.yaml +++ b/example-specs/interface/nipype/fsl/x_fibres_5.yaml @@ -6,9 +6,9 @@ # Docs # ---- # -# Perform model parameters estimation for local (voxelwise) diffusion -# parameters -# +# Perform model parameters estimation for local (voxelwise) diffusion +# parameters +# task_name: XFibres5 nipype_name: XFibres5 nipype_module: nipype.interfaces.fsl.dti @@ -56,7 +56,7 @@ outputs: fsamples: generic/file+list-of # type=outputmultiobject: Samples from the distribution on f anisotropy mean_S0samples: generic/file - # type=file: Mean of distribution on T2wbaseline signal intensity S0 + # type=file: Mean of distribution on T2w baseline signal intensity S0 mean_dsamples: generic/file # type=file: Mean of distribution on diffusivity d mean_fsamples: generic/file+list-of @@ -71,7 
+71,7 @@ outputs: # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` # to set to the `callable` attribute of output fields templates: - # dict[str, str] - `output_file_template` values to be provided to output fields + # dict[str, str] - `path_template` values to be provided to output fields requirements: # dict[str, list[str]] - input fields that are required to be provided for the output field to be present tests: @@ -127,13 +127,13 @@ tests: force_dir: # type=bool|default=True: use the actual directory name given (do not add + to make a new directory) output_type: - # type=enum|default='NIFTI'|allowed['NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type + # type=enum|default='NIFTI'|allowed['GIFTI','NIFTI','NIFTI_GZ','NIFTI_PAIR','NIFTI_PAIR_GZ']: FSL output type args: # type=str|default='': Additional parameters to the command environ: # type=dict|default={}: Environment variables imports: - # list[nipype2pydra.task.base.importstatement] - list import statements required by the test, with each list item + # list[nipype2pydra.statements.imports.explicitimport] - list import statements required by the test, with each list item # consisting of 'module', 'name', and optionally 'alias' keys expected_outputs: # dict[str, str] - expected values for selected outputs, noting that tests will typically diff --git a/example-specs/pkg-gen/nipype.yaml b/example-specs/pkg-gen/nipype.yaml index 89a44589..1778c829 100644 --- a/example-specs/pkg-gen/nipype.yaml +++ b/example-specs/pkg-gen/nipype.yaml @@ -294,9 +294,9 @@ freesurfer: - nipype.interfaces.freesurfer.model.SphericalAverage - nipype.interfaces.freesurfer.petsurfer.GTMSeg - nipype.interfaces.freesurfer.petsurfer.GTMPVC - - nipype.interfaces.freesurfer.petsurfer.MRTM + - nipype.interfaces.freesurfer.petsurfer.MRTM1 - nipype.interfaces.freesurfer.petsurfer.MRTM2 - - nipype.interfaces.freesurfer.petsurfer.LoganRef + - nipype.interfaces.freesurfer.petsurfer.Logan - 
nipype.interfaces.freesurfer.preprocess.ParseDICOMDir - nipype.interfaces.freesurfer.preprocess.UnpackSDICOMDir - nipype.interfaces.freesurfer.preprocess.MRIConvert diff --git a/example-specs/pkg-gen/qsmxt.yaml b/example-specs/pkg-gen/qsmxt.yaml new file mode 100644 index 00000000..783e1a3f --- /dev/null +++ b/example-specs/pkg-gen/qsmxt.yaml @@ -0,0 +1,49 @@ +qsmxt: + interfaces: + - qsmxt.interfaces.nipype_interface_addtojson.AddToJsonInterface + - qsmxt.interfaces.nipype_interface_analyse.AnalyseInterface + - qsmxt.interfaces.nipype_interface_axialsampling.AxialSamplingInterface + - qsmxt.interfaces.nipype_interface_axialsampling.ResampleLikeInterface + - qsmxt.interfaces.nipype_interface_bet2.Bet2Interface + - qsmxt.interfaces.nipype_interface_clearswi.ClearSwiInterface + - qsmxt.interfaces.nipype_interface_combinemagnitude.CombineMagnitudeInterface + - qsmxt.interfaces.nipype_interface_copyfile.DynamicCopyFiles + - qsmxt.interfaces.nipype_interface_erode.ErosionInterface + - qsmxt.interfaces.nipype_interface_fastsurfer.FastSurferInterface + - qsmxt.interfaces.nipype_interface_hdbet.HDBETInterface + - qsmxt.interfaces.nipype_interface_json.JsonInterface + - qsmxt.interfaces.nipype_interface_laplacian_unwrapping.LaplacianInterface + - qsmxt.interfaces.nipype_interface_makehomogeneous.MakeHomogeneousInterface + - qsmxt.interfaces.nipype_interface_masking.MaskingInterface + - qsmxt.interfaces.nipype_interface_mgz2nii.Mgz2NiiInterface + - qsmxt.interfaces.nipype_interface_nextqsm.NextqsmInterface + - qsmxt.interfaces.nipype_interface_nextqsm.NormalizeB0Interface + - qsmxt.interfaces.nipype_interface_nii2dcm.Nii2DcmInterface + - qsmxt.interfaces.nipype_interface_nonzeroaverage.NonzeroAverageInterface + - qsmxt.interfaces.nipype_interface_phaseweights.RomeoMaskingInterface + - qsmxt.interfaces.nipype_interface_phaseweights.PbMaskingInterface + - qsmxt.interfaces.nipype_interface_processphase.ScalePhaseInterface + - 
qsmxt.interfaces.nipype_interface_processphase.PhaseToNormalizedInterface + - qsmxt.interfaces.nipype_interface_processphase.FreqToPhaseInterface + - qsmxt.interfaces.nipype_interface_processphase.FreqToNormalizedInterface + - qsmxt.interfaces.nipype_interface_qsm_referencing.ReferenceQSMInterface + - qsmxt.interfaces.nipype_interface_qsmjl.LaplacianUnwrappingInterface + - qsmxt.interfaces.nipype_interface_qsmjl.PhaseToFreqInterface + - qsmxt.interfaces.nipype_interface_qsmjl.VsharpInterface + - qsmxt.interfaces.nipype_interface_qsmjl.PdfInterface + - qsmxt.interfaces.nipype_interface_qsmjl.RtsQsmInterface + - qsmxt.interfaces.nipype_interface_qsmjl.TvQsmInterface + - qsmxt.interfaces.nipype_interface_resample_like.ResampleLikeInterface + - qsmxt.interfaces.nipype_interface_romeo.RomeoB0Interface + - qsmxt.interfaces.nipype_interface_t2star_r2star.T2sR2sInterface + - qsmxt.interfaces.nipype_interface_tgv_qsm_jl.TGVQSMJlInterface + - qsmxt.interfaces.nipype_interface_tgv_qsm.TGVQSMInterface + - qsmxt.interfaces.nipype_interface_twopass.TwopassNiftiInterface + workflows: + - qsmxt.workflows.masking.masking_workflow + - qsmxt.workflows.qsm.get_preceding_node_and_attribute + - qsmxt.workflows.qsm.get_matching_files + - qsmxt.workflows.qsm.init_qsm_workflow + - qsmxt.workflows.qsm.qsm_workflow + - qsmxt.workflows.template.init_template_workflow + - qsmxt.scripts.antsBuildTemplate.ANTSTemplateBuildSingleIterationWF \ No newline at end of file diff --git a/nipype2pydra/helpers.py b/nipype2pydra/helpers.py index 10e164f6..7e186fec 100644 --- a/nipype2pydra/helpers.py +++ b/nipype2pydra/helpers.py @@ -386,16 +386,28 @@ def _converted_code(self) -> ty.Tuple[str, ty.List[str]]: used_configs = set() src = replace_undefined(self.src)[len("class ") :] - name, bases, class_body = extract_args(src, drop_parens=True) - bases = [ - b - for b in bases - if not self.package.is_omitted(getattr(self.nipype_module, b)) - ] + defn, class_body = src.split(":", 1) + if "(" in defn: + name, 
orig_bases, class_body = extract_args(src, drop_parens=True) + class_body = class_body[1:].strip() + + bases = [] + for base_name in orig_bases: + try: + base = getattr(self.nipype_module, base_name) + except AttributeError: + bases.append(base_name) + continue + if not self.package.is_omitted(base): + bases.append(base_name) + else: + name = defn + bases = [] + class_body = class_body.strip() parts = re.split(r"\n (?!\s|\))", class_body, flags=re.MULTILINE) converted_parts = [] - for part in parts[1:]: + for part in parts: if part.startswith("def"): converted_func, func_used_configs = self._convert_function(part) converted_parts.append(converted_func) diff --git a/nipype2pydra/interface/__init__.py b/nipype2pydra/interface/__init__.py index 72bf0715..c78a5e57 100644 --- a/nipype2pydra/interface/__init__.py +++ b/nipype2pydra/interface/__init__.py @@ -1,6 +1,6 @@ from .base import BaseInterfaceConverter -from .function import FunctionInterfaceConverter -from .shell_command import ShellCommandInterfaceConverter +from .python import PythonInterfaceConverter +from .shell import ShellInterfaceConverter from .base import ( InputsConverter, OutputsConverter, @@ -11,8 +11,8 @@ __all__ = [ "BaseInterfaceConverter", - "FunctionInterfaceConverter", - "ShellCommandInterfaceConverter", + "PythonInterfaceConverter", + "ShellInterfaceConverter", "InputsConverter", "OutputsConverter", "TestGenerator", diff --git a/nipype2pydra/interface/base.py b/nipype2pydra/interface/base.py index 71baa583..a94ea22d 100644 --- a/nipype2pydra/interface/base.py +++ b/nipype2pydra/interface/base.py @@ -19,8 +19,9 @@ BaseInterface, ) from nipype.interfaces.base.core import SimpleInterface -from pydra.engine import specs -from pydra.engine.helpers import ensure_list +from pydra.utils.typing import MultiInputObj, MultiOutputObj, MultiOutputFile +from fileformats.generic import File, Directory +from pydra.utils.general import ensure_list from .. 
import symbols from ..utils import ( import_module_from_path, @@ -44,7 +45,6 @@ from_list_to_imports, make_imports_absolute, ) -from fileformats.generic import File import nipype2pydra.package from nipype2pydra.exceptions import UnmatchedParensException @@ -131,7 +131,7 @@ class OutputsConverter(SpecConverter): names of methods/callable classes defined in the adjacent `*_callables.py` to set to the `callable` attribute of output fields templates : dict[str, str], optional - `output_file_template` values to be provided to output fields + `path_template` values to be provided to output fields requirements : dict[str, list[str]] input fields that are required to be provided for the output field to be present """ @@ -147,9 +147,7 @@ class OutputsConverter(SpecConverter): templates: ty.Dict[str, str] = attrs.field( factory=dict, converter=default_if_none(factory=dict), # type: ignore - metadata={ - "help": "`output_file_template` values to be provided to output fields" - }, + metadata={"help": "`path_template` values to be provided to output fields"}, ) requirements: ty.Dict[str, ty.List[str]] = attrs.field( factory=dict, @@ -284,7 +282,7 @@ def from_dict_to_outputs(obj: ty.Union[OutputsConverter, dict]) -> OutputsConver def from_list_to_tests( - obj: ty.Union[ty.List[TestGenerator], list] + obj: ty.Union[ty.List[TestGenerator], list], ) -> ty.List[TestGenerator]: if obj is None: return [] @@ -292,7 +290,7 @@ def from_list_to_tests( def from_list_to_doctests( - obj: ty.Union[ty.List[DocTestGenerator], list] + obj: ty.Union[ty.List[DocTestGenerator], list], ) -> ty.List[DocTestGenerator]: if obj is None: return [] @@ -504,6 +502,7 @@ def write( converted_code=self.converted_code, used=self.used, find_replace=self.find_replace + self.package.find_replace, + interface_module=True, ) self.package.write_pkg_inits( @@ -544,7 +543,7 @@ def _convert_input_fields(self): continue pydra_fld, pos = self.pydra_fld_input(fld, name) pydra_meta = pydra_fld[-1] - if 
"output_file_template" in pydra_meta: + if "path_template" in pydra_meta: has_template.append(name) pydra_fields_dict[name] = (name,) + pydra_fld if pos is not None: @@ -571,7 +570,7 @@ def pydra_fld_input(self, field, nm): else: pydra_default = None - pydra_metadata = {"help_string": ""} + pydra_metadata = {"help": ""} for key in self.INPUT_KEYS: pydra_key_nm = self.NAME_MAPPING.get(key, key) val = getattr(field, key) @@ -594,20 +593,20 @@ def pydra_fld_input(self, field, nm): else: tmpl = template if nm in self.nipype_interface.output_spec().class_trait_names(): - pydra_metadata["output_file_template"] = tmpl - if pydra_type in [specs.File, specs.Directory]: + pydra_metadata["path_template"] = tmpl + if pydra_type in [File, Directory]: pydra_type = Path elif getattr(field, "genfile"): if nm in self.outputs.templates: try: - pydra_metadata["output_file_template"] = self.outputs.templates[nm] + pydra_metadata["path_template"] = self.outputs.templates[nm] except KeyError: raise Exception( - f"{nm} is has genfile=True and therefore needs an 'output_file_template' value" + f"{nm} is has genfile=True and therefore needs an 'path_template' value" ) if pydra_type in [ - specs.File, - specs.Directory, + File, + Directory, ]: # since this is a template, the file doesn't exist pydra_type = Path elif nm not in self.inputs.callable_defaults: @@ -677,9 +676,9 @@ def pydra_fld_output(self, field, name): pydra_metadata["requires"] = pydra_metadata["requires"][0] if name in self.outputs.templates: - pydra_metadata["output_file_template"] = self.interface_spec[ - "output_templates" - ][name] + pydra_metadata["path_template"] = self.interface_spec["output_templates"][ + name + ] elif name in self.outputs.callables: pydra_metadata["callable"] = self.outputs.callables[name] return (pydra_type, pydra_metadata) @@ -728,25 +727,30 @@ def pydra_type_converter(self, field, spec_type, name): if isinstance(field.inner_traits[0].trait_type, traits_extension.File): pydra_type = ty.List[File] 
else: - pydra_type = specs.MultiInputObj + pydra_type = MultiInputObj elif isinstance(trait_tp, traits_extension.OutputMultiObject): if isinstance(field.inner_traits[0].trait_type, traits_extension.File): - pydra_type = specs.MultiOutputFile + pydra_type = MultiOutputFile else: - pydra_type = specs.MultiOutputObj - elif isinstance(trait_tp, traits.trait_types.List): - if isinstance(field.inner_traits[0].trait_type, traits_extension.File): + pydra_type = MultiOutputObj + elif isinstance(trait_tp, (traits.trait_types.List, traits.trait_types.Tuple)): + seq_type = list if isinstance(trait_tp, traits.trait_types.List) else tuple + if not field.inner_traits: + pydra_type = seq_type[ty.Any] + elif isinstance(field.inner_traits[0].trait_type, traits_extension.File): if spec_type == "input": - pydra_type = ty.List[File] + pydra_type = seq_type[File] else: - pydra_type = specs.MultiOutputFile + pydra_type = MultiOutputFile else: - pydra_type = list + pydra_type = seq_type[ + self.pydra_type_converter(field.inner_traits[0], spec_type, name) + ] elif isinstance(trait_tp, traits_extension.File): if ( spec_type == "output" or trait_tp.exists is True ): # TODO check the hash_file metadata in nipype - pydra_type = specs.File + pydra_type = File else: pydra_type = Path else: @@ -883,7 +887,7 @@ def _converted_test(self): spec_str += f" task.inputs.{nm} = {value}\n" if hasattr(self.nipype_interface, "_cmd"): spec_str += r' print(f"CMDLINE: {task.cmdline}\n\n")' + "\n" - spec_str += " res = task(plugin=PassAfterTimeoutWorker)\n" + spec_str += " res = task(worker=PassAfterTimeoutWorker)\n" spec_str += " print('RESULT: ', res)\n" for name, value in test.expected_outputs.items(): spec_str += f" assert res.output.{name} == {value}\n" @@ -1391,7 +1395,7 @@ def unwrap_nested_methods( "xor", ] OUTPUT_KEYS = ["desc"] - NAME_MAPPING = {"desc": "help_string"} + NAME_MAPPING = {"desc": "help"} TRAITS_IRREL = [ "output_type", diff --git a/nipype2pydra/interface/loaders.py 
b/nipype2pydra/interface/loaders.py index f47c76dc..3ccdc1be 100644 --- a/nipype2pydra/interface/loaders.py +++ b/nipype2pydra/interface/loaders.py @@ -6,8 +6,8 @@ def get_converter(nipype_module: str, nipype_name: str, **kwargs): nipype_interface = getattr(import_module(nipype_module), nipype_name) if hasattr(nipype_interface, "_cmd"): - from .shell_command import ShellCommandInterfaceConverter as Converter + from .shell import ShellInterfaceConverter as Converter else: - from .function import FunctionInterfaceConverter as Converter + from .python import PythonInterfaceConverter as Converter return Converter(nipype_module=nipype_module, nipype_name=nipype_name, **kwargs) diff --git a/nipype2pydra/interface/function.py b/nipype2pydra/interface/python.py similarity index 67% rename from nipype2pydra/interface/function.py rename to nipype2pydra/interface/python.py index 1994e7fb..9e536884 100644 --- a/nipype2pydra/interface/function.py +++ b/nipype2pydra/interface/python.py @@ -1,11 +1,10 @@ import typing as ty import re import inspect -from operator import attrgetter +from operator import attrgetter, itemgetter from functools import cached_property import logging import attrs -from nipype.interfaces.base import BaseInterface, TraitedSpec from .base import BaseInterfaceConverter from ..symbols import UsedSymbols, get_return_line, find_super_method @@ -13,8 +12,22 @@ logger = logging.getLogger("nipype2pydra") +def type2str(type_): + """Convert a type to a string representation.""" + if isinstance(type_, str): + return type_ + if type_ is ty.Any: + return "ty.Any" + elif hasattr(type_, "__name__"): + return type_.__name__ + elif hasattr(type_, "__qualname__"): + return type_.__qualname__ + else: + return str(type_).replace("typing.", "ty.") + + @attrs.define(slots=False) -class FunctionInterfaceConverter(BaseInterfaceConverter): +class PythonInterfaceConverter(BaseInterfaceConverter): converter_type = "function" @@ -36,33 +49,32 @@ def generate_code(self, input_fields, 
nonstd_types, output_fields) -> ty.Tuple[ """ base_imports = [ - "import pydra.mark", "import logging", - "from logging import getLogger", - "from pydra.engine.task import FunctionTask", "import attrs", + "from logging import getLogger", + "from pydra.compose import python", ] - def types_to_names(spec_fields): - spec_fields_str = [] - for el in spec_fields: - el = list(el) - tp_str = str(el[1]) - if tp_str.startswith(" 1: - spec_str += "ty.Tuple[" + ", ".join(output_type_names) + "]" - else: - spec_str += output_type_names[0] - spec_str += ':\n """\n' + spec_str = "@python.define\n" + spec_str += f"class {self.task_name}(python.Task['{self.task_name}.Outputs']):\n" + spec_str += ' """\n' spec_str += self.create_doctests( input_fields=input_fields, nonstd_types=nonstd_types ) spec_str += ' """\n' - spec_str += method_body + "\n" - spec_str += "\n return {}".format(", ".join(output_names)) - spec_str += "\n\n# Nipype methods converted into functions\n\n" + for inpt in input_fields: + if len(inpt) == 4: + name, type_, default, _ = inpt + spec_str += f" {name}: {type2str(type_)} = {default}\n" + else: + name, type_, _ = inpt + spec_str += f" {name}: {type2str(type_)}\n" + + spec_str += "\n\n class Outputs(python.Outputs):\n" + for outpt in output_fields: + name, type_, _ = outpt + spec_str += f" {name}: {type2str(type_)}\n" + + spec_str += "\n @staticmethod\n" + spec_str += ( + " def function(" + + ", ".join(f"{i[0]}: {type2str(i[1])}" for i in input_fields) + + ")" + ) + output_types = [type2str(o[1]) for o in output_fields] + if any(t != "ty.Any" for t in output_types): + spec_str += " -> " + if len(output_types) > 1: + spec_str += "tuple[" + ", ".join(output_types) + "]" + else: + spec_str += output_types[0] + spec_str += ":\n" + spec_str += " " + method_body.replace("\n", "\n ") + "\n" + spec_str += "\n return {}".format(", ".join(output_names)) + for m in sorted(self.used.methods, key=attrgetter("__name__")): + if m.__name__ not in self.included_methods: @@
-179,6 +200,17 @@ def types_to_names(spec_fields): )[1], ) + for name, (m, super_base) in sorted( + self.used.supers.items(), key=itemgetter(0) + ): + spec_str += "\n\n" + self.process_method( + m, + input_names, + output_names, + super_base=super_base, + new_name=name, + ) + # Replace runtime attributes additional_imports = set() for attr, repl, imprt in self.RUNTIME_ATTRS: diff --git a/nipype2pydra/interface/shell_command.py b/nipype2pydra/interface/shell.py similarity index 83% rename from nipype2pydra/interface/shell_command.py rename to nipype2pydra/interface/shell.py index 0e59e605..50d2a7f5 100644 --- a/nipype2pydra/interface/shell_command.py +++ b/nipype2pydra/interface/shell.py @@ -13,9 +13,11 @@ extract_args, find_super_method, cleanup_function_body, + type_to_str, ) from fileformats.core.mixin import WithClassifiers -from fileformats.generic import File, Directory +from fileformats.generic import File +from pydra.utils.typing import is_optional logger = logging.getLogger("nipype2pydra") @@ -24,7 +26,7 @@ @attrs.define(slots=False) -class ShellCommandInterfaceConverter(BaseInterfaceConverter): +class ShellInterfaceConverter(BaseInterfaceConverter): converter_type = "shell_command" _format_argstrs: ty.Dict[str, str] = attrs.field(factory=dict) @@ -49,6 +51,16 @@ def included_methods(self) -> ty.Tuple[str, ...]: def generate_code(self, input_fields, nonstd_types, output_fields) -> str: """ + Parameters + ---------- + input_fields : list[tuple[str, type, dict] | tuple[str, type, object, dict]] + list of input fields, each field is a tuple of (name, type, metadata) or + (name, type, default, metadata) + nonstd_types : set[type] + set of non-standard types + output_fields : list[tuple[str, type, dict]] + list of output fields, each field is a tuple of (name, type, metadata) + Returns ------- converted_code : str @@ -58,13 +70,10 @@ def generate_code(self, input_fields, nonstd_types, output_fields) -> str: """ base_imports = [ - "from pydra.engine import 
specs", "import os", + "from pydra.compose import shell", ] - task_base = "ShellCommandTask" - base_imports.append("from pydra.engine import ShellCommandTask") - try: executable = self.nipype_interface._cmd except AttributeError: @@ -77,56 +86,68 @@ def generate_code(self, input_fields, nonstd_types, output_fields) -> str: "try the FunctionInterfaceConverter class instead" ) - def unwrap_field_type(t): - if issubclass(t, WithClassifiers) and t.is_classified: - unwraped_classifiers = ", ".join( - unwrap_field_type(c) for c in t.classifiers - ) - return f"{t.unclassified.__name__}[{unwraped_classifiers}]" - return t.__name__ - nonstd_types = copy(nonstd_types) - def types_to_names(spec_fields): - spec_fields_str = [] - for el in spec_fields: - el = list(el) - field_type = el[1] - if inspect.isclass(field_type) and issubclass( - field_type, WithClassifiers - ): - field_type_str = unwrap_field_type(field_type) - else: - field_type_str = str(field_type) - if field_type_str.startswith(" list: return [] objs = [] for address in addresses: - parts = address.split(".") - mod = import_module(".".join(parts[:-1])) - objs.append(getattr(mod, parts[-1])) + if not isinstance(address, str): + objs.append(address) + else: + parts = address.split(".") + mod = import_module(".".join(parts[:-1])) + objs.append(getattr(mod, parts[-1])) return objs @@ -657,7 +660,9 @@ def package_dir(self, package_root: Path) -> Path: def write_post_release_file(self, fspath: Path): - if ".dev" in self.nipype_package.__version__: + pkg_version = getattr(self.nipype_package, "__version__", "0.1.0") + + if ".dev" in pkg_version: logger.warning( ( "using development version of nipype2pydra (%s), " @@ -677,7 +682,7 @@ def write_post_release_file(self, fspath: Path): self.name, ) - src_pkg_version = self.nipype_package.__version__.split(".dev")[0] + src_pkg_version = pkg_version.split(".dev")[0] nipype2pydra_version = nipype2pydra.__version__.split(".dev")[0] post_release = (src_pkg_version + 
nipype2pydra_version).replace(".", "") @@ -836,6 +841,7 @@ def write_to_module( find_replace: ty.Optional[ty.List[ty.Tuple[str, str]]] = None, inline_intra_pkg: bool = False, additional_imports: ty.Optional[ty.List[ImportStatement]] = None, + interface_module: bool = False, ): """Writes the given imports, constants, classes, and functions to the file at the given path, merging with existing code if it exists""" @@ -870,8 +876,15 @@ def write_to_module( existing_imports = parse_imports(existing_import_strs, relative_to=module_name) converter_imports = [] + src_module_name = self.untranslate_submodule(module_name) + if interface_module: + src_module_name = ".".join(src_module_name.split(".")[:-1]) + for klass in used.classes: - if f"\nclass {klass.__name__}(" not in code_str: + if ( + klass.__module__ == src_module_name + and f"\nclass {klass.__name__}(" not in code_str + ): try: class_converter = self.classes[full_address(klass)] converter_imports.extend(class_converter.used.import_stmts) @@ -903,7 +916,10 @@ def write_to_module( code_str += "\n" + converted_code + "\n" for func in sorted(used.functions, key=attrgetter("__name__")): - if f"\ndef {func.__name__}(" not in code_str: + if ( + func.__module__ == src_module_name + and f"\ndef {func.__name__}(" not in code_str + ): if func.__name__ in self.functions: function_converter = self.functions[full_address(func)] converter_imports.extend(function_converter.used.import_stmts) @@ -1116,10 +1132,8 @@ def write_pkg_inits( f.write(code_str) BASE_INIT_TEMPLATE = """\"\"\" -This is a basic doctest demonstrating that the package and pydra can both be successfully -imported. +This is a basic doctest showing the package can be imported. 
->>> import pydra.engine >>> import pydra.tasks.{pkg} \"\"\" diff --git a/nipype2pydra/pkg_gen/__init__.py b/nipype2pydra/pkg_gen/__init__.py index 26bb23b1..6ff93eb4 100644 --- a/nipype2pydra/pkg_gen/__init__.py +++ b/nipype2pydra/pkg_gen/__init__.py @@ -93,7 +93,7 @@ class NipypeInterface: input_helps: ty.Dict[str, str] = attrs.field(factory=dict) output_helps: ty.Dict[str, str] = attrs.field(factory=dict) file_inputs: ty.List[str] = attrs.field(factory=list) - path_inputs: ty.List[str] = attrs.field(factory=list) + # path_inputs: ty.List[str] = attrs.field(factory=list) str_inputs: ty.List[str] = attrs.field(factory=list) file_outputs: ty.List[str] = attrs.field(factory=list) template_outputs: ty.List[str] = attrs.field(factory=list) @@ -188,8 +188,8 @@ def parse( parsed.input_helps[inpt_name] = f"{inpt_mdata}: {inpt_desc}" trait_type_name = type(inpt.trait_type).__name__ if inpt.genfile: - if trait_type_name in ("File", "Directory"): - parsed.path_inputs.append(inpt_name) + # if trait_type_name in ("File", "Directory"): + # parsed.path_inputs.append(inpt_name) if inpt_name in (parsed.file_outputs + parsed.dir_outputs): parsed.template_outputs.append(inpt_name) else: @@ -204,8 +204,8 @@ def parse( ): if "fix" in inpt_name: parsed.str_inputs.append(inpt_name) - else: - parsed.path_inputs.append(inpt_name) + # else: + # parsed.path_inputs.append(inpt_name) else: parsed.file_inputs.append(inpt_name) elif trait_type_name == "Directory" and inpt_name not in parsed.dir_outputs: @@ -230,8 +230,8 @@ def parse( else: parsed.dir_inputs.append(inpt_name) parsed.multi_inputs.append(inpt_name) - elif trait_type_name in ("File", "Directory"): - parsed.path_inputs.append(inpt_name) + # elif trait_type_name in ("File", "Directory"): + # parsed.path_inputs.append(inpt_name) return parsed def generate_yaml_spec(self) -> str: @@ -239,7 +239,7 @@ def generate_yaml_spec(self) -> str: input_types = {i: File for i in self.file_inputs} input_types.update({i: Directory for i in 
self.dir_inputs}) - input_types.update({i: Path for i in self.path_inputs}) + # input_types.update({i: Path for i in self.path_inputs}) input_types.update({i: str for i in self.str_inputs}) output_types = {o: File for o in self.file_outputs} output_types.update({o: Directory for o in self.dir_outputs}) @@ -284,6 +284,8 @@ def generate_yaml_spec(self) -> str: non_mime = [Path, str] def type2str(tp): + if isinstance(tp, str): + return tp if tp in non_mime: return tp.__name__ return fileformats.core.to_mime(tp, official=False) @@ -765,7 +767,11 @@ def copy_ignore(_, names): # Replace "CHANGEME" string with pkg name for fspath in pkg_dir.glob("**/*"): - if fspath.is_dir() or fspath.suffix in (".pyc", ".pyo", ".pyd"): + if ( + fspath.is_dir() + or fspath.suffix in (".pyc", ".pyo", ".pyd") + or fspath.name.startswith(".") + ): continue with open(fspath) as f: contents = f.read() @@ -1121,7 +1127,15 @@ def insert_args_in_method_calls( all_constants = set() for mod_name, methods in grouped_methods.items(): mod = import_module(mod_name) - used = UsedSymbols.find(mod, methods, omit_classes=(BaseInterface, TraitedSpec)) + used = UsedSymbols.find( + mod, + methods, + package=PackageConverter( + name=mod_name.split(".")[-1], + nipype_name=mod_name, + omit_classes=(BaseInterface, TraitedSpec), + ), + ) all_funcs.update(methods) for func in used.functions: all_funcs.add(cleanup_function_body(get_source_code(func))) diff --git a/nipype2pydra/pkg_gen/resources/templates/init.py b/nipype2pydra/pkg_gen/resources/templates/init.py index 0816d9c2..991d5b08 100644 --- a/nipype2pydra/pkg_gen/resources/templates/init.py +++ b/nipype2pydra/pkg_gen/resources/templates/init.py @@ -2,7 +2,6 @@ This is a basic doctest demonstrating that the package and pydra can both be successfully imported. 
->>> import pydra.engine >>> import pydra.tasks.CHANGEME """ diff --git a/nipype2pydra/statements/imports.py b/nipype2pydra/statements/imports.py index a1b993a9..208c5e75 100644 --- a/nipype2pydra/statements/imports.py +++ b/nipype2pydra/statements/imports.py @@ -585,16 +585,14 @@ def parse_imports( [ "import attrs", # attrs is included in imports in case we reference attrs.NOTHING "from fileformats.generic import File, Directory", - "from pydra.engine.specs import MultiInputObj", + "from pydra.utils.typing import MultiInputObj", "from pathlib import Path", "import json", "import yaml", "import logging", - "import pydra.mark", + "from pydra.compose import python", "import typing as ty", - "from pydra.engine import Workflow", - "from pydra.engine.task import FunctionTask", - "from pydra.engine.specs import SpecInfo, BaseSpec", + "from pydra.compose import python, shell, workflow", ] ) diff --git a/nipype2pydra/statements/workflow_build.py b/nipype2pydra/statements/workflow_build.py index 5cf0307f..5fce9006 100644 --- a/nipype2pydra/statements/workflow_build.py +++ b/nipype2pydra/statements/workflow_build.py @@ -233,7 +233,7 @@ def __str__(self): base_task_name = f"{self.source_name}_{self.source_out.varname}_to_{self.target_name}_{self.target_in}" intf_name = f"{base_task_name}_callable" code_str += ( - f"\n{self.indent}@pydra.mark.task\n" + f"\n{self.indent}@python.define\n" f"{self.indent}def {intf_name}(in_: ty.Any) -> ty.Any:\n" f"{self.indent} return {self.source_out.callable}(in_)\n\n" f"{self.indent}{self.workflow_variable}.add(" @@ -255,7 +255,7 @@ def __str__(self): # to add an "identity" node to pass it through intf_name = f"{base_task_name}_identity" code_str += ( - f"\n{self.indent}@pydra.mark.task\n" + f"\n{self.indent}@python.define\n" f"{self.indent}def {intf_name}({self.wf_in_name}: ty.Any) -> ty.Any:\n" f"{self.indent} return {self.wf_in_name}\n\n" f"{self.indent}{self.workflow_variable}.add(" @@ -285,15 +285,20 @@ def parse( conns = [args] 
conn_stmts = [] for conn in conns: - src, tgt, field_conns_str = extract_args(conn)[1] - if ( - field_conns_str.startswith("(") - and len(extract_args(field_conns_str)[1]) == 1 - ): - field_conns_str = extract_args(field_conns_str)[1][0] - field_conns = extract_args(field_conns_str)[1] - for field_conn in field_conns: - out, in_ = extract_args(field_conn)[1] + if isinstance(conn, str): + src, tgt, field_conns_str = extract_args(conn)[1] + if ( + field_conns_str.startswith("(") + and len(extract_args(field_conns_str)[1]) == 1 + ): + field_conns_str = extract_args(field_conns_str)[1][0] + field_conns = [ + extract_args(c)[1] for c in extract_args(field_conns_str)[1] + ] + else: + src, out, tgt, in_ = conn + field_conns = [ (out, in_ ) ] + for out, in_ in field_conns: pre, args, post = extract_args(out) if args is not None: varname, callable_str = args @@ -525,7 +530,7 @@ def parse( indent = match.group(1) varname = match.group(2) args = extract_args(statement)[1] - node_kwargs = match_kwargs(args, AddInterfaceStatement.SIGNATURE) + node_kwargs = match_kwargs([a.replace("\n", "") for a in args], AddInterfaceStatement.SIGNATURE) intf_name, intf_args, intf_post = extract_args(node_kwargs["interface"]) if "iterables" in node_kwargs: iterables = [ @@ -829,7 +834,7 @@ class WorkflowInitStatement: workflow_converter: "WorkflowConverter" match_re = re.compile( - r"\s+(\w+)\s*=.*\bWorkflow\(.*name\s*=\s*([^,=\)]+)", + r"\s+(\w+)\s*=.*\bWorkflow\((?:|.*name\s*=)\s*([^,=\)]+)", flags=re.MULTILINE, ) @@ -887,6 +892,9 @@ def match_kwargs(args: ty.List[str], sig: ty.List[str]) -> ty.Dict[str, str]: kwargs[key] = val else: if found_kw: + if arg.startswith("#"): + # Ignore comments + continue raise ValueError( f"Non-keyword arg '{arg}' found after keyword arg in {args}" ) diff --git a/nipype2pydra/symbols.py b/nipype2pydra/symbols.py index 74bd8ac1..af8d99fe 100644 --- a/nipype2pydra/symbols.py +++ b/nipype2pydra/symbols.py @@ -11,7 +11,7 @@ import itertools from functools import 
cached_property import attrs -from nipype.interfaces.base import BaseInterface, TraitedSpec, isdefined, Undefined +from nipype.interfaces.base import BaseInterface, BaseTraitedSpec, isdefined, Undefined from nipype.interfaces.base import traits_extension from .utils.misc import ( split_source_into_statements, @@ -401,7 +401,11 @@ def _find_referenced( intra_pkg_objs[imported.object.__name__].add(obj) elif inspect.isclass(obj): class_def = (obj.__name__, obj) - if class_def not in self.imported_classes: + if ( + class_def + not in self.imported_classes + + self.package.omit_classes + ): self.imported_classes.append(class_def) intra_pkg_objs[imported.object.__name__].add(obj) else: @@ -472,7 +476,7 @@ def _get_used_symbols(self, function_bodies, module): local_class.__name__ in used_symbols and local_class not in self.classes ): - if issubclass(local_class, (BaseInterface, TraitedSpec)): + if issubclass(local_class, (BaseInterface, BaseTraitedSpec)): continue self.classes.append(local_class) class_body = inspect.getsource(local_class) diff --git a/nipype2pydra/testing.py b/nipype2pydra/testing.py index be55f21c..063637a8 100644 --- a/nipype2pydra/testing.py +++ b/nipype2pydra/testing.py @@ -5,8 +5,8 @@ def for_testing_line_number_of_function(): import logging # noqa: E402 import asyncio # noqa: E402 -from pydra.engine.core import Result, TaskBase # noqa: E402 -from pydra.engine.workers import ConcurrentFuturesWorker # noqa: E402 +from pydra.workers.cf import ConcurrentFuturesWorker # noqa: E402 +from pydra.engine.result import Result # noqa: E402 logger = logging.getLogger("pydra") @@ -30,7 +30,7 @@ def run_el(self, runnable, rerun=False, **kwargs): """Run a task.""" return self.exec_with_timeout(runnable, rerun=rerun) - async def exec_with_timeout(self, runnable: TaskBase, rerun=False): + async def exec_with_timeout(self, runnable, rerun=False): try: result = await asyncio.wait_for( self.exec_as_coro(runnable, rerun=rerun), timeout=self.timeout diff --git 
a/nipype2pydra/utils/__init__.py b/nipype2pydra/utils/__init__.py index c6bb9b95..5da12c13 100644 --- a/nipype2pydra/utils/__init__.py +++ b/nipype2pydra/utils/__init__.py @@ -23,5 +23,6 @@ find_super_method, strip_comments, min_indentation, + type_to_str, INBUILT_NIPYPE_TRAIT_NAMES, ) diff --git a/nipype2pydra/utils/misc.py b/nipype2pydra/utils/misc.py index a49d6315..d6214c00 100644 --- a/nipype2pydra/utils/misc.py +++ b/nipype2pydra/utils/misc.py @@ -10,6 +10,7 @@ from pathlib import Path from fileformats.core import FileSet, from_mime from fileformats.core.mixin import WithClassifiers +from pydra.utils.typing import is_union, is_optional from ..exceptions import ( UnmatchedParensException, UnmatchedQuoteException, @@ -22,7 +23,7 @@ from importlib import import_module from logging import getLogger -from pydra.engine.specs import MultiInputObj +from pydra.utils.typing import MultiInputObj logger = getLogger("nipype2pydra") @@ -582,3 +583,29 @@ def find_super_method( def strip_comments(src: str) -> str: return re.sub(r"^\s+#.*", "", src, flags=re.MULTILINE) + + +def type_to_str(type_: type, mandatory: bool = False) -> str: + """Convert a type to a string representation""" + if hasattr(type_, "__name__"): + type_str = type_.__name__ + else: + type_str = str(type_) + if is_union(type_): + args = [t if t is not type(None) else None for t in ty.get_args(type_)] + if not mandatory and not is_optional(type_): + args.append(None) + return " | ".join( + type_to_str(a, mandatory=True) if a is not None else "None" for a in args + ) + if origin := ty.get_origin(type_): + args = [type_to_str(arg, mandatory=True) for arg in ty.get_args(type_)] + type_str = f"{origin.__name__}[{', '.join(args)}]" + module = origin.__module__ + else: + module = type_.__module__ + if module == "typing": + type_str = "ty." 
+ type_str + if not mandatory: + type_str += " | None" + return type_str diff --git a/nipype2pydra/utils/tests/test_utils_imports.py b/nipype2pydra/utils/tests/test_utils_imports.py index b0ce1cc1..b3924ade 100644 --- a/nipype2pydra/utils/tests/test_utils_imports.py +++ b/nipype2pydra/utils/tests/test_utils_imports.py @@ -26,12 +26,12 @@ def test_import_statement2(): def test_import_statement3(): - import_stmt = "from pydra.engine.specs import MultiInputObj as MIO" + import_stmt = "from pydra.utils.typing import MultiInputObj as MIO" assert ImportStatement.matches(import_stmt) imports = parse_imports(import_stmt) assert len(imports) == 1 stmt = imports[0] - assert stmt.module_name == "pydra.engine.specs" + assert stmt.module_name == "pydra.utils.typing" assert stmt.imported["MIO"].name == "MultiInputObj" diff --git a/nipype2pydra/workflow.py b/nipype2pydra/workflow.py index a2bf9086..fff775e5 100644 --- a/nipype2pydra/workflow.py +++ b/nipype2pydra/workflow.py @@ -969,7 +969,7 @@ def test_{self.name}_build(): @pytest.mark.skip(reason="Appropriate inputs for this workflow haven't been specified yet") def test_{self.name}_run(): workflow = {self.name}({args_str}) - result = workflow(plugin='serial') + result = workflow(worker='debug') print(result.out) """ return code_str @@ -990,7 +990,7 @@ def test_used(self): + parse_imports( [ f"from {self.output_module} import {self.name}", - "from pydra.engine import Workflow", + "from pydra.compose import workflow", ] ) ), diff --git a/pyproject.toml b/pyproject.toml index 937e0c26..5e0a0ca5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,7 +11,7 @@ dependencies = [ "black", "attrs>=22.1.0", "nipype", - "pydra", + "pydra >=1.0a", "PyYAML>=6.0", "fileformats >=0.8", "fileformats-medimage >=0.4", @@ -50,11 +50,8 @@ test = [ "pytest-cov>=2.12.1", "fileformats-medimage-extras", "fileformats-medimage-afni", - "fileformats-medimage-ants", - "fileformats-medimage-freesurfer", - "fileformats-medimage-fsl", "niworkflows", - 
"mriqc", +# "mriqc", "nipy", "nireports", "nitime", @@ -93,7 +90,7 @@ exclude = ["/tests"] [tool.black] line-length = 88 -target-version = ['py37'] +target-version = ['py311'] exclude = "nipype2pydra/_version.py" [tool.codespell]